author    Linus Torvalds <torvalds@linux-foundation.org>  2015-09-01 22:45:46 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-09-01 22:45:46 -0400
commit    ae982073095a44f004d7ffb9f271077abef9dbcf (patch)
tree      26dfda416542c9dc60ab24029c16caecb964d627
parent    f1a3c0b933e7ff856223d6fcd7456d403e54e4e5 (diff)
parent    e625ccec1fa6c24620f38fd72d5b2fd62230ad2b (diff)
Merge tag 'pm+acpi-4.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management and ACPI updates from Rafael Wysocki:
 "From the number of commits perspective, the biggest items are ACPICA
  and cpufreq changes, with the latter taking the lead (over 50 commits).

  On the cpufreq front, there are many cleanups and minor fixes in the
  core and governors, driver updates etc.  We also have a new cpufreq
  driver for Mediatek MT8173 chips.

  ACPICA mostly updates its debug infrastructure and adds a number of
  fixes and cleanups for good measure.

  The Operating Performance Points (OPP) framework is updated with new
  DT bindings and support for them, among other things.

  We have a few updates of the generic power domains framework and a
  reorganization of the ACPI device enumeration code and bus type
  operations.

  And a lot of fixes and cleanups all over.

  Included is one branch from the MFD tree as it contains some
  PM-related driver core and ACPI PM changes that a few other commits
  are based on.

  Specifics:

   - ACPICA update to upstream revision 20150818 including method
     tracing extensions to allow more in-depth AML debugging in the
     kernel and a number of assorted fixes and cleanups (Bob Moore, Lv
     Zheng, Markus Elfring).

   - ACPI sysfs code updates and a documentation update related to AML
     method tracing (Lv Zheng).

   - ACPI EC driver fix related to serialized evaluations of _Qxx
     methods and ACPI tools updates allowing the EC userspace tool to
     be built from the kernel source (Lv Zheng).

   - ACPI processor driver updates preparing it for future introduction
     of CPPC support and ACPI PCC mailbox driver updates (Ashwin
     Chaugule).

   - ACPI interrupts enumeration fix for a regression related to the
     handling of IRQ attribute conflicts between MADT and the ACPI
     namespace (Jiang Liu).

   - Fixes related to ACPI device PM (Mika Westerberg, Srinidhi
     Kasagar).

   - ACPI device registration code reorganization to separate the
     sysfs-related code and bus type operations from the rest (Rafael J
     Wysocki).

   - Assorted cleanups in the ACPI core (Jarkko Nikula, Mathias Krause,
     Andy Shevchenko, Rafael J Wysocki, Nicolas Iooss).

   - ACPI cpufreq driver and ia64 cpufreq driver fixes and cleanups
     (Pan Xinhui, Rafael J Wysocki).

   - cpufreq core cleanups on top of the previous changes allowing it
     to preserve its sysfs directories over system suspend/resume
     (Viresh Kumar, Rafael J Wysocki, Sebastian Andrzej Siewior).

   - cpufreq fixes and cleanups related to governors (Viresh Kumar).

   - cpufreq updates (core and the cpufreq-dt driver) related to the
     turbo/boost mode support (Viresh Kumar, Bartlomiej
     Zolnierkiewicz).

   - New DT bindings for Operating Performance Points (OPP), support
     for them in the OPP framework and in the cpufreq-dt driver plus
     related OPP framework fixes and cleanups (Viresh Kumar).

   - cpufreq powernv driver updates (Shilpasri G Bhat).

   - New cpufreq driver for Mediatek MT8173 (Pi-Cheng Chen).

   - Assorted cpufreq driver (speedstep-lib, sfi, integrator) cleanups
     and fixes (Abhilash Jindal, Andrzej Hajda, Cristian Ardelean).

   - intel_pstate driver updates including Skylake-S support, support
     for enabling HW P-states per CPU and an additional vendor bypass
     list entry (Kristen Carlson Accardi, Chen Yu, Ethan Zhao).

   - cpuidle core fixes related to the handling of coupled idle states
     (Xunlei Pang).

   - intel_idle driver updates including Skylake Client support and
     support for freeze-mode-specific idle states (Len Brown).

   - Driver core updates related to power management (Andy Shevchenko,
     Rafael J Wysocki).

   - Generic power domains framework fixes and cleanups (Jon Hunter,
     Geert Uytterhoeven, Rajendra Nayak, Ulf Hansson).

   - Device PM QoS framework update to allow the latency tolerance
     setting to be exposed to user space via sysfs (Mika Westerberg).

   - devfreq support for PPMUv2 in Exynos5433 and a fix for an
     incorrect exynos-ppmu DT binding (Chanwoo Choi, Javier Martinez
     Canillas).

   - System sleep support updates (Alan Stern, Len Brown, SungEun Kim).

   - rockchip-io AVS support updates (Heiko Stuebner).

   - PM core clocks support fixup (Colin Ian King).

   - Power capping RAPL driver update including support for Skylake H/S
     and Broadwell-H (Radivoje Jovanovic, Seiichi Ikarashi).

   - Generic device properties framework fixes related to the handling
     of static (driver-provided) property sets (Andy Shevchenko).

   - turbostat and cpupower updates (Len Brown, Shilpasri G Bhat,
     Shreyas B Prabhu)"

* tag 'pm+acpi-4.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (180 commits)
  cpufreq: speedstep-lib: Use monotonic clock
  cpufreq: powernv: Increase the verbosity of OCC console messages
  cpufreq: sfi: use kmemdup rather than duplicating its implementation
  cpufreq: drop !cpufreq_driver check from cpufreq_parse_governor()
  cpufreq: rename cpufreq_real_policy as cpufreq_user_policy
  cpufreq: remove redundant 'policy' field from user_policy
  cpufreq: remove redundant 'governor' field from user_policy
  cpufreq: update user_policy.* on success
  cpufreq: use memcpy() to copy policy
  cpufreq: remove redundant CPUFREQ_INCOMPATIBLE notifier event
  cpufreq: mediatek: Add MT8173 cpufreq driver
  dt-bindings: mediatek: Add MT8173 CPU DVFS clock bindings
  PM / Domains: Fix typo in description of genpd_dev_pm_detach()
  PM / Domains: Remove unusable governor dummies
  PM / Domains: Make pm_genpd_init() available to modules
  PM / domains: Align column headers and data in pm_genpd_summary output
  powercap / RAPL: disable the 2nd power limit properly
  tools: cpupower: Fix error when running cpupower monitor
  PM / OPP: Drop unlikely before IS_ERR(_OR_NULL)
  PM / OPP: Fix static checker warning (broken 64bit big endian systems)
  ...
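For reference, the pull described above can be inspected locally with plain git. This is a minimal sketch, assuming a clone of the mainline kernel tree; the repository URL and commit IDs are the ones shown in the header of this page:

  $ git fetch git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm tag pm+acpi-4.3-rc1
  $ git show --stat pm+acpi-4.3-rc1                 # tag message and the merged head
  $ git log --oneline f1a3c0b933e7..e625ccec1fa6    # the 180 commits brought in by this pull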
-rw-r--r-- Documentation/acpi/method-tracing.txt | 204
-rw-r--r-- Documentation/cpu-freq/core.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/clock/mt8173-cpu-dvfs.txt | 83
-rw-r--r-- Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt | 43
-rw-r--r-- Documentation/devicetree/bindings/opp/opp.txt (renamed from Documentation/devicetree/bindings/power/opp.txt) | 40
-rw-r--r-- Documentation/devicetree/bindings/power/power_domain.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/power/rockchip-io-domain.txt | 14
-rw-r--r-- Documentation/power/devices.txt | 7
-rw-r--r-- Documentation/power/runtime_pm.txt | 4
-rw-r--r-- arch/powerpc/include/asm/opal-api.h | 12
-rw-r--r-- arch/x86/include/asm/msr-index.h | 6
-rw-r--r-- arch/x86/kernel/acpi/boot.c | 1
-rw-r--r-- drivers/acpi/Kconfig | 19
-rw-r--r-- drivers/acpi/Makefile | 8
-rw-r--r-- drivers/acpi/ac.c | 4
-rw-r--r-- drivers/acpi/acpi_ipmi.c | 4
-rw-r--r-- drivers/acpi/acpi_lpss.c | 38
-rw-r--r-- drivers/acpi/acpi_memhotplug.c | 5
-rw-r--r-- drivers/acpi/acpi_pad.c | 4
-rw-r--r-- drivers/acpi/acpi_processor.c | 2
-rw-r--r-- drivers/acpi/acpi_video.c | 4
-rw-r--r-- drivers/acpi/acpica/Makefile | 2
-rw-r--r-- drivers/acpi/acpica/acdebug.h | 26
-rw-r--r-- drivers/acpi/acpica/acdispat.h | 8
-rw-r--r-- drivers/acpi/acpica/acglobal.h | 20
-rw-r--r-- drivers/acpi/acpica/acinterp.h | 22
-rw-r--r-- drivers/acpi/acpica/aclocal.h | 28
-rw-r--r-- drivers/acpi/acpica/acmacros.h | 9
-rw-r--r-- drivers/acpi/acpica/acnamesp.h | 13
-rw-r--r-- drivers/acpi/acpica/acobject.h | 1
-rw-r--r-- drivers/acpi/acpica/acparser.h | 4
-rw-r--r-- drivers/acpi/acpica/acstruct.h | 2
-rw-r--r-- drivers/acpi/acpica/actables.h | 14
-rw-r--r-- drivers/acpi/acpica/acutils.h | 25
-rw-r--r-- drivers/acpi/acpica/dsargs.c | 4
-rw-r--r-- drivers/acpi/acpica/dscontrol.c | 2
-rw-r--r-- drivers/acpi/acpica/dsdebug.c | 231
-rw-r--r-- drivers/acpi/acpica/dsinit.c | 20
-rw-r--r-- drivers/acpi/acpica/dsmethod.c | 35
-rw-r--r-- drivers/acpi/acpica/dsopcode.c | 31
-rw-r--r-- drivers/acpi/acpica/dswload.c | 2
-rw-r--r-- drivers/acpi/acpica/dswload2.c | 2
-rw-r--r-- drivers/acpi/acpica/evregion.c | 22
-rw-r--r-- drivers/acpi/acpica/exconfig.c | 8
-rw-r--r-- drivers/acpi/acpica/excreate.c | 1
-rw-r--r-- drivers/acpi/acpica/exdebug.c | 324
-rw-r--r-- drivers/acpi/acpica/exdump.c | 5
-rw-r--r-- drivers/acpi/acpica/exresnte.c | 2
-rw-r--r-- drivers/acpi/acpica/exresolv.c | 16
-rw-r--r-- drivers/acpi/acpica/hwxfsleep.c | 15
-rw-r--r-- drivers/acpi/acpica/nseval.c | 4
-rw-r--r-- drivers/acpi/acpica/nsload.c | 16
-rw-r--r-- drivers/acpi/acpica/nsnames.c | 275
-rw-r--r-- drivers/acpi/acpica/nsparse.c | 42
-rw-r--r-- drivers/acpi/acpica/nsutils.c | 19
-rw-r--r-- drivers/acpi/acpica/nsxfname.c | 8
-rw-r--r-- drivers/acpi/acpica/psargs.c | 26
-rw-r--r-- drivers/acpi/acpica/psloop.c | 32
-rw-r--r-- drivers/acpi/acpica/psobject.c | 17
-rw-r--r-- drivers/acpi/acpica/psparse.c | 14
-rw-r--r-- drivers/acpi/acpica/psutils.c | 8
-rw-r--r-- drivers/acpi/acpica/psxface.c | 123
-rw-r--r-- drivers/acpi/acpica/rscreate.c | 3
-rw-r--r-- drivers/acpi/acpica/tbfadt.c | 6
-rw-r--r-- drivers/acpi/acpica/tbfind.c | 15
-rw-r--r-- drivers/acpi/acpica/tbinstal.c | 40
-rw-r--r-- drivers/acpi/acpica/tbutils.c | 73
-rw-r--r-- drivers/acpi/acpica/tbxfload.c | 93
-rw-r--r-- drivers/acpi/acpica/utdebug.c | 31
-rw-r--r-- drivers/acpi/acpica/utdelete.c | 3
-rw-r--r-- drivers/acpi/acpica/utfileio.c | 2
-rw-r--r-- drivers/acpi/acpica/utinit.c | 3
-rw-r--r-- drivers/acpi/acpica/utmisc.c | 4
-rw-r--r-- drivers/acpi/acpica/utnonansi.c | 380
-rw-r--r-- drivers/acpi/acpica/utstring.c | 342
-rw-r--r-- drivers/acpi/acpica/utxface.c | 12
-rw-r--r-- drivers/acpi/acpica/utxfinit.c | 11
-rw-r--r-- drivers/acpi/apei/apei-base.c | 4
-rw-r--r-- drivers/acpi/apei/einj.c | 4
-rw-r--r-- drivers/acpi/apei/erst-dbg.c | 4
-rw-r--r-- drivers/acpi/apei/erst.c | 4
-rw-r--r-- drivers/acpi/apei/ghes.c | 4
-rw-r--r-- drivers/acpi/apei/hest.c | 4
-rw-r--r-- drivers/acpi/battery.c | 4
-rw-r--r-- drivers/acpi/blacklist.c | 4
-rw-r--r-- drivers/acpi/bus.c | 408
-rw-r--r-- drivers/acpi/button.c | 4
-rw-r--r-- drivers/acpi/cm_sbs.c | 4
-rw-r--r-- drivers/acpi/container.c | 4
-rw-r--r-- drivers/acpi/debugfs.c | 2
-rw-r--r-- drivers/acpi/device_pm.c | 12
-rw-r--r-- drivers/acpi/device_sysfs.c | 521
-rw-r--r-- drivers/acpi/dock.c | 4
-rw-r--r-- drivers/acpi/ec.c | 86
-rw-r--r-- drivers/acpi/fan.c | 4
-rw-r--r-- drivers/acpi/hed.c | 4
-rw-r--r-- drivers/acpi/internal.h | 16
-rw-r--r-- drivers/acpi/numa.c | 4
-rw-r--r-- drivers/acpi/osl.c | 45
-rw-r--r-- drivers/acpi/pci_irq.c | 4
-rw-r--r-- drivers/acpi/pci_link.c | 20
-rw-r--r-- drivers/acpi/pci_root.c | 4
-rw-r--r-- drivers/acpi/pci_slot.c | 4
-rw-r--r-- drivers/acpi/power.c | 19
-rw-r--r-- drivers/acpi/processor_driver.c | 92
-rw-r--r-- drivers/acpi/processor_idle.c | 4
-rw-r--r-- drivers/acpi/processor_perflib.c | 10
-rw-r--r-- drivers/acpi/processor_thermal.c | 4
-rw-r--r-- drivers/acpi/processor_throttling.c | 4
-rw-r--r-- drivers/acpi/property.c | 5
-rw-r--r-- drivers/acpi/resource.c | 4
-rw-r--r-- drivers/acpi/sbs.c | 4
-rw-r--r-- drivers/acpi/scan.c | 860
-rw-r--r-- drivers/acpi/sysfs.c | 133
-rw-r--r-- drivers/acpi/tables.c | 4
-rw-r--r-- drivers/acpi/thermal.c | 4
-rw-r--r-- drivers/acpi/utils.c | 4
-rw-r--r-- drivers/base/core.c | 43
-rw-r--r-- drivers/base/dd.c | 20
-rw-r--r-- drivers/base/power/clock_ops.c | 4
-rw-r--r-- drivers/base/power/domain.c | 386
-rw-r--r-- drivers/base/power/main.c | 2
-rw-r--r-- drivers/base/power/opp.c | 1003
-rw-r--r-- drivers/base/power/power.h | 2
-rw-r--r-- drivers/base/power/qos.c | 37
-rw-r--r-- drivers/base/power/sysfs.c | 11
-rw-r--r-- drivers/base/property.c | 8
-rw-r--r-- drivers/cpufreq/Kconfig.arm | 7
-rw-r--r-- drivers/cpufreq/Makefile | 1
-rw-r--r-- drivers/cpufreq/acpi-cpufreq.c | 93
-rw-r--r-- drivers/cpufreq/cpufreq-dt.c | 73
-rw-r--r-- drivers/cpufreq/cpufreq.c | 419
-rw-r--r-- drivers/cpufreq/cpufreq_conservative.c | 25
-rw-r--r-- drivers/cpufreq/cpufreq_governor.c | 196
-rw-r--r-- drivers/cpufreq/cpufreq_governor.h | 40
-rw-r--r-- drivers/cpufreq/cpufreq_ondemand.c | 67
-rw-r--r-- drivers/cpufreq/cpufreq_opp.c | 4
-rw-r--r-- drivers/cpufreq/e_powersaver.c | 2
-rw-r--r-- drivers/cpufreq/freq_table.c | 15
-rw-r--r-- drivers/cpufreq/ia64-acpi-cpufreq.c | 20
-rw-r--r-- drivers/cpufreq/integrator-cpufreq.c | 18
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 20
-rw-r--r-- drivers/cpufreq/mt8173-cpufreq.c | 527
-rw-r--r-- drivers/cpufreq/powernow-k7.c | 4
-rw-r--r-- drivers/cpufreq/powernow-k8.c | 5
-rw-r--r-- drivers/cpufreq/powernv-cpufreq.c | 199
-rw-r--r-- drivers/cpufreq/ppc_cbe_cpufreq_pmi.c | 4
-rw-r--r-- drivers/cpufreq/sfi-cpufreq.c | 4
-rw-r--r-- drivers/cpufreq/speedstep-lib.c | 9
-rw-r--r-- drivers/cpuidle/coupled.c | 8
-rw-r--r-- drivers/cpuidle/cpuidle.c | 4
-rw-r--r-- drivers/cpuidle/cpuidle.h | 7
-rw-r--r-- drivers/devfreq/event/exynos-ppmu.c | 170
-rw-r--r-- drivers/devfreq/event/exynos-ppmu.h | 70
-rw-r--r-- drivers/dma/Kconfig | 8
-rw-r--r-- drivers/dma/Makefile | 1
-rw-r--r-- drivers/dma/idma64.c | 710
-rw-r--r-- drivers/dma/idma64.h | 233
-rw-r--r-- drivers/idle/intel_idle.c | 72
-rw-r--r-- drivers/mailbox/Kconfig | 1
-rw-r--r-- drivers/mailbox/pcc.c | 8
-rw-r--r-- drivers/mfd/Kconfig | 23
-rw-r--r-- drivers/mfd/Makefile | 3
-rw-r--r-- drivers/mfd/intel-lpss-acpi.c | 84
-rw-r--r-- drivers/mfd/intel-lpss-pci.c | 113
-rw-r--r-- drivers/mfd/intel-lpss.c | 524
-rw-r--r-- drivers/mfd/intel-lpss.h | 62
-rw-r--r-- drivers/mfd/mfd-core.c | 2
-rw-r--r-- drivers/power/avs/Kconfig | 2
-rw-r--r-- drivers/power/avs/rockchip-io-domain.c | 59
-rw-r--r-- drivers/powercap/intel_rapl.c | 8
-rw-r--r-- drivers/video/fbdev/pxafb.c | 1
-rw-r--r-- drivers/video/fbdev/sa1100fb.c | 1
-rw-r--r-- drivers/xen/xen-acpi-processor.c | 16
-rw-r--r-- include/acpi/acbuffer.h | 1
-rw-r--r-- include/acpi/acconfig.h | 4
-rw-r--r-- include/acpi/acexcep.h | 7
-rw-r--r-- include/acpi/acoutput.h | 21
-rw-r--r-- include/acpi/acpi_bus.h | 4
-rw-r--r-- include/acpi/acpi_drivers.h | 4
-rw-r--r-- include/acpi/acpiosxf.h | 6
-rw-r--r-- include/acpi/acpixf.h | 16
-rw-r--r-- include/acpi/actbl2.h | 17
-rw-r--r-- include/acpi/actypes.h | 13
-rw-r--r-- include/acpi/platform/acenv.h | 19
-rw-r--r-- include/acpi/platform/acenvex.h | 3
-rw-r--r-- include/acpi/platform/acmsvcex.h | 54
-rw-r--r-- include/acpi/platform/acwinex.h | 49
-rw-r--r-- include/acpi/processor.h | 59
-rw-r--r-- include/linux/acpi.h | 6
-rw-r--r-- include/linux/cpufreq.h | 28
-rw-r--r-- include/linux/cpuidle.h | 1
-rw-r--r-- include/linux/device.h | 2
-rw-r--r-- include/linux/klist.h | 1
-rw-r--r-- include/linux/of.h | 3
-rw-r--r-- include/linux/pm_domain.h | 9
-rw-r--r-- include/linux/pm_opp.h | 36
-rw-r--r-- include/linux/pm_qos.h | 5
-rw-r--r-- include/linux/pm_runtime.h | 6
-rw-r--r-- kernel/power/Kconfig | 10
-rw-r--r-- kernel/power/suspend.c | 2
-rw-r--r-- kernel/power/wakelock.c | 18
-rw-r--r-- lib/klist.c | 41
-rw-r--r-- tools/power/acpi/Makefile | 168
-rw-r--r-- tools/power/acpi/Makefile.config | 92
-rw-r--r-- tools/power/acpi/Makefile.rules | 37
-rw-r--r-- tools/power/acpi/tools/acpidump/Makefile | 53
-rw-r--r-- tools/power/acpi/tools/ec/Makefile | 33
-rw-r--r-- tools/power/cpupower/utils/cpufreq-set.c | 4
-rw-r--r-- tools/power/cpupower/utils/helpers/topology.c | 2
-rw-r--r-- tools/power/x86/turbostat/turbostat.8 | 5
-rw-r--r-- tools/power/x86/turbostat/turbostat.c | 100
212 files changed, 8402 insertions, 3335 deletions
diff --git a/Documentation/acpi/method-tracing.txt b/Documentation/acpi/method-tracing.txt
index f6efb1ea559a..c2505eefc878 100644
--- a/Documentation/acpi/method-tracing.txt
+++ b/Documentation/acpi/method-tracing.txt
@@ -1,26 +1,192 @@
1/sys/module/acpi/parameters/: 1ACPICA Trace Facility
2 2
3trace_method_name 3Copyright (C) 2015, Intel Corporation
4 The AML method name that the user wants to trace 4Author: Lv Zheng <lv.zheng@intel.com>
5 5
6trace_debug_layer
7 The temporary debug_layer used when tracing the method.
8 Using 0xffffffff by default if it is 0.
9 6
10trace_debug_level 7Abstract:
11 The temporary debug_level used when tracing the method.
12 Using 0x00ffffff by default if it is 0.
13 8
14trace_state 9This document describes the functions and the interfaces of the method
15 The status of the tracing feature. 10tracing facility.
11
121. Functionalities and usage examples:
13
14 ACPICA provides method tracing capability. And two functions are
15 currently implemented using this capability.
16
17 A. Log reducer
18 ACPICA subsystem provides debugging outputs when CONFIG_ACPI_DEBUG is
19 enabled. The debugging messages which are deployed via
20 ACPI_DEBUG_PRINT() macro can be reduced at 2 levels - per-component
21 level (known as debug layer, configured via
22 /sys/module/acpi/parameters/debug_layer) and per-type level (known as
23 debug level, configured via /sys/module/acpi/parameters/debug_level).
24
25 But when the particular layer/level is applied to the control method
26 evaluations, the quantity of the debugging outputs may still be too
27 large to be put into the kernel log buffer. The idea thus is worked out
28 to only enable the particular debug layer/level (normally more detailed)
29 logs when the control method evaluation is started, and disable the
30 detailed logging when the control method evaluation is stopped.
31
32 The following command examples illustrate the usage of the "log reducer"
33 functionality:
34 a. Filter out the debug layer/level matched logs when control methods
35 are being evaluated:
36 # cd /sys/module/acpi/parameters
37 # echo "0xXXXXXXXX" > trace_debug_layer
38 # echo "0xYYYYYYYY" > trace_debug_level
39 # echo "enable" > trace_state
40 b. Filter out the debug layer/level matched logs when the specified
41 control method is being evaluated:
42 # cd /sys/module/acpi/parameters
43 # echo "0xXXXXXXXX" > trace_debug_layer
44 # echo "0xYYYYYYYY" > trace_debug_level
45 # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
46 # echo "method" > /sys/module/acpi/parameters/trace_state
47 c. Filter out the debug layer/level matched logs when the specified
48 control method is being evaluated for the first time:
49 # cd /sys/module/acpi/parameters
50 # echo "0xXXXXXXXX" > trace_debug_layer
51 # echo "0xYYYYYYYY" > trace_debug_level
52 # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
53 # echo "method-once" > /sys/module/acpi/parameters/trace_state
54 Where:
55 0xXXXXXXXX/0xYYYYYYYY: Refer to Documentation/acpi/debug.txt for
56 possible debug layer/level masking values.
57 \PPPP.AAAA.TTTT.HHHH: Full path of a control method that can be found
58 in the ACPI namespace. It needn't be an entry
59 of a control method evaluation.
60
61 B. AML tracer
62
63 There are special log entries added by the method tracing facility at
64 the "trace points" the AML interpreter starts/stops to execute a control
65 method, or an AML opcode. Note that the format of the log entries are
66 subject to change:
67 [ 0.186427] exdebug-0398 ex_trace_point : Method Begin [0xf58394d8:\_SB.PCI0.LPCB.ECOK] execution.
68 [ 0.186630] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905c88:If] execution.
69 [ 0.186820] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905cc0:LEqual] execution.
70 [ 0.187010] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905a20:-NamePath-] execution.
71 [ 0.187214] exdebug-0398 ex_trace_point : Opcode End [0xf5905a20:-NamePath-] execution.
72 [ 0.187407] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905f60:One] execution.
73 [ 0.187594] exdebug-0398 ex_trace_point : Opcode End [0xf5905f60:One] execution.
74 [ 0.187789] exdebug-0398 ex_trace_point : Opcode End [0xf5905cc0:LEqual] execution.
75 [ 0.187980] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905cc0:Return] execution.
76 [ 0.188146] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905f60:One] execution.
77 [ 0.188334] exdebug-0398 ex_trace_point : Opcode End [0xf5905f60:One] execution.
78 [ 0.188524] exdebug-0398 ex_trace_point : Opcode End [0xf5905cc0:Return] execution.
79 [ 0.188712] exdebug-0398 ex_trace_point : Opcode End [0xf5905c88:If] execution.
80 [ 0.188903] exdebug-0398 ex_trace_point : Method End [0xf58394d8:\_SB.PCI0.LPCB.ECOK] execution.
16 81
17 "enabled" means this feature is enabled 82 Developers can utilize these special log entries to track the AML
18 and the AML method is traced every time it's executed. 83 interpretion, thus can aid issue debugging and performance tuning. Note
84 that, as the "AML tracer" logs are implemented via ACPI_DEBUG_PRINT()
85 macro, CONFIG_ACPI_DEBUG is also required to be enabled for enabling
86 "AML tracer" logs.
19 87
20 "1" means this feature is enabled and the AML method 88 The following command examples illustrate the usage of the "AML tracer"
21 will only be traced during the next execution. 89 functionality:
90 a. Filter out the method start/stop "AML tracer" logs when control
91 methods are being evaluated:
92 # cd /sys/module/acpi/parameters
93 # echo "0x80" > trace_debug_layer
94 # echo "0x10" > trace_debug_level
95 # echo "enable" > trace_state
96 b. Filter out the method start/stop "AML tracer" when the specified
97 control method is being evaluated:
98 # cd /sys/module/acpi/parameters
99 # echo "0x80" > trace_debug_layer
100 # echo "0x10" > trace_debug_level
101 # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
102 # echo "method" > trace_state
103 c. Filter out the method start/stop "AML tracer" logs when the specified
104 control method is being evaluated for the first time:
105 # cd /sys/module/acpi/parameters
106 # echo "0x80" > trace_debug_layer
107 # echo "0x10" > trace_debug_level
108 # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
109 # echo "method-once" > trace_state
110 d. Filter out the method/opcode start/stop "AML tracer" when the
111 specified control method is being evaluated:
112 # cd /sys/module/acpi/parameters
113 # echo "0x80" > trace_debug_layer
114 # echo "0x10" > trace_debug_level
115 # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
116 # echo "opcode" > trace_state
117 e. Filter out the method/opcode start/stop "AML tracer" when the
118 specified control method is being evaluated for the first time:
119 # cd /sys/module/acpi/parameters
120 # echo "0x80" > trace_debug_layer
121 # echo "0x10" > trace_debug_level
122 # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
123 # echo "opcode-opcode" > trace_state
22 124
23 "disabled" means this feature is disabled. 125 Note that all above method tracing facility related module parameters can
24 Users can enable/disable this debug tracing feature by 126 be used as the boot parameters, for example:
25 "echo string > /sys/module/acpi/parameters/trace_state". 127 acpi.trace_debug_layer=0x80 acpi.trace_debug_level=0x10 \
26 "string" should be one of "enable", "disable" and "1". 128 acpi.trace_method_name=\_SB.LID0._LID acpi.trace_state=opcode-once
129
1302. Interface descriptions:
131
132 All method tracing functions can be configured via ACPI module
133 parameters that are accessible at /sys/module/acpi/parameters/:
134
135 trace_method_name
136 The full path of the AML method that the user wants to trace.
137 Note that the full path shouldn't contain the trailing "_"s in its
138 name segments but may contain "\" to form an absolute path.
139
140 trace_debug_layer
141 The temporary debug_layer used when the tracing feature is enabled.
142 Using ACPI_EXECUTER (0x80) by default, which is the debug_layer
143 used to match all "AML tracer" logs.
144
145 trace_debug_level
146 The temporary debug_level used when the tracing feature is enabled.
147 Using ACPI_LV_TRACE_POINT (0x10) by default, which is the
148 debug_level used to match all "AML tracer" logs.
149
150 trace_state
151 The status of the tracing feature.
152 Users can enable/disable this debug tracing feature by executing
153 the following command:
154 # echo string > /sys/module/acpi/parameters/trace_state
155 Where "string" should be one of the followings:
156 "disable"
157 Disable the method tracing feature.
158 "enable"
159 Enable the method tracing feature.
160 ACPICA debugging messages matching
161 "trace_debug_layer/trace_debug_level" during any method
162 execution will be logged.
163 "method"
164 Enable the method tracing feature.
165 ACPICA debugging messages matching
166 "trace_debug_layer/trace_debug_level" during method execution
167 of "trace_method_name" will be logged.
168 "method-once"
169 Enable the method tracing feature.
170 ACPICA debugging messages matching
171 "trace_debug_layer/trace_debug_level" during method execution
172 of "trace_method_name" will be logged only once.
173 "opcode"
174 Enable the method tracing feature.
175 ACPICA debugging messages matching
176 "trace_debug_layer/trace_debug_level" during method/opcode
177 execution of "trace_method_name" will be logged.
178 "opcode-once"
179 Enable the method tracing feature.
180 ACPICA debugging messages matching
181 "trace_debug_layer/trace_debug_level" during method/opcode
182 execution of "trace_method_name" will be logged only once.
183 Note that, the difference between the "enable" and other feature
184 enabling options are:
185 1. When "enable" is specified, since
186 "trace_debug_layer/trace_debug_level" shall apply to all control
187 method evaluations, after configuring "trace_state" to "enable",
188 "trace_method_name" will be reset to NULL.
189 2. When "method/opcode" is specified, if
190 "trace_method_name" is NULL when "trace_state" is configured to
191 these options, the "trace_debug_layer/trace_debug_level" will
192 apply to all control method evaluations.
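Putting the interface description above together, the following is a minimal usage sketch; it assumes a kernel built with CONFIG_ACPI_DEBUG and relies on the documented defaults for trace_debug_layer and trace_debug_level, and the method path is only an example reused from the boot-parameter sample above:

   # cd /sys/module/acpi/parameters
   # echo "\_SB.LID0._LID" > trace_method_name
   # echo "method-once" > trace_state
   (trigger an evaluation of the method, e.g. by opening and closing the lid)
   # dmesg | grep ex_trace_point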
diff --git a/Documentation/cpu-freq/core.txt b/Documentation/cpu-freq/core.txt
index 70933eadc308..ba78e7c2a069 100644
--- a/Documentation/cpu-freq/core.txt
+++ b/Documentation/cpu-freq/core.txt
@@ -55,16 +55,13 @@ transition notifiers.
55---------------------------- 55----------------------------
56 56
57These are notified when a new policy is intended to be set. Each 57These are notified when a new policy is intended to be set. Each
58CPUFreq policy notifier is called three times for a policy transition: 58CPUFreq policy notifier is called twice for a policy transition:
59 59
601.) During CPUFREQ_ADJUST all CPUFreq notifiers may change the limit if 601.) During CPUFREQ_ADJUST all CPUFreq notifiers may change the limit if
61 they see a need for this - may it be thermal considerations or 61 they see a need for this - may it be thermal considerations or
62 hardware limitations. 62 hardware limitations.
63 63
642.) During CPUFREQ_INCOMPATIBLE only changes may be done in order to avoid 642.) And during CPUFREQ_NOTIFY all notifiers are informed of the new policy
65 hardware failure.
66
673.) And during CPUFREQ_NOTIFY all notifiers are informed of the new policy
68 - if two hardware drivers failed to agree on a new policy before this 65 - if two hardware drivers failed to agree on a new policy before this
69 stage, the incompatible hardware shall be shut down, and the user 66 stage, the incompatible hardware shall be shut down, and the user
70 informed of this. 67 informed of this.
diff --git a/Documentation/devicetree/bindings/clock/mt8173-cpu-dvfs.txt b/Documentation/devicetree/bindings/clock/mt8173-cpu-dvfs.txt
new file mode 100644
index 000000000000..52b457c23eed
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/mt8173-cpu-dvfs.txt
@@ -0,0 +1,83 @@
1Device Tree Clock bindins for CPU DVFS of Mediatek MT8173 SoC
2
3Required properties:
4- clocks: A list of phandle + clock-specifier pairs for the clocks listed in clock names.
5- clock-names: Should contain the following:
6 "cpu" - The multiplexer for clock input of CPU cluster.
7 "intermediate" - A parent of "cpu" clock which is used as "intermediate" clock
8 source (usually MAINPLL) when the original CPU PLL is under
9 transition and not stable yet.
10 Please refer to Documentation/devicetree/bindings/clk/clock-bindings.txt for
11 generic clock consumer properties.
12- proc-supply: Regulator for Vproc of CPU cluster.
13
14Optional properties:
15- sram-supply: Regulator for Vsram of CPU cluster. When present, the cpufreq driver
16 needs to do "voltage tracking" to step by step scale up/down Vproc and
17 Vsram to fit SoC specific needs. When absent, the voltage scaling
18 flow is handled by hardware, hence no software "voltage tracking" is
19 needed.
20
21Example:
22--------
23 cpu0: cpu@0 {
24 device_type = "cpu";
25 compatible = "arm,cortex-a53";
26 reg = <0x000>;
27 enable-method = "psci";
28 cpu-idle-states = <&CPU_SLEEP_0>;
29 clocks = <&infracfg CLK_INFRA_CA53SEL>,
30 <&apmixedsys CLK_APMIXED_MAINPLL>;
31 clock-names = "cpu", "intermediate";
32 };
33
34 cpu1: cpu@1 {
35 device_type = "cpu";
36 compatible = "arm,cortex-a53";
37 reg = <0x001>;
38 enable-method = "psci";
39 cpu-idle-states = <&CPU_SLEEP_0>;
40 clocks = <&infracfg CLK_INFRA_CA53SEL>,
41 <&apmixedsys CLK_APMIXED_MAINPLL>;
42 clock-names = "cpu", "intermediate";
43 };
44
45 cpu2: cpu@100 {
46 device_type = "cpu";
47 compatible = "arm,cortex-a57";
48 reg = <0x100>;
49 enable-method = "psci";
50 cpu-idle-states = <&CPU_SLEEP_0>;
51 clocks = <&infracfg CLK_INFRA_CA57SEL>,
52 <&apmixedsys CLK_APMIXED_MAINPLL>;
53 clock-names = "cpu", "intermediate";
54 };
55
56 cpu3: cpu@101 {
57 device_type = "cpu";
58 compatible = "arm,cortex-a57";
59 reg = <0x101>;
60 enable-method = "psci";
61 cpu-idle-states = <&CPU_SLEEP_0>;
62 clocks = <&infracfg CLK_INFRA_CA57SEL>,
63 <&apmixedsys CLK_APMIXED_MAINPLL>;
64 clock-names = "cpu", "intermediate";
65 };
66
67 &cpu0 {
68 proc-supply = <&mt6397_vpca15_reg>;
69 };
70
71 &cpu1 {
72 proc-supply = <&mt6397_vpca15_reg>;
73 };
74
75 &cpu2 {
76 proc-supply = <&da9211_vcpu_reg>;
77 sram-supply = <&mt6397_vsramca7_reg>;
78 };
79
80 &cpu3 {
81 proc-supply = <&da9211_vcpu_reg>;
82 sram-supply = <&mt6397_vsramca7_reg>;
83 };
diff --git a/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt b/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
index b54bf3a2ff57..3e36c1d11386 100644
--- a/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
+++ b/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
@@ -11,15 +11,14 @@ to various devfreq devices. The devfreq devices would use the event data when
11derterming the current state of each IP. 11derterming the current state of each IP.
12 12
13Required properties: 13Required properties:
14- compatible: Should be "samsung,exynos-ppmu". 14- compatible: Should be "samsung,exynos-ppmu" or "samsung,exynos-ppmu-v2.
15- reg: physical base address of each PPMU and length of memory mapped region. 15- reg: physical base address of each PPMU and length of memory mapped region.
16 16
17Optional properties: 17Optional properties:
18- clock-names : the name of clock used by the PPMU, "ppmu" 18- clock-names : the name of clock used by the PPMU, "ppmu"
19- clocks : phandles for clock specified in "clock-names" property 19- clocks : phandles for clock specified in "clock-names" property
20- #clock-cells: should be 1.
21 20
22Example1 : PPMU nodes in exynos3250.dtsi are listed below. 21Example1 : PPMUv1 nodes in exynos3250.dtsi are listed below.
23 22
24 ppmu_dmc0: ppmu_dmc0@106a0000 { 23 ppmu_dmc0: ppmu_dmc0@106a0000 {
25 compatible = "samsung,exynos-ppmu"; 24 compatible = "samsung,exynos-ppmu";
@@ -108,3 +107,41 @@ Example2 : Events of each PPMU node in exynos3250-rinato.dts are listed below.
108 }; 107 };
109 }; 108 };
110 }; 109 };
110
111Example3 : PPMUv2 nodes in exynos5433.dtsi are listed below.
112
113 ppmu_d0_cpu: ppmu_d0_cpu@10480000 {
114 compatible = "samsung,exynos-ppmu-v2";
115 reg = <0x10480000 0x2000>;
116 status = "disabled";
117 };
118
119 ppmu_d0_general: ppmu_d0_general@10490000 {
120 compatible = "samsung,exynos-ppmu-v2";
121 reg = <0x10490000 0x2000>;
122 status = "disabled";
123 };
124
125 ppmu_d0_rt: ppmu_d0_rt@104a0000 {
126 compatible = "samsung,exynos-ppmu-v2";
127 reg = <0x104a0000 0x2000>;
128 status = "disabled";
129 };
130
131 ppmu_d1_cpu: ppmu_d1_cpu@104b0000 {
132 compatible = "samsung,exynos-ppmu-v2";
133 reg = <0x104b0000 0x2000>;
134 status = "disabled";
135 };
136
137 ppmu_d1_general: ppmu_d1_general@104c0000 {
138 compatible = "samsung,exynos-ppmu-v2";
139 reg = <0x104c0000 0x2000>;
140 status = "disabled";
141 };
142
143 ppmu_d1_rt: ppmu_d1_rt@104d0000 {
144 compatible = "samsung,exynos-ppmu-v2";
145 reg = <0x104d0000 0x2000>;
146 status = "disabled";
147 };
diff --git a/Documentation/devicetree/bindings/power/opp.txt b/Documentation/devicetree/bindings/opp/opp.txt
index 0d5e7c978121..0cb44dc21f97 100644
--- a/Documentation/devicetree/bindings/power/opp.txt
+++ b/Documentation/devicetree/bindings/opp/opp.txt
@@ -88,7 +88,7 @@ This defines voltage-current-frequency combinations along with other related
88properties. 88properties.
89 89
90Required properties: 90Required properties:
91- opp-hz: Frequency in Hz 91- opp-hz: Frequency in Hz, expressed as a 64-bit big-endian integer.
92 92
93Optional properties: 93Optional properties:
94- opp-microvolt: voltage in micro Volts. 94- opp-microvolt: voltage in micro Volts.
@@ -158,20 +158,20 @@ Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together.
158 opp-shared; 158 opp-shared;
159 159
160 opp00 { 160 opp00 {
161 opp-hz = <1000000000>; 161 opp-hz = /bits/ 64 <1000000000>;
162 opp-microvolt = <970000 975000 985000>; 162 opp-microvolt = <970000 975000 985000>;
163 opp-microamp = <70000>; 163 opp-microamp = <70000>;
164 clock-latency-ns = <300000>; 164 clock-latency-ns = <300000>;
165 opp-suspend; 165 opp-suspend;
166 }; 166 };
167 opp01 { 167 opp01 {
168 opp-hz = <1100000000>; 168 opp-hz = /bits/ 64 <1100000000>;
169 opp-microvolt = <980000 1000000 1010000>; 169 opp-microvolt = <980000 1000000 1010000>;
170 opp-microamp = <80000>; 170 opp-microamp = <80000>;
171 clock-latency-ns = <310000>; 171 clock-latency-ns = <310000>;
172 }; 172 };
173 opp02 { 173 opp02 {
174 opp-hz = <1200000000>; 174 opp-hz = /bits/ 64 <1200000000>;
175 opp-microvolt = <1025000>; 175 opp-microvolt = <1025000>;
176 clock-latency-ns = <290000>; 176 clock-latency-ns = <290000>;
177 turbo-mode; 177 turbo-mode;
@@ -237,20 +237,20 @@ independently.
237 */ 237 */
238 238
239 opp00 { 239 opp00 {
240 opp-hz = <1000000000>; 240 opp-hz = /bits/ 64 <1000000000>;
241 opp-microvolt = <970000 975000 985000>; 241 opp-microvolt = <970000 975000 985000>;
242 opp-microamp = <70000>; 242 opp-microamp = <70000>;
243 clock-latency-ns = <300000>; 243 clock-latency-ns = <300000>;
244 opp-suspend; 244 opp-suspend;
245 }; 245 };
246 opp01 { 246 opp01 {
247 opp-hz = <1100000000>; 247 opp-hz = /bits/ 64 <1100000000>;
248 opp-microvolt = <980000 1000000 1010000>; 248 opp-microvolt = <980000 1000000 1010000>;
249 opp-microamp = <80000>; 249 opp-microamp = <80000>;
250 clock-latency-ns = <310000>; 250 clock-latency-ns = <310000>;
251 }; 251 };
252 opp02 { 252 opp02 {
253 opp-hz = <1200000000>; 253 opp-hz = /bits/ 64 <1200000000>;
254 opp-microvolt = <1025000>; 254 opp-microvolt = <1025000>;
255 opp-microamp = <90000; 255 opp-microamp = <90000;
256 lock-latency-ns = <290000>; 256 lock-latency-ns = <290000>;
@@ -313,20 +313,20 @@ DVFS state together.
313 opp-shared; 313 opp-shared;
314 314
315 opp00 { 315 opp00 {
316 opp-hz = <1000000000>; 316 opp-hz = /bits/ 64 <1000000000>;
317 opp-microvolt = <970000 975000 985000>; 317 opp-microvolt = <970000 975000 985000>;
318 opp-microamp = <70000>; 318 opp-microamp = <70000>;
319 clock-latency-ns = <300000>; 319 clock-latency-ns = <300000>;
320 opp-suspend; 320 opp-suspend;
321 }; 321 };
322 opp01 { 322 opp01 {
323 opp-hz = <1100000000>; 323 opp-hz = /bits/ 64 <1100000000>;
324 opp-microvolt = <980000 1000000 1010000>; 324 opp-microvolt = <980000 1000000 1010000>;
325 opp-microamp = <80000>; 325 opp-microamp = <80000>;
326 clock-latency-ns = <310000>; 326 clock-latency-ns = <310000>;
327 }; 327 };
328 opp02 { 328 opp02 {
329 opp-hz = <1200000000>; 329 opp-hz = /bits/ 64 <1200000000>;
330 opp-microvolt = <1025000>; 330 opp-microvolt = <1025000>;
331 opp-microamp = <90000>; 331 opp-microamp = <90000>;
332 clock-latency-ns = <290000>; 332 clock-latency-ns = <290000>;
@@ -339,20 +339,20 @@ DVFS state together.
339 opp-shared; 339 opp-shared;
340 340
341 opp10 { 341 opp10 {
342 opp-hz = <1300000000>; 342 opp-hz = /bits/ 64 <1300000000>;
343 opp-microvolt = <1045000 1050000 1055000>; 343 opp-microvolt = <1045000 1050000 1055000>;
344 opp-microamp = <95000>; 344 opp-microamp = <95000>;
345 clock-latency-ns = <400000>; 345 clock-latency-ns = <400000>;
346 opp-suspend; 346 opp-suspend;
347 }; 347 };
348 opp11 { 348 opp11 {
349 opp-hz = <1400000000>; 349 opp-hz = /bits/ 64 <1400000000>;
350 opp-microvolt = <1075000>; 350 opp-microvolt = <1075000>;
351 opp-microamp = <100000>; 351 opp-microamp = <100000>;
352 clock-latency-ns = <400000>; 352 clock-latency-ns = <400000>;
353 }; 353 };
354 opp12 { 354 opp12 {
355 opp-hz = <1500000000>; 355 opp-hz = /bits/ 64 <1500000000>;
356 opp-microvolt = <1010000 1100000 1110000>; 356 opp-microvolt = <1010000 1100000 1110000>;
357 opp-microamp = <95000>; 357 opp-microamp = <95000>;
358 clock-latency-ns = <400000>; 358 clock-latency-ns = <400000>;
@@ -379,7 +379,7 @@ Example 4: Handling multiple regulators
379 opp-shared; 379 opp-shared;
380 380
381 opp00 { 381 opp00 {
382 opp-hz = <1000000000>; 382 opp-hz = /bits/ 64 <1000000000>;
383 opp-microvolt = <970000>, /* Supply 0 */ 383 opp-microvolt = <970000>, /* Supply 0 */
384 <960000>, /* Supply 1 */ 384 <960000>, /* Supply 1 */
385 <960000>; /* Supply 2 */ 385 <960000>; /* Supply 2 */
@@ -392,7 +392,7 @@ Example 4: Handling multiple regulators
392 /* OR */ 392 /* OR */
393 393
394 opp00 { 394 opp00 {
395 opp-hz = <1000000000>; 395 opp-hz = /bits/ 64 <1000000000>;
396 opp-microvolt = <970000 975000 985000>, /* Supply 0 */ 396 opp-microvolt = <970000 975000 985000>, /* Supply 0 */
397 <960000 965000 975000>, /* Supply 1 */ 397 <960000 965000 975000>, /* Supply 1 */
398 <960000 965000 975000>; /* Supply 2 */ 398 <960000 965000 975000>; /* Supply 2 */
@@ -405,7 +405,7 @@ Example 4: Handling multiple regulators
405 /* OR */ 405 /* OR */
406 406
407 opp00 { 407 opp00 {
408 opp-hz = <1000000000>; 408 opp-hz = /bits/ 64 <1000000000>;
409 opp-microvolt = <970000 975000 985000>, /* Supply 0 */ 409 opp-microvolt = <970000 975000 985000>, /* Supply 0 */
410 <960000 965000 975000>, /* Supply 1 */ 410 <960000 965000 975000>, /* Supply 1 */
411 <960000 965000 975000>; /* Supply 2 */ 411 <960000 965000 975000>; /* Supply 2 */
@@ -437,12 +437,12 @@ Example 5: Multiple OPP tables
437 opp-shared; 437 opp-shared;
438 438
439 opp00 { 439 opp00 {
440 opp-hz = <600000000>; 440 opp-hz = /bits/ 64 <600000000>;
441 ... 441 ...
442 }; 442 };
443 443
444 opp01 { 444 opp01 {
445 opp-hz = <800000000>; 445 opp-hz = /bits/ 64 <800000000>;
446 ... 446 ...
447 }; 447 };
448 }; 448 };
@@ -453,12 +453,12 @@ Example 5: Multiple OPP tables
453 opp-shared; 453 opp-shared;
454 454
455 opp10 { 455 opp10 {
456 opp-hz = <1000000000>; 456 opp-hz = /bits/ 64 <1000000000>;
457 ... 457 ...
458 }; 458 };
459 459
460 opp11 { 460 opp11 {
461 opp-hz = <1100000000>; 461 opp-hz = /bits/ 64 <1100000000>;
462 ... 462 ...
463 }; 463 };
464 }; 464 };
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
index 0f8ed3710c66..025b5e7df61c 100644
--- a/Documentation/devicetree/bindings/power/power_domain.txt
+++ b/Documentation/devicetree/bindings/power/power_domain.txt
@@ -48,7 +48,7 @@ Example 2:
48 #power-domain-cells = <1>; 48 #power-domain-cells = <1>;
49 }; 49 };
50 50
51 child: power-controller@12340000 { 51 child: power-controller@12341000 {
52 compatible = "foo,power-controller"; 52 compatible = "foo,power-controller";
53 reg = <0x12341000 0x1000>; 53 reg = <0x12341000 0x1000>;
54 power-domains = <&parent 0>; 54 power-domains = <&parent 0>;
diff --git a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
index 8b70db103ca7..b8627e763dba 100644
--- a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
+++ b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
@@ -33,6 +33,8 @@ Required properties:
33- compatible: should be one of: 33- compatible: should be one of:
34 - "rockchip,rk3188-io-voltage-domain" for rk3188 34 - "rockchip,rk3188-io-voltage-domain" for rk3188
35 - "rockchip,rk3288-io-voltage-domain" for rk3288 35 - "rockchip,rk3288-io-voltage-domain" for rk3288
36 - "rockchip,rk3368-io-voltage-domain" for rk3368
37 - "rockchip,rk3368-pmu-io-voltage-domain" for rk3368 pmu-domains
36- rockchip,grf: phandle to the syscon managing the "general register files" 38- rockchip,grf: phandle to the syscon managing the "general register files"
37 39
38 40
@@ -64,6 +66,18 @@ Possible supplies for rk3288:
64- sdcard-supply: The supply connected to SDMMC0_VDD. 66- sdcard-supply: The supply connected to SDMMC0_VDD.
65- wifi-supply: The supply connected to APIO3_VDD. Also known as SDIO0. 67- wifi-supply: The supply connected to APIO3_VDD. Also known as SDIO0.
66 68
69Possible supplies for rk3368:
70- audio-supply: The supply connected to APIO3_VDD.
71- dvp-supply: The supply connected to DVPIO_VDD.
72- flash0-supply: The supply connected to FLASH0_VDD. Typically for eMMC
73- gpio30-supply: The supply connected to APIO1_VDD.
74- gpio1830 The supply connected to APIO4_VDD.
75- sdcard-supply: The supply connected to SDMMC0_VDD.
76- wifi-supply: The supply connected to APIO2_VDD. Also known as SDIO0.
77
78Possible supplies for rk3368 pmu-domains:
79- pmu-supply: The supply connected to PMUIO_VDD.
80- vop-supply: The supply connected to LCDC_VDD.
67 81
68Example: 82Example:
69 83
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index d172bce0fd49..8ba6625fdd63 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -341,6 +341,13 @@ the phases are:
341 and is entirely responsible for bringing the device back to the 341 and is entirely responsible for bringing the device back to the
342 functional state as appropriate. 342 functional state as appropriate.
343 343
344 Note that this direct-complete procedure applies even if the device is
345 disabled for runtime PM; only the runtime-PM status matters. It follows
346 that if a device has system-sleep callbacks but does not support runtime
347 PM, then its prepare callback must never return a positive value. This
348 is because all devices are initially set to runtime-suspended with
349 runtime PM disabled.
350
344 2. The suspend methods should quiesce the device to stop it from performing 351 2. The suspend methods should quiesce the device to stop it from performing
345 I/O. They also may save the device registers and put it into the 352 I/O. They also may save the device registers and put it into the
346 appropriate low-power state, depending on the bus type the device is on, 353 appropriate low-power state, depending on the bus type the device is on,
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index e76dc0ad4d2b..0784bc3a2ab5 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -445,10 +445,6 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
445 bool pm_runtime_status_suspended(struct device *dev); 445 bool pm_runtime_status_suspended(struct device *dev);
446 - return true if the device's runtime PM status is 'suspended' 446 - return true if the device's runtime PM status is 'suspended'
447 447
448 bool pm_runtime_suspended_if_enabled(struct device *dev);
449 - return true if the device's runtime PM status is 'suspended' and its
450 'power.disable_depth' field is equal to 1
451
452 void pm_runtime_allow(struct device *dev); 448 void pm_runtime_allow(struct device *dev);
453 - set the power.runtime_auto flag for the device and decrease its usage 449 - set the power.runtime_auto flag for the device and decrease its usage
454 counter (used by the /sys/devices/.../power/control interface to 450 counter (used by the /sys/devices/.../power/control interface to
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index e9e4c52f3685..64dc9f547fb6 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -361,6 +361,7 @@ enum opal_msg_type {
361 OPAL_MSG_HMI_EVT, 361 OPAL_MSG_HMI_EVT,
362 OPAL_MSG_DPO, 362 OPAL_MSG_DPO,
363 OPAL_MSG_PRD, 363 OPAL_MSG_PRD,
364 OPAL_MSG_OCC,
364 OPAL_MSG_TYPE_MAX, 365 OPAL_MSG_TYPE_MAX,
365}; 366};
366 367
@@ -700,6 +701,17 @@ struct opal_prd_msg_header {
700 701
701struct opal_prd_msg; 702struct opal_prd_msg;
702 703
704#define OCC_RESET 0
705#define OCC_LOAD 1
706#define OCC_THROTTLE 2
707#define OCC_MAX_THROTTLE_STATUS 5
708
709struct opal_occ_msg {
710 __be64 type;
711 __be64 chip;
712 __be64 throttle_status;
713};
714
703/* 715/*
704 * SG entries 716 * SG entries
705 * 717 *
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index fcd17c1fc0c6..c1c0a1c14344 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -184,6 +184,12 @@
184#define MSR_PP1_ENERGY_STATUS 0x00000641 184#define MSR_PP1_ENERGY_STATUS 0x00000641
185#define MSR_PP1_POLICY 0x00000642 185#define MSR_PP1_POLICY 0x00000642
186 186
187#define MSR_CONFIG_TDP_NOMINAL 0x00000648
188#define MSR_CONFIG_TDP_LEVEL_1 0x00000649
189#define MSR_CONFIG_TDP_LEVEL_2 0x0000064A
190#define MSR_CONFIG_TDP_CONTROL 0x0000064B
191#define MSR_TURBO_ACTIVATION_RATIO 0x0000064C
192
187#define MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658 193#define MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658
188#define MSR_PKG_ANY_CORE_C0_RES 0x00000659 194#define MSR_PKG_ANY_CORE_C0_RES 0x00000659
189#define MSR_PKG_ANY_GFXE_C0_RES 0x0000065A 195#define MSR_PKG_ANY_GFXE_C0_RES 0x0000065A
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 75e8bad53798..ded848c20e05 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -445,6 +445,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
445 polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; 445 polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
446 446
447 mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); 447 mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
448 acpi_penalize_sci_irq(bus_irq, trigger, polarity);
448 449
449 /* 450 /*
450 * stash over-ride to indicate we've been here 451 * stash over-ride to indicate we've been here
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 114cf48085ab..54e9729f9634 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -189,17 +189,24 @@ config ACPI_DOCK
189 This driver supports ACPI-controlled docking stations and removable 189 This driver supports ACPI-controlled docking stations and removable
190 drive bays such as the IBM Ultrabay and the Dell Module Bay. 190 drive bays such as the IBM Ultrabay and the Dell Module Bay.
191 191
192config ACPI_PROCESSOR 192config ACPI_CPU_FREQ_PSS
193 tristate "Processor" 193 bool
194 select THERMAL 194 select THERMAL
195
196config ACPI_PROCESSOR_IDLE
197 bool
195 select CPU_IDLE 198 select CPU_IDLE
199
200config ACPI_PROCESSOR
201 tristate "Processor"
196 depends on X86 || IA64 202 depends on X86 || IA64
203 select ACPI_PROCESSOR_IDLE
204 select ACPI_CPU_FREQ_PSS
197 default y 205 default y
198 help 206 help
199 This driver installs ACPI as the idle handler for Linux and uses 207 This driver adds support for the ACPI Processor package. It is required
200 ACPI C2 and C3 processor states to save power on systems that 208 by several flavors of cpufreq performance-state, thermal, throttling and
201 support it. It is required by several flavors of cpufreq 209 idle drivers.
202 performance-state drivers.
203 210
204 To compile this driver as a module, choose M here: 211 To compile this driver as a module, choose M here:
205 the module will be called processor. 212 the module will be called processor.
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 8321430d7f24..b5e7cd8a9c71 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -24,7 +24,7 @@ acpi-y += nvs.o
24# Power management related files 24# Power management related files
25acpi-y += wakeup.o 25acpi-y += wakeup.o
26acpi-$(CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT) += sleep.o 26acpi-$(CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT) += sleep.o
27acpi-y += device_pm.o 27acpi-y += device_sysfs.o device_pm.o
28acpi-$(CONFIG_ACPI_SLEEP) += proc.o 28acpi-$(CONFIG_ACPI_SLEEP) += proc.o
29 29
30 30
@@ -80,8 +80,10 @@ obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
80obj-$(CONFIG_ACPI_BGRT) += bgrt.o 80obj-$(CONFIG_ACPI_BGRT) += bgrt.o
81 81
82# processor has its own "processor." module_param namespace 82# processor has its own "processor." module_param namespace
83processor-y := processor_driver.o processor_throttling.o 83processor-y := processor_driver.o
84processor-y += processor_idle.o processor_thermal.o 84processor-$(CONFIG_ACPI_PROCESSOR_IDLE) += processor_idle.o
85processor-$(CONFIG_ACPI_CPU_FREQ_PSS) += processor_throttling.o \
86 processor_thermal.o
85processor-$(CONFIG_CPU_FREQ) += processor_perflib.o 87processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
86 88
87obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o 89obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 9b5354a2cd08..f71b756b05c4 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -16,10 +16,6 @@
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details. 17 * General Public License for more details.
18 * 18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */ 20 */
25 21
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index ac0f52f6df2b..f77956c3fd45 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -17,10 +17,6 @@
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details. 18 * General Public License for more details.
19 * 19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
23 *
24 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 20 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25 */ 21 */
26 22
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 502454c24e69..f51bd0d0bc17 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -59,6 +59,7 @@ ACPI_MODULE_NAME("acpi_lpss");
59#define LPSS_CLK_DIVIDER BIT(2) 59#define LPSS_CLK_DIVIDER BIT(2)
60#define LPSS_LTR BIT(3) 60#define LPSS_LTR BIT(3)
61#define LPSS_SAVE_CTX BIT(4) 61#define LPSS_SAVE_CTX BIT(4)
62#define LPSS_NO_D3_DELAY BIT(5)
62 63
63struct lpss_private_data; 64struct lpss_private_data;
64 65
@@ -155,6 +156,10 @@ static const struct lpss_device_desc byt_pwm_dev_desc = {
155 .flags = LPSS_SAVE_CTX, 156 .flags = LPSS_SAVE_CTX,
156}; 157};
157 158
159static const struct lpss_device_desc bsw_pwm_dev_desc = {
160 .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
161};
162
158static const struct lpss_device_desc byt_uart_dev_desc = { 163static const struct lpss_device_desc byt_uart_dev_desc = {
159 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, 164 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
160 .clk_con_id = "baudclk", 165 .clk_con_id = "baudclk",
@@ -162,6 +167,14 @@ static const struct lpss_device_desc byt_uart_dev_desc = {
162 .setup = lpss_uart_setup, 167 .setup = lpss_uart_setup,
163}; 168};
164 169
170static const struct lpss_device_desc bsw_uart_dev_desc = {
171 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
172 | LPSS_NO_D3_DELAY,
173 .clk_con_id = "baudclk",
174 .prv_offset = 0x800,
175 .setup = lpss_uart_setup,
176};
177
165static const struct lpss_device_desc byt_spi_dev_desc = { 178static const struct lpss_device_desc byt_spi_dev_desc = {
166 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, 179 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
167 .prv_offset = 0x400, 180 .prv_offset = 0x400,
@@ -177,8 +190,15 @@ static const struct lpss_device_desc byt_i2c_dev_desc = {
177 .setup = byt_i2c_setup, 190 .setup = byt_i2c_setup,
178}; 191};
179 192
193static const struct lpss_device_desc bsw_i2c_dev_desc = {
194 .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
195 .prv_offset = 0x800,
196 .setup = byt_i2c_setup,
197};
198
180static struct lpss_device_desc bsw_spi_dev_desc = { 199static struct lpss_device_desc bsw_spi_dev_desc = {
181 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, 200 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
201 | LPSS_NO_D3_DELAY,
182 .prv_offset = 0x400, 202 .prv_offset = 0x400,
183 .setup = lpss_deassert_reset, 203 .setup = lpss_deassert_reset,
184}; 204};
@@ -213,11 +233,12 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
213 { "INT33FC", }, 233 { "INT33FC", },
214 234
215 /* Braswell LPSS devices */ 235 /* Braswell LPSS devices */
216 { "80862288", LPSS_ADDR(byt_pwm_dev_desc) }, 236 { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
217 { "8086228A", LPSS_ADDR(byt_uart_dev_desc) }, 237 { "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
218 { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) }, 238 { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
219 { "808622C1", LPSS_ADDR(byt_i2c_dev_desc) }, 239 { "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },
220 240
241 /* Broadwell LPSS devices */
221 { "INT3430", LPSS_ADDR(lpt_dev_desc) }, 242 { "INT3430", LPSS_ADDR(lpt_dev_desc) },
222 { "INT3431", LPSS_ADDR(lpt_dev_desc) }, 243 { "INT3431", LPSS_ADDR(lpt_dev_desc) },
223 { "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) }, 244 { "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
@@ -557,9 +578,14 @@ static void acpi_lpss_restore_ctx(struct device *dev,
557 * The following delay is needed or the subsequent write operations may 578 * The following delay is needed or the subsequent write operations may
558 * fail. The LPSS devices are actually PCI devices and the PCI spec 579 * fail. The LPSS devices are actually PCI devices and the PCI spec
559 * expects 10ms delay before the device can be accessed after D3 to D0 580 * expects 10ms delay before the device can be accessed after D3 to D0
560 * transition. 581 * transition. However some platforms like BSW does not need this delay.
561 */ 582 */
562 msleep(10); 583 unsigned int delay = 10; /* default 10ms delay */
584
585 if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
586 delay = 0;
587
588 msleep(delay);
563 589
564 for (i = 0; i < LPSS_PRV_REG_COUNT; i++) { 590 for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
565 unsigned long offset = i * sizeof(u32); 591 unsigned long offset = i * sizeof(u32);
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index ee28f4d15625..6b0d3ef7309c 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -16,11 +16,6 @@
16 * NON INFRINGEMENT. See the GNU General Public License for more 16 * NON INFRINGEMENT. See the GNU General Public License for more
17 * details. 17 * details.
18 * 18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 *
23 *
24 * ACPI based HotPlug driver that supports Memory Hotplug 19 * ACPI based HotPlug driver that supports Memory Hotplug
25 * This driver fields notifications from firmware for memory add 20 * This driver fields notifications from firmware for memory add
26 * and remove operations and alerts the VM of the affected memory 21 * and remove operations and alerts the VM of the affected memory
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 00b39802d7ec..ae307ff36acb 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -12,10 +12,6 @@
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 */ 15 */
20 16
21#include <linux/kernel.h> 17#include <linux/kernel.h>
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 92a5f738e370..985b8a83184e 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -485,7 +485,7 @@ static const struct acpi_device_id processor_device_ids[] = {
485 { } 485 { }
486}; 486};
487 487
488static struct acpi_scan_handler __refdata processor_handler = { 488static struct acpi_scan_handler processor_handler = {
489 .ids = processor_device_ids, 489 .ids = processor_device_ids,
490 .attach = acpi_processor_add, 490 .attach = acpi_processor_add,
491#ifdef CONFIG_ACPI_HOTPLUG_CPU 491#ifdef CONFIG_ACPI_HOTPLUG_CPU
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 8c2fe2f2f9fd..5778e8e4313a 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -17,10 +17,6 @@
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details. 18 * General Public License for more details.
19 * 19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
23 *
24 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 20 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25 */ 21 */
26 22
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index c1a963581dc0..fedcc16b56cc 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -11,6 +11,7 @@ obj-y += acpi.o
11acpi-y := \ 11acpi-y := \
12 dsargs.o \ 12 dsargs.o \
13 dscontrol.o \ 13 dscontrol.o \
14 dsdebug.o \
14 dsfield.o \ 15 dsfield.o \
15 dsinit.o \ 16 dsinit.o \
16 dsmethod.o \ 17 dsmethod.o \
@@ -164,6 +165,7 @@ acpi-y += \
164 utmath.o \ 165 utmath.o \
165 utmisc.o \ 166 utmisc.o \
166 utmutex.o \ 167 utmutex.o \
168 utnonansi.o \
167 utobject.o \ 169 utobject.o \
168 utosi.o \ 170 utosi.o \
169 utownerid.o \ 171 utownerid.o \
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 43685dd36c77..eb2e926d8218 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -67,9 +67,6 @@ struct acpi_db_execute_walk {
67}; 67};
68 68
69#define PARAM_LIST(pl) pl 69#define PARAM_LIST(pl) pl
70#define DBTEST_OUTPUT_LEVEL(lvl) if (acpi_gbl_db_opt_verbose)
71#define VERBOSE_PRINT(fp) DBTEST_OUTPUT_LEVEL(lvl) {\
72 acpi_os_printf PARAM_LIST(fp);}
73 70
74#define EX_NO_SINGLE_STEP 1 71#define EX_NO_SINGLE_STEP 1
75#define EX_SINGLE_STEP 2 72#define EX_SINGLE_STEP 2
@@ -77,10 +74,6 @@ struct acpi_db_execute_walk {
77/* 74/*
78 * dbxface - external debugger interfaces 75 * dbxface - external debugger interfaces
79 */ 76 */
80acpi_status acpi_db_initialize(void);
81
82void acpi_db_terminate(void);
83
84acpi_status 77acpi_status
85acpi_db_single_step(struct acpi_walk_state *walk_state, 78acpi_db_single_step(struct acpi_walk_state *walk_state,
86 union acpi_parse_object *op, u32 op_type); 79 union acpi_parse_object *op, u32 op_type);
@@ -102,6 +95,8 @@ void acpi_db_display_interfaces(char *action_arg, char *interface_name_arg);
102 95
103acpi_status acpi_db_sleep(char *object_arg); 96acpi_status acpi_db_sleep(char *object_arg);
104 97
98void acpi_db_trace(char *enable_arg, char *method_arg, char *once_arg);
99
105void acpi_db_display_locks(void); 100void acpi_db_display_locks(void);
106 101
107void acpi_db_display_resources(char *object_arg); 102void acpi_db_display_resources(char *object_arg);
@@ -262,6 +257,23 @@ char *acpi_db_get_next_token(char *string,
262 char **next, acpi_object_type * return_type); 257 char **next, acpi_object_type * return_type);
263 258
264/* 259/*
260 * dbobject
261 */
262void acpi_db_decode_internal_object(union acpi_operand_object *obj_desc);
263
264void
265acpi_db_display_internal_object(union acpi_operand_object *obj_desc,
266 struct acpi_walk_state *walk_state);
267
268void acpi_db_decode_arguments(struct acpi_walk_state *walk_state);
269
270void acpi_db_decode_locals(struct acpi_walk_state *walk_state);
271
272void
273acpi_db_dump_method_info(acpi_status status,
274 struct acpi_walk_state *walk_state);
275
276/*
265 * dbstats - Generation and display of ACPI table statistics 277 * dbstats - Generation and display of ACPI table statistics
266 */ 278 */
267void acpi_db_generate_statistics(union acpi_parse_object *root, u8 is_method); 279void acpi_db_generate_statistics(union acpi_parse_object *root, u8 is_method);
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 408f04bcaab4..7094dc89eb81 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -354,4 +354,12 @@ acpi_status
354acpi_ds_result_push(union acpi_operand_object *object, 354acpi_ds_result_push(union acpi_operand_object *object,
355 struct acpi_walk_state *walk_state); 355 struct acpi_walk_state *walk_state);
356 356
357/*
358 * dsdebug - parser debugging routines
359 */
360void
361acpi_ds_dump_method_stack(acpi_status status,
362 struct acpi_walk_state *walk_state,
363 union acpi_parse_object *op);
364
357#endif /* _ACDISPAT_H_ */ 365#endif /* _ACDISPAT_H_ */
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 53f96a370762..09f37b516808 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -58,11 +58,12 @@ ACPI_GLOBAL(struct acpi_table_list, acpi_gbl_root_table_list);
58 58
59ACPI_GLOBAL(struct acpi_table_header *, acpi_gbl_DSDT); 59ACPI_GLOBAL(struct acpi_table_header *, acpi_gbl_DSDT);
60ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header); 60ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header);
61ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX);
62ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX);
63ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX);
61 64
62#if (!ACPI_REDUCED_HARDWARE) 65#if (!ACPI_REDUCED_HARDWARE)
63ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS); 66ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS);
64ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_facs32);
65ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_facs64);
66 67
67#endif /* !ACPI_REDUCED_HARDWARE */ 68#endif /* !ACPI_REDUCED_HARDWARE */
68 69
@@ -235,6 +236,10 @@ ACPI_INIT_GLOBAL(u32, acpi_gbl_nesting_level, 0);
235 236
236ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list); 237ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
237 238
239/* Maximum number of While() loop iterations before forced abort */
240
241ACPI_GLOBAL(u16, acpi_gbl_max_loop_iterations);
242
238/* Control method single step flag */ 243/* Control method single step flag */
239 244
240ACPI_GLOBAL(u8, acpi_gbl_cm_single_step); 245ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
@@ -290,8 +295,6 @@ ACPI_GLOBAL(u32, acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]);
290 295
291ACPI_GLOBAL(u32, acpi_gbl_original_dbg_level); 296ACPI_GLOBAL(u32, acpi_gbl_original_dbg_level);
292ACPI_GLOBAL(u32, acpi_gbl_original_dbg_layer); 297ACPI_GLOBAL(u32, acpi_gbl_original_dbg_layer);
293ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_level);
294ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_layer);
295 298
296/***************************************************************************** 299/*****************************************************************************
297 * 300 *
@@ -309,9 +312,10 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_no_resource_disassembly, FALSE);
309ACPI_INIT_GLOBAL(u8, acpi_gbl_ignore_noop_operator, FALSE); 312ACPI_INIT_GLOBAL(u8, acpi_gbl_ignore_noop_operator, FALSE);
310ACPI_INIT_GLOBAL(u8, acpi_gbl_cstyle_disassembly, TRUE); 313ACPI_INIT_GLOBAL(u8, acpi_gbl_cstyle_disassembly, TRUE);
311ACPI_INIT_GLOBAL(u8, acpi_gbl_force_aml_disassembly, FALSE); 314ACPI_INIT_GLOBAL(u8, acpi_gbl_force_aml_disassembly, FALSE);
315ACPI_INIT_GLOBAL(u8, acpi_gbl_dm_opt_verbose, TRUE);
312 316
313ACPI_GLOBAL(u8, acpi_gbl_db_opt_disasm); 317ACPI_GLOBAL(u8, acpi_gbl_dm_opt_disasm);
314ACPI_GLOBAL(u8, acpi_gbl_db_opt_verbose); 318ACPI_GLOBAL(u8, acpi_gbl_dm_opt_listing);
315ACPI_GLOBAL(u8, acpi_gbl_num_external_methods); 319ACPI_GLOBAL(u8, acpi_gbl_num_external_methods);
316ACPI_GLOBAL(u32, acpi_gbl_resolved_external_methods); 320ACPI_GLOBAL(u32, acpi_gbl_resolved_external_methods);
317ACPI_GLOBAL(struct acpi_external_list *, acpi_gbl_external_list); 321ACPI_GLOBAL(struct acpi_external_list *, acpi_gbl_external_list);
@@ -346,8 +350,8 @@ ACPI_GLOBAL(char, acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE]);
346/* 350/*
347 * Statistic globals 351 * Statistic globals
348 */ 352 */
349ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TYPE_NS_NODE_MAX + 1]); 353ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TOTAL_TYPES]);
350ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TYPE_NS_NODE_MAX + 1]); 354ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TOTAL_TYPES]);
351ACPI_GLOBAL(u16, acpi_gbl_obj_type_count_misc); 355ACPI_GLOBAL(u16, acpi_gbl_obj_type_count_misc);
352ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc); 356ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc);
353ACPI_GLOBAL(u32, acpi_gbl_num_nodes); 357ACPI_GLOBAL(u32, acpi_gbl_num_nodes);
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 7ac98000b46b..e820ed8f173f 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -131,6 +131,28 @@ void
131acpi_ex_do_debug_object(union acpi_operand_object *source_desc, 131acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
132 u32 level, u32 index); 132 u32 level, u32 index);
133 133
134void
135acpi_ex_start_trace_method(struct acpi_namespace_node *method_node,
136 union acpi_operand_object *obj_desc,
137 struct acpi_walk_state *walk_state);
138
139void
140acpi_ex_stop_trace_method(struct acpi_namespace_node *method_node,
141 union acpi_operand_object *obj_desc,
142 struct acpi_walk_state *walk_state);
143
144void
145acpi_ex_start_trace_opcode(union acpi_parse_object *op,
146 struct acpi_walk_state *walk_state);
147
148void
149acpi_ex_stop_trace_opcode(union acpi_parse_object *op,
150 struct acpi_walk_state *walk_state);
151
152void
153acpi_ex_trace_point(acpi_trace_event_type type,
154 u8 begin, u8 *aml, char *pathname);
155
134/* 156/*
135 * exfield - ACPI AML (p-code) execution - field manipulation 157 * exfield - ACPI AML (p-code) execution - field manipulation
136 */ 158 */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index bc600969c6a1..6f708267ad8c 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -174,8 +174,12 @@ struct acpi_namespace_node {
174 */ 174 */
175#ifdef ACPI_LARGE_NAMESPACE_NODE 175#ifdef ACPI_LARGE_NAMESPACE_NODE
176 union acpi_parse_object *op; 176 union acpi_parse_object *op;
177 void *method_locals;
178 void *method_args;
177 u32 value; 179 u32 value;
178 u32 length; 180 u32 length;
181 u8 arg_count;
182
179#endif 183#endif
180}; 184};
181 185
@@ -209,11 +213,9 @@ struct acpi_table_list {
209#define ACPI_ROOT_ORIGIN_ALLOCATED (1) 213#define ACPI_ROOT_ORIGIN_ALLOCATED (1)
210#define ACPI_ROOT_ALLOW_RESIZE (2) 214#define ACPI_ROOT_ALLOW_RESIZE (2)
211 215
212/* Predefined (fixed) table indexes */ 216/* Predefined table indexes */
213 217
214#define ACPI_TABLE_INDEX_DSDT (0) 218#define ACPI_INVALID_TABLE_INDEX (0xFFFFFFFF)
215#define ACPI_TABLE_INDEX_FACS (1)
216#define ACPI_TABLE_INDEX_X_FACS (2)
217 219
218struct acpi_find_context { 220struct acpi_find_context {
219 char *search_for; 221 char *search_for;
@@ -404,6 +406,13 @@ struct acpi_simple_repair_info {
404 406
405#define ACPI_NUM_RTYPES 5 /* Number of actual object types */ 407#define ACPI_NUM_RTYPES 5 /* Number of actual object types */
406 408
409/* Info for running the _REG methods */
410
411struct acpi_reg_walk_info {
412 acpi_adr_space_type space_id;
413 u32 reg_run_count;
414};
415
407/***************************************************************************** 416/*****************************************************************************
408 * 417 *
409 * Event typedefs and structs 418 * Event typedefs and structs
@@ -715,7 +724,7 @@ union acpi_parse_value {
715 union acpi_parse_object *arg; /* arguments and contained ops */ 724 union acpi_parse_object *arg; /* arguments and contained ops */
716}; 725};
717 726
718#ifdef ACPI_DISASSEMBLER 727#if defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUG_OUTPUT)
719#define ACPI_DISASM_ONLY_MEMBERS(a) a; 728#define ACPI_DISASM_ONLY_MEMBERS(a) a;
720#else 729#else
721#define ACPI_DISASM_ONLY_MEMBERS(a) 730#define ACPI_DISASM_ONLY_MEMBERS(a)
@@ -726,7 +735,7 @@ union acpi_parse_value {
726 u8 descriptor_type; /* To differentiate various internal objs */\ 735 u8 descriptor_type; /* To differentiate various internal objs */\
727 u8 flags; /* Type of Op */\ 736 u8 flags; /* Type of Op */\
728 u16 aml_opcode; /* AML opcode */\ 737 u16 aml_opcode; /* AML opcode */\
729 u32 aml_offset; /* Offset of declaration in AML */\ 738 u8 *aml; /* Address of declaration in AML */\
730 union acpi_parse_object *next; /* Next op */\ 739 union acpi_parse_object *next; /* Next op */\
731 struct acpi_namespace_node *node; /* For use by interpreter */\ 740 struct acpi_namespace_node *node; /* For use by interpreter */\
732 union acpi_parse_value value; /* Value or args associated with the opcode */\ 741 union acpi_parse_value value; /* Value or args associated with the opcode */\
@@ -1103,6 +1112,9 @@ struct acpi_db_method_info {
1103 * Index of current thread inside all them created. 1112 * Index of current thread inside all them created.
1104 */ 1113 */
1105 char init_args; 1114 char init_args;
1115#ifdef ACPI_DEBUGGER
1116 acpi_object_type arg_types[4];
1117#endif
1106 char *arguments[4]; 1118 char *arguments[4];
1107 char num_threads_str[11]; 1119 char num_threads_str[11];
1108 char id_of_thread_str[11]; 1120 char id_of_thread_str[11];
@@ -1119,6 +1131,10 @@ struct acpi_integrity_info {
1119#define ACPI_DB_CONSOLE_OUTPUT 0x02 1131#define ACPI_DB_CONSOLE_OUTPUT 0x02
1120#define ACPI_DB_DUPLICATE_OUTPUT 0x03 1132#define ACPI_DB_DUPLICATE_OUTPUT 0x03
1121 1133
1134struct acpi_object_info {
1135 u32 types[ACPI_TOTAL_TYPES];
1136};
1137
1122/***************************************************************************** 1138/*****************************************************************************
1123 * 1139 *
1124 * Debug 1140 * Debug
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index c240bdf824f2..e85366ceb15a 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -220,6 +220,15 @@
220#define ACPI_MUL_32(a) _ACPI_MUL(a, 5) 220#define ACPI_MUL_32(a) _ACPI_MUL(a, 5)
221#define ACPI_MOD_32(a) _ACPI_MOD(a, 32) 221#define ACPI_MOD_32(a) _ACPI_MOD(a, 32)
222 222
223/* Test for ASCII character */
224
225#define ACPI_IS_ASCII(c) ((c) < 0x80)
226
227/* Signed integers */
228
229#define ACPI_SIGN_POSITIVE 0
230#define ACPI_SIGN_NEGATIVE 1
231
223/* 232/*
224 * Rounding macros (Power of two boundaries only) 233 * Rounding macros (Power of two boundaries only)
225 */ 234 */
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 0dd088290d80..ea0d9076d408 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -272,17 +272,20 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
272 */ 272 */
273u32 acpi_ns_opens_scope(acpi_object_type type); 273u32 acpi_ns_opens_scope(acpi_object_type type);
274 274
275acpi_status
276acpi_ns_build_external_path(struct acpi_namespace_node *node,
277 acpi_size size, char *name_buffer);
278
279char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node); 275char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node);
280 276
277u32
278acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
279 char *full_path, u32 path_size, u8 no_trailing);
280
281char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
282 u8 no_trailing);
283
281char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state); 284char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state);
282 285
283acpi_status 286acpi_status
284acpi_ns_handle_to_pathname(acpi_handle target_handle, 287acpi_ns_handle_to_pathname(acpi_handle target_handle,
285 struct acpi_buffer *buffer); 288 struct acpi_buffer *buffer, u8 no_trailing);
286 289
287u8 290u8
288acpi_ns_pattern_match(struct acpi_namespace_node *obj_node, char *search_for); 291acpi_ns_pattern_match(struct acpi_namespace_node *obj_node, char *search_for);
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index c81d98d09cac..0bd02c4a5f75 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -176,6 +176,7 @@ struct acpi_object_method {
176 u8 param_count; 176 u8 param_count;
177 u8 sync_level; 177 u8 sync_level;
178 union acpi_operand_object *mutex; 178 union acpi_operand_object *mutex;
179 union acpi_operand_object *node;
179 u8 *aml_start; 180 u8 *aml_start;
180 union { 181 union {
181 acpi_internal_method implementation; 182 acpi_internal_method implementation;
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 0cdd2fce493a..6021ccfb0b1c 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -225,11 +225,11 @@ void acpi_ps_delete_parse_tree(union acpi_parse_object *root);
225/* 225/*
226 * psutils - parser utilities 226 * psutils - parser utilities
227 */ 227 */
228union acpi_parse_object *acpi_ps_create_scope_op(void); 228union acpi_parse_object *acpi_ps_create_scope_op(u8 *aml);
229 229
230void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode); 230void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode);
231 231
232union acpi_parse_object *acpi_ps_alloc_op(u16 opcode); 232union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml);
233 233
234void acpi_ps_free_op(union acpi_parse_object *op); 234void acpi_ps_free_op(union acpi_parse_object *op);
235 235
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 44997ca02ae2..f9992dced1f9 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -85,7 +85,7 @@ struct acpi_walk_state {
85 u8 namespace_override; /* Override existing objects */ 85 u8 namespace_override; /* Override existing objects */
86 u8 result_size; /* Total elements for the result stack */ 86 u8 result_size; /* Total elements for the result stack */
87 u8 result_count; /* Current number of occupied elements of result stack */ 87 u8 result_count; /* Current number of occupied elements of result stack */
88 u32 aml_offset; 88 u8 *aml;
89 u32 arg_types; 89 u32 arg_types;
90 u32 method_breakpoint; /* For single stepping */ 90 u32 method_breakpoint; /* For single stepping */
91 u32 user_breakpoint; /* User AML breakpoint */ 91 u32 user_breakpoint; /* User AML breakpoint */
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 7e0b6f1bec9c..f7731f260c31 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -154,14 +154,20 @@ void acpi_tb_check_dsdt_header(void);
154struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index); 154struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index);
155 155
156void 156void
157acpi_tb_install_table_with_override(u32 table_index, 157acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
158 struct acpi_table_desc *new_table_desc, 158 u8 override, u32 *table_index);
159 u8 override);
160 159
161acpi_status 160acpi_status
162acpi_tb_install_fixed_table(acpi_physical_address address, 161acpi_tb_install_fixed_table(acpi_physical_address address,
163 char *signature, u32 table_index); 162 char *signature, u32 *table_index);
164 163
165acpi_status acpi_tb_parse_root_table(acpi_physical_address rsdp_address); 164acpi_status acpi_tb_parse_root_table(acpi_physical_address rsdp_address);
166 165
166u8 acpi_is_valid_signature(char *signature);
167
168/*
169 * tbxfload
170 */
171acpi_status acpi_tb_load_namespace(void);
172
167#endif /* __ACTABLES_H__ */ 173#endif /* __ACTABLES_H__ */
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 6de0d3573037..fb2aa5066f3f 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -167,6 +167,17 @@ struct acpi_pkg_info {
167#define DB_QWORD_DISPLAY 8 167#define DB_QWORD_DISPLAY 8
168 168
169/* 169/*
170 * utnonansi - Non-ANSI C library functions
171 */
172void acpi_ut_strupr(char *src_string);
173
174void acpi_ut_strlwr(char *src_string);
175
176int acpi_ut_stricmp(char *string1, char *string2);
177
178acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
179
180/*
170 * utglobal - Global data structures and procedures 181 * utglobal - Global data structures and procedures
171 */ 182 */
172acpi_status acpi_ut_init_globals(void); 183acpi_status acpi_ut_init_globals(void);
@@ -205,8 +216,6 @@ acpi_status acpi_ut_hardware_initialize(void);
205 216
206void acpi_ut_subsystem_shutdown(void); 217void acpi_ut_subsystem_shutdown(void);
207 218
208#define ACPI_IS_ASCII(c) ((c) < 0x80)
209
210/* 219/*
211 * utcopy - Object construction and conversion interfaces 220 * utcopy - Object construction and conversion interfaces
212 */ 221 */
@@ -508,7 +517,7 @@ const struct acpi_exception_info *acpi_ut_validate_exception(acpi_status
508 517
509u8 acpi_ut_is_pci_root_bridge(char *id); 518u8 acpi_ut_is_pci_root_bridge(char *id);
510 519
511#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP) 520#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP || defined ACPI_NAMES_APP)
512u8 acpi_ut_is_aml_table(struct acpi_table_header *table); 521u8 acpi_ut_is_aml_table(struct acpi_table_header *table);
513#endif 522#endif
514 523
@@ -567,16 +576,6 @@ acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag);
567/* 576/*
568 * utstring - String and character utilities 577 * utstring - String and character utilities
569 */ 578 */
570void acpi_ut_strupr(char *src_string);
571
572#ifdef ACPI_ASL_COMPILER
573void acpi_ut_strlwr(char *src_string);
574
575int acpi_ut_stricmp(char *string1, char *string2);
576#endif
577
578acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
579
580void acpi_ut_print_string(char *string, u16 max_length); 579void acpi_ut_print_string(char *string, u16 max_length);
581 580
582#if defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP 581#if defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 3e6989738e85..e2ab59e39162 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -86,7 +86,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
86 86
87 /* Allocate a new parser op to be the root of the parsed tree */ 87 /* Allocate a new parser op to be the root of the parsed tree */
88 88
89 op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP); 89 op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP, aml_start);
90 if (!op) { 90 if (!op) {
91 return_ACPI_STATUS(AE_NO_MEMORY); 91 return_ACPI_STATUS(AE_NO_MEMORY);
92 } 92 }
@@ -129,7 +129,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
129 129
130 /* Evaluate the deferred arguments */ 130 /* Evaluate the deferred arguments */
131 131
132 op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP); 132 op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP, aml_start);
133 if (!op) { 133 if (!op) {
134 return_ACPI_STATUS(AE_NO_MEMORY); 134 return_ACPI_STATUS(AE_NO_MEMORY);
135 } 135 }
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 39da9da62bbf..435fc16e2f83 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -212,7 +212,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
212 */ 212 */
213 control_state->control.loop_count++; 213 control_state->control.loop_count++;
214 if (control_state->control.loop_count > 214 if (control_state->control.loop_count >
215 ACPI_MAX_LOOP_ITERATIONS) { 215 acpi_gbl_max_loop_iterations) {
216 status = AE_AML_INFINITE_LOOP; 216 status = AE_AML_INFINITE_LOOP;
217 break; 217 break;
218 } 218 }
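
The dscontrol.c change above replaces the compile-time ACPI_MAX_LOOP_ITERATIONS cap with the new acpi_gbl_max_loop_iterations global, making the While() infinite-loop guard tunable at run time. A minimal sketch of that guard, assuming simplified stand-in names rather than the ACPICA API:

/*
 * Illustrative sketch only: a run-time adjustable iteration cap for an
 * interpreter loop guard. Names below are local stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t max_loop_iterations = 0xFFFF;  /* tunable guard value */

/* Returns 0 while the loop may continue, -1 once the guard trips. */
static int check_while_iteration(uint32_t *loop_count)
{
    (*loop_count)++;
    if (*loop_count > max_loop_iterations)
        return -1;                     /* would raise AE_AML_INFINITE_LOOP */
    return 0;
}

int main(void)
{
    uint32_t count = 0;

    max_loop_iterations = 3;           /* pretend the host lowered the cap */
    while (check_while_iteration(&count) == 0)
        ;                              /* AML While() body would run here */

    printf("aborted after %u iterations\n", count);
    return 0;
}
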
diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c
new file mode 100644
index 000000000000..309556efc553
--- /dev/null
+++ b/drivers/acpi/acpica/dsdebug.c
@@ -0,0 +1,231 @@
1/******************************************************************************
2 *
3 * Module Name: dsdebug - Parser/Interpreter interface - debugging
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acdispat.h"
47#include "acnamesp.h"
48#ifdef ACPI_DISASSEMBLER
49#include "acdisasm.h"
50#endif
51#include "acinterp.h"
52
53#define _COMPONENT ACPI_DISPATCHER
54ACPI_MODULE_NAME("dsdebug")
55
56#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
57/* Local prototypes */
58static void
59acpi_ds_print_node_pathname(struct acpi_namespace_node *node,
60 const char *message);
61
62/*******************************************************************************
63 *
64 * FUNCTION: acpi_ds_print_node_pathname
65 *
66 * PARAMETERS: node - Object
67 * message - Prefix message
68 *
69 * DESCRIPTION: Print an object's full namespace pathname
70 * Manages allocation/freeing of a pathname buffer
71 *
72 ******************************************************************************/
73
74static void
75acpi_ds_print_node_pathname(struct acpi_namespace_node *node,
76 const char *message)
77{
78 struct acpi_buffer buffer;
79 acpi_status status;
80
81 ACPI_FUNCTION_TRACE(ds_print_node_pathname);
82
83 if (!node) {
84 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "[NULL NAME]"));
85 return_VOID;
86 }
87
88 /* Convert handle to full pathname and print it (with supplied message) */
89
90 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
91
92 status = acpi_ns_handle_to_pathname(node, &buffer, TRUE);
93 if (ACPI_SUCCESS(status)) {
94 if (message) {
95 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "%s ",
96 message));
97 }
98
99 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "[%s] (Node %p)",
100 (char *)buffer.pointer, node));
101 ACPI_FREE(buffer.pointer);
102 }
103
104 return_VOID;
105}
106
107/*******************************************************************************
108 *
109 * FUNCTION: acpi_ds_dump_method_stack
110 *
111 * PARAMETERS: status - Method execution status
112 * walk_state - Current state of the parse tree walk
113 * op - Executing parse op
114 *
115 * RETURN: None
116 *
117 * DESCRIPTION: Called when a method has been aborted because of an error.
118 * Dumps the method execution stack.
119 *
120 ******************************************************************************/
121
122void
123acpi_ds_dump_method_stack(acpi_status status,
124 struct acpi_walk_state *walk_state,
125 union acpi_parse_object *op)
126{
127 union acpi_parse_object *next;
128 struct acpi_thread_state *thread;
129 struct acpi_walk_state *next_walk_state;
130 struct acpi_namespace_node *previous_method = NULL;
131 union acpi_operand_object *method_desc;
132
133 ACPI_FUNCTION_TRACE(ds_dump_method_stack);
134
135 /* Ignore control codes, they are not errors */
136
137 if ((status & AE_CODE_MASK) == AE_CODE_CONTROL) {
138 return_VOID;
139 }
140
141 /* We may be executing a deferred opcode */
142
143 if (walk_state->deferred_node) {
144 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
145 "Executing subtree for Buffer/Package/Region\n"));
146 return_VOID;
147 }
148
149 /*
150 * If there is no Thread, we are not actually executing a method.
151 * This can happen when the iASL compiler calls the interpreter
152 * to perform constant folding.
153 */
154 thread = walk_state->thread;
155 if (!thread) {
156 return_VOID;
157 }
158
159 /* Display exception and method name */
160
161 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
162 "\n**** Exception %s during execution of method ",
163 acpi_format_exception(status)));
164 acpi_ds_print_node_pathname(walk_state->method_node, NULL);
165
166 /* Display stack of executing methods */
167
168 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH,
169 "\n\nMethod Execution Stack:\n"));
170 next_walk_state = thread->walk_state_list;
171
172 /* Walk list of linked walk states */
173
174 while (next_walk_state) {
175 method_desc = next_walk_state->method_desc;
176 if (method_desc) {
177 acpi_ex_stop_trace_method((struct acpi_namespace_node *)
178 method_desc->method.node,
179 method_desc, walk_state);
180 }
181
182 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
183 " Method [%4.4s] executing: ",
184 acpi_ut_get_node_name(next_walk_state->
185 method_node)));
186
187 /* First method is the currently executing method */
188
189 if (next_walk_state == walk_state) {
190 if (op) {
191
192 /* Display currently executing ASL statement */
193
194 next = op->common.next;
195 op->common.next = NULL;
196
197#ifdef ACPI_DISASSEMBLER
198 acpi_dm_disassemble(next_walk_state, op,
199 ACPI_UINT32_MAX);
200#endif
201 op->common.next = next;
202 }
203 } else {
204 /*
205 * This method has called another method
206 * NOTE: the method call parse subtree is already deleted at this
207 * point, so we cannot disassemble the method invocation.
208 */
209 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH,
210 "Call to method "));
211 acpi_ds_print_node_pathname(previous_method, NULL);
212 }
213
214 previous_method = next_walk_state->method_node;
215 next_walk_state = next_walk_state->next;
216 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "\n"));
217 }
218
219 return_VOID;
220}
221
222#else
223void
224acpi_ds_dump_method_stack(acpi_status status,
225 struct acpi_walk_state *walk_state,
226 union acpi_parse_object *op)
227{
228 return;
229}
230
231#endif
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 95779e8ec3bb..920f1b199bc6 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -237,12 +237,22 @@ acpi_ds_initialize_objects(u32 table_index,
237 return_ACPI_STATUS(status); 237 return_ACPI_STATUS(status);
238 } 238 }
239 239
240 /* DSDT is always the first AML table */
241
242 if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT)) {
243 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
244 "\nInitializing Namespace objects:\n"));
245 }
246
247 /* Summary of objects initialized */
248
240 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, 249 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
241 "Table [%4.4s] (id %4.4X) - %4u Objects with %3u Devices, " 250 "Table [%4.4s:%8.8s] (id %.2X) - %4u Objects with %3u Devices, "
242 "%3u Regions, %3u Methods (%u/%u/%u Serial/Non/Cvt)\n", 251 "%3u Regions, %4u Methods (%u/%u/%u Serial/Non/Cvt)\n",
243 table->signature, owner_id, info.object_count, 252 table->signature, table->oem_table_id, owner_id,
244 info.device_count, info.op_region_count, 253 info.object_count, info.device_count,
245 info.method_count, info.serial_method_count, 254 info.op_region_count, info.method_count,
255 info.serial_method_count,
246 info.non_serial_method_count, 256 info.non_serial_method_count,
247 info.serialized_method_count)); 257 info.serialized_method_count));
248 258
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 85bb951430d9..bc32f3194afe 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -46,11 +46,9 @@
46#include "acdispat.h" 46#include "acdispat.h"
47#include "acinterp.h" 47#include "acinterp.h"
48#include "acnamesp.h" 48#include "acnamesp.h"
49#ifdef ACPI_DISASSEMBLER
50#include "acdisasm.h"
51#endif
52#include "acparser.h" 49#include "acparser.h"
53#include "amlcode.h" 50#include "amlcode.h"
51#include "acdebug.h"
54 52
55#define _COMPONENT ACPI_DISPATCHER 53#define _COMPONENT ACPI_DISPATCHER
56ACPI_MODULE_NAME("dsmethod") 54ACPI_MODULE_NAME("dsmethod")
@@ -103,7 +101,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
103 101
104 /* Create/Init a root op for the method parse tree */ 102 /* Create/Init a root op for the method parse tree */
105 103
106 op = acpi_ps_alloc_op(AML_METHOD_OP); 104 op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
107 if (!op) { 105 if (!op) {
108 return_ACPI_STATUS(AE_NO_MEMORY); 106 return_ACPI_STATUS(AE_NO_MEMORY);
109 } 107 }
@@ -205,7 +203,7 @@ acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
205 * RETURN: Status 203 * RETURN: Status
206 * 204 *
207 * DESCRIPTION: Called on method error. Invoke the global exception handler if 205 * DESCRIPTION: Called on method error. Invoke the global exception handler if
208 * present, dump the method data if the disassembler is configured 206 * present, dump the method data if the debugger is configured
209 * 207 *
210 * Note: Allows the exception handler to change the status code 208 * Note: Allows the exception handler to change the status code
211 * 209 *
@@ -214,6 +212,8 @@ acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
214acpi_status 212acpi_status
215acpi_ds_method_error(acpi_status status, struct acpi_walk_state * walk_state) 213acpi_ds_method_error(acpi_status status, struct acpi_walk_state * walk_state)
216{ 214{
215 u32 aml_offset;
216
217 ACPI_FUNCTION_ENTRY(); 217 ACPI_FUNCTION_ENTRY();
218 218
219 /* Ignore AE_OK and control exception codes */ 219 /* Ignore AE_OK and control exception codes */
@@ -234,26 +234,30 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state * walk_state)
234 * Handler can map the exception code to anything it wants, including 234 * Handler can map the exception code to anything it wants, including
235 * AE_OK, in which case the executing method will not be aborted. 235 * AE_OK, in which case the executing method will not be aborted.
236 */ 236 */
237 aml_offset = (u32)ACPI_PTR_DIFF(walk_state->aml,
238 walk_state->parser_state.
239 aml_start);
240
237 status = acpi_gbl_exception_handler(status, 241 status = acpi_gbl_exception_handler(status,
238 walk_state->method_node ? 242 walk_state->method_node ?
239 walk_state->method_node-> 243 walk_state->method_node->
240 name.integer : 0, 244 name.integer : 0,
241 walk_state->opcode, 245 walk_state->opcode,
242 walk_state->aml_offset, 246 aml_offset, NULL);
243 NULL);
244 acpi_ex_enter_interpreter(); 247 acpi_ex_enter_interpreter();
245 } 248 }
246 249
247 acpi_ds_clear_implicit_return(walk_state); 250 acpi_ds_clear_implicit_return(walk_state);
248 251
249#ifdef ACPI_DISASSEMBLER
250 if (ACPI_FAILURE(status)) { 252 if (ACPI_FAILURE(status)) {
253 acpi_ds_dump_method_stack(status, walk_state, walk_state->op);
251 254
252 /* Display method locals/args if disassembler is present */ 255 /* Display method locals/args if debugger is present */
253 256
254 acpi_dm_dump_method_info(status, walk_state, walk_state->op); 257#ifdef ACPI_DEBUGGER
255 } 258 acpi_db_dump_method_info(status, walk_state);
256#endif 259#endif
260 }
257 261
258 return (status); 262 return (status);
259} 263}
@@ -328,6 +332,8 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
328 return_ACPI_STATUS(AE_NULL_ENTRY); 332 return_ACPI_STATUS(AE_NULL_ENTRY);
329 } 333 }
330 334
335 acpi_ex_start_trace_method(method_node, obj_desc, walk_state);
336
331 /* Prevent wraparound of thread count */ 337 /* Prevent wraparound of thread count */
332 338
333 if (obj_desc->method.thread_count == ACPI_UINT8_MAX) { 339 if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
@@ -574,9 +580,7 @@ cleanup:
574 /* On error, we must terminate the method properly */ 580 /* On error, we must terminate the method properly */
575 581
576 acpi_ds_terminate_control_method(obj_desc, next_walk_state); 582 acpi_ds_terminate_control_method(obj_desc, next_walk_state);
577 if (next_walk_state) { 583 acpi_ds_delete_walk_state(next_walk_state);
578 acpi_ds_delete_walk_state(next_walk_state);
579 }
580 584
581 return_ACPI_STATUS(status); 585 return_ACPI_STATUS(status);
582} 586}
@@ -826,5 +830,8 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
826 } 830 }
827 } 831 }
828 832
833 acpi_ex_stop_trace_method((struct acpi_namespace_node *)method_desc->
834 method.node, method_desc, walk_state);
835
829 return_VOID; 836 return_VOID;
830} 837}
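
With parse objects now carrying AML pointers instead of offsets, the dsmethod.c hunk above reconstructs the offset passed to the host exception handler as a pointer difference from the start of the method's AML. A tiny standalone illustration of that computation (stand-in variables, not kernel code):

/* Illustrative sketch only: derive an AML offset from two pointers. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t aml_block[64];                 /* pretend this is a method body */
    uint8_t *aml_start = aml_block;
    uint8_t *current_aml = aml_block + 17; /* opcode that raised the fault */

    uint32_t aml_offset = (uint32_t)(current_aml - aml_start);

    printf("exception at AML offset 0x%X\n", aml_offset);
    return 0;
}
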
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index ea0cc4e08f80..81d7b9863e32 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -480,8 +480,8 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
480 union acpi_operand_object **operand; 480 union acpi_operand_object **operand;
481 struct acpi_namespace_node *node; 481 struct acpi_namespace_node *node;
482 union acpi_parse_object *next_op; 482 union acpi_parse_object *next_op;
483 u32 table_index;
484 struct acpi_table_header *table; 483 struct acpi_table_header *table;
484 u32 table_index;
485 485
486 ACPI_FUNCTION_TRACE_PTR(ds_eval_table_region_operands, op); 486 ACPI_FUNCTION_TRACE_PTR(ds_eval_table_region_operands, op);
487 487
@@ -504,6 +504,8 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
504 return_ACPI_STATUS(status); 504 return_ACPI_STATUS(status);
505 } 505 }
506 506
507 operand = &walk_state->operands[0];
508
507 /* 509 /*
508 * Resolve the Signature string, oem_id string, 510 * Resolve the Signature string, oem_id string,
509 * and oem_table_id string operands 511 * and oem_table_id string operands
@@ -511,32 +513,34 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
511 status = acpi_ex_resolve_operands(op->common.aml_opcode, 513 status = acpi_ex_resolve_operands(op->common.aml_opcode,
512 ACPI_WALK_OPERANDS, walk_state); 514 ACPI_WALK_OPERANDS, walk_state);
513 if (ACPI_FAILURE(status)) { 515 if (ACPI_FAILURE(status)) {
514 return_ACPI_STATUS(status); 516 goto cleanup;
515 } 517 }
516 518
517 operand = &walk_state->operands[0];
518
519 /* Find the ACPI table */ 519 /* Find the ACPI table */
520 520
521 status = acpi_tb_find_table(operand[0]->string.pointer, 521 status = acpi_tb_find_table(operand[0]->string.pointer,
522 operand[1]->string.pointer, 522 operand[1]->string.pointer,
523 operand[2]->string.pointer, &table_index); 523 operand[2]->string.pointer, &table_index);
524 if (ACPI_FAILURE(status)) { 524 if (ACPI_FAILURE(status)) {
525 return_ACPI_STATUS(status); 525 if (status == AE_NOT_FOUND) {
526 ACPI_ERROR((AE_INFO,
527 "ACPI Table [%4.4s] OEM:(%s, %s) not found in RSDT/XSDT",
528 operand[0]->string.pointer,
529 operand[1]->string.pointer,
530 operand[2]->string.pointer));
531 }
532 goto cleanup;
526 } 533 }
527 534
528 acpi_ut_remove_reference(operand[0]);
529 acpi_ut_remove_reference(operand[1]);
530 acpi_ut_remove_reference(operand[2]);
531
532 status = acpi_get_table_by_index(table_index, &table); 535 status = acpi_get_table_by_index(table_index, &table);
533 if (ACPI_FAILURE(status)) { 536 if (ACPI_FAILURE(status)) {
534 return_ACPI_STATUS(status); 537 goto cleanup;
535 } 538 }
536 539
537 obj_desc = acpi_ns_get_attached_object(node); 540 obj_desc = acpi_ns_get_attached_object(node);
538 if (!obj_desc) { 541 if (!obj_desc) {
539 return_ACPI_STATUS(AE_NOT_EXIST); 542 status = AE_NOT_EXIST;
543 goto cleanup;
540 } 544 }
541 545
542 obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table); 546 obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table);
@@ -551,6 +555,11 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
551 555
552 obj_desc->region.flags |= AOPOBJ_DATA_VALID; 556 obj_desc->region.flags |= AOPOBJ_DATA_VALID;
553 557
558cleanup:
559 acpi_ut_remove_reference(operand[0]);
560 acpi_ut_remove_reference(operand[1]);
561 acpi_ut_remove_reference(operand[2]);
562
554 return_ACPI_STATUS(status); 563 return_ACPI_STATUS(status);
555} 564}
556 565
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 845ff44919c3..097188a6b1c1 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -388,7 +388,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
388 388
389 /* Create a new op */ 389 /* Create a new op */
390 390
391 op = acpi_ps_alloc_op(walk_state->opcode); 391 op = acpi_ps_alloc_op(walk_state->opcode, walk_state->aml);
392 if (!op) { 392 if (!op) {
393 return_ACPI_STATUS(AE_NO_MEMORY); 393 return_ACPI_STATUS(AE_NO_MEMORY);
394 } 394 }
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index fcaa30c611fb..e2c08cd79aca 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -335,7 +335,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
335 335
336 /* Create a new op */ 336 /* Create a new op */
337 337
338 op = acpi_ps_alloc_op(walk_state->opcode); 338 op = acpi_ps_alloc_op(walk_state->opcode, walk_state->aml);
339 if (!op) { 339 if (!op) {
340 return_ACPI_STATUS(AE_NO_MEMORY); 340 return_ACPI_STATUS(AE_NO_MEMORY);
341 } 341 }
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 2ba28a63fb68..5ee79a16fe33 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -626,9 +626,17 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
626 acpi_adr_space_type space_id) 626 acpi_adr_space_type space_id)
627{ 627{
628 acpi_status status; 628 acpi_status status;
629 struct acpi_reg_walk_info info;
629 630
630 ACPI_FUNCTION_TRACE(ev_execute_reg_methods); 631 ACPI_FUNCTION_TRACE(ev_execute_reg_methods);
631 632
633 info.space_id = space_id;
634 info.reg_run_count = 0;
635
636 ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES,
637 " Running _REG methods for SpaceId %s\n",
638 acpi_ut_get_region_name(info.space_id)));
639
632 /* 640 /*
633 * Run all _REG methods for all Operation Regions for this space ID. This 641 * Run all _REG methods for all Operation Regions for this space ID. This
634 * is a separate walk in order to handle any interdependencies between 642 * is a separate walk in order to handle any interdependencies between
@@ -637,7 +645,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
637 */ 645 */
638 status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX, 646 status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
639 ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, 647 ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run,
640 NULL, &space_id, NULL); 648 NULL, &info, NULL);
641 649
642 /* Special case for EC: handle "orphan" _REG methods with no region */ 650 /* Special case for EC: handle "orphan" _REG methods with no region */
643 651
@@ -645,6 +653,11 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
645 acpi_ev_orphan_ec_reg_method(node); 653 acpi_ev_orphan_ec_reg_method(node);
646 } 654 }
647 655
656 ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES,
657 " Executed %u _REG methods for SpaceId %s\n",
658 info.reg_run_count,
659 acpi_ut_get_region_name(info.space_id)));
660
648 return_ACPI_STATUS(status); 661 return_ACPI_STATUS(status);
649} 662}
650 663
@@ -664,10 +677,10 @@ acpi_ev_reg_run(acpi_handle obj_handle,
664{ 677{
665 union acpi_operand_object *obj_desc; 678 union acpi_operand_object *obj_desc;
666 struct acpi_namespace_node *node; 679 struct acpi_namespace_node *node;
667 acpi_adr_space_type space_id;
668 acpi_status status; 680 acpi_status status;
681 struct acpi_reg_walk_info *info;
669 682
670 space_id = *ACPI_CAST_PTR(acpi_adr_space_type, context); 683 info = ACPI_CAST_PTR(struct acpi_reg_walk_info, context);
671 684
672 /* Convert and validate the device handle */ 685 /* Convert and validate the device handle */
673 686
@@ -696,13 +709,14 @@ acpi_ev_reg_run(acpi_handle obj_handle,
696 709
697 /* Object is a Region */ 710 /* Object is a Region */
698 711
699 if (obj_desc->region.space_id != space_id) { 712 if (obj_desc->region.space_id != info->space_id) {
700 713
701 /* This region is for a different address space, just ignore it */ 714 /* This region is for a different address space, just ignore it */
702 715
703 return (AE_OK); 716 return (AE_OK);
704 } 717 }
705 718
719 info->reg_run_count++;
706 status = acpi_ev_execute_reg_method(obj_desc, ACPI_REG_CONNECT); 720 status = acpi_ev_execute_reg_method(obj_desc, ACPI_REG_CONNECT);
707 return (status); 721 return (status);
708} 722}
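
The evregion.c change above threads a small acpi_reg_walk_info context (space ID plus a run counter) through the namespace walk instead of a bare space ID, so the number of _REG methods actually executed can be reported. A simplified standalone sketch of that pattern, with hypothetical stand-in types:

/*
 * Illustrative sketch only: pass a walk context through a per-node callback
 * and count how many regions in the requested address space were handled.
 */
#include <stdio.h>

struct reg_walk_info {
    int space_id;                /* address space being connected */
    unsigned int reg_run_count;
};

struct region {
    int space_id;
};

/* Per-node callback: run _REG only for regions in the requested space. */
static void reg_run(const struct region *rgn, struct reg_walk_info *info)
{
    if (rgn->space_id != info->space_id)
        return;                  /* different address space, ignore */

    info->reg_run_count++;       /* would execute the region's _REG here */
}

int main(void)
{
    struct region regions[] = { { 3 }, { 0 }, { 3 } };  /* e.g. treat 3 as EC space */
    struct reg_walk_info info = { .space_id = 3, .reg_run_count = 0 };

    for (unsigned int i = 0; i < sizeof(regions) / sizeof(regions[0]); i++)
        reg_run(&regions[i], &info);

    printf("Executed %u _REG methods for space %d\n",
           info.reg_run_count, info.space_id);
    return 0;
}
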
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 24a4c5c2b124..b540913c11ac 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -162,14 +162,6 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
162 162
163 ACPI_FUNCTION_TRACE(ex_load_table_op); 163 ACPI_FUNCTION_TRACE(ex_load_table_op);
164 164
165 /* Validate lengths for the Signature, oem_id, and oem_table_id strings */
166
167 if ((operand[0]->string.length > ACPI_NAME_SIZE) ||
168 (operand[1]->string.length > ACPI_OEM_ID_SIZE) ||
169 (operand[2]->string.length > ACPI_OEM_TABLE_ID_SIZE)) {
170 return_ACPI_STATUS(AE_AML_STRING_LIMIT);
171 }
172
173 /* Find the ACPI table in the RSDT/XSDT */ 165 /* Find the ACPI table in the RSDT/XSDT */
174 166
175 status = acpi_tb_find_table(operand[0]->string.pointer, 167 status = acpi_tb_find_table(operand[0]->string.pointer,
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index aaeea4840aaa..ccb7219bdcee 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -486,6 +486,7 @@ acpi_ex_create_method(u8 * aml_start,
486 486
487 obj_desc->method.aml_start = aml_start; 487 obj_desc->method.aml_start = aml_start;
488 obj_desc->method.aml_length = aml_length; 488 obj_desc->method.aml_length = aml_length;
489 obj_desc->method.node = operand[0];
489 490
490 /* 491 /*
491 * Disassemble the method flags. Split off the arg_count, Serialized 492 * Disassemble the method flags. Split off the arg_count, Serialized
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index 815442bbd051..de92458236f5 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -43,11 +43,21 @@
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include "accommon.h" 45#include "accommon.h"
46#include "acnamesp.h"
46#include "acinterp.h" 47#include "acinterp.h"
48#include "acparser.h"
47 49
48#define _COMPONENT ACPI_EXECUTER 50#define _COMPONENT ACPI_EXECUTER
49ACPI_MODULE_NAME("exdebug") 51ACPI_MODULE_NAME("exdebug")
50 52
53static union acpi_operand_object *acpi_gbl_trace_method_object = NULL;
54
55/* Local prototypes */
56
57#ifdef ACPI_DEBUG_OUTPUT
58static const char *acpi_ex_get_trace_event_name(acpi_trace_event_type type);
59#endif
60
51#ifndef ACPI_NO_ERROR_MESSAGES 61#ifndef ACPI_NO_ERROR_MESSAGES
52/******************************************************************************* 62/*******************************************************************************
53 * 63 *
@@ -70,6 +80,7 @@ ACPI_MODULE_NAME("exdebug")
70 * enabled if necessary. 80 * enabled if necessary.
71 * 81 *
72 ******************************************************************************/ 82 ******************************************************************************/
83
73void 84void
74acpi_ex_do_debug_object(union acpi_operand_object *source_desc, 85acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
75 u32 level, u32 index) 86 u32 level, u32 index)
@@ -308,3 +319,316 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
308 return_VOID; 319 return_VOID;
309} 320}
310#endif 321#endif
322
323/*******************************************************************************
324 *
325 * FUNCTION: acpi_ex_interpreter_trace_enabled
326 *
327 * PARAMETERS: name - Whether method name should be matched,
328 * this should be checked before starting
329 * the tracer
330 *
331 * RETURN: TRUE if interpreter trace is enabled.
332 *
333 * DESCRIPTION: Check whether interpreter trace is enabled
334 *
335 ******************************************************************************/
336
337static u8 acpi_ex_interpreter_trace_enabled(char *name)
338{
339
340 /* Check if tracing is enabled */
341
342 if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED)) {
343 return (FALSE);
344 }
345
346 /*
347 * Check if tracing is filtered:
348 *
349 * 1. If the tracer is started, acpi_gbl_trace_method_object should have
350 * been filled by the trace starter
351 * 2. If the tracer is not started, acpi_gbl_trace_method_name should be
352 * matched if it is specified
353 * 3. If the tracer is oneshot style, acpi_gbl_trace_method_name should
354 * not be cleared by the trace stopper during the first match
355 */
356 if (acpi_gbl_trace_method_object) {
357 return (TRUE);
358 }
359 if (name &&
360 (acpi_gbl_trace_method_name &&
361 strcmp(acpi_gbl_trace_method_name, name))) {
362 return (FALSE);
363 }
364 if ((acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT) &&
365 !acpi_gbl_trace_method_name) {
366 return (FALSE);
367 }
368
369 return (TRUE);
370}
371
372/*******************************************************************************
373 *
374 * FUNCTION: acpi_ex_get_trace_event_name
375 *
376 * PARAMETERS: type - Trace event type
377 *
378 * RETURN: Trace event name.
379 *
380 * DESCRIPTION: Used to obtain the full trace event name.
381 *
382 ******************************************************************************/
383
384#ifdef ACPI_DEBUG_OUTPUT
385
386static const char *acpi_ex_get_trace_event_name(acpi_trace_event_type type)
387{
388 switch (type) {
389 case ACPI_TRACE_AML_METHOD:
390
391 return "Method";
392
393 case ACPI_TRACE_AML_OPCODE:
394
395 return "Opcode";
396
397 case ACPI_TRACE_AML_REGION:
398
399 return "Region";
400
401 default:
402
403 return "";
404 }
405}
406
407#endif
408
409/*******************************************************************************
410 *
411 * FUNCTION: acpi_ex_trace_point
412 *
413 * PARAMETERS: type - Trace event type
414 * begin - TRUE if before execution
415 * aml - Executed AML address
416 * pathname - Object path
417 *
418 * RETURN: None
419 *
420 * DESCRIPTION: Internal interpreter execution trace.
421 *
422 ******************************************************************************/
423
424void
425acpi_ex_trace_point(acpi_trace_event_type type,
426 u8 begin, u8 *aml, char *pathname)
427{
428
429 ACPI_FUNCTION_NAME(ex_trace_point);
430
431 if (pathname) {
432 ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
433 "%s %s [0x%p:%s] execution.\n",
434 acpi_ex_get_trace_event_name(type),
435 begin ? "Begin" : "End", aml, pathname));
436 } else {
437 ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
438 "%s %s [0x%p] execution.\n",
439 acpi_ex_get_trace_event_name(type),
440 begin ? "Begin" : "End", aml));
441 }
442}
443
444/*******************************************************************************
445 *
446 * FUNCTION: acpi_ex_start_trace_method
447 *
448 * PARAMETERS: method_node - Node of the method
449 * obj_desc - The method object
450 * walk_state - current state, NULL if not yet executing
451 * a method.
452 *
453 * RETURN: None
454 *
455 * DESCRIPTION: Start control method execution trace
456 *
457 ******************************************************************************/
458
459void
460acpi_ex_start_trace_method(struct acpi_namespace_node *method_node,
461 union acpi_operand_object *obj_desc,
462 struct acpi_walk_state *walk_state)
463{
464 acpi_status status;
465 char *pathname = NULL;
466 u8 enabled = FALSE;
467
468 ACPI_FUNCTION_NAME(ex_start_trace_method);
469
470 if (method_node) {
471 pathname = acpi_ns_get_normalized_pathname(method_node, TRUE);
472 }
473
474 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
475 if (ACPI_FAILURE(status)) {
476 goto exit;
477 }
478
479 enabled = acpi_ex_interpreter_trace_enabled(pathname);
480 if (enabled && !acpi_gbl_trace_method_object) {
481 acpi_gbl_trace_method_object = obj_desc;
482 acpi_gbl_original_dbg_level = acpi_dbg_level;
483 acpi_gbl_original_dbg_layer = acpi_dbg_layer;
484 acpi_dbg_level = ACPI_TRACE_LEVEL_ALL;
485 acpi_dbg_layer = ACPI_TRACE_LAYER_ALL;
486
487 if (acpi_gbl_trace_dbg_level) {
488 acpi_dbg_level = acpi_gbl_trace_dbg_level;
489 }
490 if (acpi_gbl_trace_dbg_layer) {
491 acpi_dbg_layer = acpi_gbl_trace_dbg_layer;
492 }
493 }
494 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
495
496exit:
497 if (enabled) {
498 ACPI_TRACE_POINT(ACPI_TRACE_AML_METHOD, TRUE,
499 obj_desc ? obj_desc->method.aml_start : NULL,
500 pathname);
501 }
502 if (pathname) {
503 ACPI_FREE(pathname);
504 }
505}
506
507/*******************************************************************************
508 *
509 * FUNCTION: acpi_ex_stop_trace_method
510 *
511 * PARAMETERS: method_node - Node of the method
512 * obj_desc - The method object
513 * walk_state - current state, NULL if not yet executing
514 * a method.
515 *
516 * RETURN: None
517 *
518 * DESCRIPTION: Stop control method execution trace
519 *
520 ******************************************************************************/
521
522void
523acpi_ex_stop_trace_method(struct acpi_namespace_node *method_node,
524 union acpi_operand_object *obj_desc,
525 struct acpi_walk_state *walk_state)
526{
527 acpi_status status;
528 char *pathname = NULL;
529 u8 enabled;
530
531 ACPI_FUNCTION_NAME(ex_stop_trace_method);
532
533 if (method_node) {
534 pathname = acpi_ns_get_normalized_pathname(method_node, TRUE);
535 }
536
537 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
538 if (ACPI_FAILURE(status)) {
539 goto exit_path;
540 }
541
542 enabled = acpi_ex_interpreter_trace_enabled(NULL);
543
544 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
545
546 if (enabled) {
547 ACPI_TRACE_POINT(ACPI_TRACE_AML_METHOD, FALSE,
548 obj_desc ? obj_desc->method.aml_start : NULL,
549 pathname);
550 }
551
552 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
553 if (ACPI_FAILURE(status)) {
554 goto exit_path;
555 }
556
557 /* Check whether the tracer should be stopped */
558
559 if (acpi_gbl_trace_method_object == obj_desc) {
560
561 /* Disable further tracing if type is one-shot */
562
563 if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT) {
564 acpi_gbl_trace_method_name = NULL;
565 }
566
567 acpi_dbg_level = acpi_gbl_original_dbg_level;
568 acpi_dbg_layer = acpi_gbl_original_dbg_layer;
569 acpi_gbl_trace_method_object = NULL;
570 }
571
572 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
573
574exit_path:
575 if (pathname) {
576 ACPI_FREE(pathname);
577 }
578}
579
580/*******************************************************************************
581 *
582 * FUNCTION: acpi_ex_start_trace_opcode
583 *
584 * PARAMETERS: op - The parser opcode object
585 * walk_state - current state, NULL if not yet executing
586 * a method.
587 *
588 * RETURN: None
589 *
590 * DESCRIPTION: Start opcode execution trace
591 *
592 ******************************************************************************/
593
594void
595acpi_ex_start_trace_opcode(union acpi_parse_object *op,
596 struct acpi_walk_state *walk_state)
597{
598
599 ACPI_FUNCTION_NAME(ex_start_trace_opcode);
600
601 if (acpi_ex_interpreter_trace_enabled(NULL) &&
602 (acpi_gbl_trace_flags & ACPI_TRACE_OPCODE)) {
603 ACPI_TRACE_POINT(ACPI_TRACE_AML_OPCODE, TRUE,
604 op->common.aml, op->common.aml_op_name);
605 }
606}
607
608/*******************************************************************************
609 *
610 * FUNCTION: acpi_ex_stop_trace_opcode
611 *
612 * PARAMETERS: op - The parser opcode object
613 * walk_state - current state, NULL if not yet executing
614 * a method.
615 *
616 * RETURN: None
617 *
618 * DESCRIPTION: Stop opcode execution trace
619 *
620 ******************************************************************************/
621
622void
623acpi_ex_stop_trace_opcode(union acpi_parse_object *op,
624 struct acpi_walk_state *walk_state)
625{
626
627 ACPI_FUNCTION_NAME(ex_stop_trace_opcode);
628
629 if (acpi_ex_interpreter_trace_enabled(NULL) &&
630 (acpi_gbl_trace_flags & ACPI_TRACE_OPCODE)) {
631 ACPI_TRACE_POINT(ACPI_TRACE_AML_OPCODE, FALSE,
632 op->common.aml, op->common.aml_op_name);
633 }
634}
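The four helpers above always come in start/stop pairs: the start variants emit an ACPI_TRACE_POINT Begin record (and, for methods, swap in the requested debug level/layer), while the stop variants emit the End record and restore the saved values. A minimal sketch of the pairing the dispatcher is expected to follow; do_execute_method() is a placeholder for the real interpreter call, not an ACPICA function:

	static acpi_status
	trace_wrapped_execution(struct acpi_namespace_node *method_node,
				union acpi_operand_object *obj_desc,
				struct acpi_walk_state *walk_state)
	{
		acpi_status status;

		/* Emits ACPI_TRACE_AML_METHOD Begin when tracing is enabled */
		acpi_ex_start_trace_method(method_node, obj_desc, walk_state);

		status = do_execute_method(walk_state);	/* placeholder */

		/* Emits ACPI_TRACE_AML_METHOD End, restores dbg level/layer */
		acpi_ex_stop_trace_method(method_node, obj_desc, walk_state);

		return status;
	}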
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 401e7edcd419..d836f888bb16 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -995,9 +995,8 @@ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
995 if (obj_desc->reference.class == ACPI_REFCLASS_NAME) { 995 if (obj_desc->reference.class == ACPI_REFCLASS_NAME) {
996 acpi_os_printf(" %p ", obj_desc->reference.node); 996 acpi_os_printf(" %p ", obj_desc->reference.node);
997 997
998 status = 998 status = acpi_ns_handle_to_pathname(obj_desc->reference.node,
999 acpi_ns_handle_to_pathname(obj_desc->reference.node, 999 &ret_buf, TRUE);
1000 &ret_buf);
1001 if (ACPI_FAILURE(status)) { 1000 if (ACPI_FAILURE(status)) {
1002 acpi_os_printf(" Could not convert name to pathname\n"); 1001 acpi_os_printf(" Could not convert name to pathname\n");
1003 } else { 1002 } else {
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index c7e3b929aa85..1b372ef69308 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -126,7 +126,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
126 if (!source_desc) { 126 if (!source_desc) {
127 ACPI_ERROR((AE_INFO, "No object attached to node [%4.4s] %p", 127 ACPI_ERROR((AE_INFO, "No object attached to node [%4.4s] %p",
128 node->name.ascii, node)); 128 node->name.ascii, node));
129 return_ACPI_STATUS(AE_AML_NO_OPERAND); 129 return_ACPI_STATUS(AE_AML_UNINITIALIZED_NODE);
130 } 130 }
131 131
132 /* 132 /*
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index b6b7f3af29e4..7b109128b035 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -337,8 +337,9 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
337 acpi_object_type * return_type, 337 acpi_object_type * return_type,
338 union acpi_operand_object **return_desc) 338 union acpi_operand_object **return_desc)
339{ 339{
340 union acpi_operand_object *obj_desc = (void *)operand; 340 union acpi_operand_object *obj_desc = ACPI_CAST_PTR(void, operand);
341 struct acpi_namespace_node *node; 341 struct acpi_namespace_node *node =
342 ACPI_CAST_PTR(struct acpi_namespace_node, operand);
342 acpi_object_type type; 343 acpi_object_type type;
343 acpi_status status; 344 acpi_status status;
344 345
@@ -355,9 +356,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
355 case ACPI_DESC_TYPE_NAMED: 356 case ACPI_DESC_TYPE_NAMED:
356 357
357 type = ((struct acpi_namespace_node *)obj_desc)->type; 358 type = ((struct acpi_namespace_node *)obj_desc)->type;
358 obj_desc = 359 obj_desc = acpi_ns_get_attached_object(node);
359 acpi_ns_get_attached_object((struct acpi_namespace_node *)
360 obj_desc);
361 360
362 /* If we had an Alias node, use the attached object for type info */ 361 /* If we had an Alias node, use the attached object for type info */
363 362
@@ -368,6 +367,13 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
368 acpi_namespace_node *) 367 acpi_namespace_node *)
369 obj_desc); 368 obj_desc);
370 } 369 }
370
371 if (!obj_desc) {
372 ACPI_ERROR((AE_INFO,
373 "[%4.4s] Node is unresolved or uninitialized",
374 acpi_ut_get_node_name(node)));
375 return_ACPI_STATUS(AE_AML_UNINITIALIZED_NODE);
376 }
371 break; 377 break;
372 378
373 default: 379 default:
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 52dfd0d050fa..d62a61612b3f 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -160,19 +160,8 @@ acpi_set_firmware_waking_vectors(acpi_physical_address physical_address,
160 160
161 ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vectors); 161 ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vectors);
162 162
163 /* If Hardware Reduced flag is set, there is no FACS */ 163 if (acpi_gbl_FACS) {
164 164 (void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_FACS,
165 if (acpi_gbl_reduced_hardware) {
166 return_ACPI_STATUS (AE_OK);
167 }
168
169 if (acpi_gbl_facs32) {
170 (void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_facs32,
171 physical_address,
172 physical_address64);
173 }
174 if (acpi_gbl_facs64) {
175 (void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_facs64,
176 physical_address, 165 physical_address,
177 physical_address64); 166 physical_address64);
178 } 167 }
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 80670cb32b5a..7eba578d36f3 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -274,6 +274,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
274 acpi_ex_exit_interpreter(); 274 acpi_ex_exit_interpreter();
275 275
276 if (ACPI_FAILURE(status)) { 276 if (ACPI_FAILURE(status)) {
277 info->return_object = NULL;
277 goto cleanup; 278 goto cleanup;
278 } 279 }
279 280
@@ -464,7 +465,8 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
464 465
465 status = acpi_ns_evaluate(info); 466 status = acpi_ns_evaluate(info);
466 467
467 ACPI_DEBUG_PRINT((ACPI_DB_INIT, "Executed module-level code at %p\n", 468 ACPI_DEBUG_PRINT((ACPI_DB_INIT_NAMES,
469 "Executed module-level code at %p\n",
468 method_obj->method.aml_start)); 470 method_obj->method.aml_start));
469 471
470 /* Delete a possible implicit return value (in slack mode) */ 472 /* Delete a possible implicit return value (in slack mode) */
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index bd6cd4a81316..14ab83668207 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -111,7 +111,21 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
111 if (ACPI_SUCCESS(status)) { 111 if (ACPI_SUCCESS(status)) {
112 acpi_tb_set_table_loaded_flag(table_index, TRUE); 112 acpi_tb_set_table_loaded_flag(table_index, TRUE);
113 } else { 113 } else {
114 (void)acpi_tb_release_owner_id(table_index); 114 /*
115 * On error, delete any namespace objects created by this table.
116 * We cannot initialize these objects, so delete them. There are
117 * a couple of especially bad cases:
118 * AE_ALREADY_EXISTS - namespace collision.
119 * AE_NOT_FOUND - the target of a Scope operator does not
120 * exist. This target of Scope must already exist in the
121 * namespace, as per the ACPI specification.
122 */
123 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
124 acpi_ns_delete_namespace_by_owner(acpi_gbl_root_table_list.
125 tables[table_index].owner_id);
126 acpi_tb_release_owner_id(table_index);
127
128 return_ACPI_STATUS(status);
115 } 129 }
116 130
117unlock: 131unlock:
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index d293d9748036..8934b4eddb73 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -51,73 +51,6 @@ ACPI_MODULE_NAME("nsnames")
51 51
52/******************************************************************************* 52/*******************************************************************************
53 * 53 *
54 * FUNCTION: acpi_ns_build_external_path
55 *
56 * PARAMETERS: node - NS node whose pathname is needed
57 * size - Size of the pathname
58 * *name_buffer - Where to return the pathname
59 *
60 * RETURN: Status
61 * Places the pathname into the name_buffer, in external format
62 * (name segments separated by path separators)
63 *
64 * DESCRIPTION: Generate a full pathname
65 *
66 ******************************************************************************/
67acpi_status
68acpi_ns_build_external_path(struct acpi_namespace_node *node,
69 acpi_size size, char *name_buffer)
70{
71 acpi_size index;
72 struct acpi_namespace_node *parent_node;
73
74 ACPI_FUNCTION_ENTRY();
75
76 /* Special case for root */
77
78 index = size - 1;
79 if (index < ACPI_NAME_SIZE) {
80 name_buffer[0] = AML_ROOT_PREFIX;
81 name_buffer[1] = 0;
82 return (AE_OK);
83 }
84
85 /* Store terminator byte, then build name backwards */
86
87 parent_node = node;
88 name_buffer[index] = 0;
89
90 while ((index > ACPI_NAME_SIZE) && (parent_node != acpi_gbl_root_node)) {
91 index -= ACPI_NAME_SIZE;
92
93 /* Put the name into the buffer */
94
95 ACPI_MOVE_32_TO_32((name_buffer + index), &parent_node->name);
96 parent_node = parent_node->parent;
97
98 /* Prefix name with the path separator */
99
100 index--;
101 name_buffer[index] = ACPI_PATH_SEPARATOR;
102 }
103
104 /* Overwrite final separator with the root prefix character */
105
106 name_buffer[index] = AML_ROOT_PREFIX;
107
108 if (index != 0) {
109 ACPI_ERROR((AE_INFO,
110 "Could not construct external pathname; index=%u, size=%u, Path=%s",
111 (u32) index, (u32) size, &name_buffer[size]));
112
113 return (AE_BAD_PARAMETER);
114 }
115
116 return (AE_OK);
117}
118
119/*******************************************************************************
120 *
121 * FUNCTION: acpi_ns_get_external_pathname 54 * FUNCTION: acpi_ns_get_external_pathname
122 * 55 *
123 * PARAMETERS: node - Namespace node whose pathname is needed 56 * PARAMETERS: node - Namespace node whose pathname is needed
@@ -130,37 +63,13 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
130 * for error and debug statements. 63 * for error and debug statements.
131 * 64 *
132 ******************************************************************************/ 65 ******************************************************************************/
133
134char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node) 66char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
135{ 67{
136 acpi_status status;
137 char *name_buffer; 68 char *name_buffer;
138 acpi_size size;
139 69
140 ACPI_FUNCTION_TRACE_PTR(ns_get_external_pathname, node); 70 ACPI_FUNCTION_TRACE_PTR(ns_get_external_pathname, node);
141 71
142 /* Calculate required buffer size based on depth below root */ 72 name_buffer = acpi_ns_get_normalized_pathname(node, FALSE);
143
144 size = acpi_ns_get_pathname_length(node);
145 if (!size) {
146 return_PTR(NULL);
147 }
148
149 /* Allocate a buffer to be returned to caller */
150
151 name_buffer = ACPI_ALLOCATE_ZEROED(size);
152 if (!name_buffer) {
153 ACPI_ERROR((AE_INFO, "Could not allocate %u bytes", (u32)size));
154 return_PTR(NULL);
155 }
156
157 /* Build the path in the allocated buffer */
158
159 status = acpi_ns_build_external_path(node, size, name_buffer);
160 if (ACPI_FAILURE(status)) {
161 ACPI_FREE(name_buffer);
162 return_PTR(NULL);
163 }
164 73
165 return_PTR(name_buffer); 74 return_PTR(name_buffer);
166} 75}
@@ -180,33 +89,12 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
180acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node) 89acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
181{ 90{
182 acpi_size size; 91 acpi_size size;
183 struct acpi_namespace_node *next_node;
184 92
185 ACPI_FUNCTION_ENTRY(); 93 ACPI_FUNCTION_ENTRY();
186 94
187 /* 95 size = acpi_ns_build_normalized_path(node, NULL, 0, FALSE);
188 * Compute length of pathname as 5 * number of name segments.
189 * Go back up the parent tree to the root
190 */
191 size = 0;
192 next_node = node;
193 96
194 while (next_node && (next_node != acpi_gbl_root_node)) { 97 return (size);
195 if (ACPI_GET_DESCRIPTOR_TYPE(next_node) != ACPI_DESC_TYPE_NAMED) {
196 ACPI_ERROR((AE_INFO,
197 "Invalid Namespace Node (%p) while traversing namespace",
198 next_node));
199 return (0);
200 }
201 size += ACPI_PATH_SEGMENT_LENGTH;
202 next_node = next_node->parent;
203 }
204
205 if (!size) {
206 size = 1; /* Root node case */
207 }
208
209 return (size + 1); /* +1 for null string terminator */
210} 98}
211 99
212/******************************************************************************* 100/*******************************************************************************
@@ -216,6 +104,8 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
216 * PARAMETERS: target_handle - Handle of named object whose name is 104 * PARAMETERS: target_handle - Handle of named object whose name is
217 * to be found 105 * to be found
218 * buffer - Where the pathname is returned 106 * buffer - Where the pathname is returned
107 * no_trailing - Remove trailing '_' for each name
108 * segment
219 * 109 *
220 * RETURN: Status, Buffer is filled with pathname if status is AE_OK 110 * RETURN: Status, Buffer is filled with pathname if status is AE_OK
221 * 111 *
@@ -225,7 +115,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
225 115
226acpi_status 116acpi_status
227acpi_ns_handle_to_pathname(acpi_handle target_handle, 117acpi_ns_handle_to_pathname(acpi_handle target_handle,
228 struct acpi_buffer * buffer) 118 struct acpi_buffer * buffer, u8 no_trailing)
229{ 119{
230 acpi_status status; 120 acpi_status status;
231 struct acpi_namespace_node *node; 121 struct acpi_namespace_node *node;
@@ -240,7 +130,8 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
240 130
241 /* Determine size required for the caller buffer */ 131 /* Determine size required for the caller buffer */
242 132
243 required_size = acpi_ns_get_pathname_length(node); 133 required_size =
134 acpi_ns_build_normalized_path(node, NULL, 0, no_trailing);
244 if (!required_size) { 135 if (!required_size) {
245 return_ACPI_STATUS(AE_BAD_PARAMETER); 136 return_ACPI_STATUS(AE_BAD_PARAMETER);
246 } 137 }
@@ -254,8 +145,8 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
254 145
255 /* Build the path in the caller buffer */ 146 /* Build the path in the caller buffer */
256 147
257 status = 148 (void)acpi_ns_build_normalized_path(node, buffer->pointer,
258 acpi_ns_build_external_path(node, required_size, buffer->pointer); 149 required_size, no_trailing);
259 if (ACPI_FAILURE(status)) { 150 if (ACPI_FAILURE(status)) {
260 return_ACPI_STATUS(status); 151 return_ACPI_STATUS(status);
261 } 152 }
@@ -264,3 +155,149 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
264 (char *)buffer->pointer, (u32) required_size)); 155 (char *)buffer->pointer, (u32) required_size));
265 return_ACPI_STATUS(AE_OK); 156 return_ACPI_STATUS(AE_OK);
266} 157}
158
159/*******************************************************************************
160 *
161 * FUNCTION: acpi_ns_build_normalized_path
162 *
163 * PARAMETERS: node - Namespace node
164 * full_path - Where the path name is returned
165 * path_size - Size of returned path name buffer
166 * no_trailing - Remove trailing '_' from each name segment
167 *
168 * RETURN: 1 if the AML path is empty; otherwise, (length of
169 * pathname + 1), which means 'FullPath' contains a trailing
170 * null.
171 *
172 * DESCRIPTION: Build and return a full namespace pathname.
173 * Note that if the size of 'FullPath' isn't large enough to
174 * contain the namespace node's path name, the actual required
175 * buffer length is returned, and it will be greater than
176 * 'PathSize'. Callers can therefore compare the return value
177 * against 'PathSize' to determine the required size of 'FullPath'.
178 *
179 ******************************************************************************/
180
181u32
182acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
183 char *full_path, u32 path_size, u8 no_trailing)
184{
185 u32 length = 0, i;
186 char name[ACPI_NAME_SIZE];
187 u8 do_no_trailing;
188 char c, *left, *right;
189 struct acpi_namespace_node *next_node;
190
191 ACPI_FUNCTION_TRACE_PTR(ns_build_normalized_path, node);
192
193#define ACPI_PATH_PUT8(path, size, byte, length) \
194 do { \
195 if ((length) < (size)) \
196 { \
197 (path)[(length)] = (byte); \
198 } \
199 (length)++; \
200 } while (0)
201
202 /*
203 * Make sure the path_size is correct, so that we don't need to
204 * validate both full_path and path_size.
205 */
206 if (!full_path) {
207 path_size = 0;
208 }
209
210 if (!node) {
211 goto build_trailing_null;
212 }
213
214 next_node = node;
215 while (next_node && next_node != acpi_gbl_root_node) {
216 if (next_node != node) {
217 ACPI_PATH_PUT8(full_path, path_size,
218 AML_DUAL_NAME_PREFIX, length);
219 }
220 ACPI_MOVE_32_TO_32(name, &next_node->name);
221 do_no_trailing = no_trailing;
222 for (i = 0; i < 4; i++) {
223 c = name[4 - i - 1];
224 if (do_no_trailing && c != '_') {
225 do_no_trailing = FALSE;
226 }
227 if (!do_no_trailing) {
228 ACPI_PATH_PUT8(full_path, path_size, c, length);
229 }
230 }
231 next_node = next_node->parent;
232 }
233 ACPI_PATH_PUT8(full_path, path_size, AML_ROOT_PREFIX, length);
234
235 /* Reverse the path string */
236
237 if (length <= path_size) {
238 left = full_path;
239 right = full_path + length - 1;
240 while (left < right) {
241 c = *left;
242 *left++ = *right;
243 *right-- = c;
244 }
245 }
246
247 /* Append the trailing null */
248
249build_trailing_null:
250 ACPI_PATH_PUT8(full_path, path_size, '\0', length);
251
252#undef ACPI_PATH_PUT8
253
254 return_UINT32(length);
255}
256
257/*******************************************************************************
258 *
259 * FUNCTION: acpi_ns_get_normalized_pathname
260 *
261 * PARAMETERS: node - Namespace node whose pathname is needed
262 * no_trailing - Remove trailing '_' from each name segment
263 *
264 * RETURN: Pointer to storage containing the fully qualified name of
265 * the node, in external format (name segments separated by path
266 * separators).
267 *
268 * DESCRIPTION: Used to obtain the full pathname to a namespace node, usually
269 * for error and debug statements. All trailing '_' will be
270 * removed from the full pathname if 'NoTrailing' is specified.
271 *
272 ******************************************************************************/
273
274char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
275 u8 no_trailing)
276{
277 char *name_buffer;
278 acpi_size size;
279
280 ACPI_FUNCTION_TRACE_PTR(ns_get_normalized_pathname, node);
281
282 /* Calculate required buffer size based on depth below root */
283
284 size = acpi_ns_build_normalized_path(node, NULL, 0, no_trailing);
285 if (!size) {
286 return_PTR(NULL);
287 }
288
289 /* Allocate a buffer to be returned to caller */
290
291 name_buffer = ACPI_ALLOCATE_ZEROED(size);
292 if (!name_buffer) {
293 ACPI_ERROR((AE_INFO, "Could not allocate %u bytes", (u32)size));
294 return_PTR(NULL);
295 }
296
297 /* Build the path in the allocated buffer */
298
299 (void)acpi_ns_build_normalized_path(node, name_buffer, size,
300 no_trailing);
301
302 return_PTR(name_buffer);
303}
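acpi_ns_build_normalized_path() is intentionally usable in two passes: with a NULL buffer it only measures, and with a buffer of that size it fills the path in. A hedged sketch of the caller-side idiom, mirroring acpi_ns_get_normalized_pathname() above (error handling trimmed):

	char *path;
	u32 size;

	/* Pass 1: measure only (NULL buffer, zero size) */
	size = acpi_ns_build_normalized_path(node, NULL, 0, TRUE);

	/* Pass 2: allocate and fill; trailing '_' is stripped per segment */
	path = ACPI_ALLOCATE_ZEROED(size);
	if (path) {
		(void)acpi_ns_build_normalized_path(node, path, size, TRUE);
		/* a node at \_SB_.PCI0.SAT0 is emitted as "\_SB.PCI0.SAT0" */
	}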
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 57a4cfe547e4..3736d43b18b9 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -70,7 +70,7 @@ acpi_ns_one_complete_parse(u32 pass_number,
70{ 70{
71 union acpi_parse_object *parse_root; 71 union acpi_parse_object *parse_root;
72 acpi_status status; 72 acpi_status status;
73 u32 aml_length; 73 u32 aml_length;
74 u8 *aml_start; 74 u8 *aml_start;
75 struct acpi_walk_state *walk_state; 75 struct acpi_walk_state *walk_state;
76 struct acpi_table_header *table; 76 struct acpi_table_header *table;
@@ -78,6 +78,20 @@ acpi_ns_one_complete_parse(u32 pass_number,
78 78
79 ACPI_FUNCTION_TRACE(ns_one_complete_parse); 79 ACPI_FUNCTION_TRACE(ns_one_complete_parse);
80 80
81 status = acpi_get_table_by_index(table_index, &table);
82 if (ACPI_FAILURE(status)) {
83 return_ACPI_STATUS(status);
84 }
85
86 /* Table must consist of at least a complete header */
87
88 if (table->length < sizeof(struct acpi_table_header)) {
89 return_ACPI_STATUS(AE_BAD_HEADER);
90 }
91
92 aml_start = (u8 *)table + sizeof(struct acpi_table_header);
93 aml_length = table->length - sizeof(struct acpi_table_header);
94
81 status = acpi_tb_get_owner_id(table_index, &owner_id); 95 status = acpi_tb_get_owner_id(table_index, &owner_id);
82 if (ACPI_FAILURE(status)) { 96 if (ACPI_FAILURE(status)) {
83 return_ACPI_STATUS(status); 97 return_ACPI_STATUS(status);
@@ -85,7 +99,7 @@ acpi_ns_one_complete_parse(u32 pass_number,
85 99
86 /* Create and init a Root Node */ 100 /* Create and init a Root Node */
87 101
88 parse_root = acpi_ps_create_scope_op(); 102 parse_root = acpi_ps_create_scope_op(aml_start);
89 if (!parse_root) { 103 if (!parse_root) {
90 return_ACPI_STATUS(AE_NO_MEMORY); 104 return_ACPI_STATUS(AE_NO_MEMORY);
91 } 105 }
@@ -98,23 +112,12 @@ acpi_ns_one_complete_parse(u32 pass_number,
98 return_ACPI_STATUS(AE_NO_MEMORY); 112 return_ACPI_STATUS(AE_NO_MEMORY);
99 } 113 }
100 114
101 status = acpi_get_table_by_index(table_index, &table); 115 status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
116 aml_start, aml_length, NULL,
117 (u8)pass_number);
102 if (ACPI_FAILURE(status)) { 118 if (ACPI_FAILURE(status)) {
103 acpi_ds_delete_walk_state(walk_state); 119 acpi_ds_delete_walk_state(walk_state);
104 acpi_ps_free_op(parse_root); 120 goto cleanup;
105 return_ACPI_STATUS(status);
106 }
107
108 /* Table must consist of at least a complete header */
109
110 if (table->length < sizeof(struct acpi_table_header)) {
111 status = AE_BAD_HEADER;
112 } else {
113 aml_start = (u8 *) table + sizeof(struct acpi_table_header);
114 aml_length = table->length - sizeof(struct acpi_table_header);
115 status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
116 aml_start, aml_length, NULL,
117 (u8) pass_number);
118 } 121 }
119 122
120 /* Found OSDT table, enable the namespace override feature */ 123 /* Found OSDT table, enable the namespace override feature */
@@ -124,11 +127,6 @@ acpi_ns_one_complete_parse(u32 pass_number,
124 walk_state->namespace_override = TRUE; 127 walk_state->namespace_override = TRUE;
125 } 128 }
126 129
127 if (ACPI_FAILURE(status)) {
128 acpi_ds_delete_walk_state(walk_state);
129 goto cleanup;
130 }
131
132 /* start_node is the default location to load the table */ 130 /* start_node is the default location to load the table */
133 131
134 if (start_node && start_node != acpi_gbl_root_node) { 132 if (start_node && start_node != acpi_gbl_root_node) {
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 8d8104b8bd28..de325ae04ce1 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -83,7 +83,7 @@ acpi_ns_print_node_pathname(struct acpi_namespace_node *node,
83 83
84 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; 84 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
85 85
86 status = acpi_ns_handle_to_pathname(node, &buffer); 86 status = acpi_ns_handle_to_pathname(node, &buffer, TRUE);
87 if (ACPI_SUCCESS(status)) { 87 if (ACPI_SUCCESS(status)) {
88 if (message) { 88 if (message) {
89 acpi_os_printf("%s ", message); 89 acpi_os_printf("%s ", message);
@@ -596,6 +596,23 @@ void acpi_ns_terminate(void)
596 596
597 ACPI_FUNCTION_TRACE(ns_terminate); 597 ACPI_FUNCTION_TRACE(ns_terminate);
598 598
599#ifdef ACPI_EXEC_APP
600 {
601 union acpi_operand_object *prev;
602 union acpi_operand_object *next;
603
604 /* Delete any module-level code blocks */
605
606 next = acpi_gbl_module_code_list;
607 while (next) {
608 prev = next;
609 next = next->method.mutex;
610 prev->method.mutex = NULL; /* Clear the Mutex (cheated) field */
611 acpi_ut_remove_reference(prev);
612 }
613 }
614#endif
615
599 /* 616 /*
600 * Free the entire namespace -- all nodes and all objects 617 * Free the entire namespace -- all nodes and all objects
601 * attached to the nodes 618 * attached to the nodes
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 9ff643b9553f..4b4d2f43d406 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -172,11 +172,15 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
172 return (status); 172 return (status);
173 } 173 }
174 174
175 if (name_type == ACPI_FULL_PATHNAME) { 175 if (name_type == ACPI_FULL_PATHNAME ||
176 name_type == ACPI_FULL_PATHNAME_NO_TRAILING) {
176 177
177 /* Get the full pathname (From the namespace root) */ 178 /* Get the full pathname (From the namespace root) */
178 179
179 status = acpi_ns_handle_to_pathname(handle, buffer); 180 status = acpi_ns_handle_to_pathname(handle, buffer,
181 name_type ==
182 ACPI_FULL_PATHNAME ? FALSE :
183 TRUE);
180 return (status); 184 return (status);
181 } 185 }
182 186
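With the new ACPI_FULL_PATHNAME_NO_TRAILING name type, external callers can ask acpi_get_name() for the normalized form directly. A hedged usage sketch for a kernel caller; 'handle' is assumed to be a valid acpi_handle and the buffer handling follows the usual ACPI_ALLOCATE_BUFFER pattern:

	static void print_device_path(acpi_handle handle)
	{
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };

		/* e.g. "\_SB_.PCI0" is returned as "\_SB.PCI0" */
		if (ACPI_SUCCESS(acpi_get_name(handle,
					       ACPI_FULL_PATHNAME_NO_TRAILING,
					       &buffer))) {
			pr_info("ACPI path: %s\n", (char *)buffer.pointer);
			kfree(buffer.pointer);
		}
	}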
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 6d038770577b..29d8b7b01dca 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -287,7 +287,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
287 "Control Method - %p Desc %p Path=%p\n", node, 287 "Control Method - %p Desc %p Path=%p\n", node,
288 method_desc, path)); 288 method_desc, path));
289 289
290 name_op = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP); 290 name_op = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP, start);
291 if (!name_op) { 291 if (!name_op) {
292 return_ACPI_STATUS(AE_NO_MEMORY); 292 return_ACPI_STATUS(AE_NO_MEMORY);
293 } 293 }
@@ -484,7 +484,7 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
484static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state 484static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
485 *parser_state) 485 *parser_state)
486{ 486{
487 u32 aml_offset; 487 u8 *aml;
488 union acpi_parse_object *field; 488 union acpi_parse_object *field;
489 union acpi_parse_object *arg = NULL; 489 union acpi_parse_object *arg = NULL;
490 u16 opcode; 490 u16 opcode;
@@ -498,8 +498,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
498 498
499 ACPI_FUNCTION_TRACE(ps_get_next_field); 499 ACPI_FUNCTION_TRACE(ps_get_next_field);
500 500
501 aml_offset = 501 aml = parser_state->aml;
502 (u32)ACPI_PTR_DIFF(parser_state->aml, parser_state->aml_start);
503 502
504 /* Determine field type */ 503 /* Determine field type */
505 504
@@ -536,13 +535,11 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
536 535
537 /* Allocate a new field op */ 536 /* Allocate a new field op */
538 537
539 field = acpi_ps_alloc_op(opcode); 538 field = acpi_ps_alloc_op(opcode, aml);
540 if (!field) { 539 if (!field) {
541 return_PTR(NULL); 540 return_PTR(NULL);
542 } 541 }
543 542
544 field->common.aml_offset = aml_offset;
545
546 /* Decode the field type */ 543 /* Decode the field type */
547 544
548 switch (opcode) { 545 switch (opcode) {
@@ -604,6 +601,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
604 * Argument for Connection operator can be either a Buffer 601 * Argument for Connection operator can be either a Buffer
605 * (resource descriptor), or a name_string. 602 * (resource descriptor), or a name_string.
606 */ 603 */
604 aml = parser_state->aml;
607 if (ACPI_GET8(parser_state->aml) == AML_BUFFER_OP) { 605 if (ACPI_GET8(parser_state->aml) == AML_BUFFER_OP) {
608 parser_state->aml++; 606 parser_state->aml++;
609 607
@@ -616,7 +614,8 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
616 614
617 /* Non-empty list */ 615 /* Non-empty list */
618 616
619 arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP); 617 arg =
618 acpi_ps_alloc_op(AML_INT_BYTELIST_OP, aml);
620 if (!arg) { 619 if (!arg) {
621 acpi_ps_free_op(field); 620 acpi_ps_free_op(field);
622 return_PTR(NULL); 621 return_PTR(NULL);
@@ -665,7 +664,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
665 664
666 parser_state->aml = pkg_end; 665 parser_state->aml = pkg_end;
667 } else { 666 } else {
668 arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP); 667 arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP, aml);
669 if (!arg) { 668 if (!arg) {
670 acpi_ps_free_op(field); 669 acpi_ps_free_op(field);
671 return_PTR(NULL); 670 return_PTR(NULL);
@@ -730,7 +729,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
730 729
731 /* Constants, strings, and namestrings are all the same size */ 730 /* Constants, strings, and namestrings are all the same size */
732 731
733 arg = acpi_ps_alloc_op(AML_BYTE_OP); 732 arg = acpi_ps_alloc_op(AML_BYTE_OP, parser_state->aml);
734 if (!arg) { 733 if (!arg) {
735 return_ACPI_STATUS(AE_NO_MEMORY); 734 return_ACPI_STATUS(AE_NO_MEMORY);
736 } 735 }
@@ -777,7 +776,8 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
777 776
778 /* Non-empty list */ 777 /* Non-empty list */
779 778
780 arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP); 779 arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP,
780 parser_state->aml);
781 if (!arg) { 781 if (!arg) {
782 return_ACPI_STATUS(AE_NO_MEMORY); 782 return_ACPI_STATUS(AE_NO_MEMORY);
783 } 783 }
@@ -807,7 +807,9 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
807 807
808 /* null_name or name_string */ 808 /* null_name or name_string */
809 809
810 arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP); 810 arg =
811 acpi_ps_alloc_op(AML_INT_NAMEPATH_OP,
812 parser_state->aml);
811 if (!arg) { 813 if (!arg) {
812 return_ACPI_STATUS(AE_NO_MEMORY); 814 return_ACPI_STATUS(AE_NO_MEMORY);
813 } 815 }
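Every parse op now records the address of the AML byte it was created from (op->common.aml), so byte offsets no longer need to be cached per op. A hedged sketch of how a table-relative offset can be recovered on demand; 'op' and 'walk_state' are assumed to be in scope as in the parser code above:

	u32 aml_offset;

	/* Offset within the AML stream, derived from the stored pointer */
	aml_offset = (u32)ACPI_PTR_DIFF(op->common.aml,
					walk_state->parser_state.aml_start);

	/* Offset within the table file also covers the ACPI table header */
	aml_offset += sizeof(struct acpi_table_header);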
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 90437227d790..03ac8c9a67ab 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -51,6 +51,7 @@
51 51
52#include <acpi/acpi.h> 52#include <acpi/acpi.h>
53#include "accommon.h" 53#include "accommon.h"
54#include "acinterp.h"
54#include "acparser.h" 55#include "acparser.h"
55#include "acdispat.h" 56#include "acdispat.h"
56#include "amlcode.h" 57#include "amlcode.h"
@@ -125,10 +126,7 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
125 */ 126 */
126 while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) 127 while (GET_CURRENT_ARG_TYPE(walk_state->arg_types)
127 && !walk_state->arg_count) { 128 && !walk_state->arg_count) {
128 walk_state->aml_offset = 129 walk_state->aml = walk_state->parser_state.aml;
129 (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml,
130 walk_state->parser_state.
131 aml_start);
132 130
133 status = 131 status =
134 acpi_ps_get_next_arg(walk_state, 132 acpi_ps_get_next_arg(walk_state,
@@ -140,7 +138,6 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
140 } 138 }
141 139
142 if (arg) { 140 if (arg) {
143 arg->common.aml_offset = walk_state->aml_offset;
144 acpi_ps_append_arg(op, arg); 141 acpi_ps_append_arg(op, arg);
145 } 142 }
146 143
@@ -324,6 +321,8 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op,
324 union acpi_operand_object *method_obj; 321 union acpi_operand_object *method_obj;
325 struct acpi_namespace_node *parent_node; 322 struct acpi_namespace_node *parent_node;
326 323
324 ACPI_FUNCTION_TRACE(ps_link_module_code);
325
327 /* Get the tail of the list */ 326 /* Get the tail of the list */
328 327
329 prev = next = acpi_gbl_module_code_list; 328 prev = next = acpi_gbl_module_code_list;
@@ -343,9 +342,13 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op,
343 342
344 method_obj = acpi_ut_create_internal_object(ACPI_TYPE_METHOD); 343 method_obj = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
345 if (!method_obj) { 344 if (!method_obj) {
346 return; 345 return_VOID;
347 } 346 }
348 347
348 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
349 "Create/Link new code block: %p\n",
350 method_obj));
351
349 if (parent_op->common.node) { 352 if (parent_op->common.node) {
350 parent_node = parent_op->common.node; 353 parent_node = parent_op->common.node;
351 } else { 354 } else {
@@ -370,8 +373,14 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op,
370 prev->method.mutex = method_obj; 373 prev->method.mutex = method_obj;
371 } 374 }
372 } else { 375 } else {
376 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
377 "Appending to existing code block: %p\n",
378 prev));
379
373 prev->method.aml_length += aml_length; 380 prev->method.aml_length += aml_length;
374 } 381 }
382
383 return_VOID;
375} 384}
376 385
377/******************************************************************************* 386/*******************************************************************************
@@ -494,16 +503,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
494 continue; 503 continue;
495 } 504 }
496 505
497 op->common.aml_offset = walk_state->aml_offset; 506 acpi_ex_start_trace_opcode(op, walk_state);
498
499 if (walk_state->op_info) {
500 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
501 "Opcode %4.4X [%s] Op %p Aml %p AmlOffset %5.5X\n",
502 (u32) op->common.aml_opcode,
503 walk_state->op_info->name, op,
504 parser_state->aml,
505 op->common.aml_offset));
506 }
507 } 507 }
508 508
509 /* 509 /*
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 2f5ddd806c58..e54bc2aa7a88 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -66,12 +66,11 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state);
66 66
67static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state) 67static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
68{ 68{
69 u32 aml_offset;
69 70
70 ACPI_FUNCTION_TRACE_PTR(ps_get_aml_opcode, walk_state); 71 ACPI_FUNCTION_TRACE_PTR(ps_get_aml_opcode, walk_state);
71 72
72 walk_state->aml_offset = 73 walk_state->aml = walk_state->parser_state.aml;
73 (u32)ACPI_PTR_DIFF(walk_state->parser_state.aml,
74 walk_state->parser_state.aml_start);
75 walk_state->opcode = acpi_ps_peek_opcode(&(walk_state->parser_state)); 74 walk_state->opcode = acpi_ps_peek_opcode(&(walk_state->parser_state));
76 75
77 /* 76 /*
@@ -98,10 +97,14 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
98 /* The opcode is unrecognized. Complain and skip unknown opcodes */ 97 /* The opcode is unrecognized. Complain and skip unknown opcodes */
99 98
100 if (walk_state->pass_number == 2) { 99 if (walk_state->pass_number == 2) {
100 aml_offset = (u32)ACPI_PTR_DIFF(walk_state->aml,
101 walk_state->
102 parser_state.aml_start);
103
101 ACPI_ERROR((AE_INFO, 104 ACPI_ERROR((AE_INFO,
102 "Unknown opcode 0x%.2X at table offset 0x%.4X, ignoring", 105 "Unknown opcode 0x%.2X at table offset 0x%.4X, ignoring",
103 walk_state->opcode, 106 walk_state->opcode,
104 (u32)(walk_state->aml_offset + 107 (u32)(aml_offset +
105 sizeof(struct acpi_table_header)))); 108 sizeof(struct acpi_table_header))));
106 109
107 ACPI_DUMP_BUFFER((walk_state->parser_state.aml - 16), 110 ACPI_DUMP_BUFFER((walk_state->parser_state.aml - 16),
@@ -115,14 +118,14 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
115 acpi_os_printf 118 acpi_os_printf
116 ("/*\nError: Unknown opcode 0x%.2X at table offset 0x%.4X, context:\n", 119 ("/*\nError: Unknown opcode 0x%.2X at table offset 0x%.4X, context:\n",
117 walk_state->opcode, 120 walk_state->opcode,
118 (u32)(walk_state->aml_offset + 121 (u32)(aml_offset +
119 sizeof(struct acpi_table_header))); 122 sizeof(struct acpi_table_header)));
120 123
121 /* Dump the context surrounding the invalid opcode */ 124 /* Dump the context surrounding the invalid opcode */
122 125
123 acpi_ut_dump_buffer(((u8 *)walk_state->parser_state. 126 acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
124 aml - 16), 48, DB_BYTE_DISPLAY, 127 aml - 16), 48, DB_BYTE_DISPLAY,
125 (walk_state->aml_offset + 128 (aml_offset +
126 sizeof(struct acpi_table_header) - 129 sizeof(struct acpi_table_header) -
127 16)); 130 16));
128 acpi_os_printf(" */\n"); 131 acpi_os_printf(" */\n");
@@ -294,7 +297,7 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
294 /* Create Op structure and append to parent's argument list */ 297 /* Create Op structure and append to parent's argument list */
295 298
296 walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode); 299 walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
297 op = acpi_ps_alloc_op(walk_state->opcode); 300 op = acpi_ps_alloc_op(walk_state->opcode, aml_op_start);
298 if (!op) { 301 if (!op) {
299 return_ACPI_STATUS(AE_NO_MEMORY); 302 return_ACPI_STATUS(AE_NO_MEMORY);
300 } 303 }
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index a555f7f7b9a2..98001d7f6f80 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -147,6 +147,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
147 return_ACPI_STATUS(AE_OK); /* OK for now */ 147 return_ACPI_STATUS(AE_OK); /* OK for now */
148 } 148 }
149 149
150 acpi_ex_stop_trace_opcode(op, walk_state);
151
150 /* Delete this op and the subtree below it if asked to */ 152 /* Delete this op and the subtree below it if asked to */
151 153
152 if (((walk_state->parse_flags & ACPI_PARSE_TREE_MASK) != 154 if (((walk_state->parse_flags & ACPI_PARSE_TREE_MASK) !=
@@ -185,7 +187,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
185 * op must be replaced by a placeholder return op 187 * op must be replaced by a placeholder return op
186 */ 188 */
187 replacement_op = 189 replacement_op =
188 acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP); 190 acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP,
191 op->common.aml);
189 if (!replacement_op) { 192 if (!replacement_op) {
190 status = AE_NO_MEMORY; 193 status = AE_NO_MEMORY;
191 } 194 }
@@ -209,7 +212,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
209 || (op->common.parent->common.aml_opcode == 212 || (op->common.parent->common.aml_opcode ==
210 AML_VAR_PACKAGE_OP)) { 213 AML_VAR_PACKAGE_OP)) {
211 replacement_op = 214 replacement_op =
212 acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP); 215 acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP,
216 op->common.aml);
213 if (!replacement_op) { 217 if (!replacement_op) {
214 status = AE_NO_MEMORY; 218 status = AE_NO_MEMORY;
215 } 219 }
@@ -224,7 +228,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
224 AML_VAR_PACKAGE_OP)) { 228 AML_VAR_PACKAGE_OP)) {
225 replacement_op = 229 replacement_op =
226 acpi_ps_alloc_op(op->common. 230 acpi_ps_alloc_op(op->common.
227 aml_opcode); 231 aml_opcode,
232 op->common.aml);
228 if (!replacement_op) { 233 if (!replacement_op) {
229 status = AE_NO_MEMORY; 234 status = AE_NO_MEMORY;
230 } else { 235 } else {
@@ -240,7 +245,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
240 default: 245 default:
241 246
242 replacement_op = 247 replacement_op =
243 acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP); 248 acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP,
249 op->common.aml);
244 if (!replacement_op) { 250 if (!replacement_op) {
245 status = AE_NO_MEMORY; 251 status = AE_NO_MEMORY;
246 } 252 }
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 32440912023a..183cc1efbc51 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -60,11 +60,11 @@ ACPI_MODULE_NAME("psutils")
60 * DESCRIPTION: Create a Scope and associated namepath op with the root name 60 * DESCRIPTION: Create a Scope and associated namepath op with the root name
61 * 61 *
62 ******************************************************************************/ 62 ******************************************************************************/
63union acpi_parse_object *acpi_ps_create_scope_op(void) 63union acpi_parse_object *acpi_ps_create_scope_op(u8 *aml)
64{ 64{
65 union acpi_parse_object *scope_op; 65 union acpi_parse_object *scope_op;
66 66
67 scope_op = acpi_ps_alloc_op(AML_SCOPE_OP); 67 scope_op = acpi_ps_alloc_op(AML_SCOPE_OP, aml);
68 if (!scope_op) { 68 if (!scope_op) {
69 return (NULL); 69 return (NULL);
70 } 70 }
@@ -103,6 +103,7 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
103 * FUNCTION: acpi_ps_alloc_op 103 * FUNCTION: acpi_ps_alloc_op
104 * 104 *
105 * PARAMETERS: opcode - Opcode that will be stored in the new Op 105 * PARAMETERS: opcode - Opcode that will be stored in the new Op
106 * aml - Address of the opcode
106 * 107 *
107 * RETURN: Pointer to the new Op, null on failure 108 * RETURN: Pointer to the new Op, null on failure
108 * 109 *
@@ -112,7 +113,7 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
112 * 113 *
113 ******************************************************************************/ 114 ******************************************************************************/
114 115
115union acpi_parse_object *acpi_ps_alloc_op(u16 opcode) 116union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml)
116{ 117{
117 union acpi_parse_object *op; 118 union acpi_parse_object *op;
118 const struct acpi_opcode_info *op_info; 119 const struct acpi_opcode_info *op_info;
@@ -149,6 +150,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
149 150
150 if (op) { 151 if (op) {
151 acpi_ps_init_op(op, opcode); 152 acpi_ps_init_op(op, opcode);
153 op->common.aml = aml;
152 op->common.flags = flags; 154 op->common.flags = flags;
153 } 155 }
154 156
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 841a5ea06094..4254805dd319 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -47,15 +47,12 @@
47#include "acdispat.h" 47#include "acdispat.h"
48#include "acinterp.h" 48#include "acinterp.h"
49#include "actables.h" 49#include "actables.h"
50#include "acnamesp.h"
50 51
51#define _COMPONENT ACPI_PARSER 52#define _COMPONENT ACPI_PARSER
52ACPI_MODULE_NAME("psxface") 53ACPI_MODULE_NAME("psxface")
53 54
54/* Local Prototypes */ 55/* Local Prototypes */
55static void acpi_ps_start_trace(struct acpi_evaluate_info *info);
56
57static void acpi_ps_stop_trace(struct acpi_evaluate_info *info);
58
59static void 56static void
60acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action); 57acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
61 58
@@ -76,7 +73,7 @@ acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
76 ******************************************************************************/ 73 ******************************************************************************/
77 74
78acpi_status 75acpi_status
79acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags) 76acpi_debug_trace(const char *name, u32 debug_level, u32 debug_layer, u32 flags)
80{ 77{
81 acpi_status status; 78 acpi_status status;
82 79
@@ -85,108 +82,14 @@ acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags)
85 return (status); 82 return (status);
86 } 83 }
87 84
88 /* TBDs: Validate name, allow full path or just nameseg */ 85 acpi_gbl_trace_method_name = name;
89
90 acpi_gbl_trace_method_name = *ACPI_CAST_PTR(u32, name);
91 acpi_gbl_trace_flags = flags; 86 acpi_gbl_trace_flags = flags;
92 87 acpi_gbl_trace_dbg_level = debug_level;
93 if (debug_level) { 88 acpi_gbl_trace_dbg_layer = debug_layer;
94 acpi_gbl_trace_dbg_level = debug_level; 89 status = AE_OK;
95 }
96 if (debug_layer) {
97 acpi_gbl_trace_dbg_layer = debug_layer;
98 }
99 90
100 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 91 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
101 return (AE_OK); 92 return (status);
102}
103
104/*******************************************************************************
105 *
106 * FUNCTION: acpi_ps_start_trace
107 *
108 * PARAMETERS: info - Method info struct
109 *
110 * RETURN: None
111 *
112 * DESCRIPTION: Start control method execution trace
113 *
114 ******************************************************************************/
115
116static void acpi_ps_start_trace(struct acpi_evaluate_info *info)
117{
118 acpi_status status;
119
120 ACPI_FUNCTION_ENTRY();
121
122 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
123 if (ACPI_FAILURE(status)) {
124 return;
125 }
126
127 if ((!acpi_gbl_trace_method_name) ||
128 (acpi_gbl_trace_method_name != info->node->name.integer)) {
129 goto exit;
130 }
131
132 acpi_gbl_original_dbg_level = acpi_dbg_level;
133 acpi_gbl_original_dbg_layer = acpi_dbg_layer;
134
135 acpi_dbg_level = 0x00FFFFFF;
136 acpi_dbg_layer = ACPI_UINT32_MAX;
137
138 if (acpi_gbl_trace_dbg_level) {
139 acpi_dbg_level = acpi_gbl_trace_dbg_level;
140 }
141 if (acpi_gbl_trace_dbg_layer) {
142 acpi_dbg_layer = acpi_gbl_trace_dbg_layer;
143 }
144
145exit:
146 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
147}
148
149/*******************************************************************************
150 *
151 * FUNCTION: acpi_ps_stop_trace
152 *
153 * PARAMETERS: info - Method info struct
154 *
155 * RETURN: None
156 *
157 * DESCRIPTION: Stop control method execution trace
158 *
159 ******************************************************************************/
160
161static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
162{
163 acpi_status status;
164
165 ACPI_FUNCTION_ENTRY();
166
167 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
168 if (ACPI_FAILURE(status)) {
169 return;
170 }
171
172 if ((!acpi_gbl_trace_method_name) ||
173 (acpi_gbl_trace_method_name != info->node->name.integer)) {
174 goto exit;
175 }
176
177 /* Disable further tracing if type is one-shot */
178
179 if (acpi_gbl_trace_flags & 1) {
180 acpi_gbl_trace_method_name = 0;
181 acpi_gbl_trace_dbg_level = 0;
182 acpi_gbl_trace_dbg_layer = 0;
183 }
184
185 acpi_dbg_level = acpi_gbl_original_dbg_level;
186 acpi_dbg_layer = acpi_gbl_original_dbg_layer;
187
188exit:
189 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
190} 93}
191 94
192/******************************************************************************* 95/*******************************************************************************
@@ -212,7 +115,7 @@ exit:
212 * 115 *
213 ******************************************************************************/ 116 ******************************************************************************/
214 117
215acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info) 118acpi_status acpi_ps_execute_method(struct acpi_evaluate_info * info)
216{ 119{
217 acpi_status status; 120 acpi_status status;
218 union acpi_parse_object *op; 121 union acpi_parse_object *op;
@@ -243,10 +146,6 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
243 */ 146 */
244 acpi_ps_update_parameter_list(info, REF_INCREMENT); 147 acpi_ps_update_parameter_list(info, REF_INCREMENT);
245 148
246 /* Begin tracing if requested */
247
248 acpi_ps_start_trace(info);
249
250 /* 149 /*
251 * Execute the method. Performs parse simultaneously 150 * Execute the method. Performs parse simultaneously
252 */ 151 */
@@ -256,7 +155,7 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
256 155
257 /* Create and init a Root Node */ 156 /* Create and init a Root Node */
258 157
259 op = acpi_ps_create_scope_op(); 158 op = acpi_ps_create_scope_op(info->obj_desc->method.aml_start);
260 if (!op) { 159 if (!op) {
261 status = AE_NO_MEMORY; 160 status = AE_NO_MEMORY;
262 goto cleanup; 161 goto cleanup;
@@ -326,10 +225,6 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
326cleanup: 225cleanup:
327 acpi_ps_delete_parse_tree(op); 226 acpi_ps_delete_parse_tree(op);
328 227
329 /* End optional tracing */
330
331 acpi_ps_stop_trace(info);
332
333 /* Take away the extra reference that we gave the parameters above */ 228 /* Take away the extra reference that we gave the parameters above */
334 229
335 acpi_ps_update_parameter_list(info, REF_DECREMENT); 230 acpi_ps_update_parameter_list(info, REF_DECREMENT);
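After this rework, acpi_debug_trace() takes the full method pathname as a string rather than a packed 4-character name segment, and the start/stop work itself moves to the acpi_ex_*_trace_method() helpers shown earlier. A hedged sketch of how a debugger front end might arm a one-shot trace, using only constants that appear elsewhere in this patch:

	acpi_status status;

	/* Trace the first execution of \_SB.PCI0._INI, then disarm */
	status = acpi_debug_trace("\\_SB.PCI0._INI",
				  ACPI_TRACE_LEVEL_ALL,	/* debug_level */
				  ACPI_TRACE_LAYER_ALL,	/* debug_layer */
				  ACPI_TRACE_ONESHOT);	/* flags */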
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 3fa829e96c2a..a5344428f3ae 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -348,7 +348,8 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
348 status = 348 status =
349 acpi_ns_handle_to_pathname((acpi_handle) 349 acpi_ns_handle_to_pathname((acpi_handle)
350 node, 350 node,
351 &path_buffer); 351 &path_buffer,
352 FALSE);
352 353
353 /* +1 to include null terminator */ 354 /* +1 to include null terminator */
354 355
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 6253001b6375..455a0700db39 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -345,7 +345,7 @@ void acpi_tb_parse_fadt(u32 table_index)
345 /* Obtain the DSDT and FACS tables via their addresses within the FADT */ 345 /* Obtain the DSDT and FACS tables via their addresses within the FADT */
346 346
347 acpi_tb_install_fixed_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt, 347 acpi_tb_install_fixed_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
348 ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT); 348 ACPI_SIG_DSDT, &acpi_gbl_dsdt_index);
349 349
350 /* If Hardware Reduced flag is set, there is no FACS */ 350 /* If Hardware Reduced flag is set, there is no FACS */
351 351
@@ -354,13 +354,13 @@ void acpi_tb_parse_fadt(u32 table_index)
354 acpi_tb_install_fixed_table((acpi_physical_address) 354 acpi_tb_install_fixed_table((acpi_physical_address)
355 acpi_gbl_FADT.facs, 355 acpi_gbl_FADT.facs,
356 ACPI_SIG_FACS, 356 ACPI_SIG_FACS,
357 ACPI_TABLE_INDEX_FACS); 357 &acpi_gbl_facs_index);
358 } 358 }
359 if (acpi_gbl_FADT.Xfacs) { 359 if (acpi_gbl_FADT.Xfacs) {
360 acpi_tb_install_fixed_table((acpi_physical_address) 360 acpi_tb_install_fixed_table((acpi_physical_address)
361 acpi_gbl_FADT.Xfacs, 361 acpi_gbl_FADT.Xfacs,
362 ACPI_SIG_FACS, 362 ACPI_SIG_FACS,
363 ACPI_TABLE_INDEX_X_FACS); 363 &acpi_gbl_xfacs_index);
364 } 364 }
365 } 365 }
366} 366}
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 119c84ad9833..405529d49a1a 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -68,12 +68,25 @@ acpi_status
68acpi_tb_find_table(char *signature, 68acpi_tb_find_table(char *signature,
69 char *oem_id, char *oem_table_id, u32 *table_index) 69 char *oem_id, char *oem_table_id, u32 *table_index)
70{ 70{
71 u32 i;
72 acpi_status status; 71 acpi_status status;
73 struct acpi_table_header header; 72 struct acpi_table_header header;
73 u32 i;
74 74
75 ACPI_FUNCTION_TRACE(tb_find_table); 75 ACPI_FUNCTION_TRACE(tb_find_table);
76 76
77 /* Validate the input table signature */
78
79 if (!acpi_is_valid_signature(signature)) {
80 return_ACPI_STATUS(AE_BAD_SIGNATURE);
81 }
82
83 /* Don't allow the OEM strings to be too long */
84
85 if ((strlen(oem_id) > ACPI_OEM_ID_SIZE) ||
86 (strlen(oem_table_id) > ACPI_OEM_TABLE_ID_SIZE)) {
87 return_ACPI_STATUS(AE_AML_STRING_LIMIT);
88 }
89
77 /* Normalize the input strings */ 90 /* Normalize the input strings */
78 91
79 memset(&header, 0, sizeof(struct acpi_table_header)); 92 memset(&header, 0, sizeof(struct acpi_table_header));
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 15ea98e0068d..6319b42420c6 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -100,9 +100,9 @@ acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
100 * 100 *
101 * FUNCTION: acpi_tb_install_table_with_override 101 * FUNCTION: acpi_tb_install_table_with_override
102 * 102 *
103 * PARAMETERS: table_index - Index into root table array 103 * PARAMETERS: new_table_desc - New table descriptor to install
104 * new_table_desc - New table descriptor to install
105 * override - Whether override should be performed 104 * override - Whether override should be performed
105 * table_index - Where the table index is returned
106 * 106 *
107 * RETURN: None 107 * RETURN: None
108 * 108 *
@@ -114,12 +114,14 @@ acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
114 ******************************************************************************/ 114 ******************************************************************************/
115 115
116void 116void
117acpi_tb_install_table_with_override(u32 table_index, 117acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
118 struct acpi_table_desc *new_table_desc, 118 u8 override, u32 *table_index)
119 u8 override)
120{ 119{
120 u32 i;
121 acpi_status status;
121 122
122 if (table_index >= acpi_gbl_root_table_list.current_table_count) { 123 status = acpi_tb_get_next_table_descriptor(&i, NULL);
124 if (ACPI_FAILURE(status)) {
123 return; 125 return;
124 } 126 }
125 127
@@ -134,8 +136,7 @@ acpi_tb_install_table_with_override(u32 table_index,
134 acpi_tb_override_table(new_table_desc); 136 acpi_tb_override_table(new_table_desc);
135 } 137 }
136 138
137 acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list. 139 acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list.tables[i],
138 tables[table_index],
139 new_table_desc->address, 140 new_table_desc->address,
140 new_table_desc->flags, 141 new_table_desc->flags,
141 new_table_desc->pointer); 142 new_table_desc->pointer);
@@ -143,9 +144,13 @@ acpi_tb_install_table_with_override(u32 table_index,
143 acpi_tb_print_table_header(new_table_desc->address, 144 acpi_tb_print_table_header(new_table_desc->address,
144 new_table_desc->pointer); 145 new_table_desc->pointer);
145 146
147 /* This synchronizes acpi_gbl_dsdt_index */
148
149 *table_index = i;
150
146 /* Set the global integer width (based upon revision of the DSDT) */ 151 /* Set the global integer width (based upon revision of the DSDT) */
147 152
148 if (table_index == ACPI_TABLE_INDEX_DSDT) { 153 if (i == acpi_gbl_dsdt_index) {
149 acpi_ut_set_integer_width(new_table_desc->pointer->revision); 154 acpi_ut_set_integer_width(new_table_desc->pointer->revision);
150 } 155 }
151} 156}
@@ -157,7 +162,7 @@ acpi_tb_install_table_with_override(u32 table_index,
157 * PARAMETERS: address - Physical address of DSDT or FACS 162 * PARAMETERS: address - Physical address of DSDT or FACS
158 * signature - Table signature, NULL if no need to 163 * signature - Table signature, NULL if no need to
159 * match 164 * match
160 * table_index - Index into root table array 165 * table_index - Where the table index is returned
161 * 166 *
162 * RETURN: Status 167 * RETURN: Status
163 * 168 *
@@ -168,7 +173,7 @@ acpi_tb_install_table_with_override(u32 table_index,
168 173
169acpi_status 174acpi_status
170acpi_tb_install_fixed_table(acpi_physical_address address, 175acpi_tb_install_fixed_table(acpi_physical_address address,
171 char *signature, u32 table_index) 176 char *signature, u32 *table_index)
172{ 177{
173 struct acpi_table_desc new_table_desc; 178 struct acpi_table_desc new_table_desc;
174 acpi_status status; 179 acpi_status status;
@@ -200,7 +205,9 @@ acpi_tb_install_fixed_table(acpi_physical_address address,
200 goto release_and_exit; 205 goto release_and_exit;
201 } 206 }
202 207
203 acpi_tb_install_table_with_override(table_index, &new_table_desc, TRUE); 208 /* Add the table to the global root table list */
209
210 acpi_tb_install_table_with_override(&new_table_desc, TRUE, table_index);
204 211
205release_and_exit: 212release_and_exit:
206 213
@@ -355,13 +362,8 @@ acpi_tb_install_standard_table(acpi_physical_address address,
355 362
356 /* Add the table to the global root table list */ 363 /* Add the table to the global root table list */
357 364
358 status = acpi_tb_get_next_table_descriptor(&i, NULL); 365 acpi_tb_install_table_with_override(&new_table_desc, override,
359 if (ACPI_FAILURE(status)) { 366 table_index);
360 goto release_and_exit;
361 }
362
363 *table_index = i;
364 acpi_tb_install_table_with_override(i, &new_table_desc, override);
365 367
366release_and_exit: 368release_and_exit:
367 369
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 568ac0e4a3c6..4337990127cc 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -68,28 +68,27 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size);
68 68
69acpi_status acpi_tb_initialize_facs(void) 69acpi_status acpi_tb_initialize_facs(void)
70{ 70{
71 struct acpi_table_facs *facs;
71 72
72 /* If Hardware Reduced flag is set, there is no FACS */ 73 /* If Hardware Reduced flag is set, there is no FACS */
73 74
74 if (acpi_gbl_reduced_hardware) { 75 if (acpi_gbl_reduced_hardware) {
75 acpi_gbl_FACS = NULL; 76 acpi_gbl_FACS = NULL;
76 return (AE_OK); 77 return (AE_OK);
77 } 78 } else if (acpi_gbl_FADT.Xfacs &&
78 79 (!acpi_gbl_FADT.facs
79 (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS, 80 || !acpi_gbl_use32_bit_facs_addresses)) {
80 ACPI_CAST_INDIRECT_PTR(struct 81 (void)acpi_get_table_by_index(acpi_gbl_xfacs_index,
81 acpi_table_header, 82 ACPI_CAST_INDIRECT_PTR(struct
82 &acpi_gbl_facs32)); 83 acpi_table_header,
83 (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_X_FACS, 84 &facs));
84 ACPI_CAST_INDIRECT_PTR(struct 85 acpi_gbl_FACS = facs;
85 acpi_table_header, 86 } else if (acpi_gbl_FADT.facs) {
86 &acpi_gbl_facs64)); 87 (void)acpi_get_table_by_index(acpi_gbl_facs_index,
87 88 ACPI_CAST_INDIRECT_PTR(struct
88 if (acpi_gbl_facs64 89 acpi_table_header,
89 && (!acpi_gbl_facs32 || !acpi_gbl_use32_bit_facs_addresses)) { 90 &facs));
90 acpi_gbl_FACS = acpi_gbl_facs64; 91 acpi_gbl_FACS = facs;
91 } else if (acpi_gbl_facs32) {
92 acpi_gbl_FACS = acpi_gbl_facs32;
93 } 92 }
94 93
95 /* If there is no FACS, just continue. There was already an error msg */ 94 /* If there is no FACS, just continue. There was already an error msg */
@@ -192,7 +191,7 @@ struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
192 acpi_tb_uninstall_table(table_desc); 191 acpi_tb_uninstall_table(table_desc);
193 192
194 acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list. 193 acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list.
195 tables[ACPI_TABLE_INDEX_DSDT], 194 tables[acpi_gbl_dsdt_index],
196 ACPI_PTR_TO_PHYSADDR(new_table), 195 ACPI_PTR_TO_PHYSADDR(new_table),
197 ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL, 196 ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL,
198 new_table); 197 new_table);
@@ -369,13 +368,6 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
369 table_entry_size); 368 table_entry_size);
370 table_entry = ACPI_ADD_PTR(u8, table, sizeof(struct acpi_table_header)); 369 table_entry = ACPI_ADD_PTR(u8, table, sizeof(struct acpi_table_header));
371 370
372 /*
373 * First three entries in the table array are reserved for the DSDT
374 * and 32bit/64bit FACS, which are not actually present in the
375 * RSDT/XSDT - they come from the FADT
376 */
377 acpi_gbl_root_table_list.current_table_count = 3;
378
379 /* Initialize the root table array from the RSDT/XSDT */ 371 /* Initialize the root table array from the RSDT/XSDT */
380 372
381 for (i = 0; i < table_count; i++) { 373 for (i = 0; i < table_count; i++) {
@@ -412,3 +404,36 @@ next_table:
412 404
413 return_ACPI_STATUS(AE_OK); 405 return_ACPI_STATUS(AE_OK);
414} 406}
407
408/*******************************************************************************
409 *
410 * FUNCTION: acpi_is_valid_signature
411 *
412 * PARAMETERS: signature - Sig string to be validated
413 *
414 * RETURN: TRUE if signature is correct length and has valid characters
415 *
416 * DESCRIPTION: Validate an ACPI table signature.
417 *
418 ******************************************************************************/
419
420u8 acpi_is_valid_signature(char *signature)
421{
422 u32 i;
423
424 /* Validate the signature length */
425
426 if (strlen(signature) != ACPI_NAME_SIZE) {
427 return (FALSE);
428 }
429
430 /* Validate each character in the signature */
431
432 for (i = 0; i < ACPI_NAME_SIZE; i++) {
433 if (!acpi_ut_valid_acpi_char(signature[i], i)) {
434 return (FALSE);
435 }
436 }
437
438 return (TRUE);
439}
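Illustrative sketch: the new helper operates on a NUL-terminated signature string (it uses strlen), so a natural use is rejecting malformed caller-supplied signatures before a table lookup. The wrapper below is hypothetical:

/* Sketch only: validate a signature string before scanning the root list */
static acpi_status acpi_table_lookup_sketch(char *signature)
{
	if (!acpi_is_valid_signature(signature)) {
		return (AE_BAD_SIGNATURE);
	}

	/* ... proceed to search acpi_gbl_root_table_list for a match ... */
	return (AE_OK);
}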
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 9682d40ca6ff..55ee14ca9418 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -51,9 +51,6 @@
51#define _COMPONENT ACPI_TABLES 51#define _COMPONENT ACPI_TABLES
52ACPI_MODULE_NAME("tbxfload") 52ACPI_MODULE_NAME("tbxfload")
53 53
54/* Local prototypes */
55static acpi_status acpi_tb_load_namespace(void);
56
57/******************************************************************************* 54/*******************************************************************************
58 * 55 *
59 * FUNCTION: acpi_load_tables 56 * FUNCTION: acpi_load_tables
@@ -65,7 +62,6 @@ static acpi_status acpi_tb_load_namespace(void);
65 * DESCRIPTION: Load the ACPI tables from the RSDT/XSDT 62 * DESCRIPTION: Load the ACPI tables from the RSDT/XSDT
66 * 63 *
67 ******************************************************************************/ 64 ******************************************************************************/
68
69acpi_status __init acpi_load_tables(void) 65acpi_status __init acpi_load_tables(void)
70{ 66{
71 acpi_status status; 67 acpi_status status;
@@ -75,6 +71,13 @@ acpi_status __init acpi_load_tables(void)
75 /* Load the namespace from the tables */ 71 /* Load the namespace from the tables */
76 72
77 status = acpi_tb_load_namespace(); 73 status = acpi_tb_load_namespace();
74
75 /* Don't let single failures abort the load */
76
77 if (status == AE_CTRL_TERMINATE) {
78 status = AE_OK;
79 }
80
78 if (ACPI_FAILURE(status)) { 81 if (ACPI_FAILURE(status)) {
79 ACPI_EXCEPTION((AE_INFO, status, 82 ACPI_EXCEPTION((AE_INFO, status,
80 "While loading namespace from ACPI tables")); 83 "While loading namespace from ACPI tables"));
@@ -97,11 +100,14 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_load_tables)
97 * the RSDT/XSDT. 100 * the RSDT/XSDT.
98 * 101 *
99 ******************************************************************************/ 102 ******************************************************************************/
100static acpi_status acpi_tb_load_namespace(void) 103acpi_status acpi_tb_load_namespace(void)
101{ 104{
102 acpi_status status; 105 acpi_status status;
103 u32 i; 106 u32 i;
104 struct acpi_table_header *new_dsdt; 107 struct acpi_table_header *new_dsdt;
108 struct acpi_table_desc *table;
109 u32 tables_loaded = 0;
110 u32 tables_failed = 0;
105 111
106 ACPI_FUNCTION_TRACE(tb_load_namespace); 112 ACPI_FUNCTION_TRACE(tb_load_namespace);
107 113
@@ -111,15 +117,11 @@ static acpi_status acpi_tb_load_namespace(void)
111 * Load the namespace. The DSDT is required, but any SSDT and 117 * Load the namespace. The DSDT is required, but any SSDT and
112 * PSDT tables are optional. Verify the DSDT. 118 * PSDT tables are optional. Verify the DSDT.
113 */ 119 */
120 table = &acpi_gbl_root_table_list.tables[acpi_gbl_dsdt_index];
121
114 if (!acpi_gbl_root_table_list.current_table_count || 122 if (!acpi_gbl_root_table_list.current_table_count ||
115 !ACPI_COMPARE_NAME(& 123 !ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_DSDT) ||
116 (acpi_gbl_root_table_list. 124 ACPI_FAILURE(acpi_tb_validate_table(table))) {
117 tables[ACPI_TABLE_INDEX_DSDT].signature),
118 ACPI_SIG_DSDT)
119 ||
120 ACPI_FAILURE(acpi_tb_validate_table
121 (&acpi_gbl_root_table_list.
122 tables[ACPI_TABLE_INDEX_DSDT]))) {
123 status = AE_NO_ACPI_TABLES; 125 status = AE_NO_ACPI_TABLES;
124 goto unlock_and_exit; 126 goto unlock_and_exit;
125 } 127 }
@@ -130,8 +132,7 @@ static acpi_status acpi_tb_load_namespace(void)
130 * array can change dynamically as tables are loaded at run-time. Note: 132 * array can change dynamically as tables are loaded at run-time. Note:
131 * .Pointer field is not validated until after call to acpi_tb_validate_table. 133 * .Pointer field is not validated until after call to acpi_tb_validate_table.
132 */ 134 */
133 acpi_gbl_DSDT = 135 acpi_gbl_DSDT = table->pointer;
134 acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer;
135 136
136 /* 137 /*
137 * Optionally copy the entire DSDT to local memory (instead of simply 138 * Optionally copy the entire DSDT to local memory (instead of simply
@@ -140,7 +141,7 @@ static acpi_status acpi_tb_load_namespace(void)
140 * the DSDT. 141 * the DSDT.
141 */ 142 */
142 if (acpi_gbl_copy_dsdt_locally) { 143 if (acpi_gbl_copy_dsdt_locally) {
143 new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT); 144 new_dsdt = acpi_tb_copy_dsdt(acpi_gbl_dsdt_index);
144 if (new_dsdt) { 145 if (new_dsdt) {
145 acpi_gbl_DSDT = new_dsdt; 146 acpi_gbl_DSDT = new_dsdt;
146 } 147 }
@@ -157,41 +158,65 @@ static acpi_status acpi_tb_load_namespace(void)
157 158
158 /* Load and parse tables */ 159 /* Load and parse tables */
159 160
160 status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node); 161 status = acpi_ns_load_table(acpi_gbl_dsdt_index, acpi_gbl_root_node);
161 if (ACPI_FAILURE(status)) { 162 if (ACPI_FAILURE(status)) {
162 return_ACPI_STATUS(status); 163 ACPI_EXCEPTION((AE_INFO, status, "[DSDT] table load failed"));
164 tables_failed++;
165 } else {
166 tables_loaded++;
163 } 167 }
164 168
165 /* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */ 169 /* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */
166 170
167 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); 171 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
168 for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) { 172 for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
173 table = &acpi_gbl_root_table_list.tables[i];
174
169 if (!acpi_gbl_root_table_list.tables[i].address || 175 if (!acpi_gbl_root_table_list.tables[i].address ||
170 (!ACPI_COMPARE_NAME 176 (!ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_SSDT)
171 (&(acpi_gbl_root_table_list.tables[i].signature), 177 && !ACPI_COMPARE_NAME(table->signature.ascii,
172 ACPI_SIG_SSDT) 178 ACPI_SIG_PSDT)
173 && 179 && !ACPI_COMPARE_NAME(table->signature.ascii,
174 !ACPI_COMPARE_NAME(& 180 ACPI_SIG_OSDT))
175 (acpi_gbl_root_table_list.tables[i]. 181 || ACPI_FAILURE(acpi_tb_validate_table(table))) {
176 signature), ACPI_SIG_PSDT)
177 &&
178 !ACPI_COMPARE_NAME(&
179 (acpi_gbl_root_table_list.tables[i].
180 signature), ACPI_SIG_OSDT))
181 ||
182 ACPI_FAILURE(acpi_tb_validate_table
183 (&acpi_gbl_root_table_list.tables[i]))) {
184 continue; 182 continue;
185 } 183 }
186 184
187 /* Ignore errors while loading tables, get as many as possible */ 185 /* Ignore errors while loading tables, get as many as possible */
188 186
189 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); 187 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
190 (void)acpi_ns_load_table(i, acpi_gbl_root_node); 188 status = acpi_ns_load_table(i, acpi_gbl_root_node);
189 if (ACPI_FAILURE(status)) {
190 ACPI_EXCEPTION((AE_INFO, status,
191 "(%4.4s:%8.8s) while loading table",
192 table->signature.ascii,
193 table->pointer->oem_table_id));
194 tables_failed++;
195
196 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
197 "Table [%4.4s:%8.8s] (id FF) - Table namespace load failed\n\n",
198 table->signature.ascii,
199 table->pointer->oem_table_id));
200 } else {
201 tables_loaded++;
202 }
203
191 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); 204 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
192 } 205 }
193 206
194 ACPI_INFO((AE_INFO, "All ACPI Tables successfully acquired")); 207 if (!tables_failed) {
208 ACPI_INFO((AE_INFO,
209 "%u ACPI AML tables successfully acquired and loaded",
210 tables_loaded));
211 } else {
212 ACPI_ERROR((AE_INFO,
213 "%u table load failures, %u successful",
214 tables_failed, tables_loaded));
215
216 /* Indicate at least one failure */
217
218 status = AE_CTRL_TERMINATE;
219 }
195 220
196unlock_and_exit: 221unlock_and_exit:
197 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); 222 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
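Illustrative sketch: the loop above now loads every AML table it can, counts successes and failures, and reports a single AE_CTRL_TERMINATE (which acpi_load_tables() downgrades to AE_OK) when anything failed. The per-table filter it applies is equivalent to this hypothetical predicate:

/* Sketch only: which root-table-list entries get their namespace loaded */
static u8 is_loadable_sketch(struct acpi_table_desc *table)
{
	if (!table->address) {
		return (FALSE);	/* empty slot */
	}

	if (!ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_SSDT) &&
	    !ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_PSDT) &&
	    !ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_OSDT)) {
		return (FALSE);	/* not a loadable AML definition block */
	}

	return (ACPI_SUCCESS(acpi_tb_validate_table(table)) ? TRUE : FALSE);
}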
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index cd02693841db..4146229103c8 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -45,6 +45,7 @@
45 45
46#include <acpi/acpi.h> 46#include <acpi/acpi.h>
47#include "accommon.h" 47#include "accommon.h"
48#include "acinterp.h"
48 49
49#define _COMPONENT ACPI_UTILITIES 50#define _COMPONENT ACPI_UTILITIES
50ACPI_MODULE_NAME("utdebug") 51ACPI_MODULE_NAME("utdebug")
@@ -560,8 +561,37 @@ acpi_ut_ptr_exit(u32 line_number,
560 } 561 }
561} 562}
562 563
564/*******************************************************************************
565 *
566 * FUNCTION: acpi_trace_point
567 *
568 * PARAMETERS: type - Trace event type
569 * begin - TRUE if before execution
570 * aml - Executed AML address
571 * pathname - Object path
572 * pointer - Pointer to the related object
573 *
574 * RETURN: None
575 *
576 * DESCRIPTION: Interpreter execution trace.
577 *
578 ******************************************************************************/
579
580void
581acpi_trace_point(acpi_trace_event_type type, u8 begin, u8 *aml, char *pathname)
582{
583
584 ACPI_FUNCTION_ENTRY();
585
586 acpi_ex_trace_point(type, begin, aml, pathname);
587
588#ifdef ACPI_USE_SYSTEM_TRACER
589 acpi_os_trace_point(type, begin, aml, pathname);
 563#endif 590#endif
591}
564 592
593ACPI_EXPORT_SYMBOL(acpi_trace_point)
594#endif
565#ifdef ACPI_APPLICATION 595#ifdef ACPI_APPLICATION
566/******************************************************************************* 596/*******************************************************************************
567 * 597 *
@@ -575,7 +605,6 @@ acpi_ut_ptr_exit(u32 line_number,
575 * DESCRIPTION: Print error message to the console, used by applications. 605 * DESCRIPTION: Print error message to the console, used by applications.
576 * 606 *
577 ******************************************************************************/ 607 ******************************************************************************/
578
579void ACPI_INTERNAL_VAR_XFACE acpi_log_error(const char *format, ...) 608void ACPI_INTERNAL_VAR_XFACE acpi_log_error(const char *format, ...)
580{ 609{
581 va_list args; 610 va_list args;
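Illustrative sketch for the acpi_trace_point() hook added above: a host built with ACPI_USE_SYSTEM_TRACER is expected to supply acpi_os_trace_point(); the signature below is inferred from the call site and is an assumption, not part of this patch.

/* Sketch only: minimal OS-layer trace hook, mirroring the call above */
void acpi_os_trace_point(acpi_trace_event_type type,
			 u8 begin, u8 *aml, char *pathname)
{
	/* e.g. forward method begin/end events to the kernel tracer */
}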
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 71fce389fd48..1638312e3d8f 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -209,6 +209,9 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
209 acpi_ut_delete_object_desc(object->method.mutex); 209 acpi_ut_delete_object_desc(object->method.mutex);
210 object->method.mutex = NULL; 210 object->method.mutex = NULL;
211 } 211 }
212 if (object->method.node) {
213 object->method.node = NULL;
214 }
212 break; 215 break;
213 216
214 case ACPI_TYPE_REGION: 217 case ACPI_TYPE_REGION:
diff --git a/drivers/acpi/acpica/utfileio.c b/drivers/acpi/acpica/utfileio.c
index 857af824337b..75a94f52b4be 100644
--- a/drivers/acpi/acpica/utfileio.c
+++ b/drivers/acpi/acpica/utfileio.c
@@ -312,7 +312,7 @@ acpi_ut_read_table_from_file(char *filename, struct acpi_table_header ** table)
312 /* Get the entire file */ 312 /* Get the entire file */
313 313
314 fprintf(stderr, 314 fprintf(stderr,
315 "Reading ACPI table from file %10s - Length %.8u (0x%06X)\n", 315 "Reading ACPI table from file %12s - Length %.8u (0x%06X)\n",
316 filename, file_size, file_size); 316 filename, file_size, file_size);
317 317
318 status = acpi_ut_read_table(file, table, &table_length); 318 status = acpi_ut_read_table(file, table, &table_length);
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index e402e07b4846..28ab3a1d5ec1 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -204,11 +204,10 @@ acpi_status acpi_ut_init_globals(void)
204 acpi_gbl_acpi_hardware_present = TRUE; 204 acpi_gbl_acpi_hardware_present = TRUE;
205 acpi_gbl_last_owner_id_index = 0; 205 acpi_gbl_last_owner_id_index = 0;
206 acpi_gbl_next_owner_id_offset = 0; 206 acpi_gbl_next_owner_id_offset = 0;
207 acpi_gbl_trace_dbg_level = 0;
208 acpi_gbl_trace_dbg_layer = 0;
209 acpi_gbl_debugger_configuration = DEBUGGER_THREADING; 207 acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
210 acpi_gbl_osi_mutex = NULL; 208 acpi_gbl_osi_mutex = NULL;
211 acpi_gbl_reg_methods_executed = FALSE; 209 acpi_gbl_reg_methods_executed = FALSE;
210 acpi_gbl_max_loop_iterations = 0xFFFF;
212 211
213 /* Hardware oriented */ 212 /* Hardware oriented */
214 213
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 71b66537f826..bd4443bdcbad 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -75,7 +75,7 @@ u8 acpi_ut_is_pci_root_bridge(char *id)
75 return (FALSE); 75 return (FALSE);
76} 76}
77 77
78#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP) 78#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP || defined ACPI_NAMES_APP)
79/******************************************************************************* 79/*******************************************************************************
80 * 80 *
81 * FUNCTION: acpi_ut_is_aml_table 81 * FUNCTION: acpi_ut_is_aml_table
@@ -376,7 +376,7 @@ acpi_ut_display_init_pathname(u8 type,
376 /* Get the full pathname to the node */ 376 /* Get the full pathname to the node */
377 377
378 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; 378 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
379 status = acpi_ns_handle_to_pathname(obj_handle, &buffer); 379 status = acpi_ns_handle_to_pathname(obj_handle, &buffer, TRUE);
380 if (ACPI_FAILURE(status)) { 380 if (ACPI_FAILURE(status)) {
381 return; 381 return;
382 } 382 }
diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c
new file mode 100644
index 000000000000..1d5f6b17b766
--- /dev/null
+++ b/drivers/acpi/acpica/utnonansi.c
@@ -0,0 +1,380 @@
1/*******************************************************************************
2 *
3 * Module Name: utnonansi - Non-ansi C library functions
4 *
5 ******************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46
47#define _COMPONENT ACPI_UTILITIES
48ACPI_MODULE_NAME("utnonansi")
49
50/*
51 * Non-ANSI C library functions - strlwr, strupr, stricmp, and a 64-bit
52 * version of strtoul.
53 */
54/*******************************************************************************
55 *
56 * FUNCTION: acpi_ut_strlwr (strlwr)
57 *
58 * PARAMETERS: src_string - The source string to convert
59 *
60 * RETURN: None
61 *
62 * DESCRIPTION: Convert a string to lowercase
63 *
64 ******************************************************************************/
65void acpi_ut_strlwr(char *src_string)
66{
67 char *string;
68
69 ACPI_FUNCTION_ENTRY();
70
71 if (!src_string) {
72 return;
73 }
74
75 /* Walk entire string, lowercasing the letters */
76
77 for (string = src_string; *string; string++) {
78 *string = (char)tolower((int)*string);
79 }
80}
81
82/*******************************************************************************
83 *
84 * FUNCTION: acpi_ut_strupr (strupr)
85 *
86 * PARAMETERS: src_string - The source string to convert
87 *
88 * RETURN: None
89 *
90 * DESCRIPTION: Convert a string to uppercase
91 *
92 ******************************************************************************/
93
94void acpi_ut_strupr(char *src_string)
95{
96 char *string;
97
98 ACPI_FUNCTION_ENTRY();
99
100 if (!src_string) {
101 return;
102 }
103
104 /* Walk entire string, uppercasing the letters */
105
106 for (string = src_string; *string; string++) {
107 *string = (char)toupper((int)*string);
108 }
109}
110
111/******************************************************************************
112 *
113 * FUNCTION: acpi_ut_stricmp (stricmp)
114 *
115 * PARAMETERS: string1 - first string to compare
116 * string2 - second string to compare
117 *
118 * RETURN: int that signifies string relationship. Zero means strings
119 * are equal.
120 *
121 * DESCRIPTION: Case-insensitive string compare. Implementation of the
122 * non-ANSI stricmp function.
123 *
124 ******************************************************************************/
125
126int acpi_ut_stricmp(char *string1, char *string2)
127{
128 int c1;
129 int c2;
130
131 do {
132 c1 = tolower((int)*string1);
133 c2 = tolower((int)*string2);
134
135 string1++;
136 string2++;
137 }
138 while ((c1 == c2) && (c1));
139
140 return (c1 - c2);
141}
142
143/*******************************************************************************
144 *
145 * FUNCTION: acpi_ut_strtoul64
146 *
147 * PARAMETERS: string - Null terminated string
148 * base - Radix of the string: 16 or ACPI_ANY_BASE;
149 * ACPI_ANY_BASE means 'in behalf of to_integer'
150 * ret_integer - Where the converted integer is returned
151 *
152 * RETURN: Status and Converted value
153 *
154 * DESCRIPTION: Convert a string into an unsigned value. Performs either a
155 * 32-bit or 64-bit conversion, depending on the current mode
156 * of the interpreter.
157 *
158 * NOTE: Does not support Octal strings, not needed.
159 *
160 ******************************************************************************/
161
162acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
163{
164 u32 this_digit = 0;
165 u64 return_value = 0;
166 u64 quotient;
167 u64 dividend;
168 u32 to_integer_op = (base == ACPI_ANY_BASE);
169 u32 mode32 = (acpi_gbl_integer_byte_width == 4);
170 u8 valid_digits = 0;
171 u8 sign_of0x = 0;
172 u8 term = 0;
173
174 ACPI_FUNCTION_TRACE_STR(ut_stroul64, string);
175
176 switch (base) {
177 case ACPI_ANY_BASE:
178 case 16:
179
180 break;
181
182 default:
183
184 /* Invalid Base */
185
186 return_ACPI_STATUS(AE_BAD_PARAMETER);
187 }
188
189 if (!string) {
190 goto error_exit;
191 }
192
193 /* Skip over any white space in the buffer */
194
195 while ((*string) && (isspace((int)*string) || *string == '\t')) {
196 string++;
197 }
198
199 if (to_integer_op) {
200 /*
201 * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
202 * We need to determine if it is decimal or hexadecimal.
203 */
204 if ((*string == '0') && (tolower((int)*(string + 1)) == 'x')) {
205 sign_of0x = 1;
206 base = 16;
207
208 /* Skip over the leading '0x' */
209 string += 2;
210 } else {
211 base = 10;
212 }
213 }
214
215 /* Any string left? Check that '0x' is not followed by white space. */
216
217 if (!(*string) || isspace((int)*string) || *string == '\t') {
218 if (to_integer_op) {
219 goto error_exit;
220 } else {
221 goto all_done;
222 }
223 }
224
225 /*
226 * Perform a 32-bit or 64-bit conversion, depending upon the current
227 * execution mode of the interpreter
228 */
229 dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
230
231 /* Main loop: convert the string to a 32- or 64-bit integer */
232
233 while (*string) {
234 if (isdigit((int)*string)) {
235
236 /* Convert ASCII 0-9 to Decimal value */
237
238 this_digit = ((u8)*string) - '0';
239 } else if (base == 10) {
240
241 /* Digit is out of range; possible in to_integer case only */
242
243 term = 1;
244 } else {
245 this_digit = (u8)toupper((int)*string);
246 if (isxdigit((int)this_digit)) {
247
248 /* Convert ASCII Hex char to value */
249
250 this_digit = this_digit - 'A' + 10;
251 } else {
252 term = 1;
253 }
254 }
255
256 if (term) {
257 if (to_integer_op) {
258 goto error_exit;
259 } else {
260 break;
261 }
262 } else if ((valid_digits == 0) && (this_digit == 0)
263 && !sign_of0x) {
264
265 /* Skip zeros */
266 string++;
267 continue;
268 }
269
270 valid_digits++;
271
272 if (sign_of0x
273 && ((valid_digits > 16)
274 || ((valid_digits > 8) && mode32))) {
275 /*
276 * This is to_integer operation case.
277 * No any restrictions for string-to-integer conversion,
278 * see ACPI spec.
279 */
280 goto error_exit;
281 }
282
283 /* Divide the digit into the correct position */
284
285 (void)acpi_ut_short_divide((dividend - (u64)this_digit),
286 base, &quotient, NULL);
287
288 if (return_value > quotient) {
289 if (to_integer_op) {
290 goto error_exit;
291 } else {
292 break;
293 }
294 }
295
296 return_value *= base;
297 return_value += this_digit;
298 string++;
299 }
300
301 /* All done, normal exit */
302
303all_done:
304
305 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
306 ACPI_FORMAT_UINT64(return_value)));
307
308 *ret_integer = return_value;
309 return_ACPI_STATUS(AE_OK);
310
311error_exit:
312 /* Base was set/validated above */
313
314 if (base == 10) {
315 return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
316 } else {
317 return_ACPI_STATUS(AE_BAD_HEX_CONSTANT);
318 }
319}
320
321#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
322/*******************************************************************************
323 *
324 * FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
325 *
326 * PARAMETERS: Adds a "DestSize" parameter to each of the standard string
327 * functions. This is the size of the Destination buffer.
328 *
329 * RETURN: TRUE if the operation would overflow the destination buffer.
330 *
331 * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that
332 * the result of the operation will not overflow the output string
333 * buffer.
334 *
335 * NOTE: These functions are typically only helpful for processing
336 * user input and command lines. For most ACPICA code, the
337 * required buffer length is precisely calculated before buffer
338 * allocation, so the use of these functions is unnecessary.
339 *
340 ******************************************************************************/
341
342u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
343{
344
345 if (strlen(source) >= dest_size) {
346 return (TRUE);
347 }
348
349 strcpy(dest, source);
350 return (FALSE);
351}
352
353u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
354{
355
356 if ((strlen(dest) + strlen(source)) >= dest_size) {
357 return (TRUE);
358 }
359
360 strcat(dest, source);
361 return (FALSE);
362}
363
364u8
365acpi_ut_safe_strncat(char *dest,
366 acpi_size dest_size,
367 char *source, acpi_size max_transfer_length)
368{
369 acpi_size actual_transfer_length;
370
371 actual_transfer_length = ACPI_MIN(max_transfer_length, strlen(source));
372
373 if ((strlen(dest) + actual_transfer_length) >= dest_size) {
374 return (TRUE);
375 }
376
377 strncat(dest, source, max_transfer_length);
378 return (FALSE);
379}
380#endif
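Illustrative sketch: the acpi_ut_safe_str* helpers above return TRUE when the operation would overflow the destination, so callers test the return value instead of sizing buffers by guesswork. The helper below is hypothetical:

/* Sketch only: TRUE means "would not fit", FALSE means the copy happened */
static u8 build_path_sketch(char *dest, acpi_size dest_size)
{
	if (acpi_ut_safe_strcpy(dest, dest_size, "\\_SB.PCI0")) {
		return (TRUE);
	}

	return (acpi_ut_safe_strcat(dest, dest_size, ".LPCB"));
}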
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index 8f3c883dfe0e..4ddd105d9741 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -48,286 +48,6 @@
48#define _COMPONENT ACPI_UTILITIES 48#define _COMPONENT ACPI_UTILITIES
49ACPI_MODULE_NAME("utstring") 49ACPI_MODULE_NAME("utstring")
50 50
51/*
52 * Non-ANSI C library functions - strlwr, strupr, stricmp, and a 64-bit
53 * version of strtoul.
54 */
55#ifdef ACPI_ASL_COMPILER
56/*******************************************************************************
57 *
58 * FUNCTION: acpi_ut_strlwr (strlwr)
59 *
60 * PARAMETERS: src_string - The source string to convert
61 *
62 * RETURN: None
63 *
64 * DESCRIPTION: Convert string to lowercase
65 *
66 * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
67 *
68 ******************************************************************************/
69void acpi_ut_strlwr(char *src_string)
70{
71 char *string;
72
73 ACPI_FUNCTION_ENTRY();
74
75 if (!src_string) {
76 return;
77 }
78
79 /* Walk entire string, lowercasing the letters */
80
81 for (string = src_string; *string; string++) {
82 *string = (char)tolower((int)*string);
83 }
84
85 return;
86}
87
88/******************************************************************************
89 *
90 * FUNCTION: acpi_ut_stricmp (stricmp)
91 *
92 * PARAMETERS: string1 - first string to compare
93 * string2 - second string to compare
94 *
95 * RETURN: int that signifies string relationship. Zero means strings
96 * are equal.
97 *
98 * DESCRIPTION: Implementation of the non-ANSI stricmp function (compare
99 * strings with no case sensitivity)
100 *
101 ******************************************************************************/
102
103int acpi_ut_stricmp(char *string1, char *string2)
104{
105 int c1;
106 int c2;
107
108 do {
109 c1 = tolower((int)*string1);
110 c2 = tolower((int)*string2);
111
112 string1++;
113 string2++;
114 }
115 while ((c1 == c2) && (c1));
116
117 return (c1 - c2);
118}
119#endif
120
121/*******************************************************************************
122 *
123 * FUNCTION: acpi_ut_strupr (strupr)
124 *
125 * PARAMETERS: src_string - The source string to convert
126 *
127 * RETURN: None
128 *
129 * DESCRIPTION: Convert string to uppercase
130 *
131 * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
132 *
133 ******************************************************************************/
134
135void acpi_ut_strupr(char *src_string)
136{
137 char *string;
138
139 ACPI_FUNCTION_ENTRY();
140
141 if (!src_string) {
142 return;
143 }
144
145 /* Walk entire string, uppercasing the letters */
146
147 for (string = src_string; *string; string++) {
148 *string = (char)toupper((int)*string);
149 }
150
151 return;
152}
153
154/*******************************************************************************
155 *
156 * FUNCTION: acpi_ut_strtoul64
157 *
158 * PARAMETERS: string - Null terminated string
159 * base - Radix of the string: 16 or ACPI_ANY_BASE;
160 * ACPI_ANY_BASE means 'in behalf of to_integer'
161 * ret_integer - Where the converted integer is returned
162 *
163 * RETURN: Status and Converted value
164 *
165 * DESCRIPTION: Convert a string into an unsigned value. Performs either a
166 * 32-bit or 64-bit conversion, depending on the current mode
167 * of the interpreter.
168 * NOTE: Does not support Octal strings, not needed.
169 *
170 ******************************************************************************/
171
172acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
173{
174 u32 this_digit = 0;
175 u64 return_value = 0;
176 u64 quotient;
177 u64 dividend;
178 u32 to_integer_op = (base == ACPI_ANY_BASE);
179 u32 mode32 = (acpi_gbl_integer_byte_width == 4);
180 u8 valid_digits = 0;
181 u8 sign_of0x = 0;
182 u8 term = 0;
183
184 ACPI_FUNCTION_TRACE_STR(ut_stroul64, string);
185
186 switch (base) {
187 case ACPI_ANY_BASE:
188 case 16:
189
190 break;
191
192 default:
193
194 /* Invalid Base */
195
196 return_ACPI_STATUS(AE_BAD_PARAMETER);
197 }
198
199 if (!string) {
200 goto error_exit;
201 }
202
203 /* Skip over any white space in the buffer */
204
205 while ((*string) && (isspace((int)*string) || *string == '\t')) {
206 string++;
207 }
208
209 if (to_integer_op) {
210 /*
211 * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
212 * We need to determine if it is decimal or hexadecimal.
213 */
214 if ((*string == '0') && (tolower((int)*(string + 1)) == 'x')) {
215 sign_of0x = 1;
216 base = 16;
217
218 /* Skip over the leading '0x' */
219 string += 2;
220 } else {
221 base = 10;
222 }
223 }
224
225 /* Any string left? Check that '0x' is not followed by white space. */
226
227 if (!(*string) || isspace((int)*string) || *string == '\t') {
228 if (to_integer_op) {
229 goto error_exit;
230 } else {
231 goto all_done;
232 }
233 }
234
235 /*
236 * Perform a 32-bit or 64-bit conversion, depending upon the current
237 * execution mode of the interpreter
238 */
239 dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
240
241 /* Main loop: convert the string to a 32- or 64-bit integer */
242
243 while (*string) {
244 if (isdigit((int)*string)) {
245
246 /* Convert ASCII 0-9 to Decimal value */
247
248 this_digit = ((u8)*string) - '0';
249 } else if (base == 10) {
250
251 /* Digit is out of range; possible in to_integer case only */
252
253 term = 1;
254 } else {
255 this_digit = (u8)toupper((int)*string);
256 if (isxdigit((int)this_digit)) {
257
258 /* Convert ASCII Hex char to value */
259
260 this_digit = this_digit - 'A' + 10;
261 } else {
262 term = 1;
263 }
264 }
265
266 if (term) {
267 if (to_integer_op) {
268 goto error_exit;
269 } else {
270 break;
271 }
272 } else if ((valid_digits == 0) && (this_digit == 0)
273 && !sign_of0x) {
274
275 /* Skip zeros */
276 string++;
277 continue;
278 }
279
280 valid_digits++;
281
282 if (sign_of0x
283 && ((valid_digits > 16)
284 || ((valid_digits > 8) && mode32))) {
285 /*
286 * This is to_integer operation case.
287 * No any restrictions for string-to-integer conversion,
288 * see ACPI spec.
289 */
290 goto error_exit;
291 }
292
293 /* Divide the digit into the correct position */
294
295 (void)acpi_ut_short_divide((dividend - (u64)this_digit),
296 base, &quotient, NULL);
297
298 if (return_value > quotient) {
299 if (to_integer_op) {
300 goto error_exit;
301 } else {
302 break;
303 }
304 }
305
306 return_value *= base;
307 return_value += this_digit;
308 string++;
309 }
310
311 /* All done, normal exit */
312
313all_done:
314
315 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
316 ACPI_FORMAT_UINT64(return_value)));
317
318 *ret_integer = return_value;
319 return_ACPI_STATUS(AE_OK);
320
321error_exit:
322 /* Base was set/validated above */
323
324 if (base == 10) {
325 return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
326 } else {
327 return_ACPI_STATUS(AE_BAD_HEX_CONSTANT);
328 }
329}
330
331/******************************************************************************* 51/*******************************************************************************
332 * 52 *
333 * FUNCTION: acpi_ut_print_string 53 * FUNCTION: acpi_ut_print_string
@@ -342,7 +62,6 @@ error_exit:
342 * sequences. 62 * sequences.
343 * 63 *
344 ******************************************************************************/ 64 ******************************************************************************/
345
346void acpi_ut_print_string(char *string, u16 max_length) 65void acpi_ut_print_string(char *string, u16 max_length)
347{ 66{
348 u32 i; 67 u32 i;
@@ -584,64 +303,3 @@ void ut_convert_backslashes(char *pathname)
584 } 303 }
585} 304}
586#endif 305#endif
587
588#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
589/*******************************************************************************
590 *
591 * FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
592 *
593 * PARAMETERS: Adds a "DestSize" parameter to each of the standard string
594 * functions. This is the size of the Destination buffer.
595 *
596 * RETURN: TRUE if the operation would overflow the destination buffer.
597 *
598 * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that
599 * the result of the operation will not overflow the output string
600 * buffer.
601 *
602 * NOTE: These functions are typically only helpful for processing
603 * user input and command lines. For most ACPICA code, the
604 * required buffer length is precisely calculated before buffer
605 * allocation, so the use of these functions is unnecessary.
606 *
607 ******************************************************************************/
608
609u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
610{
611
612 if (strlen(source) >= dest_size) {
613 return (TRUE);
614 }
615
616 strcpy(dest, source);
617 return (FALSE);
618}
619
620u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
621{
622
623 if ((strlen(dest) + strlen(source)) >= dest_size) {
624 return (TRUE);
625 }
626
627 strcat(dest, source);
628 return (FALSE);
629}
630
631u8
632acpi_ut_safe_strncat(char *dest,
633 acpi_size dest_size,
634 char *source, acpi_size max_transfer_length)
635{
636 acpi_size actual_transfer_length;
637
638 actual_transfer_length = ACPI_MIN(max_transfer_length, strlen(source));
639
640 if ((strlen(dest) + actual_transfer_length) >= dest_size) {
641 return (TRUE);
642 }
643
644 strncat(dest, source, max_transfer_length);
645 return (FALSE);
646}
647#endif
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 51cf52d52243..4f332815db00 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -92,13 +92,6 @@ acpi_status __init acpi_terminate(void)
92 92
93 acpi_ut_mutex_terminate(); 93 acpi_ut_mutex_terminate();
94 94
95#ifdef ACPI_DEBUGGER
96
97 /* Shut down the debugger */
98
99 acpi_db_terminate();
100#endif
101
102 /* Now we can shutdown the OS-dependent layer */ 95 /* Now we can shutdown the OS-dependent layer */
103 96
104 status = acpi_os_terminate(); 97 status = acpi_os_terminate();
@@ -517,7 +510,8 @@ acpi_decode_pld_buffer(u8 *in_buffer,
517 510
518 /* Parameter validation */ 511 /* Parameter validation */
519 512
520 if (!in_buffer || !return_buffer || (length < 16)) { 513 if (!in_buffer || !return_buffer
514 || (length < ACPI_PLD_REV1_BUFFER_SIZE)) {
521 return (AE_BAD_PARAMETER); 515 return (AE_BAD_PARAMETER);
522 } 516 }
523 517
@@ -567,7 +561,7 @@ acpi_decode_pld_buffer(u8 *in_buffer,
567 pld_info->rotation = ACPI_PLD_GET_ROTATION(&dword); 561 pld_info->rotation = ACPI_PLD_GET_ROTATION(&dword);
568 pld_info->order = ACPI_PLD_GET_ORDER(&dword); 562 pld_info->order = ACPI_PLD_GET_ORDER(&dword);
569 563
570 if (length >= ACPI_PLD_BUFFER_SIZE) { 564 if (length >= ACPI_PLD_REV2_BUFFER_SIZE) {
571 565
572 /* Fifth 32-bit DWord (Revision 2 of _PLD) */ 566 /* Fifth 32-bit DWord (Revision 2 of _PLD) */
573 567
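Illustrative sketch: the length checks above distinguish the 16-byte Revision 1 _PLD layout from the 20-byte Revision 2 layout. A caller decoding one raw _PLD buffer element might look like this; "element" is assumed to be a buffer-typed union acpi_object obtained by evaluating _PLD, and the exact parameter types are abridged.

/* Sketch only: decode one raw _PLD buffer object */
static void decode_pld_sketch(union acpi_object *element)
{
	struct acpi_pld_info *pld;
	acpi_status status;

	status = acpi_decode_pld_buffer(element->buffer.pointer,
					element->buffer.length, &pld);
	if (ACPI_SUCCESS(status)) {
		/* pld->revision tells which decoded fields are meaningful */
		ACPI_FREE(pld);
	}
}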
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 42a32a66ef22..a7137ec28447 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -124,17 +124,6 @@ acpi_status __init acpi_initialize_subsystem(void)
124 return_ACPI_STATUS(status); 124 return_ACPI_STATUS(status);
125 } 125 }
126 126
127 /* If configured, initialize the AML debugger */
128
129#ifdef ACPI_DEBUGGER
130 status = acpi_db_initialize();
131 if (ACPI_FAILURE(status)) {
132 ACPI_EXCEPTION((AE_INFO, status,
133 "During Debugger initialization"));
134 return_ACPI_STATUS(status);
135 }
136#endif
137
138 return_ACPI_STATUS(AE_OK); 127 return_ACPI_STATUS(AE_OK);
139} 128}
140 129
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index a85ac07f3da3..a2c8d7adb6eb 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -24,10 +24,6 @@
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of 24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details. 26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
31 */ 27 */
32 28
33#include <linux/kernel.h> 29#include <linux/kernel.h>
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index a095d4f858da..0431883653be 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -18,10 +18,6 @@
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details. 20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */ 21 */
26 22
27#include <linux/kernel.h> 23#include <linux/kernel.h>
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index 04ab5c9d3ced..6330f557a2c8 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -17,10 +17,6 @@
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details. 19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */ 20 */
25 21
26#include <linux/kernel.h> 22#include <linux/kernel.h>
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 3670bbab57a3..6682c5daf742 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -18,10 +18,6 @@
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details. 20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */ 21 */
26 22
27#include <linux/kernel.h> 23#include <linux/kernel.h>
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 2bfd53cbfe80..23981ac1c6c2 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -23,10 +23,6 @@
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of 23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details. 25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
30 */ 26 */
31 27
32#include <linux/kernel.h> 28#include <linux/kernel.h>
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 06e9b411a0a2..20b3fcf4007c 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -21,10 +21,6 @@
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details. 23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 */ 24 */
29 25
30#include <linux/kernel.h> 26#include <linux/kernel.h>
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b3628cc01a53..b719ab3090bb 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -18,10 +18,6 @@
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details. 19 * General Public License for more details.
20 * 20 *
21 * You should have received a copy of the GNU General Public License along
22 * with this program; if not, write to the Free Software Foundation, Inc.,
23 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
24 *
25 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 21 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
26 */ 22 */
27 23
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 278dc4be992a..96809cd99ace 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -20,10 +20,6 @@
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details. 21 * General Public License for more details.
22 * 22 *
23 * You should have received a copy of the GNU General Public License along
24 * with this program; if not, write to the Free Software Foundation, Inc.,
25 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
26 *
27 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
28 */ 24 */
29 25
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 513e7230e3d0..46506e7687cd 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -15,10 +15,6 @@
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details. 16 * General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 */ 19 */
24 20
@@ -423,6 +419,406 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
423 acpi_evaluate_ost(handle, type, ost_code, NULL); 419 acpi_evaluate_ost(handle, type, ost_code, NULL);
424} 420}
425 421
422static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
423{
424 struct acpi_device *device = data;
425
426 device->driver->ops.notify(device, event);
427}
428
429static void acpi_device_notify_fixed(void *data)
430{
431 struct acpi_device *device = data;
432
433 /* Fixed hardware devices have no handles */
434 acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
435}
436
437static u32 acpi_device_fixed_event(void *data)
438{
439 acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data);
440 return ACPI_INTERRUPT_HANDLED;
441}
442
443static int acpi_device_install_notify_handler(struct acpi_device *device)
444{
445 acpi_status status;
446
447 if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
448 status =
449 acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
450 acpi_device_fixed_event,
451 device);
452 else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
453 status =
454 acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
455 acpi_device_fixed_event,
456 device);
457 else
458 status = acpi_install_notify_handler(device->handle,
459 ACPI_DEVICE_NOTIFY,
460 acpi_device_notify,
461 device);
462
463 if (ACPI_FAILURE(status))
464 return -EINVAL;
465 return 0;
466}
467
468static void acpi_device_remove_notify_handler(struct acpi_device *device)
469{
470 if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
471 acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
472 acpi_device_fixed_event);
473 else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
474 acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
475 acpi_device_fixed_event);
476 else
477 acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
478 acpi_device_notify);
479}
480
481/* --------------------------------------------------------------------------
482 Device Matching
483 -------------------------------------------------------------------------- */
484
485static struct acpi_device *acpi_primary_dev_companion(struct acpi_device *adev,
486 const struct device *dev)
487{
488 struct mutex *physical_node_lock = &adev->physical_node_lock;
489
490 mutex_lock(physical_node_lock);
491 if (list_empty(&adev->physical_node_list)) {
492 adev = NULL;
493 } else {
494 const struct acpi_device_physical_node *node;
495
496 node = list_first_entry(&adev->physical_node_list,
497 struct acpi_device_physical_node, node);
498 if (node->dev != dev)
499 adev = NULL;
500 }
501 mutex_unlock(physical_node_lock);
502 return adev;
503}
504
505/**
506 * acpi_device_is_first_physical_node - Is given dev first physical node
507 * @adev: ACPI companion device
508 * @dev: Physical device to check
509 *
510 * Function checks if given @dev is the first physical devices attached to
511 * the ACPI companion device. This distinction is needed in some cases
512 * where the same companion device is shared between many physical devices.
513 *
514 * Note that the caller have to provide valid @adev pointer.
515 */
516bool acpi_device_is_first_physical_node(struct acpi_device *adev,
517 const struct device *dev)
518{
519 return !!acpi_primary_dev_companion(adev, dev);
520}
521
522/*
523 * acpi_companion_match() - Can we match via ACPI companion device
524 * @dev: Device in question
525 *
526 * Check if the given device has an ACPI companion and if that companion has
527 * a valid list of PNP IDs, and if the device is the first (primary) physical
528 * device associated with it. Return the companion pointer if that's the case
529 * or NULL otherwise.
530 *
531 * If multiple physical devices are attached to a single ACPI companion, we need
532 * to be careful. The usage scenario for this kind of relationship is that all
533 * of the physical devices in question use resources provided by the ACPI
534 * companion. A typical case is an MFD device where all the sub-devices share
535 * the parent's ACPI companion. In such cases we can only allow the primary
536 * (first) physical device to be matched with the help of the companion's PNP
537 * IDs.
538 *
539 * Additional physical devices sharing the ACPI companion can still use
540 * resources available from it but they will be matched normally using functions
541 * provided by their bus types (and analogously for their modalias).
542 */
543struct acpi_device *acpi_companion_match(const struct device *dev)
544{
545 struct acpi_device *adev;
546
547 adev = ACPI_COMPANION(dev);
548 if (!adev)
549 return NULL;
550
551 if (list_empty(&adev->pnp.ids))
552 return NULL;
553
554 return acpi_primary_dev_companion(adev, dev);
555}
556
557/**
558 * acpi_of_match_device - Match device object using the "compatible" property.
559 * @adev: ACPI device object to match.
560 * @of_match_table: List of device IDs to match against.
561 *
562 * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of
563 * identifiers and a _DSD object with the "compatible" property, use that
564 * property to match against the given list of identifiers.
565 */
566static bool acpi_of_match_device(struct acpi_device *adev,
567 const struct of_device_id *of_match_table)
568{
569 const union acpi_object *of_compatible, *obj;
570 int i, nval;
571
572 if (!adev)
573 return false;
574
575 of_compatible = adev->data.of_compatible;
576 if (!of_match_table || !of_compatible)
577 return false;
578
579 if (of_compatible->type == ACPI_TYPE_PACKAGE) {
580 nval = of_compatible->package.count;
581 obj = of_compatible->package.elements;
582 } else { /* Must be ACPI_TYPE_STRING. */
583 nval = 1;
584 obj = of_compatible;
585 }
586 /* Now we can look for the driver DT compatible strings */
587 for (i = 0; i < nval; i++, obj++) {
588 const struct of_device_id *id;
589
590 for (id = of_match_table; id->compatible[0]; id++)
591 if (!strcasecmp(obj->string.pointer, id->compatible))
592 return true;
593 }
594
595 return false;
596}
597
598static bool __acpi_match_device_cls(const struct acpi_device_id *id,
599 struct acpi_hardware_id *hwid)
600{
601 int i, msk, byte_shift;
602 char buf[3];
603
604 if (!id->cls)
605 return false;
606
607 /* Apply class-code bitmask, before checking each class-code byte */
608 for (i = 1; i <= 3; i++) {
609 byte_shift = 8 * (3 - i);
610 msk = (id->cls_msk >> byte_shift) & 0xFF;
611 if (!msk)
612 continue;
613
614 sprintf(buf, "%02x", (id->cls >> byte_shift) & msk);
615 if (strncmp(buf, &hwid->id[(i - 1) * 2], 2))
616 return false;
617 }
618 return true;
619}
620
621static const struct acpi_device_id *__acpi_match_device(
622 struct acpi_device *device,
623 const struct acpi_device_id *ids,
624 const struct of_device_id *of_ids)
625{
626 const struct acpi_device_id *id;
627 struct acpi_hardware_id *hwid;
628
629 /*
630 * If the device is not present, it is unnecessary to load device
631 * driver for it.
632 */
633 if (!device || !device->status.present)
634 return NULL;
635
636 list_for_each_entry(hwid, &device->pnp.ids, list) {
637 /* First, check the ACPI/PNP IDs provided by the caller. */
638 for (id = ids; id->id[0] || id->cls; id++) {
639 if (id->id[0] && !strcmp((char *) id->id, hwid->id))
640 return id;
641 else if (id->cls && __acpi_match_device_cls(id, hwid))
642 return id;
643 }
644
645 /*
646 * Next, check ACPI_DT_NAMESPACE_HID and try to match the
647 * "compatible" property if found.
648 *
649 * The id returned by the below is not valid, but the only
650 * caller passing non-NULL of_ids here is only interested in
651 * whether or not the return value is NULL.
652 */
653 if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id)
654 && acpi_of_match_device(device, of_ids))
655 return id;
656 }
657 return NULL;
658}
659
660/**
661 * acpi_match_device - Match a struct device against a given list of ACPI IDs
662 * @ids: Array of struct acpi_device_id object to match against.
663 * @dev: The device structure to match.
664 *
665 * Check if @dev has a valid ACPI handle and if there is a struct acpi_device
666 * object for that handle and use that object to match against a given list of
667 * device IDs.
668 *
669 * Return a pointer to the first matching ID on success or %NULL on failure.
670 */
671const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
672 const struct device *dev)
673{
674 return __acpi_match_device(acpi_companion_match(dev), ids, NULL);
675}
676EXPORT_SYMBOL_GPL(acpi_match_device);
677
678int acpi_match_device_ids(struct acpi_device *device,
679 const struct acpi_device_id *ids)
680{
681 return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT;
682}
683EXPORT_SYMBOL(acpi_match_device_ids);
684
685bool acpi_driver_match_device(struct device *dev,
686 const struct device_driver *drv)
687{
688 if (!drv->acpi_match_table)
689 return acpi_of_match_device(ACPI_COMPANION(dev),
690 drv->of_match_table);
691
692 return !!__acpi_match_device(acpi_companion_match(dev),
693 drv->acpi_match_table, drv->of_match_table);
694}
695EXPORT_SYMBOL_GPL(acpi_driver_match_device);
696
697/* --------------------------------------------------------------------------
698 ACPI Driver Management
699 -------------------------------------------------------------------------- */
700
701/**
702 * acpi_bus_register_driver - register a driver with the ACPI bus
703 * @driver: driver being registered
704 *
705 * Registers a driver with the ACPI bus. Searches the namespace for all
706 * devices that match the driver's criteria and binds. Returns zero for
707 * success or a negative error status for failure.
708 */
709int acpi_bus_register_driver(struct acpi_driver *driver)
710{
711 int ret;
712
713 if (acpi_disabled)
714 return -ENODEV;
715 driver->drv.name = driver->name;
716 driver->drv.bus = &acpi_bus_type;
717 driver->drv.owner = driver->owner;
718
719 ret = driver_register(&driver->drv);
720 return ret;
721}
722
723EXPORT_SYMBOL(acpi_bus_register_driver);
724
725/**
726 * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
727 * @driver: driver to unregister
728 *
729 * Unregisters a driver with the ACPI bus. Searches the namespace for all
730 * devices that match the driver's criteria and unbinds.
731 */
732void acpi_bus_unregister_driver(struct acpi_driver *driver)
733{
734 driver_unregister(&driver->drv);
735}
736
737EXPORT_SYMBOL(acpi_bus_unregister_driver);
738
739/* --------------------------------------------------------------------------
740 ACPI Bus operations
741 -------------------------------------------------------------------------- */
742
743static int acpi_bus_match(struct device *dev, struct device_driver *drv)
744{
745 struct acpi_device *acpi_dev = to_acpi_device(dev);
746 struct acpi_driver *acpi_drv = to_acpi_driver(drv);
747
748 return acpi_dev->flags.match_driver
749 && !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
750}
751
752static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
753{
754 return __acpi_device_uevent_modalias(to_acpi_device(dev), env);
755}
756
757static int acpi_device_probe(struct device *dev)
758{
759 struct acpi_device *acpi_dev = to_acpi_device(dev);
760 struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
761 int ret;
762
763 if (acpi_dev->handler && !acpi_is_pnp_device(acpi_dev))
764 return -EINVAL;
765
766 if (!acpi_drv->ops.add)
767 return -ENOSYS;
768
769 ret = acpi_drv->ops.add(acpi_dev);
770 if (ret)
771 return ret;
772
773 acpi_dev->driver = acpi_drv;
774 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
775 "Driver [%s] successfully bound to device [%s]\n",
776 acpi_drv->name, acpi_dev->pnp.bus_id));
777
778 if (acpi_drv->ops.notify) {
779 ret = acpi_device_install_notify_handler(acpi_dev);
780 if (ret) {
781 if (acpi_drv->ops.remove)
782 acpi_drv->ops.remove(acpi_dev);
783
784 acpi_dev->driver = NULL;
785 acpi_dev->driver_data = NULL;
786 return ret;
787 }
788 }
789
790 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n",
791 acpi_drv->name, acpi_dev->pnp.bus_id));
792 get_device(dev);
793 return 0;
794}
795
796static int acpi_device_remove(struct device * dev)
797{
798 struct acpi_device *acpi_dev = to_acpi_device(dev);
799 struct acpi_driver *acpi_drv = acpi_dev->driver;
800
801 if (acpi_drv) {
802 if (acpi_drv->ops.notify)
803 acpi_device_remove_notify_handler(acpi_dev);
804 if (acpi_drv->ops.remove)
805 acpi_drv->ops.remove(acpi_dev);
806 }
807 acpi_dev->driver = NULL;
808 acpi_dev->driver_data = NULL;
809
810 put_device(dev);
811 return 0;
812}
813
814struct bus_type acpi_bus_type = {
815 .name = "acpi",
816 .match = acpi_bus_match,
817 .probe = acpi_device_probe,
818 .remove = acpi_device_remove,
819 .uevent = acpi_device_uevent,
820};
821
426/* -------------------------------------------------------------------------- 822/* --------------------------------------------------------------------------
427 Initialization/Cleanup 823 Initialization/Cleanup
428 -------------------------------------------------------------------------- */ 824 -------------------------------------------------------------------------- */
@@ -661,7 +1057,9 @@ static int __init acpi_bus_init(void)
661 */ 1057 */
662 acpi_root_dir = proc_mkdir(ACPI_BUS_FILE_ROOT, NULL); 1058 acpi_root_dir = proc_mkdir(ACPI_BUS_FILE_ROOT, NULL);
663 1059
664 return 0; 1060 result = bus_register(&acpi_bus_type);
1061 if (!result)
1062 return 0;
665 1063
666 /* Mimic structured exception handling */ 1064 /* Mimic structured exception handling */
667 error1: 1065 error1:
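
For context (not part of the patch): with acpi_bus_type now registered from acpi_bus_init(), an ACPI driver is still hooked up through acpi_bus_register_driver()/acpi_bus_unregister_driver() as documented above. A minimal sketch follows; the ABCD0001 ID and all abcd_* names are made up, and error handling is reduced to the bare minimum.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/module.h>

static int abcd_add(struct acpi_device *device)
{
	dev_info(&device->dev, "bound\n");
	return 0;
}

static int abcd_remove(struct acpi_device *device)
{
	return 0;
}

static const struct acpi_device_id abcd_ids[] = {
	{ "ABCD0001", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, abcd_ids);

static struct acpi_driver abcd_driver = {
	.name  = "abcd",
	.class = "abcd",
	.ids   = abcd_ids,
	.ops   = {
		.add    = abcd_add,		/* called from acpi_device_probe() */
		.remove = abcd_remove,		/* called from acpi_device_remove() */
	},
};

static int __init abcd_init(void)
{
	return acpi_bus_register_driver(&abcd_driver);
}
module_init(abcd_init);

static void __exit abcd_exit(void)
{
	acpi_bus_unregister_driver(&abcd_driver);
}
module_exit(abcd_exit);

MODULE_LICENSE("GPL");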
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 6d5d1832a588..5c3b0918d5fd 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -16,10 +16,6 @@
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details. 17 * General Public License for more details.
18 * 18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */ 20 */
25 21
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
index 6c9ee68e46fb..d0918d421f90 100644
--- a/drivers/acpi/cm_sbs.c
+++ b/drivers/acpi/cm_sbs.c
@@ -11,10 +11,6 @@
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details. 12 * General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
17 *
18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 14 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
19 */ 15 */
20 16
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index c8ead9f97375..12c240903c18 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -20,10 +20,6 @@
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details. 21 * General Public License for more details.
22 * 22 *
23 * You should have received a copy of the GNU General Public License along
24 * with this program; if not, write to the Free Software Foundation, Inc.,
25 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
26 *
27 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
28 */ 24 */
29#include <linux/acpi.h> 25#include <linux/acpi.h>
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 6b1919f6bd82..68bb305b977f 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -7,6 +7,8 @@
7#include <linux/debugfs.h> 7#include <linux/debugfs.h>
8#include <linux/acpi.h> 8#include <linux/acpi.h>
9 9
10#include "internal.h"
11
10#define _COMPONENT ACPI_SYSTEM_COMPONENT 12#define _COMPONENT ACPI_SYSTEM_COMPONENT
11ACPI_MODULE_NAME("debugfs"); 13ACPI_MODULE_NAME("debugfs");
12 14
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 88dbbb115285..4806b7f856c4 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -15,10 +15,6 @@
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details. 16 * General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 */ 19 */
24 20
@@ -1123,6 +1119,14 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
1123 if (dev->pm_domain) 1119 if (dev->pm_domain)
1124 return -EEXIST; 1120 return -EEXIST;
1125 1121
1122 /*
1123 * Only attach the power domain to the first physical device if the
1124 * ACPI companion is shared by multiple devices, to avoid doing its
1125 * power management twice.
1126 */
1127 if (!acpi_device_is_first_physical_node(adev, dev))
1128 return -EBUSY;
1129
1126 acpi_add_pm_notifier(adev, dev, acpi_pm_notify_work_func); 1130 acpi_add_pm_notifier(adev, dev, acpi_pm_notify_work_func);
1127 dev->pm_domain = &acpi_general_pm_domain; 1131 dev->pm_domain = &acpi_general_pm_domain;
1128 if (power_on) { 1132 if (power_on) {
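
For context (not part of the patch): the new first-physical-node check changes what callers of acpi_dev_pm_attach() see when several physical devices share one ACPI companion; the second and later ones now get -EBUSY. A hedged sketch of the usual caller pattern; the foo_* names are invented.

#include <linux/acpi.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/*
	 * Attach the ACPI power domain and power the device up.  With the
	 * change above, only the first physical device of a shared ACPI
	 * companion gets the domain; later ones see -EBUSY here.
	 */
	ret = acpi_dev_pm_attach(&pdev->dev, true);
	if (ret)
		return ret;

	/* ... device-specific setup would go here ... */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	acpi_dev_pm_detach(&pdev->dev, true);
	return 0;
}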
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
new file mode 100644
index 000000000000..4ab4582e586b
--- /dev/null
+++ b/drivers/acpi/device_sysfs.c
@@ -0,0 +1,521 @@
1/*
2 * drivers/acpi/device_sysfs.c - ACPI device sysfs attributes and modalias.
3 *
4 * Copyright (C) 2015, Intel Corp.
5 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
6 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
7 *
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as published
12 * by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 */
21
22#include <linux/acpi.h>
23#include <linux/device.h>
24#include <linux/export.h>
25#include <linux/nls.h>
26
27#include "internal.h"
28
29/**
30 * create_pnp_modalias - Create hid/cid(s) string for modalias and uevent
31 * @acpi_dev: ACPI device object.
32 * @modalias: Buffer to print into.
33 * @size: Size of the buffer.
34 *
35 * Creates hid/cid(s) string needed for modalias and uevent
36 * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
37 * char *modalias: "acpi:IBM0001:ACPI0001"
38 * Return: 0: no _HID and no _CID
39 * -EINVAL: output error
40 * -ENOMEM: output is truncated
41*/
42static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
43 int size)
44{
45 int len;
46 int count;
47 struct acpi_hardware_id *id;
48
49 /*
50 * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
51 * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
52 * device's list.
53 */
54 count = 0;
55 list_for_each_entry(id, &acpi_dev->pnp.ids, list)
56 if (strcmp(id->id, ACPI_DT_NAMESPACE_HID))
57 count++;
58
59 if (!count)
60 return 0;
61
62 len = snprintf(modalias, size, "acpi:");
63 if (len <= 0)
64 return len;
65
66 size -= len;
67
68 list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
69 if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID))
70 continue;
71
72 count = snprintf(&modalias[len], size, "%s:", id->id);
73 if (count < 0)
74 return -EINVAL;
75
76 if (count >= size)
77 return -ENOMEM;
78
79 len += count;
80 size -= count;
81 }
82 modalias[len] = '\0';
83 return len;
84}
85
86/**
87 * create_of_modalias - Creates DT compatible string for modalias and uevent
88 * @acpi_dev: ACPI device object.
89 * @modalias: Buffer to print into.
90 * @size: Size of the buffer.
91 *
92 * Expose DT compatible modalias as of:NnameTCcompatible. This function should
93 * only be called for devices having ACPI_DT_NAMESPACE_HID in their list of
94 * ACPI/PNP IDs.
95 */
96static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
97 int size)
98{
99 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
100 const union acpi_object *of_compatible, *obj;
101 int len, count;
102 int i, nval;
103 char *c;
104
105 acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
106 /* DT strings are all in lower case */
107 for (c = buf.pointer; *c != '\0'; c++)
108 *c = tolower(*c);
109
110 len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
111 ACPI_FREE(buf.pointer);
112
113 if (len <= 0)
114 return len;
115
116 of_compatible = acpi_dev->data.of_compatible;
117 if (of_compatible->type == ACPI_TYPE_PACKAGE) {
118 nval = of_compatible->package.count;
119 obj = of_compatible->package.elements;
120 } else { /* Must be ACPI_TYPE_STRING. */
121 nval = 1;
122 obj = of_compatible;
123 }
124 for (i = 0; i < nval; i++, obj++) {
125 count = snprintf(&modalias[len], size, "C%s",
126 obj->string.pointer);
127 if (count < 0)
128 return -EINVAL;
129
130 if (count >= size)
131 return -ENOMEM;
132
133 len += count;
134 size -= count;
135 }
136 modalias[len] = '\0';
137 return len;
138}
139
140int __acpi_device_uevent_modalias(struct acpi_device *adev,
141 struct kobj_uevent_env *env)
142{
143 int len;
144
145 if (!adev)
146 return -ENODEV;
147
148 if (list_empty(&adev->pnp.ids))
149 return 0;
150
151 if (add_uevent_var(env, "MODALIAS="))
152 return -ENOMEM;
153
154 len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
155 sizeof(env->buf) - env->buflen);
156 if (len < 0)
157 return len;
158
159 env->buflen += len;
160 if (!adev->data.of_compatible)
161 return 0;
162
163 if (len > 0 && add_uevent_var(env, "MODALIAS="))
164 return -ENOMEM;
165
166 len = create_of_modalias(adev, &env->buf[env->buflen - 1],
167 sizeof(env->buf) - env->buflen);
168 if (len < 0)
169 return len;
170
171 env->buflen += len;
172
173 return 0;
174}
175
176/**
177 * acpi_device_uevent_modalias - uevent modalias for ACPI-enumerated devices.
178 *
179 * Create the uevent modalias field for ACPI-enumerated devices.
180 *
181 * Other buses do not support ACPI HIDs and CIDs, so expose them here; e.g.
182 * for a device with hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001".
183 */
184int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
185{
186 return __acpi_device_uevent_modalias(acpi_companion_match(dev), env);
187}
188EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias);
189
190static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size)
191{
192 int len, count;
193
194 if (!adev)
195 return -ENODEV;
196
197 if (list_empty(&adev->pnp.ids))
198 return 0;
199
200 len = create_pnp_modalias(adev, buf, size - 1);
201 if (len < 0) {
202 return len;
203 } else if (len > 0) {
204 buf[len++] = '\n';
205 size -= len;
206 }
207 if (!adev->data.of_compatible)
208 return len;
209
210 count = create_of_modalias(adev, buf + len, size - 1);
211 if (count < 0) {
212 return count;
213 } else if (count > 0) {
214 len += count;
215 buf[len++] = '\n';
216 }
217
218 return len;
219}
220
221/**
222 * acpi_device_modalias - modalias sysfs attribute for ACPI-enumerated devices.
223 *
224 * Create the modalias sysfs attribute for ACPI-enumerated devices.
225 *
226 * Other buses do not support ACPI HIDs and CIDs, so expose them here; e.g.
227 * for a device with hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001".
228 */
229int acpi_device_modalias(struct device *dev, char *buf, int size)
230{
231 return __acpi_device_modalias(acpi_companion_match(dev), buf, size);
232}
233EXPORT_SYMBOL_GPL(acpi_device_modalias);
234
235static ssize_t
236acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) {
237 return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
238}
239static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
240
241static ssize_t real_power_state_show(struct device *dev,
242 struct device_attribute *attr, char *buf)
243{
244 struct acpi_device *adev = to_acpi_device(dev);
245 int state;
246 int ret;
247
248 ret = acpi_device_get_power(adev, &state);
249 if (ret)
250 return ret;
251
252 return sprintf(buf, "%s\n", acpi_power_state_string(state));
253}
254
255static DEVICE_ATTR(real_power_state, 0444, real_power_state_show, NULL);
256
257static ssize_t power_state_show(struct device *dev,
258 struct device_attribute *attr, char *buf)
259{
260 struct acpi_device *adev = to_acpi_device(dev);
261
262 return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state));
263}
264
265static DEVICE_ATTR(power_state, 0444, power_state_show, NULL);
266
267static ssize_t
268acpi_eject_store(struct device *d, struct device_attribute *attr,
269 const char *buf, size_t count)
270{
271 struct acpi_device *acpi_device = to_acpi_device(d);
272 acpi_object_type not_used;
273 acpi_status status;
274
275 if (!count || buf[0] != '1')
276 return -EINVAL;
277
278 if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled)
279 && !acpi_device->driver)
280 return -ENODEV;
281
282 status = acpi_get_type(acpi_device->handle, &not_used);
283 if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
284 return -ENODEV;
285
286 get_device(&acpi_device->dev);
287 status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT);
288 if (ACPI_SUCCESS(status))
289 return count;
290
291 put_device(&acpi_device->dev);
292 acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
293 ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
294 return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
295}
296
297static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
298
299static ssize_t
300acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
301 struct acpi_device *acpi_dev = to_acpi_device(dev);
302
303 return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
304}
305static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
306
307static ssize_t acpi_device_uid_show(struct device *dev,
308 struct device_attribute *attr, char *buf)
309{
310 struct acpi_device *acpi_dev = to_acpi_device(dev);
311
312 return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id);
313}
314static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL);
315
316static ssize_t acpi_device_adr_show(struct device *dev,
317 struct device_attribute *attr, char *buf)
318{
319 struct acpi_device *acpi_dev = to_acpi_device(dev);
320
321 return sprintf(buf, "0x%08x\n",
322 (unsigned int)(acpi_dev->pnp.bus_address));
323}
324static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
325
326static ssize_t
327acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) {
328 struct acpi_device *acpi_dev = to_acpi_device(dev);
329 struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
330 int result;
331
332 result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path);
333 if (result)
334 goto end;
335
336 result = sprintf(buf, "%s\n", (char*)path.pointer);
337 kfree(path.pointer);
338end:
339 return result;
340}
341static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
342
343/* sysfs file that shows description text from the ACPI _STR method */
344static ssize_t description_show(struct device *dev,
345 struct device_attribute *attr,
346 char *buf) {
347 struct acpi_device *acpi_dev = to_acpi_device(dev);
348 int result;
349
350 if (acpi_dev->pnp.str_obj == NULL)
351 return 0;
352
353 /*
354 * The _STR object contains a Unicode identifier for a device.
355 * We need to convert to utf-8 so it can be displayed.
356 */
357 result = utf16s_to_utf8s(
358 (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
359 acpi_dev->pnp.str_obj->buffer.length,
360 UTF16_LITTLE_ENDIAN, buf,
361 PAGE_SIZE);
362
363 buf[result++] = '\n';
364
365 return result;
366}
367static DEVICE_ATTR(description, 0444, description_show, NULL);
368
369static ssize_t
370acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
371 char *buf) {
372 struct acpi_device *acpi_dev = to_acpi_device(dev);
373 acpi_status status;
374 unsigned long long sun;
375
376 status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun);
377 if (ACPI_FAILURE(status))
378 return -ENODEV;
379
380 return sprintf(buf, "%llu\n", sun);
381}
382static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
383
384static ssize_t status_show(struct device *dev, struct device_attribute *attr,
385 char *buf) {
386 struct acpi_device *acpi_dev = to_acpi_device(dev);
387 acpi_status status;
388 unsigned long long sta;
389
390 status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta);
391 if (ACPI_FAILURE(status))
392 return -ENODEV;
393
394 return sprintf(buf, "%llu\n", sta);
395}
396static DEVICE_ATTR_RO(status);
397
398/**
399 * acpi_device_setup_files - Create sysfs attributes of an ACPI device.
400 * @dev: ACPI device object.
401 */
402int acpi_device_setup_files(struct acpi_device *dev)
403{
404 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
405 acpi_status status;
406 int result = 0;
407
408 /*
409 * Devices obtained from the FADT don't have a "path" attribute
410 */
411 if (dev->handle) {
412 result = device_create_file(&dev->dev, &dev_attr_path);
413 if (result)
414 goto end;
415 }
416
417 if (!list_empty(&dev->pnp.ids)) {
418 result = device_create_file(&dev->dev, &dev_attr_hid);
419 if (result)
420 goto end;
421
422 result = device_create_file(&dev->dev, &dev_attr_modalias);
423 if (result)
424 goto end;
425 }
426
427 /*
428 * If device has _STR, 'description' file is created
429 */
430 if (acpi_has_method(dev->handle, "_STR")) {
431 status = acpi_evaluate_object(dev->handle, "_STR",
432 NULL, &buffer);
433 if (ACPI_FAILURE(status))
434 buffer.pointer = NULL;
435 dev->pnp.str_obj = buffer.pointer;
436 result = device_create_file(&dev->dev, &dev_attr_description);
437 if (result)
438 goto end;
439 }
440
441 if (dev->pnp.type.bus_address)
442 result = device_create_file(&dev->dev, &dev_attr_adr);
443 if (dev->pnp.unique_id)
444 result = device_create_file(&dev->dev, &dev_attr_uid);
445
446 if (acpi_has_method(dev->handle, "_SUN")) {
447 result = device_create_file(&dev->dev, &dev_attr_sun);
448 if (result)
449 goto end;
450 }
451
452 if (acpi_has_method(dev->handle, "_STA")) {
453 result = device_create_file(&dev->dev, &dev_attr_status);
454 if (result)
455 goto end;
456 }
457
458 /*
459 * If the device has _EJ0, an 'eject' file is created that is used to
460 * trigger the hot-removal function from userland.
461 */
462 if (acpi_has_method(dev->handle, "_EJ0")) {
463 result = device_create_file(&dev->dev, &dev_attr_eject);
464 if (result)
465 return result;
466 }
467
468 if (dev->flags.power_manageable) {
469 result = device_create_file(&dev->dev, &dev_attr_power_state);
470 if (result)
471 return result;
472
473 if (dev->power.flags.power_resources)
474 result = device_create_file(&dev->dev,
475 &dev_attr_real_power_state);
476 }
477
478end:
479 return result;
480}
481
482/**
483 * acpi_device_remove_files - Remove sysfs attributes of an ACPI device.
484 * @dev: ACPI device object.
485 */
486void acpi_device_remove_files(struct acpi_device *dev)
487{
488 if (dev->flags.power_manageable) {
489 device_remove_file(&dev->dev, &dev_attr_power_state);
490 if (dev->power.flags.power_resources)
491 device_remove_file(&dev->dev,
492 &dev_attr_real_power_state);
493 }
494
495 /*
496 * If device has _STR, remove 'description' file
497 */
498 if (acpi_has_method(dev->handle, "_STR")) {
499 kfree(dev->pnp.str_obj);
500 device_remove_file(&dev->dev, &dev_attr_description);
501 }
502 /*
503 * If device has _EJ0, remove 'eject' file.
504 */
505 if (acpi_has_method(dev->handle, "_EJ0"))
506 device_remove_file(&dev->dev, &dev_attr_eject);
507
508 if (acpi_has_method(dev->handle, "_SUN"))
509 device_remove_file(&dev->dev, &dev_attr_sun);
510
511 if (dev->pnp.unique_id)
512 device_remove_file(&dev->dev, &dev_attr_uid);
513 if (dev->pnp.type.bus_address)
514 device_remove_file(&dev->dev, &dev_attr_adr);
515 device_remove_file(&dev->dev, &dev_attr_modalias);
516 device_remove_file(&dev->dev, &dev_attr_hid);
517 if (acpi_has_method(dev->handle, "_STA"))
518 device_remove_file(&dev->dev, &dev_attr_status);
519 if (dev->handle)
520 device_remove_file(&dev->dev, &dev_attr_path);
521}
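
For context (not part of the patch): the helpers above are meant to be called from bus code. A minimal sketch of how a hypothetical bus could forward MODALIAS generation to them, similar in spirit to what the platform bus does; the mybus_* name is invented.

#include <linux/acpi.h>
#include <linux/device.h>

static int mybus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	int ret;

	/* Emits MODALIAS=acpi:HID:CID... (plus of:NnameTCcompat when present). */
	ret = acpi_device_uevent_modalias(dev, env);
	if (ret != -ENODEV)
		return ret;

	/* No ACPI companion: fall back to the bus's native modalias scheme. */
	return 0;
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int len = acpi_device_modalias(dev, buf, PAGE_SIZE);

	return len == -ENODEV ? 0 : len;
}
static DEVICE_ATTR_RO(modalias);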
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index a688aa243f6c..e8e128dede29 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -17,10 +17,6 @@
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details. 18 * General Public License for more details.
19 * 19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
23 *
24 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 20 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25 */ 21 */
26 22
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 9d4761d2f6b7..2614a839c60d 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -22,10 +22,6 @@
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 * General Public License for more details. 23 * General Public License for more details.
24 * 24 *
25 * You should have received a copy of the GNU General Public License along
26 * with this program; if not, write to the Free Software Foundation, Inc.,
27 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
28 *
29 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 25 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
30 */ 26 */
31 27
@@ -165,8 +161,16 @@ struct transaction {
165 u8 flags; 161 u8 flags;
166}; 162};
167 163
164struct acpi_ec_query {
165 struct transaction transaction;
166 struct work_struct work;
167 struct acpi_ec_query_handler *handler;
168};
169
168static int acpi_ec_query(struct acpi_ec *ec, u8 *data); 170static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
169static void advance_transaction(struct acpi_ec *ec); 171static void advance_transaction(struct acpi_ec *ec);
172static void acpi_ec_event_handler(struct work_struct *work);
173static void acpi_ec_event_processor(struct work_struct *work);
170 174
171struct acpi_ec *boot_ec, *first_ec; 175struct acpi_ec *boot_ec, *first_ec;
172EXPORT_SYMBOL(first_ec); 176EXPORT_SYMBOL(first_ec);
@@ -978,60 +982,90 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
978} 982}
979EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); 983EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
980 984
981static void acpi_ec_run(void *cxt) 985static struct acpi_ec_query *acpi_ec_create_query(u8 *pval)
982{ 986{
983 struct acpi_ec_query_handler *handler = cxt; 987 struct acpi_ec_query *q;
988 struct transaction *t;
989
990 q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL);
991 if (!q)
992 return NULL;
993 INIT_WORK(&q->work, acpi_ec_event_processor);
994 t = &q->transaction;
995 t->command = ACPI_EC_COMMAND_QUERY;
996 t->rdata = pval;
997 t->rlen = 1;
998 return q;
999}
1000
1001static void acpi_ec_delete_query(struct acpi_ec_query *q)
1002{
1003 if (q) {
1004 if (q->handler)
1005 acpi_ec_put_query_handler(q->handler);
1006 kfree(q);
1007 }
1008}
1009
1010static void acpi_ec_event_processor(struct work_struct *work)
1011{
1012 struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
1013 struct acpi_ec_query_handler *handler = q->handler;
984 1014
985 if (!handler)
986 return;
987 ec_dbg_evt("Query(0x%02x) started", handler->query_bit); 1015 ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
988 if (handler->func) 1016 if (handler->func)
989 handler->func(handler->data); 1017 handler->func(handler->data);
990 else if (handler->handle) 1018 else if (handler->handle)
991 acpi_evaluate_object(handler->handle, NULL, NULL, NULL); 1019 acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
992 ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit); 1020 ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
993 acpi_ec_put_query_handler(handler); 1021 acpi_ec_delete_query(q);
994} 1022}
995 1023
996static int acpi_ec_query(struct acpi_ec *ec, u8 *data) 1024static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
997{ 1025{
998 u8 value = 0; 1026 u8 value = 0;
999 int result; 1027 int result;
1000 acpi_status status;
1001 struct acpi_ec_query_handler *handler; 1028 struct acpi_ec_query_handler *handler;
1002 struct transaction t = {.command = ACPI_EC_COMMAND_QUERY, 1029 struct acpi_ec_query *q;
1003 .wdata = NULL, .rdata = &value, 1030
1004 .wlen = 0, .rlen = 1}; 1031 q = acpi_ec_create_query(&value);
1032 if (!q)
1033 return -ENOMEM;
1005 1034
1006 /* 1035 /*
1007 * Query the EC to find out which _Qxx method we need to evaluate. 1036 * Query the EC to find out which _Qxx method we need to evaluate.
1008 * Note that successful completion of the query causes the ACPI_EC_SCI 1037 * Note that successful completion of the query causes the ACPI_EC_SCI
1009 * bit to be cleared (and thus clearing the interrupt source). 1038 * bit to be cleared (and thus clearing the interrupt source).
1010 */ 1039 */
1011 result = acpi_ec_transaction(ec, &t); 1040 result = acpi_ec_transaction(ec, &q->transaction);
1012 if (result)
1013 return result;
1014 if (data)
1015 *data = value;
1016 if (!value) 1041 if (!value)
1017 return -ENODATA; 1042 result = -ENODATA;
1043 if (result)
1044 goto err_exit;
1018 1045
1019 mutex_lock(&ec->mutex); 1046 mutex_lock(&ec->mutex);
1020 list_for_each_entry(handler, &ec->list, node) { 1047 list_for_each_entry(handler, &ec->list, node) {
1021 if (value == handler->query_bit) { 1048 if (value == handler->query_bit) {
1022 /* have custom handler for this bit */ 1049 q->handler = acpi_ec_get_query_handler(handler);
1023 handler = acpi_ec_get_query_handler(handler);
1024 ec_dbg_evt("Query(0x%02x) scheduled", 1050 ec_dbg_evt("Query(0x%02x) scheduled",
1025 handler->query_bit); 1051 q->handler->query_bit);
1026 status = acpi_os_execute((handler->func) ? 1052 /*
1027 OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER, 1053 * It is reported that _Qxx are evaluated in a
1028 acpi_ec_run, handler); 1054 * parallel way on Windows:
1029 if (ACPI_FAILURE(status)) 1055 * https://bugzilla.kernel.org/show_bug.cgi?id=94411
1056 */
1057 if (!schedule_work(&q->work))
1030 result = -EBUSY; 1058 result = -EBUSY;
1031 break; 1059 break;
1032 } 1060 }
1033 } 1061 }
1034 mutex_unlock(&ec->mutex); 1062 mutex_unlock(&ec->mutex);
1063
1064err_exit:
1065 if (result && q)
1066 acpi_ec_delete_query(q);
1067 if (data)
1068 *data = value;
1035 return result; 1069 return result;
1036} 1070}
1037 1071
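
For context (not part of the patch): the EC change replaces acpi_os_execute() with one work item per query so that _Qxx evaluations can run concurrently. The generic shape of that allocate/INIT_WORK/schedule_work/free pattern, with invented demo_* names, looks roughly like this:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_event {
	struct work_struct work;
	u8 value;
};

static void demo_event_processor(struct work_struct *work)
{
	struct demo_event *ev = container_of(work, struct demo_event, work);

	pr_info("processing event 0x%02x\n", ev->value);
	kfree(ev);		/* each event owns and frees its own work item */
}

static int demo_queue_event(u8 value)
{
	struct demo_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;
	INIT_WORK(&ev->work, demo_event_processor);
	ev->value = value;

	/* schedule_work() returns false if the item was already queued. */
	if (!schedule_work(&ev->work)) {
		kfree(ev);
		return -EBUSY;
	}
	return 0;
}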
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index bea0bbaafa97..e297a480e135 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -16,10 +16,6 @@
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details. 17 * General Public License for more details.
18 * 18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */ 20 */
25 21
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
index a322710b5ba4..5c67a6d8f803 100644
--- a/drivers/acpi/hed.c
+++ b/drivers/acpi/hed.c
@@ -15,10 +15,6 @@
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details. 17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */ 18 */
23 19
24#include <linux/kernel.h> 20#include <linux/kernel.h>
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 4683a96932b9..9e426210c2a8 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -13,9 +13,6 @@
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details. 14 * more details.
15 * 15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 */ 16 */
20 17
21#ifndef _ACPI_INTERNAL_H_ 18#ifndef _ACPI_INTERNAL_H_
@@ -70,7 +67,7 @@ void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val);
70 67
71#ifdef CONFIG_DEBUG_FS 68#ifdef CONFIG_DEBUG_FS
72extern struct dentry *acpi_debugfs_dir; 69extern struct dentry *acpi_debugfs_dir;
73int acpi_debugfs_init(void); 70void acpi_debugfs_init(void);
74#else 71#else
75static inline void acpi_debugfs_init(void) { return; } 72static inline void acpi_debugfs_init(void) { return; }
76#endif 73#endif
@@ -93,10 +90,21 @@ int acpi_device_add(struct acpi_device *device,
93 void (*release)(struct device *)); 90 void (*release)(struct device *));
94void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, 91void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
95 int type, unsigned long long sta); 92 int type, unsigned long long sta);
93int acpi_device_setup_files(struct acpi_device *dev);
94void acpi_device_remove_files(struct acpi_device *dev);
96void acpi_device_add_finalize(struct acpi_device *device); 95void acpi_device_add_finalize(struct acpi_device *device);
97void acpi_free_pnp_ids(struct acpi_device_pnp *pnp); 96void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
98bool acpi_device_is_present(struct acpi_device *adev); 97bool acpi_device_is_present(struct acpi_device *adev);
99bool acpi_device_is_battery(struct acpi_device *adev); 98bool acpi_device_is_battery(struct acpi_device *adev);
99bool acpi_device_is_first_physical_node(struct acpi_device *adev,
100 const struct device *dev);
101
102/* --------------------------------------------------------------------------
103 Device Matching and Notification
104 -------------------------------------------------------------------------- */
105struct acpi_device *acpi_companion_match(const struct device *dev);
106int __acpi_device_uevent_modalias(struct acpi_device *adev,
107 struct kobj_uevent_env *env);
100 108
101/* -------------------------------------------------------------------------- 109/* --------------------------------------------------------------------------
102 Power Resource 110 Power Resource
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index acaa3b4ea504..72b6e9ef0ae9 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -15,10 +15,6 @@
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 * 19 *
24 */ 20 */
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 3b8963f21b36..739a4a6b3b9b 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -19,10 +19,6 @@
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details. 20 * GNU General Public License for more details.
21 * 21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27 * 23 *
28 */ 24 */
@@ -47,6 +43,7 @@
47 43
48#include <asm/io.h> 44#include <asm/io.h>
49#include <asm/uaccess.h> 45#include <asm/uaccess.h>
46#include <asm-generic/io-64-nonatomic-lo-hi.h>
50 47
51#include "internal.h" 48#include "internal.h"
52 49
@@ -83,6 +80,7 @@ static void *acpi_irq_context;
83static struct workqueue_struct *kacpid_wq; 80static struct workqueue_struct *kacpid_wq;
84static struct workqueue_struct *kacpi_notify_wq; 81static struct workqueue_struct *kacpi_notify_wq;
85static struct workqueue_struct *kacpi_hotplug_wq; 82static struct workqueue_struct *kacpi_hotplug_wq;
83static bool acpi_os_initialized;
86 84
87/* 85/*
88 * This list of permanent mappings is for memory that may be accessed from 86 * This list of permanent mappings is for memory that may be accessed from
@@ -947,21 +945,6 @@ acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
947 945
948EXPORT_SYMBOL(acpi_os_write_port); 946EXPORT_SYMBOL(acpi_os_write_port);
949 947
950#ifdef readq
951static inline u64 read64(const volatile void __iomem *addr)
952{
953 return readq(addr);
954}
955#else
956static inline u64 read64(const volatile void __iomem *addr)
957{
958 u64 l, h;
959 l = readl(addr);
960 h = readl(addr+4);
961 return l | (h << 32);
962}
963#endif
964
965acpi_status 948acpi_status
966acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width) 949acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
967{ 950{
@@ -994,7 +977,7 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
994 *(u32 *) value = readl(virt_addr); 977 *(u32 *) value = readl(virt_addr);
995 break; 978 break;
996 case 64: 979 case 64:
997 *(u64 *) value = read64(virt_addr); 980 *(u64 *) value = readq(virt_addr);
998 break; 981 break;
999 default: 982 default:
1000 BUG(); 983 BUG();
@@ -1008,19 +991,6 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
1008 return AE_OK; 991 return AE_OK;
1009} 992}
1010 993
1011#ifdef writeq
1012static inline void write64(u64 val, volatile void __iomem *addr)
1013{
1014 writeq(val, addr);
1015}
1016#else
1017static inline void write64(u64 val, volatile void __iomem *addr)
1018{
1019 writel(val, addr);
1020 writel(val>>32, addr+4);
1021}
1022#endif
1023
1024acpi_status 994acpi_status
1025acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width) 995acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
1026{ 996{
@@ -1049,7 +1019,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
1049 writel(value, virt_addr); 1019 writel(value, virt_addr);
1050 break; 1020 break;
1051 case 64: 1021 case 64:
1052 write64(value, virt_addr); 1022 writeq(value, virt_addr);
1053 break; 1023 break;
1054 default: 1024 default:
1055 BUG(); 1025 BUG();
@@ -1316,6 +1286,9 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
1316 long jiffies; 1286 long jiffies;
1317 int ret = 0; 1287 int ret = 0;
1318 1288
1289 if (!acpi_os_initialized)
1290 return AE_OK;
1291
1319 if (!sem || (units < 1)) 1292 if (!sem || (units < 1))
1320 return AE_BAD_PARAMETER; 1293 return AE_BAD_PARAMETER;
1321 1294
@@ -1355,6 +1328,9 @@ acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
1355{ 1328{
1356 struct semaphore *sem = (struct semaphore *)handle; 1329 struct semaphore *sem = (struct semaphore *)handle;
1357 1330
1331 if (!acpi_os_initialized)
1332 return AE_OK;
1333
1358 if (!sem || (units < 1)) 1334 if (!sem || (units < 1))
1359 return AE_BAD_PARAMETER; 1335 return AE_BAD_PARAMETER;
1360 1336
@@ -1863,6 +1839,7 @@ acpi_status __init acpi_os_initialize(void)
1863 rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register); 1839 rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
1864 pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv); 1840 pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
1865 } 1841 }
1842 acpi_os_initialized = true;
1866 1843
1867 return AE_OK; 1844 return AE_OK;
1868} 1845}
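
For context (not part of the patch): dropping the local read64()/write64() helpers relies on <asm-generic/io-64-nonatomic-lo-hi.h> to supply readq()/writeq() on architectures without native 64-bit MMIO. A sketch of what that lo-hi fallback amounts to; the lo_hi_* names are illustrative and not the header's exact implementation.

#include <linux/io.h>
#include <linux/types.h>

static inline u64 lo_hi_readq(const volatile void __iomem *addr)
{
	u64 low  = readl(addr);		/* low 32 bits first */
	u64 high = readl(addr + 4);	/* then the high 32 bits */

	return low | (high << 32);
}

static inline void lo_hi_writeq(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);		/* low 32 bits first */
	writel(val >> 32, addr + 4);	/* then the high 32 bits */
}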
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index afa16c557c17..6da0f9beab19 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -19,10 +19,6 @@
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details. 20 * General Public License for more details.
21 * 21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
25 *
26 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27 */ 23 */
28 24
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index cfd7581cc19f..3b4ea98e3ea0 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -17,10 +17,6 @@
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details. 18 * General Public License for more details.
19 * 19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
23 *
24 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 20 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25 * 21 *
26 * TBD: 22 * TBD:
@@ -826,6 +822,22 @@ void acpi_penalize_isa_irq(int irq, int active)
826} 822}
827 823
828/* 824/*
825 * Penalize the IRQ used by the ACPI SCI. If the ACPI SCI pin attributes
826 * conflict with PCI IRQ attributes, mark the ACPI SCI as ISA_ALWAYS so it
827 * won't be used for PCI IRQs.
828 */
829void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
830{
831 if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
832 if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
833 polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
834 acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS;
835 else
836 acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
837 }
838}
839
840/*
829 * Over-ride default table to reserve additional IRQs for use by ISA 841 * Over-ride default table to reserve additional IRQs for use by ISA
830 * e.g. acpi_irq_isa=5 842 * e.g. acpi_irq_isa=5
831 * Useful for telling ACPI how not to interfere with your ISA sound card. 843 * Useful for telling ACPI how not to interfere with your ISA sound card.
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 1b5569c092c6..393706a5261b 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -16,10 +16,6 @@
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details. 17 * General Public License for more details.
18 * 18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */ 20 */
25 21
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index 139d9e479370..7188e53b6b7c 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -20,10 +20,6 @@
20 * WITHOUT ANY WARRANTY; without even the implied warranty of 20 * WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * General Public License for more details. 22 * General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License along
25 * with this program; if not, write to the Free Software Foundation, Inc.,
26 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
27 */ 23 */
28 24
29#include <linux/kernel.h> 25#include <linux/kernel.h>
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 93eac53b5110..fcd4ce6f78d5 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -1,8 +1,10 @@
1/* 1/*
2 * acpi_power.c - ACPI Bus Power Management ($Revision: 39 $) 2 * drivers/acpi/power.c - ACPI Power Resources management.
3 * 3 *
4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 4 * Copyright (C) 2001 - 2015 Intel Corp.
5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 5 * Author: Andy Grover <andrew.grover@intel.com>
6 * Author: Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
6 * 8 *
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 9 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 * 10 *
@@ -16,10 +18,6 @@
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details. 19 * General Public License for more details.
18 * 20 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 21 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */ 22 */
25 23
@@ -27,10 +25,11 @@
27 * ACPI power-managed devices may be controlled in two ways: 25 * ACPI power-managed devices may be controlled in two ways:
28 * 1. via "Device Specific (D-State) Control" 26 * 1. via "Device Specific (D-State) Control"
29 * 2. via "Power Resource Control". 27 * 2. via "Power Resource Control".
30 * This module is used to manage devices relying on Power Resource Control. 28 * The code below deals with ACPI Power Resources control.
31 * 29 *
32 * An ACPI "power resource object" describes a software controllable power 30 * An ACPI "power resource object" represents a software controllable power
33 * plane, clock plane, or other resource used by a power managed device. 31 * plane, clock plane, or other resource depended on by a device.
32 *
34 * A device may rely on multiple power resources, and a power resource 33 * A device may rely on multiple power resources, and a power resource
35 * may be shared by multiple devices. 34 * may be shared by multiple devices.
36 */ 35 */
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index d9f71581b79b..51e658f21e95 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -21,10 +21,6 @@
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * General Public License for more details. 22 * General Public License for more details.
23 * 23 *
24 * You should have received a copy of the GNU General Public License along
25 * with this program; if not, write to the Free Software Foundation, Inc.,
26 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
27 *
28 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 24 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
29 */ 25 */
30 26
@@ -159,38 +155,28 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
159 return NOTIFY_OK; 155 return NOTIFY_OK;
160} 156}
161 157
162static struct notifier_block __refdata acpi_cpu_notifier = { 158static struct notifier_block acpi_cpu_notifier = {
163 .notifier_call = acpi_cpu_soft_notify, 159 .notifier_call = acpi_cpu_soft_notify,
164}; 160};
165 161
166static int __acpi_processor_start(struct acpi_device *device) 162#ifdef CONFIG_ACPI_CPU_FREQ_PSS
163static int acpi_pss_perf_init(struct acpi_processor *pr,
164 struct acpi_device *device)
167{ 165{
168 struct acpi_processor *pr = acpi_driver_data(device);
169 acpi_status status;
170 int result = 0; 166 int result = 0;
171 167
172 if (!pr)
173 return -ENODEV;
174
175 if (pr->flags.need_hotplug_init)
176 return 0;
177
178#ifdef CONFIG_CPU_FREQ
179 acpi_processor_ppc_has_changed(pr, 0); 168 acpi_processor_ppc_has_changed(pr, 0);
180#endif 169
181 acpi_processor_get_throttling_info(pr); 170 acpi_processor_get_throttling_info(pr);
182 171
183 if (pr->flags.throttling) 172 if (pr->flags.throttling)
184 pr->flags.limit = 1; 173 pr->flags.limit = 1;
185 174
186 if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
187 acpi_processor_power_init(pr);
188
189 pr->cdev = thermal_cooling_device_register("Processor", device, 175 pr->cdev = thermal_cooling_device_register("Processor", device,
190 &processor_cooling_ops); 176 &processor_cooling_ops);
191 if (IS_ERR(pr->cdev)) { 177 if (IS_ERR(pr->cdev)) {
192 result = PTR_ERR(pr->cdev); 178 result = PTR_ERR(pr->cdev);
193 goto err_power_exit; 179 return result;
194 } 180 }
195 181
196 dev_dbg(&device->dev, "registered as cooling_device%d\n", 182 dev_dbg(&device->dev, "registered as cooling_device%d\n",
@@ -204,6 +190,7 @@ static int __acpi_processor_start(struct acpi_device *device)
204 "Failed to create sysfs link 'thermal_cooling'\n"); 190 "Failed to create sysfs link 'thermal_cooling'\n");
205 goto err_thermal_unregister; 191 goto err_thermal_unregister;
206 } 192 }
193
207 result = sysfs_create_link(&pr->cdev->device.kobj, 194 result = sysfs_create_link(&pr->cdev->device.kobj,
208 &device->dev.kobj, 195 &device->dev.kobj,
209 "device"); 196 "device");
@@ -213,17 +200,61 @@ static int __acpi_processor_start(struct acpi_device *device)
213 goto err_remove_sysfs_thermal; 200 goto err_remove_sysfs_thermal;
214 } 201 }
215 202
216 status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
217 acpi_processor_notify, device);
218 if (ACPI_SUCCESS(status))
219 return 0;
220
221 sysfs_remove_link(&pr->cdev->device.kobj, "device"); 203 sysfs_remove_link(&pr->cdev->device.kobj, "device");
222 err_remove_sysfs_thermal: 204 err_remove_sysfs_thermal:
223 sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); 205 sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
224 err_thermal_unregister: 206 err_thermal_unregister:
225 thermal_cooling_device_unregister(pr->cdev); 207 thermal_cooling_device_unregister(pr->cdev);
226 err_power_exit: 208
209 return result;
210}
211
212static void acpi_pss_perf_exit(struct acpi_processor *pr,
213 struct acpi_device *device)
214{
215 if (pr->cdev) {
216 sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
217 sysfs_remove_link(&pr->cdev->device.kobj, "device");
218 thermal_cooling_device_unregister(pr->cdev);
219 pr->cdev = NULL;
220 }
221}
222#else
223static inline int acpi_pss_perf_init(struct acpi_processor *pr,
224 struct acpi_device *device)
225{
226 return 0;
227}
228
229static inline void acpi_pss_perf_exit(struct acpi_processor *pr,
230 struct acpi_device *device) {}
231#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
232
233static int __acpi_processor_start(struct acpi_device *device)
234{
235 struct acpi_processor *pr = acpi_driver_data(device);
236 acpi_status status;
237 int result = 0;
238
239 if (!pr)
240 return -ENODEV;
241
242 if (pr->flags.need_hotplug_init)
243 return 0;
244
245 if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
246 acpi_processor_power_init(pr);
247
248 result = acpi_pss_perf_init(pr, device);
249 if (result)
250 goto err_power_exit;
251
252 status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
253 acpi_processor_notify, device);
254 if (ACPI_SUCCESS(status))
255 return 0;
256
257err_power_exit:
227 acpi_processor_power_exit(pr); 258 acpi_processor_power_exit(pr);
228 return result; 259 return result;
229} 260}
@@ -252,15 +283,10 @@ static int acpi_processor_stop(struct device *dev)
252 pr = acpi_driver_data(device); 283 pr = acpi_driver_data(device);
253 if (!pr) 284 if (!pr)
254 return 0; 285 return 0;
255
256 acpi_processor_power_exit(pr); 286 acpi_processor_power_exit(pr);
257 287
258 if (pr->cdev) { 288 acpi_pss_perf_exit(pr, device);
259 sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); 289
260 sysfs_remove_link(&pr->cdev->device.kobj, "device");
261 thermal_cooling_device_unregister(pr->cdev);
262 pr->cdev = NULL;
263 }
264 return 0; 290 return 0;
265} 291}
266 292
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d540f42c9232..175c86bee3a9 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -21,10 +21,6 @@
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * General Public License for more details. 22 * General Public License for more details.
23 * 23 *
24 * You should have received a copy of the GNU General Public License along
25 * with this program; if not, write to the Free Software Foundation, Inc.,
26 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
27 *
28 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 24 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
29 */ 25 */
30 26
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index cfc8aba72f86..bb01dea39fdc 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -20,10 +20,6 @@
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details. 21 * General Public License for more details.
22 * 22 *
23 * You should have received a copy of the GNU General Public License along
24 * with this program; if not, write to the Free Software Foundation, Inc.,
25 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
26 *
27 */ 23 */
28 24
29#include <linux/kernel.h> 25#include <linux/kernel.h>
@@ -87,7 +83,7 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
87 if (ignore_ppc) 83 if (ignore_ppc)
88 return 0; 84 return 0;
89 85
90 if (event != CPUFREQ_INCOMPATIBLE) 86 if (event != CPUFREQ_ADJUST)
91 return 0; 87 return 0;
92 88
93 mutex_lock(&performance_mutex); 89 mutex_lock(&performance_mutex);
@@ -784,9 +780,7 @@ acpi_processor_register_performance(struct acpi_processor_performance
784 780
785EXPORT_SYMBOL(acpi_processor_register_performance); 781EXPORT_SYMBOL(acpi_processor_register_performance);
786 782
787void 783void acpi_processor_unregister_performance(unsigned int cpu)
788acpi_processor_unregister_performance(struct acpi_processor_performance
789 *performance, unsigned int cpu)
790{ 784{
791 struct acpi_processor *pr; 785 struct acpi_processor *pr;
792 786
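
For context (not part of the hunk above): acpi_processor_unregister_performance() now takes only the CPU number, so callers (typically cpufreq drivers) drop the performance-structure argument. A hedged caller-side sketch with an invented function name:

#include <acpi/processor.h>

static void my_cpufreq_driver_exit(unsigned int cpu)
{
	/*
	 * Old call: acpi_processor_unregister_performance(perf, cpu);
	 * New call takes only the CPU number:
	 */
	acpi_processor_unregister_performance(cpu);
}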
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index e003663b2f8e..1fed84a092c2 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -19,10 +19,6 @@
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details. 20 * General Public License for more details.
21 * 21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
25 *
26 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27 */ 23 */
28 24
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 84243c32e29c..f170d746336d 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -19,10 +19,6 @@
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details. 20 * General Public License for more details.
21 * 21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
25 *
26 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27 */ 23 */
28 24
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 7836e2e980f4..6d99450549c5 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -528,13 +528,14 @@ int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
528 528
529 if (!val) 529 if (!val)
530 return obj->package.count; 530 return obj->package.count;
531 else if (nval <= 0)
532 return -EINVAL;
533 531
534 if (nval > obj->package.count) 532 if (nval > obj->package.count)
535 return -EOVERFLOW; 533 return -EOVERFLOW;
534 else if (nval <= 0)
535 return -EINVAL;
536 536
537 items = obj->package.elements; 537 items = obj->package.elements;
538
538 switch (proptype) { 539 switch (proptype) {
539 case DEV_PROP_U8: 540 case DEV_PROP_U8:
540 ret = acpi_copy_property_array_u8(items, (u8 *)val, nval); 541 ret = acpi_copy_property_array_u8(items, (u8 *)val, nval);
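
For context (not part of the patch): the swapped checks report -EOVERFLOW for oversized requests before the nval <= 0 sanity check. A typical caller first asks for the element count by passing val == NULL, then reads exactly that many items. Sketch only; the "channel-map" property and read_channel_map() are invented.

#include <linux/acpi.h>
#include <linux/property.h>
#include <linux/slab.h>

static int read_channel_map(struct acpi_device *adev, u32 **map, int *count)
{
	int n, ret;
	u32 *vals;

	/* With val == NULL the helper returns the number of package elements. */
	n = acpi_dev_prop_read(adev, "channel-map", DEV_PROP_U32, NULL, 0);
	if (n <= 0)
		return n ? n : -ENODATA;

	vals = kcalloc(n, sizeof(*vals), GFP_KERNEL);
	if (!vals)
		return -ENOMEM;

	/* Asking for more than n elements would now fail with -EOVERFLOW. */
	ret = acpi_dev_prop_read(adev, "channel-map", DEV_PROP_U32, vals, n);
	if (ret) {
		kfree(vals);
		return ret;
	}

	*map = vals;
	*count = n;
	return 0;
}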
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index f1c966e05078..15d22db05054 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -15,10 +15,6 @@
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details. 16 * General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 */ 19 */
24 20
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 01504c819e8f..cb3dedb1beae 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -17,10 +17,6 @@
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details. 18 * General Public License for more details.
19 * 19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
23 *
24 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 20 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25 */ 21 */
26 22
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index ec256352f423..01136b879038 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -115,264 +115,6 @@ int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
115 return 0; 115 return 0;
116} 116}
117 117
118/**
119 * create_pnp_modalias - Create hid/cid(s) string for modalias and uevent
120 * @acpi_dev: ACPI device object.
121 * @modalias: Buffer to print into.
122 * @size: Size of the buffer.
123 *
124 * Creates hid/cid(s) string needed for modalias and uevent
125 * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
126 * char *modalias: "acpi:IBM0001:ACPI0001"
127 * Return: 0: no _HID and no _CID
128 * -EINVAL: output error
129 * -ENOMEM: output is truncated
130*/
131static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
132 int size)
133{
134 int len;
135 int count;
136 struct acpi_hardware_id *id;
137
138 /*
139 * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
140 * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
141 * device's list.
142 */
143 count = 0;
144 list_for_each_entry(id, &acpi_dev->pnp.ids, list)
145 if (strcmp(id->id, ACPI_DT_NAMESPACE_HID))
146 count++;
147
148 if (!count)
149 return 0;
150
151 len = snprintf(modalias, size, "acpi:");
152 if (len <= 0)
153 return len;
154
155 size -= len;
156
157 list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
158 if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID))
159 continue;
160
161 count = snprintf(&modalias[len], size, "%s:", id->id);
162 if (count < 0)
163 return -EINVAL;
164
165 if (count >= size)
166 return -ENOMEM;
167
168 len += count;
169 size -= count;
170 }
171 modalias[len] = '\0';
172 return len;
173}
174
175/**
176 * create_of_modalias - Creates DT compatible string for modalias and uevent
177 * @acpi_dev: ACPI device object.
178 * @modalias: Buffer to print into.
179 * @size: Size of the buffer.
180 *
181 * Expose DT compatible modalias as of:NnameTCcompatible. This function should
182 * only be called for devices having ACPI_DT_NAMESPACE_HID in their list of
183 * ACPI/PNP IDs.
184 */
185static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
186 int size)
187{
188 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
189 const union acpi_object *of_compatible, *obj;
190 int len, count;
191 int i, nval;
192 char *c;
193
194 acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
195 /* DT strings are all in lower case */
196 for (c = buf.pointer; *c != '\0'; c++)
197 *c = tolower(*c);
198
199 len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
200 ACPI_FREE(buf.pointer);
201
202 if (len <= 0)
203 return len;
204
205 of_compatible = acpi_dev->data.of_compatible;
206 if (of_compatible->type == ACPI_TYPE_PACKAGE) {
207 nval = of_compatible->package.count;
208 obj = of_compatible->package.elements;
209 } else { /* Must be ACPI_TYPE_STRING. */
210 nval = 1;
211 obj = of_compatible;
212 }
213 for (i = 0; i < nval; i++, obj++) {
214 count = snprintf(&modalias[len], size, "C%s",
215 obj->string.pointer);
216 if (count < 0)
217 return -EINVAL;
218
219 if (count >= size)
220 return -ENOMEM;
221
222 len += count;
223 size -= count;
224 }
225 modalias[len] = '\0';
226 return len;
227}
228
229/*
230 * acpi_companion_match() - Can we match via ACPI companion device
231 * @dev: Device in question
232 *
233 * Check if the given device has an ACPI companion and if that companion has
234 * a valid list of PNP IDs, and if the device is the first (primary) physical
235 * device associated with it. Return the companion pointer if that's the case
236 * or NULL otherwise.
237 *
238 * If multiple physical devices are attached to a single ACPI companion, we need
239 * to be careful. The usage scenario for this kind of relationship is that all
240 * of the physical devices in question use resources provided by the ACPI
241 * companion. A typical case is an MFD device where all the sub-devices share
242 * the parent's ACPI companion. In such cases we can only allow the primary
243 * (first) physical device to be matched with the help of the companion's PNP
244 * IDs.
245 *
246 * Additional physical devices sharing the ACPI companion can still use
247 * resources available from it but they will be matched normally using functions
248 * provided by their bus types (and analogously for their modalias).
249 */
250static struct acpi_device *acpi_companion_match(const struct device *dev)
251{
252 struct acpi_device *adev;
253 struct mutex *physical_node_lock;
254
255 adev = ACPI_COMPANION(dev);
256 if (!adev)
257 return NULL;
258
259 if (list_empty(&adev->pnp.ids))
260 return NULL;
261
262 physical_node_lock = &adev->physical_node_lock;
263 mutex_lock(physical_node_lock);
264 if (list_empty(&adev->physical_node_list)) {
265 adev = NULL;
266 } else {
267 const struct acpi_device_physical_node *node;
268
269 node = list_first_entry(&adev->physical_node_list,
270 struct acpi_device_physical_node, node);
271 if (node->dev != dev)
272 adev = NULL;
273 }
274 mutex_unlock(physical_node_lock);
275
276 return adev;
277}
278
279static int __acpi_device_uevent_modalias(struct acpi_device *adev,
280 struct kobj_uevent_env *env)
281{
282 int len;
283
284 if (!adev)
285 return -ENODEV;
286
287 if (list_empty(&adev->pnp.ids))
288 return 0;
289
290 if (add_uevent_var(env, "MODALIAS="))
291 return -ENOMEM;
292
293 len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
294 sizeof(env->buf) - env->buflen);
295 if (len < 0)
296 return len;
297
298 env->buflen += len;
299 if (!adev->data.of_compatible)
300 return 0;
301
302 if (len > 0 && add_uevent_var(env, "MODALIAS="))
303 return -ENOMEM;
304
305 len = create_of_modalias(adev, &env->buf[env->buflen - 1],
306 sizeof(env->buf) - env->buflen);
307 if (len < 0)
308 return len;
309
310 env->buflen += len;
311
312 return 0;
313}
314
315/*
316 * Creates uevent modalias field for ACPI enumerated devices.
317 * Because the other buses does not support ACPI HIDs & CIDs.
318 * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
319 * "acpi:IBM0001:ACPI0001"
320 */
321int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
322{
323 return __acpi_device_uevent_modalias(acpi_companion_match(dev), env);
324}
325EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias);
326
327static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size)
328{
329 int len, count;
330
331 if (!adev)
332 return -ENODEV;
333
334 if (list_empty(&adev->pnp.ids))
335 return 0;
336
337 len = create_pnp_modalias(adev, buf, size - 1);
338 if (len < 0) {
339 return len;
340 } else if (len > 0) {
341 buf[len++] = '\n';
342 size -= len;
343 }
344 if (!adev->data.of_compatible)
345 return len;
346
347 count = create_of_modalias(adev, buf + len, size - 1);
348 if (count < 0) {
349 return count;
350 } else if (count > 0) {
351 len += count;
352 buf[len++] = '\n';
353 }
354
355 return len;
356}
357
358/*
359 * Creates modalias sysfs attribute for ACPI enumerated devices.
360 * Because the other buses does not support ACPI HIDs & CIDs.
361 * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
362 * "acpi:IBM0001:ACPI0001"
363 */
364int acpi_device_modalias(struct device *dev, char *buf, int size)
365{
366 return __acpi_device_modalias(acpi_companion_match(dev), buf, size);
367}
368EXPORT_SYMBOL_GPL(acpi_device_modalias);
369
370static ssize_t
371acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) {
372 return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
373}
374static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
375
376bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent) 118bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
377{ 119{
378 struct acpi_device_physical_node *pn; 120 struct acpi_device_physical_node *pn;
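
The helpers removed here build the "acpi:HID:CID0:CID1..." string used for the modalias sysfs attribute and the MODALIAS uevent, plus the "of:NnameTCcompatible" form for PRP0001 devices, so module autoloading is unchanged as this code is carved out of scan.c. The consumer side stays the same; a hypothetical driver that would be autoloaded through such an alias:

	/* IDs are illustrative; MODULE_DEVICE_TABLE() turns the table into
	 * "acpi:IBM0001*"-style module aliases matched against the uevent. */
	static const struct acpi_device_id example_acpi_ids[] = {
		{ "IBM0001" },
		{ "ACPI0001" },
		{ }
	};
	MODULE_DEVICE_TABLE(acpi, example_acpi_ids);
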
@@ -701,423 +443,6 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src)
701 unlock_device_hotplug(); 443 unlock_device_hotplug();
702} 444}
703 445
704static ssize_t real_power_state_show(struct device *dev,
705 struct device_attribute *attr, char *buf)
706{
707 struct acpi_device *adev = to_acpi_device(dev);
708 int state;
709 int ret;
710
711 ret = acpi_device_get_power(adev, &state);
712 if (ret)
713 return ret;
714
715 return sprintf(buf, "%s\n", acpi_power_state_string(state));
716}
717
718static DEVICE_ATTR(real_power_state, 0444, real_power_state_show, NULL);
719
720static ssize_t power_state_show(struct device *dev,
721 struct device_attribute *attr, char *buf)
722{
723 struct acpi_device *adev = to_acpi_device(dev);
724
725 return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state));
726}
727
728static DEVICE_ATTR(power_state, 0444, power_state_show, NULL);
729
730static ssize_t
731acpi_eject_store(struct device *d, struct device_attribute *attr,
732 const char *buf, size_t count)
733{
734 struct acpi_device *acpi_device = to_acpi_device(d);
735 acpi_object_type not_used;
736 acpi_status status;
737
738 if (!count || buf[0] != '1')
739 return -EINVAL;
740
741 if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled)
742 && !acpi_device->driver)
743 return -ENODEV;
744
745 status = acpi_get_type(acpi_device->handle, &not_used);
746 if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
747 return -ENODEV;
748
749 get_device(&acpi_device->dev);
750 status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT);
751 if (ACPI_SUCCESS(status))
752 return count;
753
754 put_device(&acpi_device->dev);
755 acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
756 ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
757 return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
758}
759
760static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
761
762static ssize_t
763acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
764 struct acpi_device *acpi_dev = to_acpi_device(dev);
765
766 return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
767}
768static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
769
770static ssize_t acpi_device_uid_show(struct device *dev,
771 struct device_attribute *attr, char *buf)
772{
773 struct acpi_device *acpi_dev = to_acpi_device(dev);
774
775 return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id);
776}
777static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL);
778
779static ssize_t acpi_device_adr_show(struct device *dev,
780 struct device_attribute *attr, char *buf)
781{
782 struct acpi_device *acpi_dev = to_acpi_device(dev);
783
784 return sprintf(buf, "0x%08x\n",
785 (unsigned int)(acpi_dev->pnp.bus_address));
786}
787static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
788
789static ssize_t
790acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) {
791 struct acpi_device *acpi_dev = to_acpi_device(dev);
792 struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
793 int result;
794
795 result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path);
796 if (result)
797 goto end;
798
799 result = sprintf(buf, "%s\n", (char*)path.pointer);
800 kfree(path.pointer);
801end:
802 return result;
803}
804static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
805
806/* sysfs file that shows description text from the ACPI _STR method */
807static ssize_t description_show(struct device *dev,
808 struct device_attribute *attr,
809 char *buf) {
810 struct acpi_device *acpi_dev = to_acpi_device(dev);
811 int result;
812
813 if (acpi_dev->pnp.str_obj == NULL)
814 return 0;
815
816 /*
817 * The _STR object contains a Unicode identifier for a device.
818 * We need to convert to utf-8 so it can be displayed.
819 */
820 result = utf16s_to_utf8s(
821 (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
822 acpi_dev->pnp.str_obj->buffer.length,
823 UTF16_LITTLE_ENDIAN, buf,
824 PAGE_SIZE);
825
826 buf[result++] = '\n';
827
828 return result;
829}
830static DEVICE_ATTR(description, 0444, description_show, NULL);
831
832static ssize_t
833acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
834 char *buf) {
835 struct acpi_device *acpi_dev = to_acpi_device(dev);
836 acpi_status status;
837 unsigned long long sun;
838
839 status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun);
840 if (ACPI_FAILURE(status))
841 return -ENODEV;
842
843 return sprintf(buf, "%llu\n", sun);
844}
845static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
846
847static ssize_t status_show(struct device *dev, struct device_attribute *attr,
848 char *buf) {
849 struct acpi_device *acpi_dev = to_acpi_device(dev);
850 acpi_status status;
851 unsigned long long sta;
852
853 status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta);
854 if (ACPI_FAILURE(status))
855 return -ENODEV;
856
857 return sprintf(buf, "%llu\n", sta);
858}
859static DEVICE_ATTR_RO(status);
860
861static int acpi_device_setup_files(struct acpi_device *dev)
862{
863 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
864 acpi_status status;
865 int result = 0;
866
867 /*
868 * Devices gotten from FADT don't have a "path" attribute
869 */
870 if (dev->handle) {
871 result = device_create_file(&dev->dev, &dev_attr_path);
872 if (result)
873 goto end;
874 }
875
876 if (!list_empty(&dev->pnp.ids)) {
877 result = device_create_file(&dev->dev, &dev_attr_hid);
878 if (result)
879 goto end;
880
881 result = device_create_file(&dev->dev, &dev_attr_modalias);
882 if (result)
883 goto end;
884 }
885
886 /*
887 * If device has _STR, 'description' file is created
888 */
889 if (acpi_has_method(dev->handle, "_STR")) {
890 status = acpi_evaluate_object(dev->handle, "_STR",
891 NULL, &buffer);
892 if (ACPI_FAILURE(status))
893 buffer.pointer = NULL;
894 dev->pnp.str_obj = buffer.pointer;
895 result = device_create_file(&dev->dev, &dev_attr_description);
896 if (result)
897 goto end;
898 }
899
900 if (dev->pnp.type.bus_address)
901 result = device_create_file(&dev->dev, &dev_attr_adr);
902 if (dev->pnp.unique_id)
903 result = device_create_file(&dev->dev, &dev_attr_uid);
904
905 if (acpi_has_method(dev->handle, "_SUN")) {
906 result = device_create_file(&dev->dev, &dev_attr_sun);
907 if (result)
908 goto end;
909 }
910
911 if (acpi_has_method(dev->handle, "_STA")) {
912 result = device_create_file(&dev->dev, &dev_attr_status);
913 if (result)
914 goto end;
915 }
916
917 /*
918 * If device has _EJ0, 'eject' file is created that is used to trigger
919 * hot-removal function from userland.
920 */
921 if (acpi_has_method(dev->handle, "_EJ0")) {
922 result = device_create_file(&dev->dev, &dev_attr_eject);
923 if (result)
924 return result;
925 }
926
927 if (dev->flags.power_manageable) {
928 result = device_create_file(&dev->dev, &dev_attr_power_state);
929 if (result)
930 return result;
931
932 if (dev->power.flags.power_resources)
933 result = device_create_file(&dev->dev,
934 &dev_attr_real_power_state);
935 }
936
937end:
938 return result;
939}
940
941static void acpi_device_remove_files(struct acpi_device *dev)
942{
943 if (dev->flags.power_manageable) {
944 device_remove_file(&dev->dev, &dev_attr_power_state);
945 if (dev->power.flags.power_resources)
946 device_remove_file(&dev->dev,
947 &dev_attr_real_power_state);
948 }
949
950 /*
951 * If device has _STR, remove 'description' file
952 */
953 if (acpi_has_method(dev->handle, "_STR")) {
954 kfree(dev->pnp.str_obj);
955 device_remove_file(&dev->dev, &dev_attr_description);
956 }
957 /*
958 * If device has _EJ0, remove 'eject' file.
959 */
960 if (acpi_has_method(dev->handle, "_EJ0"))
961 device_remove_file(&dev->dev, &dev_attr_eject);
962
963 if (acpi_has_method(dev->handle, "_SUN"))
964 device_remove_file(&dev->dev, &dev_attr_sun);
965
966 if (dev->pnp.unique_id)
967 device_remove_file(&dev->dev, &dev_attr_uid);
968 if (dev->pnp.type.bus_address)
969 device_remove_file(&dev->dev, &dev_attr_adr);
970 device_remove_file(&dev->dev, &dev_attr_modalias);
971 device_remove_file(&dev->dev, &dev_attr_hid);
972 if (acpi_has_method(dev->handle, "_STA"))
973 device_remove_file(&dev->dev, &dev_attr_status);
974 if (dev->handle)
975 device_remove_file(&dev->dev, &dev_attr_path);
976}
977/* --------------------------------------------------------------------------
978 ACPI Bus operations
979 -------------------------------------------------------------------------- */
980
981/**
982 * acpi_of_match_device - Match device object using the "compatible" property.
983 * @adev: ACPI device object to match.
984 * @of_match_table: List of device IDs to match against.
985 *
986 * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of
987 * identifiers and a _DSD object with the "compatible" property, use that
988 * property to match against the given list of identifiers.
989 */
990static bool acpi_of_match_device(struct acpi_device *adev,
991 const struct of_device_id *of_match_table)
992{
993 const union acpi_object *of_compatible, *obj;
994 int i, nval;
995
996 if (!adev)
997 return false;
998
999 of_compatible = adev->data.of_compatible;
1000 if (!of_match_table || !of_compatible)
1001 return false;
1002
1003 if (of_compatible->type == ACPI_TYPE_PACKAGE) {
1004 nval = of_compatible->package.count;
1005 obj = of_compatible->package.elements;
1006 } else { /* Must be ACPI_TYPE_STRING. */
1007 nval = 1;
1008 obj = of_compatible;
1009 }
1010 /* Now we can look for the driver DT compatible strings */
1011 for (i = 0; i < nval; i++, obj++) {
1012 const struct of_device_id *id;
1013
1014 for (id = of_match_table; id->compatible[0]; id++)
1015 if (!strcasecmp(obj->string.pointer, id->compatible))
1016 return true;
1017 }
1018
1019 return false;
1020}
1021
1022static bool __acpi_match_device_cls(const struct acpi_device_id *id,
1023 struct acpi_hardware_id *hwid)
1024{
1025 int i, msk, byte_shift;
1026 char buf[3];
1027
1028 if (!id->cls)
1029 return false;
1030
1031 /* Apply class-code bitmask, before checking each class-code byte */
1032 for (i = 1; i <= 3; i++) {
1033 byte_shift = 8 * (3 - i);
1034 msk = (id->cls_msk >> byte_shift) & 0xFF;
1035 if (!msk)
1036 continue;
1037
1038 sprintf(buf, "%02x", (id->cls >> byte_shift) & msk);
1039 if (strncmp(buf, &hwid->id[(i - 1) * 2], 2))
1040 return false;
1041 }
1042 return true;
1043}
1044
1045static const struct acpi_device_id *__acpi_match_device(
1046 struct acpi_device *device,
1047 const struct acpi_device_id *ids,
1048 const struct of_device_id *of_ids)
1049{
1050 const struct acpi_device_id *id;
1051 struct acpi_hardware_id *hwid;
1052
1053 /*
1054 * If the device is not present, it is unnecessary to load device
1055 * driver for it.
1056 */
1057 if (!device || !device->status.present)
1058 return NULL;
1059
1060 list_for_each_entry(hwid, &device->pnp.ids, list) {
1061 /* First, check the ACPI/PNP IDs provided by the caller. */
1062 for (id = ids; id->id[0] || id->cls; id++) {
1063 if (id->id[0] && !strcmp((char *) id->id, hwid->id))
1064 return id;
1065 else if (id->cls && __acpi_match_device_cls(id, hwid))
1066 return id;
1067 }
1068
1069 /*
1070 * Next, check ACPI_DT_NAMESPACE_HID and try to match the
1071 * "compatible" property if found.
1072 *
1073 * The id returned by the below is not valid, but the only
1074 * caller passing non-NULL of_ids here is only interested in
1075 * whether or not the return value is NULL.
1076 */
1077 if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id)
1078 && acpi_of_match_device(device, of_ids))
1079 return id;
1080 }
1081 return NULL;
1082}
1083
1084/**
1085 * acpi_match_device - Match a struct device against a given list of ACPI IDs
1086 * @ids: Array of struct acpi_device_id object to match against.
1087 * @dev: The device structure to match.
1088 *
1089 * Check if @dev has a valid ACPI handle and if there is a struct acpi_device
1090 * object for that handle and use that object to match against a given list of
1091 * device IDs.
1092 *
1093 * Return a pointer to the first matching ID on success or %NULL on failure.
1094 */
1095const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
1096 const struct device *dev)
1097{
1098 return __acpi_match_device(acpi_companion_match(dev), ids, NULL);
1099}
1100EXPORT_SYMBOL_GPL(acpi_match_device);
1101
1102int acpi_match_device_ids(struct acpi_device *device,
1103 const struct acpi_device_id *ids)
1104{
1105 return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT;
1106}
1107EXPORT_SYMBOL(acpi_match_device_ids);
1108
1109bool acpi_driver_match_device(struct device *dev,
1110 const struct device_driver *drv)
1111{
1112 if (!drv->acpi_match_table)
1113 return acpi_of_match_device(ACPI_COMPANION(dev),
1114 drv->of_match_table);
1115
1116 return !!__acpi_match_device(acpi_companion_match(dev),
1117 drv->acpi_match_table, drv->of_match_table);
1118}
1119EXPORT_SYMBOL_GPL(acpi_driver_match_device);
1120
1121static void acpi_free_power_resources_lists(struct acpi_device *device) 446static void acpi_free_power_resources_lists(struct acpi_device *device)
1122{ 447{
1123 int i; 448 int i;
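
Of the matching helpers moved out of scan.c in the hunk above, __acpi_match_device_cls() is the newest: it compares a driver-supplied _CLS value and mask byte-by-byte against the class-code pseudo-ID synthesised for the device. On the driver side that looks roughly like the sketch below (the class value is the SATA AHCI code, used purely as an illustration; ACPI_DEVICE_CLASS() is assumed from <linux/acpi.h>):

	static const struct acpi_device_id example_cls_ids[] = {
		/* base class / sub-class / programming interface, 0xBBSSPP */
		{ ACPI_DEVICE_CLASS(0x010601, 0xffffff) },
		{ }
	};

	static int example_probe(struct platform_device *pdev)
	{
		if (!acpi_match_device(example_cls_ids, &pdev->dev))
			return -ENODEV;
		return 0;
	}
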
@@ -1144,144 +469,6 @@ static void acpi_device_release(struct device *dev)
1144 kfree(acpi_dev); 469 kfree(acpi_dev);
1145} 470}
1146 471
1147static int acpi_bus_match(struct device *dev, struct device_driver *drv)
1148{
1149 struct acpi_device *acpi_dev = to_acpi_device(dev);
1150 struct acpi_driver *acpi_drv = to_acpi_driver(drv);
1151
1152 return acpi_dev->flags.match_driver
1153 && !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
1154}
1155
1156static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
1157{
1158 return __acpi_device_uevent_modalias(to_acpi_device(dev), env);
1159}
1160
1161static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
1162{
1163 struct acpi_device *device = data;
1164
1165 device->driver->ops.notify(device, event);
1166}
1167
1168static void acpi_device_notify_fixed(void *data)
1169{
1170 struct acpi_device *device = data;
1171
1172 /* Fixed hardware devices have no handles */
1173 acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
1174}
1175
1176static u32 acpi_device_fixed_event(void *data)
1177{
1178 acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data);
1179 return ACPI_INTERRUPT_HANDLED;
1180}
1181
1182static int acpi_device_install_notify_handler(struct acpi_device *device)
1183{
1184 acpi_status status;
1185
1186 if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
1187 status =
1188 acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
1189 acpi_device_fixed_event,
1190 device);
1191 else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
1192 status =
1193 acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
1194 acpi_device_fixed_event,
1195 device);
1196 else
1197 status = acpi_install_notify_handler(device->handle,
1198 ACPI_DEVICE_NOTIFY,
1199 acpi_device_notify,
1200 device);
1201
1202 if (ACPI_FAILURE(status))
1203 return -EINVAL;
1204 return 0;
1205}
1206
1207static void acpi_device_remove_notify_handler(struct acpi_device *device)
1208{
1209 if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
1210 acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
1211 acpi_device_fixed_event);
1212 else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
1213 acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
1214 acpi_device_fixed_event);
1215 else
1216 acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
1217 acpi_device_notify);
1218}
1219
1220static int acpi_device_probe(struct device *dev)
1221{
1222 struct acpi_device *acpi_dev = to_acpi_device(dev);
1223 struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
1224 int ret;
1225
1226 if (acpi_dev->handler && !acpi_is_pnp_device(acpi_dev))
1227 return -EINVAL;
1228
1229 if (!acpi_drv->ops.add)
1230 return -ENOSYS;
1231
1232 ret = acpi_drv->ops.add(acpi_dev);
1233 if (ret)
1234 return ret;
1235
1236 acpi_dev->driver = acpi_drv;
1237 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1238 "Driver [%s] successfully bound to device [%s]\n",
1239 acpi_drv->name, acpi_dev->pnp.bus_id));
1240
1241 if (acpi_drv->ops.notify) {
1242 ret = acpi_device_install_notify_handler(acpi_dev);
1243 if (ret) {
1244 if (acpi_drv->ops.remove)
1245 acpi_drv->ops.remove(acpi_dev);
1246
1247 acpi_dev->driver = NULL;
1248 acpi_dev->driver_data = NULL;
1249 return ret;
1250 }
1251 }
1252
1253 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n",
1254 acpi_drv->name, acpi_dev->pnp.bus_id));
1255 get_device(dev);
1256 return 0;
1257}
1258
1259static int acpi_device_remove(struct device * dev)
1260{
1261 struct acpi_device *acpi_dev = to_acpi_device(dev);
1262 struct acpi_driver *acpi_drv = acpi_dev->driver;
1263
1264 if (acpi_drv) {
1265 if (acpi_drv->ops.notify)
1266 acpi_device_remove_notify_handler(acpi_dev);
1267 if (acpi_drv->ops.remove)
1268 acpi_drv->ops.remove(acpi_dev);
1269 }
1270 acpi_dev->driver = NULL;
1271 acpi_dev->driver_data = NULL;
1272
1273 put_device(dev);
1274 return 0;
1275}
1276
1277struct bus_type acpi_bus_type = {
1278 .name = "acpi",
1279 .match = acpi_bus_match,
1280 .probe = acpi_device_probe,
1281 .remove = acpi_device_remove,
1282 .uevent = acpi_device_uevent,
1283};
1284
1285static void acpi_device_del(struct acpi_device *device) 472static void acpi_device_del(struct acpi_device *device)
1286{ 473{
1287 mutex_lock(&acpi_device_lock); 474 mutex_lock(&acpi_device_lock);
@@ -1529,47 +716,6 @@ struct acpi_device *acpi_get_next_child(struct device *dev,
1529} 716}
1530 717
1531/* -------------------------------------------------------------------------- 718/* --------------------------------------------------------------------------
1532 Driver Management
1533 -------------------------------------------------------------------------- */
1534/**
1535 * acpi_bus_register_driver - register a driver with the ACPI bus
1536 * @driver: driver being registered
1537 *
1538 * Registers a driver with the ACPI bus. Searches the namespace for all
1539 * devices that match the driver's criteria and binds. Returns zero for
1540 * success or a negative error status for failure.
1541 */
1542int acpi_bus_register_driver(struct acpi_driver *driver)
1543{
1544 int ret;
1545
1546 if (acpi_disabled)
1547 return -ENODEV;
1548 driver->drv.name = driver->name;
1549 driver->drv.bus = &acpi_bus_type;
1550 driver->drv.owner = driver->owner;
1551
1552 ret = driver_register(&driver->drv);
1553 return ret;
1554}
1555
1556EXPORT_SYMBOL(acpi_bus_register_driver);
1557
1558/**
1559 * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
1560 * @driver: driver to unregister
1561 *
1562 * Unregisters a driver with the ACPI bus. Searches the namespace for all
1563 * devices that match the driver's criteria and unbinds.
1564 */
1565void acpi_bus_unregister_driver(struct acpi_driver *driver)
1566{
1567 driver_unregister(&driver->drv);
1568}
1569
1570EXPORT_SYMBOL(acpi_bus_unregister_driver);
1571
1572/* --------------------------------------------------------------------------
1573 Device Enumeration 719 Device Enumeration
1574 -------------------------------------------------------------------------- */ 720 -------------------------------------------------------------------------- */
1575static struct acpi_device *acpi_bus_get_parent(acpi_handle handle) 721static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
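
acpi_bus_register_driver() and acpi_bus_unregister_driver() disappear from scan.c along with the rest of the driver-management code; the registration interface itself is meant to stay as it was. For orientation, a sketch of a legacy ACPI driver using it (all names hypothetical; module_acpi_driver() expands to the register/unregister pair removed above):

	static int example_add(struct acpi_device *adev)
	{
		return 0;
	}

	static int example_remove(struct acpi_device *adev)
	{
		return 0;
	}

	static struct acpi_driver example_driver = {
		.name	= "example",
		.class	= "example",
		.ids	= example_acpi_ids,	/* table as sketched earlier */
		.ops	= {
			.add	= example_add,
			.remove	= example_remove,
		},
	};
	module_acpi_driver(example_driver);
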
@@ -2744,12 +1890,6 @@ int __init acpi_scan_init(void)
2744{ 1890{
2745 int result; 1891 int result;
2746 1892
2747 result = bus_register(&acpi_bus_type);
2748 if (result) {
2749 /* We don't want to quit even if we failed to add suspend/resume */
2750 printk(KERN_ERR PREFIX "Could not register bus type\n");
2751 }
2752
2753 acpi_pci_root_init(); 1893 acpi_pci_root_init();
2754 acpi_pci_link_init(); 1894 acpi_pci_link_init();
2755 acpi_processor_init(); 1895 acpi_processor_init();
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 0876d77b3206..40a42655227c 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -69,6 +69,8 @@ static const struct acpi_dlevel acpi_debug_levels[] = {
69 ACPI_DEBUG_INIT(ACPI_LV_INIT), 69 ACPI_DEBUG_INIT(ACPI_LV_INIT),
70 ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT), 70 ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
71 ACPI_DEBUG_INIT(ACPI_LV_INFO), 71 ACPI_DEBUG_INIT(ACPI_LV_INFO),
72 ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
73 ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),
72 74
73 ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES), 75 ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
74 ACPI_DEBUG_INIT(ACPI_LV_PARSE), 76 ACPI_DEBUG_INIT(ACPI_LV_PARSE),
@@ -162,55 +164,116 @@ static const struct kernel_param_ops param_ops_debug_level = {
162module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644); 164module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
163module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644); 165module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
164 166
165static char trace_method_name[6]; 167static char trace_method_name[1024];
166module_param_string(trace_method_name, trace_method_name, 6, 0644);
167static unsigned int trace_debug_layer;
168module_param(trace_debug_layer, uint, 0644);
169static unsigned int trace_debug_level;
170module_param(trace_debug_level, uint, 0644);
171 168
172static int param_set_trace_state(const char *val, struct kernel_param *kp) 169int param_set_trace_method_name(const char *val, const struct kernel_param *kp)
173{ 170{
174 int result = 0; 171 u32 saved_flags = 0;
172 bool is_abs_path = true;
175 173
176 if (!strncmp(val, "enable", sizeof("enable") - 1)) { 174 if (*val != '\\')
177 result = acpi_debug_trace(trace_method_name, trace_debug_level, 175 is_abs_path = false;
178 trace_debug_layer, 0);
179 if (result)
180 result = -EBUSY;
181 goto exit;
182 }
183 176
184 if (!strncmp(val, "disable", sizeof("disable") - 1)) { 177 if ((is_abs_path && strlen(val) > 1023) ||
185 int name = 0; 178 (!is_abs_path && strlen(val) > 1022)) {
186 result = acpi_debug_trace((char *)&name, trace_debug_level, 179 pr_err("%s: string parameter too long\n", kp->name);
187 trace_debug_layer, 0); 180 return -ENOSPC;
188 if (result)
189 result = -EBUSY;
190 goto exit;
191 } 181 }
192 182
193 if (!strncmp(val, "1", 1)) { 183 /*
194 result = acpi_debug_trace(trace_method_name, trace_debug_level, 184 * It's not safe to update acpi_gbl_trace_method_name without
195 trace_debug_layer, 1); 185 * having the tracer stopped, so we save the original tracer
196 if (result) 186 * state and disable it.
197 result = -EBUSY; 187 */
198 goto exit; 188 saved_flags = acpi_gbl_trace_flags;
189 (void)acpi_debug_trace(NULL,
190 acpi_gbl_trace_dbg_level,
191 acpi_gbl_trace_dbg_layer,
192 0);
193
194 /* This is a hack. We can't kmalloc in early boot. */
195 if (is_abs_path)
196 strcpy(trace_method_name, val);
197 else {
198 trace_method_name[0] = '\\';
199 strcpy(trace_method_name+1, val);
199 } 200 }
200 201
201 result = -EINVAL; 202 /* Restore the original tracer state */
202exit: 203 (void)acpi_debug_trace(trace_method_name,
203 return result; 204 acpi_gbl_trace_dbg_level,
205 acpi_gbl_trace_dbg_layer,
206 saved_flags);
207
208 return 0;
209}
210
211static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
212{
213 return scnprintf(buffer, PAGE_SIZE, "%s", acpi_gbl_trace_method_name);
214}
215
216static const struct kernel_param_ops param_ops_trace_method = {
217 .set = param_set_trace_method_name,
218 .get = param_get_trace_method_name,
219};
220
221static const struct kernel_param_ops param_ops_trace_attrib = {
222 .set = param_set_uint,
223 .get = param_get_uint,
224};
225
226module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644);
227module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
228module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
229
230static int param_set_trace_state(const char *val, struct kernel_param *kp)
231{
232 acpi_status status;
233 const char *method = trace_method_name;
234 u32 flags = 0;
235
236/* The "xxx-once" strings must be compared before their plain "xxx" prefixes */
237#define acpi_compare_param(val, key) \
238 strncmp((val), (key), sizeof(key) - 1)
239
240 if (!acpi_compare_param(val, "enable")) {
241 method = NULL;
242 flags = ACPI_TRACE_ENABLED;
243 } else if (!acpi_compare_param(val, "disable"))
244 method = NULL;
245 else if (!acpi_compare_param(val, "method-once"))
246 flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
247 else if (!acpi_compare_param(val, "method"))
248 flags = ACPI_TRACE_ENABLED;
249 else if (!acpi_compare_param(val, "opcode-once"))
250 flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
251 else if (!acpi_compare_param(val, "opcode"))
252 flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
253 else
254 return -EINVAL;
255
256 status = acpi_debug_trace(method,
257 acpi_gbl_trace_dbg_level,
258 acpi_gbl_trace_dbg_layer,
259 flags);
260 if (ACPI_FAILURE(status))
261 return -EBUSY;
262
263 return 0;
204} 264}
205 265
206static int param_get_trace_state(char *buffer, struct kernel_param *kp) 266static int param_get_trace_state(char *buffer, struct kernel_param *kp)
207{ 267{
208 if (!acpi_gbl_trace_method_name) 268 if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
209 return sprintf(buffer, "disable"); 269 return sprintf(buffer, "disable");
210 else { 270 else {
211 if (acpi_gbl_trace_flags & 1) 271 if (acpi_gbl_trace_method_name) {
212 return sprintf(buffer, "1"); 272 if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
213 else 273 return sprintf(buffer, "method-once");
274 else
275 return sprintf(buffer, "method");
276 } else
214 return sprintf(buffer, "enable"); 277 return sprintf(buffer, "enable");
215 } 278 }
216 return 0; 279 return 0;
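
The switch from module_param_string() to module_param_cb() lets the setter quiesce the tracer before swapping the method name, accept both absolute ("\_SB....") and relative names, and grow the buffer from 6 bytes to a full 1024-byte pathname; the parameters still appear under /sys/module/acpi/parameters/ as before. Stripped of the ACPICA specifics, the mechanism is the standard custom-ops module parameter; a self-contained sketch with hypothetical names:

	#include <linux/kernel.h>
	#include <linux/moduleparam.h>
	#include <linux/string.h>

	static char example_name[64];

	static int example_name_set(const char *val, const struct kernel_param *kp)
	{
		if (strlen(val) >= sizeof(example_name))
			return -ENOSPC;
		strcpy(example_name, val);	/* a real setter would also lock/validate */
		return 0;
	}

	static int example_name_get(char *buffer, const struct kernel_param *kp)
	{
		return scnprintf(buffer, PAGE_SIZE, "%s", example_name);
	}

	static const struct kernel_param_ops example_name_ops = {
		.set = example_name_set,
		.get = example_name_get,
	};
	module_param_cb(example_name, &example_name_ops, &example_name, 0644);
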
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 2e19189da0ee..17a6fa01a338 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -15,10 +15,6 @@
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 * 19 *
24 */ 20 */
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 6d4e44ea74ac..fc28b9f5aa84 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -16,10 +16,6 @@
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details. 17 * General Public License for more details.
18 * 18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 * 20 *
25 * This driver fully implements the ACPI thermal policy as described in the 21 * This driver fully implements the ACPI thermal policy as described in the
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 67c548ad3764..475c9079bf85 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -16,10 +16,6 @@
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details. 17 * General Public License for more details.
18 * 18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */ 20 */
25 21
diff --git a/drivers/base/core.c b/drivers/base/core.c
index f6947d0abc2f..334ec7ef1960 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1301,6 +1301,19 @@ void device_unregister(struct device *dev)
1301} 1301}
1302EXPORT_SYMBOL_GPL(device_unregister); 1302EXPORT_SYMBOL_GPL(device_unregister);
1303 1303
1304static struct device *prev_device(struct klist_iter *i)
1305{
1306 struct klist_node *n = klist_prev(i);
1307 struct device *dev = NULL;
1308 struct device_private *p;
1309
1310 if (n) {
1311 p = to_device_private_parent(n);
1312 dev = p->device;
1313 }
1314 return dev;
1315}
1316
1304static struct device *next_device(struct klist_iter *i) 1317static struct device *next_device(struct klist_iter *i)
1305{ 1318{
1306 struct klist_node *n = klist_next(i); 1319 struct klist_node *n = klist_next(i);
@@ -1390,6 +1403,36 @@ int device_for_each_child(struct device *parent, void *data,
1390EXPORT_SYMBOL_GPL(device_for_each_child); 1403EXPORT_SYMBOL_GPL(device_for_each_child);
1391 1404
1392/** 1405/**
1406 * device_for_each_child_reverse - device child iterator in reversed order.
1407 * @parent: parent struct device.
1408 * @fn: function to be called for each device.
1409 * @data: data for the callback.
1410 *
1411 * Iterate over @parent's child devices, and call @fn for each,
1412 * passing it @data.
1413 *
1414 * We check the return of @fn each time. If it returns anything
1415 * other than 0, we break out and return that value.
1416 */
1417int device_for_each_child_reverse(struct device *parent, void *data,
1418 int (*fn)(struct device *dev, void *data))
1419{
1420 struct klist_iter i;
1421 struct device *child;
1422 int error = 0;
1423
1424 if (!parent->p)
1425 return 0;
1426
1427 klist_iter_init(&parent->p->klist_children, &i);
1428 while ((child = prev_device(&i)) && !error)
1429 error = fn(child, data);
1430 klist_iter_exit(&i);
1431 return error;
1432}
1433EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
1434
1435/**
1393 * device_find_child - device iterator for locating a particular device. 1436 * device_find_child - device iterator for locating a particular device.
1394 * @parent: parent struct device 1437 * @parent: parent struct device
1395 * @match: Callback function to check device 1438 * @match: Callback function to check device
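
device_for_each_child_reverse() mirrors device_for_each_child() but walks the children backwards, which is what a caller wants when teardown must undo work in the opposite order from creation. A hypothetical caller:

	static int example_unbind_one(struct device *dev, void *data)
	{
		dev_dbg(dev, "unbinding child\n");
		return 0;	/* a non-zero return stops the iteration early */
	}

	static void example_unbind_children(struct device *parent)
	{
		device_for_each_child_reverse(parent, NULL, example_unbind_one);
	}
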
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index cc2b1d4801fd..be0eb4639128 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -407,6 +407,8 @@ EXPORT_SYMBOL_GPL(wait_for_device_probe);
407 * 407 *
408 * This function must be called with @dev lock held. When called for a 408 * This function must be called with @dev lock held. When called for a
409 * USB interface, @dev->parent lock must be held as well. 409 * USB interface, @dev->parent lock must be held as well.
410 *
411 * If the device has a parent, runtime-resume the parent before driver probing.
410 */ 412 */
411int driver_probe_device(struct device_driver *drv, struct device *dev) 413int driver_probe_device(struct device_driver *drv, struct device *dev)
412{ 414{
@@ -418,10 +420,16 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
418 pr_debug("bus: '%s': %s: matched device %s with driver %s\n", 420 pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
419 drv->bus->name, __func__, dev_name(dev), drv->name); 421 drv->bus->name, __func__, dev_name(dev), drv->name);
420 422
423 if (dev->parent)
424 pm_runtime_get_sync(dev->parent);
425
421 pm_runtime_barrier(dev); 426 pm_runtime_barrier(dev);
422 ret = really_probe(dev, drv); 427 ret = really_probe(dev, drv);
423 pm_request_idle(dev); 428 pm_request_idle(dev);
424 429
430 if (dev->parent)
431 pm_runtime_put(dev->parent);
432
425 return ret; 433 return ret;
426} 434}
427 435
@@ -515,11 +523,17 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
515 523
516 device_lock(dev); 524 device_lock(dev);
517 525
526 if (dev->parent)
527 pm_runtime_get_sync(dev->parent);
528
518 bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver); 529 bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
519 dev_dbg(dev, "async probe completed\n"); 530 dev_dbg(dev, "async probe completed\n");
520 531
521 pm_request_idle(dev); 532 pm_request_idle(dev);
522 533
534 if (dev->parent)
535 pm_runtime_put(dev->parent);
536
523 device_unlock(dev); 537 device_unlock(dev);
524 538
525 put_device(dev); 539 put_device(dev);
@@ -549,6 +563,9 @@ static int __device_attach(struct device *dev, bool allow_async)
549 .want_async = false, 563 .want_async = false,
550 }; 564 };
551 565
566 if (dev->parent)
567 pm_runtime_get_sync(dev->parent);
568
552 ret = bus_for_each_drv(dev->bus, NULL, &data, 569 ret = bus_for_each_drv(dev->bus, NULL, &data,
553 __device_attach_driver); 570 __device_attach_driver);
554 if (!ret && allow_async && data.have_async) { 571 if (!ret && allow_async && data.have_async) {
@@ -565,6 +582,9 @@ static int __device_attach(struct device *dev, bool allow_async)
565 } else { 582 } else {
566 pm_request_idle(dev); 583 pm_request_idle(dev);
567 } 584 }
585
586 if (dev->parent)
587 pm_runtime_put(dev->parent);
568 } 588 }
569out_unlock: 589out_unlock:
570 device_unlock(dev); 590 device_unlock(dev);
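
With driver_probe_device() and both attach paths now taking a runtime-PM reference on the parent, the parent device is guaranteed to be resumed for the duration of the child's probe. Boilerplate of the following shape in individual drivers becomes unnecessary (illustrative only, assuming the device has a parent):

	static int example_probe(struct platform_device *pdev)
	{
		int ret;

		/* no longer needed: the driver core now resumes the
		 * parent around probe */
		pm_runtime_get_sync(pdev->dev.parent);

		ret = 0;	/* hardware setup would go here */

		pm_runtime_put(pdev->dev.parent);
		return ret;
	}
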
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index acef9f9f759a..652b5a367c1f 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -38,7 +38,7 @@ struct pm_clock_entry {
38 * @dev: The device for the given clock 38 * @dev: The device for the given clock
39 * @ce: PM clock entry corresponding to the clock. 39 * @ce: PM clock entry corresponding to the clock.
40 */ 40 */
41static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce) 41static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
42{ 42{
43 int ret; 43 int ret;
44 44
@@ -50,8 +50,6 @@ static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
50 dev_err(dev, "%s: failed to enable clk %p, error %d\n", 50 dev_err(dev, "%s: failed to enable clk %p, error %d\n",
51 __func__, ce->clk, ret); 51 __func__, ce->clk, ret);
52 } 52 }
53
54 return ret;
55} 53}
56 54
57/** 55/**
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 0ee43c1056e0..416720159e96 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -114,8 +114,12 @@ static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
114 stop_latency_ns, "stop"); 114 stop_latency_ns, "stop");
115} 115}
116 116
117static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) 117static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev,
118 bool timed)
118{ 119{
120 if (!timed)
121 return GENPD_DEV_CALLBACK(genpd, int, start, dev);
122
119 return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev, 123 return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
120 start_latency_ns, "start"); 124 start_latency_ns, "start");
121} 125}
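
genpd_start_dev() grows a 'timed' flag so callers can skip the latency accounting; either way the call ends up in the domain's .start() device operation. A hypothetical provider supplying that pair of callbacks (registered later with pm_genpd_init()):

	static int example_domain_start(struct device *dev)
	{
		/* e.g. ungate the bus clock or deassert the reset for @dev */
		return 0;
	}

	static int example_domain_stop(struct device *dev)
	{
		return 0;
	}

	static struct generic_pm_domain example_domain = {
		.name = "example",
		.dev_ops = {
			.start = example_domain_start,
			.stop  = example_domain_stop,
		},
	};
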
@@ -136,41 +140,6 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
136 smp_mb__after_atomic(); 140 smp_mb__after_atomic();
137} 141}
138 142
139static void genpd_acquire_lock(struct generic_pm_domain *genpd)
140{
141 DEFINE_WAIT(wait);
142
143 mutex_lock(&genpd->lock);
144 /*
145 * Wait for the domain to transition into either the active,
146 * or the power off state.
147 */
148 for (;;) {
149 prepare_to_wait(&genpd->status_wait_queue, &wait,
150 TASK_UNINTERRUPTIBLE);
151 if (genpd->status == GPD_STATE_ACTIVE
152 || genpd->status == GPD_STATE_POWER_OFF)
153 break;
154 mutex_unlock(&genpd->lock);
155
156 schedule();
157
158 mutex_lock(&genpd->lock);
159 }
160 finish_wait(&genpd->status_wait_queue, &wait);
161}
162
163static void genpd_release_lock(struct generic_pm_domain *genpd)
164{
165 mutex_unlock(&genpd->lock);
166}
167
168static void genpd_set_active(struct generic_pm_domain *genpd)
169{
170 if (genpd->resume_count == 0)
171 genpd->status = GPD_STATE_ACTIVE;
172}
173
174static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd) 143static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
175{ 144{
176 s64 usecs64; 145 s64 usecs64;
@@ -251,35 +220,14 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
251 * resume a device belonging to it. 220 * resume a device belonging to it.
252 */ 221 */
253static int __pm_genpd_poweron(struct generic_pm_domain *genpd) 222static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
254 __releases(&genpd->lock) __acquires(&genpd->lock)
255{ 223{
256 struct gpd_link *link; 224 struct gpd_link *link;
257 DEFINE_WAIT(wait);
258 int ret = 0; 225 int ret = 0;
259 226
260 /* If the domain's master is being waited for, we have to wait too. */
261 for (;;) {
262 prepare_to_wait(&genpd->status_wait_queue, &wait,
263 TASK_UNINTERRUPTIBLE);
264 if (genpd->status != GPD_STATE_WAIT_MASTER)
265 break;
266 mutex_unlock(&genpd->lock);
267
268 schedule();
269
270 mutex_lock(&genpd->lock);
271 }
272 finish_wait(&genpd->status_wait_queue, &wait);
273
274 if (genpd->status == GPD_STATE_ACTIVE 227 if (genpd->status == GPD_STATE_ACTIVE
275 || (genpd->prepared_count > 0 && genpd->suspend_power_off)) 228 || (genpd->prepared_count > 0 && genpd->suspend_power_off))
276 return 0; 229 return 0;
277 230
278 if (genpd->status != GPD_STATE_POWER_OFF) {
279 genpd_set_active(genpd);
280 return 0;
281 }
282
283 if (genpd->cpuidle_data) { 231 if (genpd->cpuidle_data) {
284 cpuidle_pause_and_lock(); 232 cpuidle_pause_and_lock();
285 genpd->cpuidle_data->idle_state->disabled = true; 233 genpd->cpuidle_data->idle_state->disabled = true;
@@ -294,20 +242,8 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
294 */ 242 */
295 list_for_each_entry(link, &genpd->slave_links, slave_node) { 243 list_for_each_entry(link, &genpd->slave_links, slave_node) {
296 genpd_sd_counter_inc(link->master); 244 genpd_sd_counter_inc(link->master);
297 genpd->status = GPD_STATE_WAIT_MASTER;
298
299 mutex_unlock(&genpd->lock);
300 245
301 ret = pm_genpd_poweron(link->master); 246 ret = pm_genpd_poweron(link->master);
302
303 mutex_lock(&genpd->lock);
304
305 /*
306 * The "wait for parent" status is guaranteed not to change
307 * while the master is powering on.
308 */
309 genpd->status = GPD_STATE_POWER_OFF;
310 wake_up_all(&genpd->status_wait_queue);
311 if (ret) { 247 if (ret) {
312 genpd_sd_counter_dec(link->master); 248 genpd_sd_counter_dec(link->master);
313 goto err; 249 goto err;
@@ -319,8 +255,7 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
319 goto err; 255 goto err;
320 256
321 out: 257 out:
322 genpd_set_active(genpd); 258 genpd->status = GPD_STATE_ACTIVE;
323
324 return 0; 259 return 0;
325 260
326 err: 261 err:
@@ -356,20 +291,18 @@ int pm_genpd_name_poweron(const char *domain_name)
356 return genpd ? pm_genpd_poweron(genpd) : -EINVAL; 291 return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
357} 292}
358 293
359static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
360 struct device *dev)
361{
362 return GENPD_DEV_CALLBACK(genpd, int, start, dev);
363}
364
365static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) 294static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
366{ 295{
367 return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, 296 return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
368 save_state_latency_ns, "state save"); 297 save_state_latency_ns, "state save");
369} 298}
370 299
371static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev) 300static int genpd_restore_dev(struct generic_pm_domain *genpd,
301 struct device *dev, bool timed)
372{ 302{
303 if (!timed)
304 return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
305
373 return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, 306 return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
374 restore_state_latency_ns, 307 restore_state_latency_ns,
375 "state restore"); 308 "state restore");
@@ -416,89 +349,6 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
416} 349}
417 350
418/** 351/**
419 * __pm_genpd_save_device - Save the pre-suspend state of a device.
420 * @pdd: Domain data of the device to save the state of.
421 * @genpd: PM domain the device belongs to.
422 */
423static int __pm_genpd_save_device(struct pm_domain_data *pdd,
424 struct generic_pm_domain *genpd)
425 __releases(&genpd->lock) __acquires(&genpd->lock)
426{
427 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
428 struct device *dev = pdd->dev;
429 int ret = 0;
430
431 if (gpd_data->need_restore > 0)
432 return 0;
433
434 /*
435 * If the value of the need_restore flag is still unknown at this point,
436 * we trust that pm_genpd_poweroff() has verified that the device is
437 * already runtime PM suspended.
438 */
439 if (gpd_data->need_restore < 0) {
440 gpd_data->need_restore = 1;
441 return 0;
442 }
443
444 mutex_unlock(&genpd->lock);
445
446 genpd_start_dev(genpd, dev);
447 ret = genpd_save_dev(genpd, dev);
448 genpd_stop_dev(genpd, dev);
449
450 mutex_lock(&genpd->lock);
451
452 if (!ret)
453 gpd_data->need_restore = 1;
454
455 return ret;
456}
457
458/**
459 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
460 * @pdd: Domain data of the device to restore the state of.
461 * @genpd: PM domain the device belongs to.
462 */
463static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
464 struct generic_pm_domain *genpd)
465 __releases(&genpd->lock) __acquires(&genpd->lock)
466{
467 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
468 struct device *dev = pdd->dev;
469 int need_restore = gpd_data->need_restore;
470
471 gpd_data->need_restore = 0;
472 mutex_unlock(&genpd->lock);
473
474 genpd_start_dev(genpd, dev);
475
476 /*
477 * Call genpd_restore_dev() for recently added devices too (need_restore
478 * is negative then).
479 */
480 if (need_restore)
481 genpd_restore_dev(genpd, dev);
482
483 mutex_lock(&genpd->lock);
484}
485
486/**
487 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
488 * @genpd: PM domain to check.
489 *
490 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
491 * a "power off" operation, which means that a "power on" has occured in the
492 * meantime, or if its resume_count field is different from zero, which means
493 * that one of its devices has been resumed in the meantime.
494 */
495static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
496{
497 return genpd->status == GPD_STATE_WAIT_MASTER
498 || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
499}
500
501/**
502 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff(). 352 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
503 * @genpd: PM domait to power off. 353 * @genpd: PM domait to power off.
504 * 354 *
@@ -515,34 +365,26 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
515 * @genpd: PM domain to power down. 365 * @genpd: PM domain to power down.
516 * 366 *
517 * If all of the @genpd's devices have been suspended and all of its subdomains 367 * If all of the @genpd's devices have been suspended and all of its subdomains
518 * have been powered down, run the runtime suspend callbacks provided by all of 368 * have been powered down, remove power from @genpd.
519 * the @genpd's devices' drivers and remove power from @genpd.
520 */ 369 */
521static int pm_genpd_poweroff(struct generic_pm_domain *genpd) 370static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
522 __releases(&genpd->lock) __acquires(&genpd->lock)
523{ 371{
524 struct pm_domain_data *pdd; 372 struct pm_domain_data *pdd;
525 struct gpd_link *link; 373 struct gpd_link *link;
526 unsigned int not_suspended; 374 unsigned int not_suspended = 0;
527 int ret = 0;
528 375
529 start:
530 /* 376 /*
531 * Do not try to power off the domain in the following situations: 377 * Do not try to power off the domain in the following situations:
532 * (1) The domain is already in the "power off" state. 378 * (1) The domain is already in the "power off" state.
533 * (2) The domain is waiting for its master to power up. 379 * (2) System suspend is in progress.
534 * (3) One of the domain's devices is being resumed right now.
535 * (4) System suspend is in progress.
536 */ 380 */
537 if (genpd->status == GPD_STATE_POWER_OFF 381 if (genpd->status == GPD_STATE_POWER_OFF
538 || genpd->status == GPD_STATE_WAIT_MASTER 382 || genpd->prepared_count > 0)
539 || genpd->resume_count > 0 || genpd->prepared_count > 0)
540 return 0; 383 return 0;
541 384
542 if (atomic_read(&genpd->sd_count) > 0) 385 if (atomic_read(&genpd->sd_count) > 0)
543 return -EBUSY; 386 return -EBUSY;
544 387
545 not_suspended = 0;
546 list_for_each_entry(pdd, &genpd->dev_list, list_node) { 388 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
547 enum pm_qos_flags_status stat; 389 enum pm_qos_flags_status stat;
548 390
@@ -560,41 +402,11 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
560 if (not_suspended > genpd->in_progress) 402 if (not_suspended > genpd->in_progress)
561 return -EBUSY; 403 return -EBUSY;
562 404
563 if (genpd->poweroff_task) {
564 /*
565 * Another instance of pm_genpd_poweroff() is executing
566 * callbacks, so tell it to start over and return.
567 */
568 genpd->status = GPD_STATE_REPEAT;
569 return 0;
570 }
571
572 if (genpd->gov && genpd->gov->power_down_ok) { 405 if (genpd->gov && genpd->gov->power_down_ok) {
573 if (!genpd->gov->power_down_ok(&genpd->domain)) 406 if (!genpd->gov->power_down_ok(&genpd->domain))
574 return -EAGAIN; 407 return -EAGAIN;
575 } 408 }
576 409
577 genpd->status = GPD_STATE_BUSY;
578 genpd->poweroff_task = current;
579
580 list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
581 ret = atomic_read(&genpd->sd_count) == 0 ?
582 __pm_genpd_save_device(pdd, genpd) : -EBUSY;
583
584 if (genpd_abort_poweroff(genpd))
585 goto out;
586
587 if (ret) {
588 genpd_set_active(genpd);
589 goto out;
590 }
591
592 if (genpd->status == GPD_STATE_REPEAT) {
593 genpd->poweroff_task = NULL;
594 goto start;
595 }
596 }
597
598 if (genpd->cpuidle_data) { 410 if (genpd->cpuidle_data) {
599 /* 411 /*
600 * If cpuidle_data is set, cpuidle should turn the domain off 412 * If cpuidle_data is set, cpuidle should turn the domain off
@@ -607,14 +419,14 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
607 cpuidle_pause_and_lock(); 419 cpuidle_pause_and_lock();
608 genpd->cpuidle_data->idle_state->disabled = false; 420 genpd->cpuidle_data->idle_state->disabled = false;
609 cpuidle_resume_and_unlock(); 421 cpuidle_resume_and_unlock();
610 goto out; 422 return 0;
611 } 423 }
612 424
613 if (genpd->power_off) { 425 if (genpd->power_off) {
614 if (atomic_read(&genpd->sd_count) > 0) { 426 int ret;
615 ret = -EBUSY; 427
616 goto out; 428 if (atomic_read(&genpd->sd_count) > 0)
617 } 429 return -EBUSY;
618 430
619 /* 431 /*
620 * If sd_count > 0 at this point, one of the subdomains hasn't 432 * If sd_count > 0 at this point, one of the subdomains hasn't
@@ -625,10 +437,8 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
625 * happen very often). 437 * happen very often).
626 */ 438 */
627 ret = genpd_power_off(genpd, true); 439 ret = genpd_power_off(genpd, true);
628 if (ret == -EBUSY) { 440 if (ret)
629 genpd_set_active(genpd); 441 return ret;
630 goto out;
631 }
632 } 442 }
633 443
634 genpd->status = GPD_STATE_POWER_OFF; 444 genpd->status = GPD_STATE_POWER_OFF;
@@ -638,10 +448,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
638 genpd_queue_power_off_work(link->master); 448 genpd_queue_power_off_work(link->master);
639 } 449 }
640 450
641 out: 451 return 0;
642 genpd->poweroff_task = NULL;
643 wake_up_all(&genpd->status_wait_queue);
644 return ret;
645} 452}
646 453
647/** 454/**
@@ -654,9 +461,9 @@ static void genpd_power_off_work_fn(struct work_struct *work)
654 461
655 genpd = container_of(work, struct generic_pm_domain, power_off_work); 462 genpd = container_of(work, struct generic_pm_domain, power_off_work);
656 463
657 genpd_acquire_lock(genpd); 464 mutex_lock(&genpd->lock);
658 pm_genpd_poweroff(genpd); 465 pm_genpd_poweroff(genpd);
659 genpd_release_lock(genpd); 466 mutex_unlock(&genpd->lock);
660} 467}
661 468
662/** 469/**
@@ -670,7 +477,6 @@ static void genpd_power_off_work_fn(struct work_struct *work)
670static int pm_genpd_runtime_suspend(struct device *dev) 477static int pm_genpd_runtime_suspend(struct device *dev)
671{ 478{
672 struct generic_pm_domain *genpd; 479 struct generic_pm_domain *genpd;
673 struct generic_pm_domain_data *gpd_data;
674 bool (*stop_ok)(struct device *__dev); 480 bool (*stop_ok)(struct device *__dev);
675 int ret; 481 int ret;
676 482
@@ -684,10 +490,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)
684 if (stop_ok && !stop_ok(dev)) 490 if (stop_ok && !stop_ok(dev))
685 return -EBUSY; 491 return -EBUSY;
686 492
687 ret = genpd_stop_dev(genpd, dev); 493 ret = genpd_save_dev(genpd, dev);
688 if (ret) 494 if (ret)
689 return ret; 495 return ret;
690 496
497 ret = genpd_stop_dev(genpd, dev);
498 if (ret) {
499 genpd_restore_dev(genpd, dev, true);
500 return ret;
501 }
502
691 /* 503 /*
692 * If power.irq_safe is set, this routine will be run with interrupts 504 * If power.irq_safe is set, this routine will be run with interrupts
693 * off, so it can't use mutexes. 505 * off, so it can't use mutexes.
@@ -696,16 +508,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
696 return 0; 508 return 0;
697 509
698 mutex_lock(&genpd->lock); 510 mutex_lock(&genpd->lock);
699
700 /*
701 * If we have an unknown state of the need_restore flag, it means none
702 * of the runtime PM callbacks has been invoked yet. Let's update the
703 * flag to reflect that the current state is active.
704 */
705 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
706 if (gpd_data->need_restore < 0)
707 gpd_data->need_restore = 0;
708
709 genpd->in_progress++; 511 genpd->in_progress++;
710 pm_genpd_poweroff(genpd); 512 pm_genpd_poweroff(genpd);
711 genpd->in_progress--; 513 genpd->in_progress--;
@@ -725,8 +527,8 @@ static int pm_genpd_runtime_suspend(struct device *dev)
725static int pm_genpd_runtime_resume(struct device *dev) 527static int pm_genpd_runtime_resume(struct device *dev)
726{ 528{
727 struct generic_pm_domain *genpd; 529 struct generic_pm_domain *genpd;
728 DEFINE_WAIT(wait);
729 int ret; 530 int ret;
531 bool timed = true;
730 532
731 dev_dbg(dev, "%s()\n", __func__); 533 dev_dbg(dev, "%s()\n", __func__);
732 534
@@ -735,39 +537,21 @@ static int pm_genpd_runtime_resume(struct device *dev)
735 return -EINVAL; 537 return -EINVAL;
736 538
737 /* If power.irq_safe, the PM domain is never powered off. */ 539 /* If power.irq_safe, the PM domain is never powered off. */
738 if (dev->power.irq_safe) 540 if (dev->power.irq_safe) {
739 return genpd_start_dev_no_timing(genpd, dev); 541 timed = false;
542 goto out;
543 }
740 544
741 mutex_lock(&genpd->lock); 545 mutex_lock(&genpd->lock);
742 ret = __pm_genpd_poweron(genpd); 546 ret = __pm_genpd_poweron(genpd);
743 if (ret) { 547 mutex_unlock(&genpd->lock);
744 mutex_unlock(&genpd->lock);
745 return ret;
746 }
747 genpd->status = GPD_STATE_BUSY;
748 genpd->resume_count++;
749 for (;;) {
750 prepare_to_wait(&genpd->status_wait_queue, &wait,
751 TASK_UNINTERRUPTIBLE);
752 /*
753 * If current is the powering off task, we have been called
754 * reentrantly from one of the device callbacks, so we should
755 * not wait.
756 */
757 if (!genpd->poweroff_task || genpd->poweroff_task == current)
758 break;
759 mutex_unlock(&genpd->lock);
760 548
761 schedule(); 549 if (ret)
550 return ret;
762 551
763 mutex_lock(&genpd->lock); 552 out:
764 } 553 genpd_start_dev(genpd, dev, timed);
765 finish_wait(&genpd->status_wait_queue, &wait); 554 genpd_restore_dev(genpd, dev, timed);
766 __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
767 genpd->resume_count--;
768 genpd_set_active(genpd);
769 wake_up_all(&genpd->status_wait_queue);
770 mutex_unlock(&genpd->lock);
771 555
772 return 0; 556 return 0;
773} 557}
@@ -883,7 +667,7 @@ static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
883{ 667{
884 struct gpd_link *link; 668 struct gpd_link *link;
885 669
886 if (genpd->status != GPD_STATE_POWER_OFF) 670 if (genpd->status == GPD_STATE_ACTIVE)
887 return; 671 return;
888 672
889 list_for_each_entry(link, &genpd->slave_links, slave_node) { 673 list_for_each_entry(link, &genpd->slave_links, slave_node) {
@@ -960,14 +744,14 @@ static int pm_genpd_prepare(struct device *dev)
960 if (resume_needed(dev, genpd)) 744 if (resume_needed(dev, genpd))
961 pm_runtime_resume(dev); 745 pm_runtime_resume(dev);
962 746
963 genpd_acquire_lock(genpd); 747 mutex_lock(&genpd->lock);
964 748
965 if (genpd->prepared_count++ == 0) { 749 if (genpd->prepared_count++ == 0) {
966 genpd->suspended_count = 0; 750 genpd->suspended_count = 0;
967 genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; 751 genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
968 } 752 }
969 753
970 genpd_release_lock(genpd); 754 mutex_unlock(&genpd->lock);
971 755
972 if (genpd->suspend_power_off) { 756 if (genpd->suspend_power_off) {
973 pm_runtime_put_noidle(dev); 757 pm_runtime_put_noidle(dev);
@@ -1102,7 +886,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
1102 pm_genpd_sync_poweron(genpd, true); 886 pm_genpd_sync_poweron(genpd, true);
1103 genpd->suspended_count--; 887 genpd->suspended_count--;
1104 888
1105 return genpd_start_dev(genpd, dev); 889 return genpd_start_dev(genpd, dev, true);
1106} 890}
1107 891
1108/** 892/**
@@ -1230,7 +1014,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
1230 if (IS_ERR(genpd)) 1014 if (IS_ERR(genpd))
1231 return -EINVAL; 1015 return -EINVAL;
1232 1016
1233 return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev); 1017 return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev, true);
1234} 1018}
1235 1019
1236/** 1020/**
@@ -1324,7 +1108,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
1324 1108
1325 pm_genpd_sync_poweron(genpd, true); 1109 pm_genpd_sync_poweron(genpd, true);
1326 1110
1327 return genpd_start_dev(genpd, dev); 1111 return genpd_start_dev(genpd, dev, true);
1328} 1112}
1329 1113
1330/** 1114/**
@@ -1440,7 +1224,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1440 gpd_data->td = *td; 1224 gpd_data->td = *td;
1441 1225
1442 gpd_data->base.dev = dev; 1226 gpd_data->base.dev = dev;
1443 gpd_data->need_restore = -1;
1444 gpd_data->td.constraint_changed = true; 1227 gpd_data->td.constraint_changed = true;
1445 gpd_data->td.effective_constraint_ns = -1; 1228 gpd_data->td.effective_constraint_ns = -1;
1446 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; 1229 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
@@ -1502,7 +1285,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1502 if (IS_ERR(gpd_data)) 1285 if (IS_ERR(gpd_data))
1503 return PTR_ERR(gpd_data); 1286 return PTR_ERR(gpd_data);
1504 1287
1505 genpd_acquire_lock(genpd); 1288 mutex_lock(&genpd->lock);
1506 1289
1507 if (genpd->prepared_count > 0) { 1290 if (genpd->prepared_count > 0) {
1508 ret = -EAGAIN; 1291 ret = -EAGAIN;
@@ -1519,7 +1302,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1519 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); 1302 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1520 1303
1521 out: 1304 out:
1522 genpd_release_lock(genpd); 1305 mutex_unlock(&genpd->lock);
1523 1306
1524 if (ret) 1307 if (ret)
1525 genpd_free_dev_data(dev, gpd_data); 1308 genpd_free_dev_data(dev, gpd_data);
@@ -1563,7 +1346,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1563 gpd_data = to_gpd_data(pdd); 1346 gpd_data = to_gpd_data(pdd);
1564 dev_pm_qos_remove_notifier(dev, &gpd_data->nb); 1347 dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1565 1348
1566 genpd_acquire_lock(genpd); 1349 mutex_lock(&genpd->lock);
1567 1350
1568 if (genpd->prepared_count > 0) { 1351 if (genpd->prepared_count > 0) {
1569 ret = -EAGAIN; 1352 ret = -EAGAIN;
@@ -1578,14 +1361,14 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1578 1361
1579 list_del_init(&pdd->list_node); 1362 list_del_init(&pdd->list_node);
1580 1363
1581 genpd_release_lock(genpd); 1364 mutex_unlock(&genpd->lock);
1582 1365
1583 genpd_free_dev_data(dev, gpd_data); 1366 genpd_free_dev_data(dev, gpd_data);
1584 1367
1585 return 0; 1368 return 0;
1586 1369
1587 out: 1370 out:
1588 genpd_release_lock(genpd); 1371 mutex_unlock(&genpd->lock);
1589 dev_pm_qos_add_notifier(dev, &gpd_data->nb); 1372 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1590 1373
1591 return ret; 1374 return ret;
@@ -1606,17 +1389,9 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1606 || genpd == subdomain) 1389 || genpd == subdomain)
1607 return -EINVAL; 1390 return -EINVAL;
1608 1391
1609 start: 1392 mutex_lock(&genpd->lock);
1610 genpd_acquire_lock(genpd);
1611 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); 1393 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1612 1394
1613 if (subdomain->status != GPD_STATE_POWER_OFF
1614 && subdomain->status != GPD_STATE_ACTIVE) {
1615 mutex_unlock(&subdomain->lock);
1616 genpd_release_lock(genpd);
1617 goto start;
1618 }
1619
1620 if (genpd->status == GPD_STATE_POWER_OFF 1395 if (genpd->status == GPD_STATE_POWER_OFF
1621 && subdomain->status != GPD_STATE_POWER_OFF) { 1396 && subdomain->status != GPD_STATE_POWER_OFF) {
1622 ret = -EINVAL; 1397 ret = -EINVAL;
@@ -1644,7 +1419,7 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1644 1419
1645 out: 1420 out:
1646 mutex_unlock(&subdomain->lock); 1421 mutex_unlock(&subdomain->lock);
1647 genpd_release_lock(genpd); 1422 mutex_unlock(&genpd->lock);
1648 1423
1649 return ret; 1424 return ret;
1650} 1425}
@@ -1692,8 +1467,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1692 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) 1467 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1693 return -EINVAL; 1468 return -EINVAL;
1694 1469
1695 start: 1470 mutex_lock(&genpd->lock);
1696 genpd_acquire_lock(genpd);
1697 1471
1698 list_for_each_entry(link, &genpd->master_links, master_node) { 1472 list_for_each_entry(link, &genpd->master_links, master_node) {
1699 if (link->slave != subdomain) 1473 if (link->slave != subdomain)
@@ -1701,13 +1475,6 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1701 1475
1702 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); 1476 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1703 1477
1704 if (subdomain->status != GPD_STATE_POWER_OFF
1705 && subdomain->status != GPD_STATE_ACTIVE) {
1706 mutex_unlock(&subdomain->lock);
1707 genpd_release_lock(genpd);
1708 goto start;
1709 }
1710
1711 list_del(&link->master_node); 1478 list_del(&link->master_node);
1712 list_del(&link->slave_node); 1479 list_del(&link->slave_node);
1713 kfree(link); 1480 kfree(link);
@@ -1720,7 +1487,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1720 break; 1487 break;
1721 } 1488 }
1722 1489
1723 genpd_release_lock(genpd); 1490 mutex_unlock(&genpd->lock);
1724 1491
1725 return ret; 1492 return ret;
1726} 1493}
@@ -1744,7 +1511,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1744 if (IS_ERR_OR_NULL(genpd) || state < 0) 1511 if (IS_ERR_OR_NULL(genpd) || state < 0)
1745 return -EINVAL; 1512 return -EINVAL;
1746 1513
1747 genpd_acquire_lock(genpd); 1514 mutex_lock(&genpd->lock);
1748 1515
1749 if (genpd->cpuidle_data) { 1516 if (genpd->cpuidle_data) {
1750 ret = -EEXIST; 1517 ret = -EEXIST;
@@ -1775,7 +1542,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1775 genpd_recalc_cpu_exit_latency(genpd); 1542 genpd_recalc_cpu_exit_latency(genpd);
1776 1543
1777 out: 1544 out:
1778 genpd_release_lock(genpd); 1545 mutex_unlock(&genpd->lock);
1779 return ret; 1546 return ret;
1780 1547
1781 err: 1548 err:
@@ -1812,7 +1579,7 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1812 if (IS_ERR_OR_NULL(genpd)) 1579 if (IS_ERR_OR_NULL(genpd))
1813 return -EINVAL; 1580 return -EINVAL;
1814 1581
1815 genpd_acquire_lock(genpd); 1582 mutex_lock(&genpd->lock);
1816 1583
1817 cpuidle_data = genpd->cpuidle_data; 1584 cpuidle_data = genpd->cpuidle_data;
1818 if (!cpuidle_data) { 1585 if (!cpuidle_data) {
@@ -1830,7 +1597,7 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1830 kfree(cpuidle_data); 1597 kfree(cpuidle_data);
1831 1598
1832 out: 1599 out:
1833 genpd_release_lock(genpd); 1600 mutex_unlock(&genpd->lock);
1834 return ret; 1601 return ret;
1835} 1602}
1836 1603
@@ -1912,9 +1679,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
1912 genpd->in_progress = 0; 1679 genpd->in_progress = 0;
1913 atomic_set(&genpd->sd_count, 0); 1680 atomic_set(&genpd->sd_count, 0);
1914 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; 1681 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1915 init_waitqueue_head(&genpd->status_wait_queue);
1916 genpd->poweroff_task = NULL;
1917 genpd->resume_count = 0;
1918 genpd->device_count = 0; 1682 genpd->device_count = 0;
1919 genpd->max_off_time_ns = -1; 1683 genpd->max_off_time_ns = -1;
1920 genpd->max_off_time_changed = true; 1684 genpd->max_off_time_changed = true;
@@ -1952,6 +1716,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
1952 list_add(&genpd->gpd_list_node, &gpd_list); 1716 list_add(&genpd->gpd_list_node, &gpd_list);
1953 mutex_unlock(&gpd_list_lock); 1717 mutex_unlock(&gpd_list_lock);
1954} 1718}
1719EXPORT_SYMBOL_GPL(pm_genpd_init);
1955 1720
1956#ifdef CONFIG_PM_GENERIC_DOMAINS_OF 1721#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1957/* 1722/*
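With pm_genpd_init() now exported, a loadable PM-domain provider can register a domain from module code. The following is a minimal, hedged sketch, not part of this patch; my_pd and its power_on/power_off callbacks are hypothetical placeholders:

#include <linux/module.h>
#include <linux/pm_domain.h>

static int my_pd_power_on(struct generic_pm_domain *pd)
{
	/* enable the clocks/regulators backing the domain here */
	return 0;
}

static int my_pd_power_off(struct generic_pm_domain *pd)
{
	/* remove power from the domain here */
	return 0;
}

static struct generic_pm_domain my_pd = {
	.name		= "my_pd",
	.power_on	= my_pd_power_on,
	.power_off	= my_pd_power_off,
};

static int __init my_pd_module_init(void)
{
	pm_genpd_init(&my_pd, NULL, true);	/* register, starting in the "off" state */
	return 0;
}
module_init(my_pd_module_init);
MODULE_LICENSE("GPL");	/* the new export is GPL-only */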
@@ -2125,7 +1890,7 @@ EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
2125 1890
2126/** 1891/**
2127 * genpd_dev_pm_detach - Detach a device from its PM domain. 1892 * genpd_dev_pm_detach - Detach a device from its PM domain.
2128 * @dev: Device to attach. 1893 * @dev: Device to detach.
2129 * @power_off: Currently not used 1894 * @power_off: Currently not used
2130 * 1895 *
2131 * Try to locate a corresponding generic PM domain, which the device was 1896 * Try to locate a corresponding generic PM domain, which the device was
@@ -2183,7 +1948,10 @@ static void genpd_dev_pm_sync(struct device *dev)
2183 * Both generic and legacy Samsung-specific DT bindings are supported to keep 1948 * Both generic and legacy Samsung-specific DT bindings are supported to keep
2184 * backwards compatibility with existing DTBs. 1949 * backwards compatibility with existing DTBs.
2185 * 1950 *
2186 * Returns 0 on successfully attached PM domain or negative error code. 1951 * Returns 0 on successfully attached PM domain or negative error code. Note
1952 * that if a power-domain exists for the device, but it cannot be found or
1953 * turned on, then return -EPROBE_DEFER to ensure that the device is not
 1954 * probed and is retried later.
2187 */ 1955 */
2188int genpd_dev_pm_attach(struct device *dev) 1956int genpd_dev_pm_attach(struct device *dev)
2189{ 1957{
@@ -2220,7 +1988,7 @@ int genpd_dev_pm_attach(struct device *dev)
2220 dev_dbg(dev, "%s() failed to find PM domain: %ld\n", 1988 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2221 __func__, PTR_ERR(pd)); 1989 __func__, PTR_ERR(pd));
2222 of_node_put(dev->of_node); 1990 of_node_put(dev->of_node);
2223 return PTR_ERR(pd); 1991 return -EPROBE_DEFER;
2224 } 1992 }
2225 1993
2226 dev_dbg(dev, "adding to PM domain %s\n", pd->name); 1994 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
@@ -2238,14 +2006,15 @@ int genpd_dev_pm_attach(struct device *dev)
2238 dev_err(dev, "failed to add to PM domain %s: %d", 2006 dev_err(dev, "failed to add to PM domain %s: %d",
2239 pd->name, ret); 2007 pd->name, ret);
2240 of_node_put(dev->of_node); 2008 of_node_put(dev->of_node);
2241 return ret; 2009 goto out;
2242 } 2010 }
2243 2011
2244 dev->pm_domain->detach = genpd_dev_pm_detach; 2012 dev->pm_domain->detach = genpd_dev_pm_detach;
2245 dev->pm_domain->sync = genpd_dev_pm_sync; 2013 dev->pm_domain->sync = genpd_dev_pm_sync;
2246 pm_genpd_poweron(pd); 2014 ret = pm_genpd_poweron(pd);
2247 2015
2248 return 0; 2016out:
2017 return ret ? -EPROBE_DEFER : 0;
2249} 2018}
2250EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); 2019EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
2251#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ 2020#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
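The -EPROBE_DEFER return above is what lets the driver core retry probing once the PM domain becomes usable. A hedged sketch of how a caller might propagate it; my_probe() and the surrounding platform driver are hypothetical, and in practice the bus code normally reaches genpd_dev_pm_attach() via dev_pm_domain_attach():

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static int my_probe(struct platform_device *pdev)
{
	int ret;

	ret = genpd_dev_pm_attach(&pdev->dev);
	if (ret == -EPROBE_DEFER)
		return ret;	/* domain exists but cannot be found/powered yet: retry later */

	/* other error codes may simply mean "no PM domain for this device" */

	/* ... normal probe work ... */
	return 0;
}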
@@ -2293,9 +2062,6 @@ static int pm_genpd_summary_one(struct seq_file *s,
2293{ 2062{
2294 static const char * const status_lookup[] = { 2063 static const char * const status_lookup[] = {
2295 [GPD_STATE_ACTIVE] = "on", 2064 [GPD_STATE_ACTIVE] = "on",
2296 [GPD_STATE_WAIT_MASTER] = "wait-master",
2297 [GPD_STATE_BUSY] = "busy",
2298 [GPD_STATE_REPEAT] = "off-in-progress",
2299 [GPD_STATE_POWER_OFF] = "off" 2065 [GPD_STATE_POWER_OFF] = "off"
2300 }; 2066 };
2301 struct pm_domain_data *pm_data; 2067 struct pm_domain_data *pm_data;
@@ -2309,7 +2075,7 @@ static int pm_genpd_summary_one(struct seq_file *s,
2309 2075
2310 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup))) 2076 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
2311 goto exit; 2077 goto exit;
2312 seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]); 2078 seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]);
2313 2079
2314 /* 2080 /*
2315 * Modifications on the list require holding locks on both 2081 * Modifications on the list require holding locks on both
@@ -2344,8 +2110,8 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data)
2344 struct generic_pm_domain *genpd; 2110 struct generic_pm_domain *genpd;
2345 int ret = 0; 2111 int ret = 0;
2346 2112
2347 seq_puts(s, " domain status slaves\n"); 2113 seq_puts(s, "domain status slaves\n");
2348 seq_puts(s, " /device runtime status\n"); 2114 seq_puts(s, " /device runtime status\n");
2349 seq_puts(s, "----------------------------------------------------------------------\n"); 2115 seq_puts(s, "----------------------------------------------------------------------\n");
2350 2116
2351 ret = mutex_lock_interruptible(&gpd_list_lock); 2117 ret = mutex_lock_interruptible(&gpd_list_lock);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 30b7bbfdc558..1710c26ba097 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1377,7 +1377,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1377 if (dev->power.direct_complete) { 1377 if (dev->power.direct_complete) {
1378 if (pm_runtime_status_suspended(dev)) { 1378 if (pm_runtime_status_suspended(dev)) {
1379 pm_runtime_disable(dev); 1379 pm_runtime_disable(dev);
1380 if (pm_runtime_suspended_if_enabled(dev)) 1380 if (pm_runtime_status_suspended(dev))
1381 goto Complete; 1381 goto Complete;
1382 1382
1383 pm_runtime_enable(dev); 1383 pm_runtime_enable(dev);
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 3b188f20b43f..eb254497a494 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -11,6 +11,7 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13 13
14#include <linux/cpu.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/errno.h> 16#include <linux/errno.h>
16#include <linux/err.h> 17#include <linux/err.h>
@@ -51,10 +52,17 @@
51 * order. 52 * order.
52 * @dynamic: not-created from static DT entries. 53 * @dynamic: not-created from static DT entries.
53 * @available: true/false - marks if this OPP as available or not 54 * @available: true/false - marks if this OPP as available or not
55 * @turbo: true if turbo (boost) OPP
54 * @rate: Frequency in hertz 56 * @rate: Frequency in hertz
55 * @u_volt: Nominal voltage in microvolts corresponding to this OPP 57 * @u_volt: Target voltage in microvolts corresponding to this OPP
58 * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
59 * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP
60 * @u_amp: Maximum current drawn by the device in microamperes
61 * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
62 * frequency from any other OPP's frequency.
56 * @dev_opp: points back to the device_opp struct this opp belongs to 63 * @dev_opp: points back to the device_opp struct this opp belongs to
57 * @rcu_head: RCU callback head used for deferred freeing 64 * @rcu_head: RCU callback head used for deferred freeing
65 * @np: OPP's device node.
58 * 66 *
59 * This structure stores the OPP information for a given device. 67 * This structure stores the OPP information for a given device.
60 */ 68 */
@@ -63,11 +71,34 @@ struct dev_pm_opp {
63 71
64 bool available; 72 bool available;
65 bool dynamic; 73 bool dynamic;
74 bool turbo;
66 unsigned long rate; 75 unsigned long rate;
76
67 unsigned long u_volt; 77 unsigned long u_volt;
78 unsigned long u_volt_min;
79 unsigned long u_volt_max;
80 unsigned long u_amp;
81 unsigned long clock_latency_ns;
68 82
69 struct device_opp *dev_opp; 83 struct device_opp *dev_opp;
70 struct rcu_head rcu_head; 84 struct rcu_head rcu_head;
85
86 struct device_node *np;
87};
88
89/**
90 * struct device_list_opp - devices managed by 'struct device_opp'
91 * @node: list node
92 * @dev: device to which the struct object belongs
93 * @rcu_head: RCU callback head used for deferred freeing
94 *
95 * This is an internal data structure maintaining the list of devices that are
96 * managed by 'struct device_opp'.
97 */
98struct device_list_opp {
99 struct list_head node;
100 const struct device *dev;
101 struct rcu_head rcu_head;
71}; 102};
72 103
73/** 104/**
@@ -77,10 +108,12 @@ struct dev_pm_opp {
77 * list. 108 * list.
78 * RCU usage: nodes are not modified in the list of device_opp, 109 * RCU usage: nodes are not modified in the list of device_opp,
79 * however addition is possible and is secured by dev_opp_list_lock 110 * however addition is possible and is secured by dev_opp_list_lock
80 * @dev: device pointer
81 * @srcu_head: notifier head to notify the OPP availability changes. 111 * @srcu_head: notifier head to notify the OPP availability changes.
82 * @rcu_head: RCU callback head used for deferred freeing 112 * @rcu_head: RCU callback head used for deferred freeing
113 * @dev_list: list of devices that share these OPPs
83 * @opp_list: list of opps 114 * @opp_list: list of opps
115 * @np: struct device_node pointer for opp's DT node.
116 * @shared_opp: OPP is shared between multiple devices.
84 * 117 *
85 * This is an internal data structure maintaining the link to opps attached to 118 * This is an internal data structure maintaining the link to opps attached to
86 * a device. This structure is not meant to be shared to users as it is 119 * a device. This structure is not meant to be shared to users as it is
@@ -93,10 +126,15 @@ struct dev_pm_opp {
93struct device_opp { 126struct device_opp {
94 struct list_head node; 127 struct list_head node;
95 128
96 struct device *dev;
97 struct srcu_notifier_head srcu_head; 129 struct srcu_notifier_head srcu_head;
98 struct rcu_head rcu_head; 130 struct rcu_head rcu_head;
131 struct list_head dev_list;
99 struct list_head opp_list; 132 struct list_head opp_list;
133
134 struct device_node *np;
135 unsigned long clock_latency_ns_max;
136 bool shared_opp;
137 struct dev_pm_opp *suspend_opp;
100}; 138};
101 139
102/* 140/*
@@ -116,6 +154,38 @@ do { \
116 "dev_opp_list_lock protection"); \ 154 "dev_opp_list_lock protection"); \
117} while (0) 155} while (0)
118 156
157static struct device_list_opp *_find_list_dev(const struct device *dev,
158 struct device_opp *dev_opp)
159{
160 struct device_list_opp *list_dev;
161
162 list_for_each_entry(list_dev, &dev_opp->dev_list, node)
163 if (list_dev->dev == dev)
164 return list_dev;
165
166 return NULL;
167}
168
169static struct device_opp *_managed_opp(const struct device_node *np)
170{
171 struct device_opp *dev_opp;
172
173 list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
174 if (dev_opp->np == np) {
175 /*
176 * Multiple devices can point to the same OPP table and
177 * so will have same node-pointer, np.
178 *
179 * But the OPPs will be considered as shared only if the
180 * OPP table contains a "opp-shared" property.
181 */
182 return dev_opp->shared_opp ? dev_opp : NULL;
183 }
184 }
185
186 return NULL;
187}
188
119/** 189/**
120 * _find_device_opp() - find device_opp struct using device pointer 190 * _find_device_opp() - find device_opp struct using device pointer
121 * @dev: device pointer used to lookup device OPPs 191 * @dev: device pointer used to lookup device OPPs
@@ -132,21 +202,18 @@ do { \
132 */ 202 */
133static struct device_opp *_find_device_opp(struct device *dev) 203static struct device_opp *_find_device_opp(struct device *dev)
134{ 204{
135 struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); 205 struct device_opp *dev_opp;
136 206
137 if (unlikely(IS_ERR_OR_NULL(dev))) { 207 if (IS_ERR_OR_NULL(dev)) {
138 pr_err("%s: Invalid parameters\n", __func__); 208 pr_err("%s: Invalid parameters\n", __func__);
139 return ERR_PTR(-EINVAL); 209 return ERR_PTR(-EINVAL);
140 } 210 }
141 211
142 list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) { 212 list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
143 if (tmp_dev_opp->dev == dev) { 213 if (_find_list_dev(dev, dev_opp))
144 dev_opp = tmp_dev_opp; 214 return dev_opp;
145 break;
146 }
147 }
148 215
149 return dev_opp; 216 return ERR_PTR(-ENODEV);
150} 217}
151 218
152/** 219/**
@@ -172,7 +239,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
172 opp_rcu_lockdep_assert(); 239 opp_rcu_lockdep_assert();
173 240
174 tmp_opp = rcu_dereference(opp); 241 tmp_opp = rcu_dereference(opp);
175 if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) 242 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
176 pr_err("%s: Invalid parameters\n", __func__); 243 pr_err("%s: Invalid parameters\n", __func__);
177 else 244 else
178 v = tmp_opp->u_volt; 245 v = tmp_opp->u_volt;
@@ -204,7 +271,7 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
204 opp_rcu_lockdep_assert(); 271 opp_rcu_lockdep_assert();
205 272
206 tmp_opp = rcu_dereference(opp); 273 tmp_opp = rcu_dereference(opp);
207 if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) 274 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
208 pr_err("%s: Invalid parameters\n", __func__); 275 pr_err("%s: Invalid parameters\n", __func__);
209 else 276 else
210 f = tmp_opp->rate; 277 f = tmp_opp->rate;
@@ -214,6 +281,66 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
214EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); 281EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
215 282
216/** 283/**
284 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
285 * @opp: opp for which turbo mode is being verified
286 *
287 * Turbo OPPs are not for normal use, and can be enabled (under certain
288 * conditions) for short duration of times to finish high throughput work
289 * quickly. Running on them for longer times may overheat the chip.
290 *
291 * Return: true if opp is turbo opp, else false.
292 *
293 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
294 * protected pointer. This means that opp which could have been fetched by
295 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
296 * under RCU lock. The pointer returned by the opp_find_freq family must be
297 * used in the same section as the usage of this function with the pointer
298 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
299 * pointer.
300 */
301bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
302{
303 struct dev_pm_opp *tmp_opp;
304
305 opp_rcu_lockdep_assert();
306
307 tmp_opp = rcu_dereference(opp);
308 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
309 pr_err("%s: Invalid parameters\n", __func__);
310 return false;
311 }
312
313 return tmp_opp->turbo;
314}
315EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
316
317/**
318 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
319 * @dev: device for which we do this operation
320 *
321 * Return: This function returns the max clock latency in nanoseconds.
322 *
323 * Locking: This function takes rcu_read_lock().
324 */
325unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
326{
327 struct device_opp *dev_opp;
328 unsigned long clock_latency_ns;
329
330 rcu_read_lock();
331
332 dev_opp = _find_device_opp(dev);
333 if (IS_ERR(dev_opp))
334 clock_latency_ns = 0;
335 else
336 clock_latency_ns = dev_opp->clock_latency_ns_max;
337
338 rcu_read_unlock();
339 return clock_latency_ns;
340}
341EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
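A hedged usage sketch for the two helpers added above. The device pointer and target frequency are assumed to come from a hypothetical caller; dev_pm_opp_find_freq_exact() is the existing lookup helper, and the RCU read-side lock is required around the OPP pointer as documented:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

static void report_opp_info(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	bool turbo = false;

	rcu_read_lock();	/* dev_pm_opp_is_turbo() must run under RCU */
	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (!IS_ERR(opp))
		turbo = dev_pm_opp_is_turbo(opp);
	rcu_read_unlock();

	dev_info(dev, "%lu Hz: turbo=%d, max clock latency %lu ns\n",
		 freq, turbo, dev_pm_opp_get_max_clock_latency(dev));
}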
342
343/**
217 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list 344 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
218 * @dev: device for which we do this operation 345 * @dev: device for which we do this operation
219 * 346 *
@@ -407,18 +534,57 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
407} 534}
408EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); 535EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
409 536
537/* List-dev Helpers */
538static void _kfree_list_dev_rcu(struct rcu_head *head)
539{
540 struct device_list_opp *list_dev;
541
542 list_dev = container_of(head, struct device_list_opp, rcu_head);
543 kfree_rcu(list_dev, rcu_head);
544}
545
546static void _remove_list_dev(struct device_list_opp *list_dev,
547 struct device_opp *dev_opp)
548{
549 list_del(&list_dev->node);
550 call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
551 _kfree_list_dev_rcu);
552}
553
554static struct device_list_opp *_add_list_dev(const struct device *dev,
555 struct device_opp *dev_opp)
556{
557 struct device_list_opp *list_dev;
558
559 list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
560 if (!list_dev)
561 return NULL;
562
563 /* Initialize list-dev */
564 list_dev->dev = dev;
565 list_add_rcu(&list_dev->node, &dev_opp->dev_list);
566
567 return list_dev;
568}
569
410/** 570/**
411 * _add_device_opp() - Allocate a new device OPP table 571 * _add_device_opp() - Find device OPP table or allocate a new one
412 * @dev: device for which we do this operation 572 * @dev: device for which we do this operation
413 * 573 *
414 * New device node which uses OPPs - used when multiple devices with OPP tables 574 * It tries to find an existing table first, if it couldn't find one, it
415 * are maintained. 575 * allocates a new OPP table and returns that.
416 * 576 *
417 * Return: valid device_opp pointer if success, else NULL. 577 * Return: valid device_opp pointer if success, else NULL.
418 */ 578 */
419static struct device_opp *_add_device_opp(struct device *dev) 579static struct device_opp *_add_device_opp(struct device *dev)
420{ 580{
421 struct device_opp *dev_opp; 581 struct device_opp *dev_opp;
582 struct device_list_opp *list_dev;
583
584 /* Check for existing list for 'dev' first */
585 dev_opp = _find_device_opp(dev);
586 if (!IS_ERR(dev_opp))
587 return dev_opp;
422 588
423 /* 589 /*
424 * Allocate a new device OPP table. In the infrequent case where a new 590 * Allocate a new device OPP table. In the infrequent case where a new
@@ -428,7 +594,14 @@ static struct device_opp *_add_device_opp(struct device *dev)
428 if (!dev_opp) 594 if (!dev_opp)
429 return NULL; 595 return NULL;
430 596
431 dev_opp->dev = dev; 597 INIT_LIST_HEAD(&dev_opp->dev_list);
598
599 list_dev = _add_list_dev(dev, dev_opp);
600 if (!list_dev) {
601 kfree(dev_opp);
602 return NULL;
603 }
604
432 srcu_init_notifier_head(&dev_opp->srcu_head); 605 srcu_init_notifier_head(&dev_opp->srcu_head);
433 INIT_LIST_HEAD(&dev_opp->opp_list); 606 INIT_LIST_HEAD(&dev_opp->opp_list);
434 607
@@ -438,6 +611,185 @@ static struct device_opp *_add_device_opp(struct device *dev)
438} 611}
439 612
440/** 613/**
614 * _kfree_device_rcu() - Free device_opp RCU handler
615 * @head: RCU head
616 */
617static void _kfree_device_rcu(struct rcu_head *head)
618{
619 struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
620
621 kfree_rcu(device_opp, rcu_head);
622}
623
624/**
625 * _remove_device_opp() - Removes a device OPP table
626 * @dev_opp: device OPP table to be removed.
627 *
 628 * Removes/frees device OPP table if it doesn't contain any OPPs.
629 */
630static void _remove_device_opp(struct device_opp *dev_opp)
631{
632 struct device_list_opp *list_dev;
633
634 if (!list_empty(&dev_opp->opp_list))
635 return;
636
637 list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
638 node);
639
640 _remove_list_dev(list_dev, dev_opp);
641
642 /* dev_list must be empty now */
643 WARN_ON(!list_empty(&dev_opp->dev_list));
644
645 list_del_rcu(&dev_opp->node);
646 call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
647 _kfree_device_rcu);
648}
649
650/**
651 * _kfree_opp_rcu() - Free OPP RCU handler
652 * @head: RCU head
653 */
654static void _kfree_opp_rcu(struct rcu_head *head)
655{
656 struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
657
658 kfree_rcu(opp, rcu_head);
659}
660
661/**
662 * _opp_remove() - Remove an OPP from a table definition
663 * @dev_opp: points back to the device_opp struct this opp belongs to
664 * @opp: pointer to the OPP to remove
665 * @notify: OPP_EVENT_REMOVE notification should be sent or not
666 *
667 * This function removes an opp definition from the opp list.
668 *
669 * Locking: The internal device_opp and opp structures are RCU protected.
670 * It is assumed that the caller holds required mutex for an RCU updater
671 * strategy.
672 */
673static void _opp_remove(struct device_opp *dev_opp,
674 struct dev_pm_opp *opp, bool notify)
675{
676 /*
677 * Notify the changes in the availability of the operable
678 * frequency/voltage list.
679 */
680 if (notify)
681 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
682 list_del_rcu(&opp->node);
683 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
684
685 _remove_device_opp(dev_opp);
686}
687
688/**
689 * dev_pm_opp_remove() - Remove an OPP from OPP list
690 * @dev: device for which we do this operation
691 * @freq: OPP to remove with matching 'freq'
692 *
693 * This function removes an opp from the opp list.
694 *
695 * Locking: The internal device_opp and opp structures are RCU protected.
696 * Hence this function internally uses RCU updater strategy with mutex locks
697 * to keep the integrity of the internal data structures. Callers should ensure
698 * that this function is *NOT* called under RCU protection or in contexts where
699 * mutex cannot be locked.
700 */
701void dev_pm_opp_remove(struct device *dev, unsigned long freq)
702{
703 struct dev_pm_opp *opp;
704 struct device_opp *dev_opp;
705 bool found = false;
706
707 /* Hold our list modification lock here */
708 mutex_lock(&dev_opp_list_lock);
709
710 dev_opp = _find_device_opp(dev);
711 if (IS_ERR(dev_opp))
712 goto unlock;
713
714 list_for_each_entry(opp, &dev_opp->opp_list, node) {
715 if (opp->rate == freq) {
716 found = true;
717 break;
718 }
719 }
720
721 if (!found) {
722 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
723 __func__, freq);
724 goto unlock;
725 }
726
727 _opp_remove(dev_opp, opp, true);
728unlock:
729 mutex_unlock(&dev_opp_list_lock);
730}
731EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
732
733static struct dev_pm_opp *_allocate_opp(struct device *dev,
734 struct device_opp **dev_opp)
735{
736 struct dev_pm_opp *opp;
737
738 /* allocate new OPP node */
739 opp = kzalloc(sizeof(*opp), GFP_KERNEL);
740 if (!opp)
741 return NULL;
742
743 INIT_LIST_HEAD(&opp->node);
744
745 *dev_opp = _add_device_opp(dev);
746 if (!*dev_opp) {
747 kfree(opp);
748 return NULL;
749 }
750
751 return opp;
752}
753
754static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
755 struct device_opp *dev_opp)
756{
757 struct dev_pm_opp *opp;
758 struct list_head *head = &dev_opp->opp_list;
759
760 /*
761 * Insert new OPP in order of increasing frequency and discard if
762 * already present.
763 *
764 * Need to use &dev_opp->opp_list in the condition part of the 'for'
765 * loop, don't replace it with head otherwise it will become an infinite
766 * loop.
767 */
768 list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
769 if (new_opp->rate > opp->rate) {
770 head = &opp->node;
771 continue;
772 }
773
774 if (new_opp->rate < opp->rate)
775 break;
776
777 /* Duplicate OPPs */
778 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
779 __func__, opp->rate, opp->u_volt, opp->available,
780 new_opp->rate, new_opp->u_volt, new_opp->available);
781
782 return opp->available && new_opp->u_volt == opp->u_volt ?
783 0 : -EEXIST;
784 }
785
786 new_opp->dev_opp = dev_opp;
787 list_add_rcu(&new_opp->node, head);
788
789 return 0;
790}
791
792/**
441 * _opp_add_dynamic() - Allocate a dynamic OPP. 793 * _opp_add_dynamic() - Allocate a dynamic OPP.
442 * @dev: device for which we do this operation 794 * @dev: device for which we do this operation
443 * @freq: Frequency in Hz for this OPP 795 * @freq: Frequency in Hz for this OPP
@@ -467,64 +819,29 @@ static struct device_opp *_add_device_opp(struct device *dev)
467static int _opp_add_dynamic(struct device *dev, unsigned long freq, 819static int _opp_add_dynamic(struct device *dev, unsigned long freq,
468 long u_volt, bool dynamic) 820 long u_volt, bool dynamic)
469{ 821{
470 struct device_opp *dev_opp = NULL; 822 struct device_opp *dev_opp;
471 struct dev_pm_opp *opp, *new_opp; 823 struct dev_pm_opp *new_opp;
472 struct list_head *head;
473 int ret; 824 int ret;
474 825
475 /* allocate new OPP node */
476 new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
477 if (!new_opp)
478 return -ENOMEM;
479
480 /* Hold our list modification lock here */ 826 /* Hold our list modification lock here */
481 mutex_lock(&dev_opp_list_lock); 827 mutex_lock(&dev_opp_list_lock);
482 828
829 new_opp = _allocate_opp(dev, &dev_opp);
830 if (!new_opp) {
831 ret = -ENOMEM;
832 goto unlock;
833 }
834
483 /* populate the opp table */ 835 /* populate the opp table */
484 new_opp->rate = freq; 836 new_opp->rate = freq;
485 new_opp->u_volt = u_volt; 837 new_opp->u_volt = u_volt;
486 new_opp->available = true; 838 new_opp->available = true;
487 new_opp->dynamic = dynamic; 839 new_opp->dynamic = dynamic;
488 840
489 /* Check for existing list for 'dev' */ 841 ret = _opp_add(dev, new_opp, dev_opp);
490 dev_opp = _find_device_opp(dev); 842 if (ret)
491 if (IS_ERR(dev_opp)) {
492 dev_opp = _add_device_opp(dev);
493 if (!dev_opp) {
494 ret = -ENOMEM;
495 goto free_opp;
496 }
497
498 head = &dev_opp->opp_list;
499 goto list_add;
500 }
501
502 /*
503 * Insert new OPP in order of increasing frequency
504 * and discard if already present
505 */
506 head = &dev_opp->opp_list;
507 list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
508 if (new_opp->rate <= opp->rate)
509 break;
510 else
511 head = &opp->node;
512 }
513
514 /* Duplicate OPPs ? */
515 if (new_opp->rate == opp->rate) {
516 ret = opp->available && new_opp->u_volt == opp->u_volt ?
517 0 : -EEXIST;
518
519 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
520 __func__, opp->rate, opp->u_volt, opp->available,
521 new_opp->rate, new_opp->u_volt, new_opp->available);
522 goto free_opp; 843 goto free_opp;
523 }
524 844
525list_add:
526 new_opp->dev_opp = dev_opp;
527 list_add_rcu(&new_opp->node, head);
528 mutex_unlock(&dev_opp_list_lock); 845 mutex_unlock(&dev_opp_list_lock);
529 846
530 /* 847 /*
@@ -535,20 +852,52 @@ list_add:
535 return 0; 852 return 0;
536 853
537free_opp: 854free_opp:
855 _opp_remove(dev_opp, new_opp, false);
856unlock:
538 mutex_unlock(&dev_opp_list_lock); 857 mutex_unlock(&dev_opp_list_lock);
539 kfree(new_opp);
540 return ret; 858 return ret;
541} 859}
542 860
861/* TODO: Support multiple regulators */
862static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev)
863{
864 u32 microvolt[3] = {0};
865 int count, ret;
866
867 count = of_property_count_u32_elems(opp->np, "opp-microvolt");
868 if (!count)
869 return 0;
870
871 /* There can be one or three elements here */
872 if (count != 1 && count != 3) {
873 dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
874 __func__, count);
875 return -EINVAL;
876 }
877
878 ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt,
879 count);
880 if (ret) {
881 dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__,
882 ret);
883 return -EINVAL;
884 }
885
886 opp->u_volt = microvolt[0];
887 opp->u_volt_min = microvolt[1];
888 opp->u_volt_max = microvolt[2];
889
890 return 0;
891}
892
543/** 893/**
544 * dev_pm_opp_add() - Add an OPP table from a table definitions 894 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
545 * @dev: device for which we do this operation 895 * @dev: device for which we do this operation
546 * @freq: Frequency in Hz for this OPP 896 * @np: device node
547 * @u_volt: Voltage in uVolts for this OPP
548 * 897 *
549 * This function adds an opp definition to the opp list and returns status. 898 * This function adds an opp definition to the opp list and returns status. The
550 * The opp is made available by default and it can be controlled using 899 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
551 * dev_pm_opp_enable/disable functions. 900 * removed by dev_pm_opp_remove.
552 * 901 *
553 * Locking: The internal device_opp and opp structures are RCU protected. 902 * Locking: The internal device_opp and opp structures are RCU protected.
554 * Hence this function internally uses RCU updater strategy with mutex locks 903 * Hence this function internally uses RCU updater strategy with mutex locks
@@ -562,108 +911,119 @@ free_opp:
562 * -EEXIST Freq are same and volt are different OR 911 * -EEXIST Freq are same and volt are different OR
563 * Duplicate OPPs (both freq and volt are same) and !opp->available 912 * Duplicate OPPs (both freq and volt are same) and !opp->available
564 * -ENOMEM Memory allocation failure 913 * -ENOMEM Memory allocation failure
914 * -EINVAL Failed parsing the OPP node
565 */ 915 */
566int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) 916static int _opp_add_static_v2(struct device *dev, struct device_node *np)
567{ 917{
568 return _opp_add_dynamic(dev, freq, u_volt, true); 918 struct device_opp *dev_opp;
569} 919 struct dev_pm_opp *new_opp;
570EXPORT_SYMBOL_GPL(dev_pm_opp_add); 920 u64 rate;
921 u32 val;
922 int ret;
571 923
572/** 924 /* Hold our list modification lock here */
573 * _kfree_opp_rcu() - Free OPP RCU handler 925 mutex_lock(&dev_opp_list_lock);
574 * @head: RCU head
575 */
576static void _kfree_opp_rcu(struct rcu_head *head)
577{
578 struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
579 926
580 kfree_rcu(opp, rcu_head); 927 new_opp = _allocate_opp(dev, &dev_opp);
581} 928 if (!new_opp) {
929 ret = -ENOMEM;
930 goto unlock;
931 }
582 932
583/** 933 ret = of_property_read_u64(np, "opp-hz", &rate);
584 * _kfree_device_rcu() - Free device_opp RCU handler 934 if (ret < 0) {
585 * @head: RCU head 935 dev_err(dev, "%s: opp-hz not found\n", __func__);
586 */ 936 goto free_opp;
587static void _kfree_device_rcu(struct rcu_head *head) 937 }
588{
589 struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
590 938
591 kfree_rcu(device_opp, rcu_head); 939 /*
592} 940 * Rate is defined as an unsigned long in clk API, and so casting
941 * explicitly to its type. Must be fixed once rate is 64 bit
942 * guaranteed in clk API.
943 */
944 new_opp->rate = (unsigned long)rate;
945 new_opp->turbo = of_property_read_bool(np, "turbo-mode");
946
947 new_opp->np = np;
948 new_opp->dynamic = false;
949 new_opp->available = true;
950
951 if (!of_property_read_u32(np, "clock-latency-ns", &val))
952 new_opp->clock_latency_ns = val;
953
954 ret = opp_get_microvolt(new_opp, dev);
955 if (ret)
956 goto free_opp;
957
958 if (!of_property_read_u32(new_opp->np, "opp-microamp", &val))
959 new_opp->u_amp = val;
960
961 ret = _opp_add(dev, new_opp, dev_opp);
962 if (ret)
963 goto free_opp;
964
965 /* OPP to select on device suspend */
966 if (of_property_read_bool(np, "opp-suspend")) {
967 if (dev_opp->suspend_opp)
968 dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
969 __func__, dev_opp->suspend_opp->rate,
970 new_opp->rate);
971 else
972 dev_opp->suspend_opp = new_opp;
973 }
974
975 if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
976 dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
977
978 mutex_unlock(&dev_opp_list_lock);
979
980 pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
981 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
982 new_opp->u_volt_min, new_opp->u_volt_max,
983 new_opp->clock_latency_ns);
593 984
594/**
595 * _opp_remove() - Remove an OPP from a table definition
596 * @dev_opp: points back to the device_opp struct this opp belongs to
597 * @opp: pointer to the OPP to remove
598 *
599 * This function removes an opp definition from the opp list.
600 *
601 * Locking: The internal device_opp and opp structures are RCU protected.
602 * It is assumed that the caller holds required mutex for an RCU updater
603 * strategy.
604 */
605static void _opp_remove(struct device_opp *dev_opp,
606 struct dev_pm_opp *opp)
607{
608 /* 985 /*
609 * Notify the changes in the availability of the operable 986 * Notify the changes in the availability of the operable
610 * frequency/voltage list. 987 * frequency/voltage list.
611 */ 988 */
612 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp); 989 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
613 list_del_rcu(&opp->node); 990 return 0;
614 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
615 991
616 if (list_empty(&dev_opp->opp_list)) { 992free_opp:
617 list_del_rcu(&dev_opp->node); 993 _opp_remove(dev_opp, new_opp, false);
618 call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head, 994unlock:
619 _kfree_device_rcu); 995 mutex_unlock(&dev_opp_list_lock);
620 } 996 return ret;
621} 997}
622 998
623/** 999/**
624 * dev_pm_opp_remove() - Remove an OPP from OPP list 1000 * dev_pm_opp_add() - Add an OPP table from a table definitions
625 * @dev: device for which we do this operation 1001 * @dev: device for which we do this operation
626 * @freq: OPP to remove with matching 'freq' 1002 * @freq: Frequency in Hz for this OPP
1003 * @u_volt: Voltage in uVolts for this OPP
627 * 1004 *
628 * This function removes an opp from the opp list. 1005 * This function adds an opp definition to the opp list and returns status.
1006 * The opp is made available by default and it can be controlled using
1007 * dev_pm_opp_enable/disable functions.
629 * 1008 *
630 * Locking: The internal device_opp and opp structures are RCU protected. 1009 * Locking: The internal device_opp and opp structures are RCU protected.
631 * Hence this function internally uses RCU updater strategy with mutex locks 1010 * Hence this function internally uses RCU updater strategy with mutex locks
632 * to keep the integrity of the internal data structures. Callers should ensure 1011 * to keep the integrity of the internal data structures. Callers should ensure
633 * that this function is *NOT* called under RCU protection or in contexts where 1012 * that this function is *NOT* called under RCU protection or in contexts where
634 * mutex cannot be locked. 1013 * mutex cannot be locked.
1014 *
1015 * Return:
1016 * 0 On success OR
1017 * Duplicate OPPs (both freq and volt are same) and opp->available
1018 * -EEXIST Freq are same and volt are different OR
1019 * Duplicate OPPs (both freq and volt are same) and !opp->available
1020 * -ENOMEM Memory allocation failure
635 */ 1021 */
636void dev_pm_opp_remove(struct device *dev, unsigned long freq) 1022int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
637{ 1023{
638 struct dev_pm_opp *opp; 1024 return _opp_add_dynamic(dev, freq, u_volt, true);
639 struct device_opp *dev_opp;
640 bool found = false;
641
642 /* Hold our list modification lock here */
643 mutex_lock(&dev_opp_list_lock);
644
645 dev_opp = _find_device_opp(dev);
646 if (IS_ERR(dev_opp))
647 goto unlock;
648
649 list_for_each_entry(opp, &dev_opp->opp_list, node) {
650 if (opp->rate == freq) {
651 found = true;
652 break;
653 }
654 }
655
656 if (!found) {
657 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
658 __func__, freq);
659 goto unlock;
660 }
661
662 _opp_remove(dev_opp, opp);
663unlock:
664 mutex_unlock(&dev_opp_list_lock);
665} 1025}
666EXPORT_SYMBOL_GPL(dev_pm_opp_remove); 1026EXPORT_SYMBOL_GPL(dev_pm_opp_add);
667 1027
668/** 1028/**
669 * _opp_set_availability() - helper to set the availability of an opp 1029 * _opp_set_availability() - helper to set the availability of an opp
@@ -825,28 +1185,179 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
825 1185
826#ifdef CONFIG_OF 1186#ifdef CONFIG_OF
827/** 1187/**
828 * of_init_opp_table() - Initialize opp table from device tree 1188 * of_free_opp_table() - Free OPP table entries created from static DT entries
829 * @dev: device pointer used to lookup device OPPs. 1189 * @dev: device pointer used to lookup device OPPs.
830 * 1190 *
831 * Register the initial OPP table with the OPP library for given device. 1191 * Free OPPs created using static entries present in DT.
832 * 1192 *
833 * Locking: The internal device_opp and opp structures are RCU protected. 1193 * Locking: The internal device_opp and opp structures are RCU protected.
834 * Hence this function indirectly uses RCU updater strategy with mutex locks 1194 * Hence this function indirectly uses RCU updater strategy with mutex locks
835 * to keep the integrity of the internal data structures. Callers should ensure 1195 * to keep the integrity of the internal data structures. Callers should ensure
836 * that this function is *NOT* called under RCU protection or in contexts where 1196 * that this function is *NOT* called under RCU protection or in contexts where
837 * mutex cannot be locked. 1197 * mutex cannot be locked.
838 *
839 * Return:
840 * 0 On success OR
841 * Duplicate OPPs (both freq and volt are same) and opp->available
842 * -EEXIST Freq are same and volt are different OR
843 * Duplicate OPPs (both freq and volt are same) and !opp->available
844 * -ENOMEM Memory allocation failure
845 * -ENODEV when 'operating-points' property is not found or is invalid data
846 * in device node.
847 * -ENODATA when empty 'operating-points' property is found
848 */ 1198 */
849int of_init_opp_table(struct device *dev) 1199void of_free_opp_table(struct device *dev)
1200{
1201 struct device_opp *dev_opp;
1202 struct dev_pm_opp *opp, *tmp;
1203
1204 /* Hold our list modification lock here */
1205 mutex_lock(&dev_opp_list_lock);
1206
1207 /* Check for existing list for 'dev' */
1208 dev_opp = _find_device_opp(dev);
1209 if (IS_ERR(dev_opp)) {
1210 int error = PTR_ERR(dev_opp);
1211
1212 if (error != -ENODEV)
1213 WARN(1, "%s: dev_opp: %d\n",
1214 IS_ERR_OR_NULL(dev) ?
1215 "Invalid device" : dev_name(dev),
1216 error);
1217 goto unlock;
1218 }
1219
1220 /* Find if dev_opp manages a single device */
1221 if (list_is_singular(&dev_opp->dev_list)) {
1222 /* Free static OPPs */
1223 list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
1224 if (!opp->dynamic)
1225 _opp_remove(dev_opp, opp, true);
1226 }
1227 } else {
1228 _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
1229 }
1230
1231unlock:
1232 mutex_unlock(&dev_opp_list_lock);
1233}
1234EXPORT_SYMBOL_GPL(of_free_opp_table);
1235
1236void of_cpumask_free_opp_table(cpumask_var_t cpumask)
1237{
1238 struct device *cpu_dev;
1239 int cpu;
1240
1241 WARN_ON(cpumask_empty(cpumask));
1242
1243 for_each_cpu(cpu, cpumask) {
1244 cpu_dev = get_cpu_device(cpu);
1245 if (!cpu_dev) {
1246 pr_err("%s: failed to get cpu%d device\n", __func__,
1247 cpu);
1248 continue;
1249 }
1250
1251 of_free_opp_table(cpu_dev);
1252 }
1253}
1254EXPORT_SYMBOL_GPL(of_cpumask_free_opp_table);
1255
1256/* Returns opp descriptor node from its phandle. Caller must do of_node_put() */
1257static struct device_node *
1258_of_get_opp_desc_node_from_prop(struct device *dev, const struct property *prop)
1259{
1260 struct device_node *opp_np;
1261
1262 opp_np = of_find_node_by_phandle(be32_to_cpup(prop->value));
1263 if (!opp_np) {
1264 dev_err(dev, "%s: Prop: %s contains invalid opp desc phandle\n",
1265 __func__, prop->name);
1266 return ERR_PTR(-EINVAL);
1267 }
1268
1269 return opp_np;
1270}
1271
1272/* Returns opp descriptor node for a device. Caller must do of_node_put() */
1273static struct device_node *_of_get_opp_desc_node(struct device *dev)
1274{
1275 const struct property *prop;
1276
1277 prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
1278 if (!prop)
1279 return ERR_PTR(-ENODEV);
1280 if (!prop->value)
1281 return ERR_PTR(-ENODATA);
1282
1283 /*
1284 * TODO: Support for multiple OPP tables.
1285 *
1286 * There should be only ONE phandle present in "operating-points-v2"
1287 * property.
1288 */
1289 if (prop->length != sizeof(__be32)) {
1290 dev_err(dev, "%s: Invalid opp desc phandle\n", __func__);
1291 return ERR_PTR(-EINVAL);
1292 }
1293
1294 return _of_get_opp_desc_node_from_prop(dev, prop);
1295}
1296
1297/* Initializes OPP tables based on new bindings */
1298static int _of_init_opp_table_v2(struct device *dev,
1299 const struct property *prop)
1300{
1301 struct device_node *opp_np, *np;
1302 struct device_opp *dev_opp;
1303 int ret = 0, count = 0;
1304
1305 if (!prop->value)
1306 return -ENODATA;
1307
1308 /* Get opp node */
1309 opp_np = _of_get_opp_desc_node_from_prop(dev, prop);
1310 if (IS_ERR(opp_np))
1311 return PTR_ERR(opp_np);
1312
1313 dev_opp = _managed_opp(opp_np);
1314 if (dev_opp) {
1315 /* OPPs are already managed */
1316 if (!_add_list_dev(dev, dev_opp))
1317 ret = -ENOMEM;
1318 goto put_opp_np;
1319 }
1320
1321 /* We have opp-list node now, iterate over it and add OPPs */
1322 for_each_available_child_of_node(opp_np, np) {
1323 count++;
1324
1325 ret = _opp_add_static_v2(dev, np);
1326 if (ret) {
1327 dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
1328 ret);
1329 goto free_table;
1330 }
1331 }
1332
 1333 /* There should be one or more OPPs defined */
1334 if (WARN_ON(!count)) {
1335 ret = -ENOENT;
1336 goto put_opp_np;
1337 }
1338
1339 dev_opp = _find_device_opp(dev);
1340 if (WARN_ON(IS_ERR(dev_opp))) {
1341 ret = PTR_ERR(dev_opp);
1342 goto free_table;
1343 }
1344
1345 dev_opp->np = opp_np;
1346 dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
1347
1348 of_node_put(opp_np);
1349 return 0;
1350
1351free_table:
1352 of_free_opp_table(dev);
1353put_opp_np:
1354 of_node_put(opp_np);
1355
1356 return ret;
1357}
1358
1359/* Initializes OPP tables based on old-deprecated bindings */
1360static int _of_init_opp_table_v1(struct device *dev)
850{ 1361{
851 const struct property *prop; 1362 const struct property *prop;
852 const __be32 *val; 1363 const __be32 *val;
@@ -881,47 +1392,177 @@ int of_init_opp_table(struct device *dev)
881 1392
882 return 0; 1393 return 0;
883} 1394}
884EXPORT_SYMBOL_GPL(of_init_opp_table);
885 1395
886/** 1396/**
887 * of_free_opp_table() - Free OPP table entries created from static DT entries 1397 * of_init_opp_table() - Initialize opp table from device tree
888 * @dev: device pointer used to lookup device OPPs. 1398 * @dev: device pointer used to lookup device OPPs.
889 * 1399 *
890 * Free OPPs created using static entries present in DT. 1400 * Register the initial OPP table with the OPP library for given device.
891 * 1401 *
892 * Locking: The internal device_opp and opp structures are RCU protected. 1402 * Locking: The internal device_opp and opp structures are RCU protected.
893 * Hence this function indirectly uses RCU updater strategy with mutex locks 1403 * Hence this function indirectly uses RCU updater strategy with mutex locks
894 * to keep the integrity of the internal data structures. Callers should ensure 1404 * to keep the integrity of the internal data structures. Callers should ensure
895 * that this function is *NOT* called under RCU protection or in contexts where 1405 * that this function is *NOT* called under RCU protection or in contexts where
896 * mutex cannot be locked. 1406 * mutex cannot be locked.
1407 *
1408 * Return:
1409 * 0 On success OR
1410 * Duplicate OPPs (both freq and volt are same) and opp->available
1411 * -EEXIST Freqs are the same but volts are different OR
1412 * Duplicate OPPs (both freq and volt are same) and !opp->available
1413 * -ENOMEM Memory allocation failure
1414 * -ENODEV when the 'operating-points' property is not found or contains
1415 * invalid data in the device node.
1416 * -ENODATA when empty 'operating-points' property is found
1417 * -EINVAL when invalid entries are found in opp-v2 table
897 */ 1418 */
898void of_free_opp_table(struct device *dev) 1419int of_init_opp_table(struct device *dev)
1420{
1421 const struct property *prop;
1422
1423 /*
1424 * OPPs now have two versions of bindings. The older one is deprecated,
1425 * so try the new binding first.
1426 */
1427 prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
1428 if (!prop) {
1429 /*
1430 * Try old-deprecated bindings for backward compatibility with
1431 * older dtbs.
1432 */
1433 return _of_init_opp_table_v1(dev);
1434 }
1435
1436 return _of_init_opp_table_v2(dev, prop);
1437}
1438EXPORT_SYMBOL_GPL(of_init_opp_table);
1439
1440int of_cpumask_init_opp_table(cpumask_var_t cpumask)
1441{
1442 struct device *cpu_dev;
1443 int cpu, ret = 0;
1444
1445 WARN_ON(cpumask_empty(cpumask));
1446
1447 for_each_cpu(cpu, cpumask) {
1448 cpu_dev = get_cpu_device(cpu);
1449 if (!cpu_dev) {
1450 pr_err("%s: failed to get cpu%d device\n", __func__,
1451 cpu);
1452 continue;
1453 }
1454
1455 ret = of_init_opp_table(cpu_dev);
1456 if (ret) {
1457 pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
1458 __func__, cpu, ret);
1459
1460 /* Free all other OPPs */
1461 of_cpumask_free_opp_table(cpumask);
1462 break;
1463 }
1464 }
1465
1466 return ret;
1467}
1468EXPORT_SYMBOL_GPL(of_cpumask_init_opp_table);
1469
1470/* Required only for V1 bindings, as v2 can manage it from DT itself */
1471int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
899{ 1472{
1473 struct device_list_opp *list_dev;
900 struct device_opp *dev_opp; 1474 struct device_opp *dev_opp;
901 struct dev_pm_opp *opp, *tmp; 1475 struct device *dev;
1476 int cpu, ret = 0;
902 1477
903 /* Check for existing list for 'dev' */ 1478 rcu_read_lock();
904 dev_opp = _find_device_opp(dev); 1479
1480 dev_opp = _find_device_opp(cpu_dev);
905 if (IS_ERR(dev_opp)) { 1481 if (IS_ERR(dev_opp)) {
906 int error = PTR_ERR(dev_opp); 1482 ret = -EINVAL;
907 if (error != -ENODEV) 1483 goto out_rcu_read_unlock;
908 WARN(1, "%s: dev_opp: %d\n",
909 IS_ERR_OR_NULL(dev) ?
910 "Invalid device" : dev_name(dev),
911 error);
912 return;
913 } 1484 }
914 1485
915 /* Hold our list modification lock here */ 1486 for_each_cpu(cpu, cpumask) {
916 mutex_lock(&dev_opp_list_lock); 1487 if (cpu == cpu_dev->id)
1488 continue;
917 1489
918 /* Free static OPPs */ 1490 dev = get_cpu_device(cpu);
919 list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) { 1491 if (!dev) {
920 if (!opp->dynamic) 1492 dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
921 _opp_remove(dev_opp, opp); 1493 __func__, cpu);
1494 continue;
1495 }
1496
1497 list_dev = _add_list_dev(dev, dev_opp);
1498 if (!list_dev) {
1499 dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
1500 __func__, cpu);
1501 continue;
1502 }
922 } 1503 }
1504out_rcu_read_unlock:
1505 rcu_read_unlock();
923 1506
924 mutex_unlock(&dev_opp_list_lock); 1507 return 0;
925} 1508}
926EXPORT_SYMBOL_GPL(of_free_opp_table); 1509EXPORT_SYMBOL_GPL(set_cpus_sharing_opps);
1510
1511/*
1512 * Works only for OPP v2 bindings.
1513 *
1514 * cpumask should be already set to mask of cpu_dev->id.
1515 * Returns -ENOENT if operating-points-v2 bindings aren't supported.
1516 */
1517int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
1518{
1519 struct device_node *np, *tmp_np;
1520 struct device *tcpu_dev;
1521 int cpu, ret = 0;
1522
1523 /* Get OPP descriptor node */
1524 np = _of_get_opp_desc_node(cpu_dev);
1525 if (IS_ERR(np)) {
1526 dev_dbg(cpu_dev, "%s: Couldn't find opp node: %ld\n", __func__,
1527 PTR_ERR(np));
1528 return -ENOENT;
1529 }
1530
1531 /* Are OPPs shared? */
1532 if (!of_property_read_bool(np, "opp-shared"))
1533 goto put_cpu_node;
1534
1535 for_each_possible_cpu(cpu) {
1536 if (cpu == cpu_dev->id)
1537 continue;
1538
1539 tcpu_dev = get_cpu_device(cpu);
1540 if (!tcpu_dev) {
1541 dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
1542 __func__, cpu);
1543 ret = -ENODEV;
1544 goto put_cpu_node;
1545 }
1546
1547 /* Get OPP descriptor node */
1548 tmp_np = _of_get_opp_desc_node(tcpu_dev);
1549 if (IS_ERR(tmp_np)) {
1550 dev_err(tcpu_dev, "%s: Couldn't find opp node: %ld\n",
1551 __func__, PTR_ERR(tmp_np));
1552 ret = PTR_ERR(tmp_np);
1553 goto put_cpu_node;
1554 }
1555
1556 /* CPUs are sharing opp node */
1557 if (np == tmp_np)
1558 cpumask_set_cpu(cpu, cpumask);
1559
1560 of_node_put(tmp_np);
1561 }
1562
1563put_cpu_node:
1564 of_node_put(np);
1565 return ret;
1566}
1567EXPORT_SYMBOL_GPL(of_get_cpus_sharing_opps);
927#endif 1568#endif
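The new helpers above (of_get_cpus_sharing_opps(), of_cpumask_init_opp_table(), set_cpus_sharing_opps() and of_cpumask_free_opp_table()) are meant to be called from cpufreq drivers. One plausible calling sequence in a driver's ->init() callback, modelled on the cpufreq-dt changes further down in this diff, is sketched below; the foo_* names are hypothetical and error handling is trimmed.

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	bool opp_v1 = false;
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	/* policy->cpus starts out as cpumask_of(policy->cpu) here */
	ret = of_get_cpus_sharing_opps(cpu_dev, policy->cpus);
	if (ret == -ENOENT)
		opp_v1 = true;	/* no operating-points-v2, fall back to v1 */
	else if (ret)
		return ret;

	/* OPPs may also be added at runtime, so a failure here is not fatal */
	of_cpumask_init_opp_table(policy->cpus);

	if (opp_v1) {
		/* v1 bindings: assume all CPUs share one clock and OPP table */
		cpumask_setall(policy->cpus);
		set_cpus_sharing_opps(cpu_dev, policy->cpus);
	}

	return 0;
}

On teardown, ->exit() would call of_cpumask_free_opp_table(policy->related_cpus), as the cpufreq-dt hunk further below does.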
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index f1a5d95e7b20..998fa6b23084 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -73,6 +73,8 @@ extern int pm_qos_sysfs_add_resume_latency(struct device *dev);
73extern void pm_qos_sysfs_remove_resume_latency(struct device *dev); 73extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
74extern int pm_qos_sysfs_add_flags(struct device *dev); 74extern int pm_qos_sysfs_add_flags(struct device *dev);
75extern void pm_qos_sysfs_remove_flags(struct device *dev); 75extern void pm_qos_sysfs_remove_flags(struct device *dev);
76extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev);
77extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev);
76 78
77#else /* CONFIG_PM */ 79#else /* CONFIG_PM */
78 80
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index e56d538d039e..7f3646e459cb 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -883,3 +883,40 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
883 mutex_unlock(&dev_pm_qos_mtx); 883 mutex_unlock(&dev_pm_qos_mtx);
884 return ret; 884 return ret;
885} 885}
886
887/**
888 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
889 * @dev: Device whose latency tolerance to expose
890 */
891int dev_pm_qos_expose_latency_tolerance(struct device *dev)
892{
893 int ret;
894
895 if (!dev->power.set_latency_tolerance)
896 return -EINVAL;
897
898 mutex_lock(&dev_pm_qos_sysfs_mtx);
899 ret = pm_qos_sysfs_add_latency_tolerance(dev);
900 mutex_unlock(&dev_pm_qos_sysfs_mtx);
901
902 return ret;
903}
904EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);
905
906/**
907 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
908 * @dev: Device whose latency tolerance to hide
909 */
910void dev_pm_qos_hide_latency_tolerance(struct device *dev)
911{
912 mutex_lock(&dev_pm_qos_sysfs_mtx);
913 pm_qos_sysfs_remove_latency_tolerance(dev);
914 mutex_unlock(&dev_pm_qos_sysfs_mtx);
915
916 /* Remove the request from user space now */
917 pm_runtime_get_sync(dev);
918 dev_pm_qos_update_user_latency_tolerance(dev,
919 PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
920 pm_runtime_put(dev);
921}
922EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
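The pair of helpers added above lets a driver that provides a ->set_latency_tolerance() callback expose or hide the per-device pm_qos_latency_tolerance_us attribute at runtime. A rough sketch of how a hypothetical platform driver might wire this up follows; only the dev_pm_qos_*() calls come from this patch, the foo_* names are illustrative.

static void foo_set_latency_tolerance(struct device *dev, s32 val)
{
	/*
	 * Program the device or firmware with the new tolerance; val may be
	 * PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT when the constraint is dropped.
	 */
}

static int foo_probe(struct platform_device *pdev)
{
	pdev->dev.power.set_latency_tolerance = foo_set_latency_tolerance;

	/* Creates the power/pm_qos_latency_tolerance_us attribute in sysfs */
	return dev_pm_qos_expose_latency_tolerance(&pdev->dev);
}

static int foo_remove(struct platform_device *pdev)
{
	/* Removes the attribute and resets any user space request */
	dev_pm_qos_hide_latency_tolerance(&pdev->dev);
	return 0;
}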
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index d2be3f9c211c..a7b46798c81d 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -738,6 +738,17 @@ void pm_qos_sysfs_remove_flags(struct device *dev)
738 sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group); 738 sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
739} 739}
740 740
741int pm_qos_sysfs_add_latency_tolerance(struct device *dev)
742{
743 return sysfs_merge_group(&dev->kobj,
744 &pm_qos_latency_tolerance_attr_group);
745}
746
747void pm_qos_sysfs_remove_latency_tolerance(struct device *dev)
748{
749 sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
750}
751
741void rpm_sysfs_remove(struct device *dev) 752void rpm_sysfs_remove(struct device *dev)
742{ 753{
743 sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); 754 sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
diff --git a/drivers/base/property.c b/drivers/base/property.c
index f3f6d167f3f1..841b15c5c058 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -27,9 +27,10 @@
27 */ 27 */
28void device_add_property_set(struct device *dev, struct property_set *pset) 28void device_add_property_set(struct device *dev, struct property_set *pset)
29{ 29{
30 if (pset) 30 if (!pset)
31 pset->fwnode.type = FWNODE_PDATA; 31 return;
32 32
33 pset->fwnode.type = FWNODE_PDATA;
33 set_secondary_fwnode(dev, &pset->fwnode); 34 set_secondary_fwnode(dev, &pset->fwnode);
34} 35}
35EXPORT_SYMBOL_GPL(device_add_property_set); 36EXPORT_SYMBOL_GPL(device_add_property_set);
@@ -461,7 +462,8 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode,
461 return acpi_dev_prop_read(to_acpi_node(fwnode), propname, 462 return acpi_dev_prop_read(to_acpi_node(fwnode), propname,
462 DEV_PROP_STRING, val, 1); 463 DEV_PROP_STRING, val, 1);
463 464
464 return -ENXIO; 465 return pset_prop_read_array(to_pset(fwnode), propname,
466 DEV_PROP_STRING, val, 1);
465} 467}
466EXPORT_SYMBOL_GPL(fwnode_property_read_string); 468EXPORT_SYMBOL_GPL(fwnode_property_read_string);
467 469
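With the fallback added above, string reads through the unified device property API also succeed when a device only carries a built-in property set attached via device_add_property_set(), not just DT or ACPI data. A minimal, hypothetical consumer (the "label" property name is purely illustrative):

static int foo_probe(struct platform_device *pdev)
{
	const char *label;
	int ret;

	/* Resolved from DT, ACPI or a built-in property set alike */
	ret = device_property_read_string(&pdev->dev, "label", &label);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "label: %s\n", label);
	return 0;
}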
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 24e5c664683f..77aa34eae92c 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -130,6 +130,13 @@ config ARM_KIRKWOOD_CPUFREQ
130 This adds the CPUFreq driver for Marvell Kirkwood 130 This adds the CPUFreq driver for Marvell Kirkwood
131 SoCs. 131 SoCs.
132 132
133config ARM_MT8173_CPUFREQ
134 bool "Mediatek MT8173 CPUFreq support"
135 depends on ARCH_MEDIATEK && REGULATOR
136 select PM_OPP
137 help
138 This adds the CPUFreq driver support for Mediatek MT8173 SoC.
139
133config ARM_OMAP2PLUS_CPUFREQ 140config ARM_OMAP2PLUS_CPUFREQ
134 bool "TI OMAP2+" 141 bool "TI OMAP2+"
135 depends on ARCH_OMAP2PLUS 142 depends on ARCH_OMAP2PLUS
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 032745de8fcc..60a57ca5b22d 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_ARM_HISI_ACPU_CPUFREQ) += hisi-acpu-cpufreq.o
62obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o 62obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
63obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o 63obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o
64obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o 64obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
65obj-$(CONFIG_ARM_MT8173_CPUFREQ) += mt8173-cpufreq.o
65obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o 66obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
66obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o 67obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
67obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o 68obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 0136dfcdabf0..15b921a9248c 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -65,18 +65,21 @@ enum {
65#define MSR_K7_HWCR_CPB_DIS (1ULL << 25) 65#define MSR_K7_HWCR_CPB_DIS (1ULL << 25)
66 66
67struct acpi_cpufreq_data { 67struct acpi_cpufreq_data {
68 struct acpi_processor_performance *acpi_data;
69 struct cpufreq_frequency_table *freq_table; 68 struct cpufreq_frequency_table *freq_table;
70 unsigned int resume; 69 unsigned int resume;
71 unsigned int cpu_feature; 70 unsigned int cpu_feature;
71 unsigned int acpi_perf_cpu;
72 cpumask_var_t freqdomain_cpus; 72 cpumask_var_t freqdomain_cpus;
73}; 73};
74 74
75static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
76
77/* acpi_perf_data is a pointer to percpu data. */ 75/* acpi_perf_data is a pointer to percpu data. */
78static struct acpi_processor_performance __percpu *acpi_perf_data; 76static struct acpi_processor_performance __percpu *acpi_perf_data;
79 77
78static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
79{
80 return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
81}
82
80static struct cpufreq_driver acpi_cpufreq_driver; 83static struct cpufreq_driver acpi_cpufreq_driver;
81 84
82static unsigned int acpi_pstate_strict; 85static unsigned int acpi_pstate_strict;
@@ -144,7 +147,7 @@ static int _store_boost(int val)
144 147
145static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf) 148static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
146{ 149{
147 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); 150 struct acpi_cpufreq_data *data = policy->driver_data;
148 151
149 return cpufreq_show_cpus(data->freqdomain_cpus, buf); 152 return cpufreq_show_cpus(data->freqdomain_cpus, buf);
150} 153}
@@ -202,7 +205,7 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
202 struct acpi_processor_performance *perf; 205 struct acpi_processor_performance *perf;
203 int i; 206 int i;
204 207
205 perf = data->acpi_data; 208 perf = to_perf_data(data);
206 209
207 for (i = 0; i < perf->state_count; i++) { 210 for (i = 0; i < perf->state_count; i++) {
208 if (value == perf->states[i].status) 211 if (value == perf->states[i].status)
@@ -221,7 +224,7 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
221 else 224 else
222 msr &= INTEL_MSR_RANGE; 225 msr &= INTEL_MSR_RANGE;
223 226
224 perf = data->acpi_data; 227 perf = to_perf_data(data);
225 228
226 cpufreq_for_each_entry(pos, data->freq_table) 229 cpufreq_for_each_entry(pos, data->freq_table)
227 if (msr == perf->states[pos->driver_data].status) 230 if (msr == perf->states[pos->driver_data].status)
@@ -327,7 +330,8 @@ static void drv_write(struct drv_cmd *cmd)
327 put_cpu(); 330 put_cpu();
328} 331}
329 332
330static u32 get_cur_val(const struct cpumask *mask) 333static u32
334get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
331{ 335{
332 struct acpi_processor_performance *perf; 336 struct acpi_processor_performance *perf;
333 struct drv_cmd cmd; 337 struct drv_cmd cmd;
@@ -335,7 +339,7 @@ static u32 get_cur_val(const struct cpumask *mask)
335 if (unlikely(cpumask_empty(mask))) 339 if (unlikely(cpumask_empty(mask)))
336 return 0; 340 return 0;
337 341
338 switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) { 342 switch (data->cpu_feature) {
339 case SYSTEM_INTEL_MSR_CAPABLE: 343 case SYSTEM_INTEL_MSR_CAPABLE:
340 cmd.type = SYSTEM_INTEL_MSR_CAPABLE; 344 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
341 cmd.addr.msr.reg = MSR_IA32_PERF_CTL; 345 cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
@@ -346,7 +350,7 @@ static u32 get_cur_val(const struct cpumask *mask)
346 break; 350 break;
347 case SYSTEM_IO_CAPABLE: 351 case SYSTEM_IO_CAPABLE:
348 cmd.type = SYSTEM_IO_CAPABLE; 352 cmd.type = SYSTEM_IO_CAPABLE;
349 perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data; 353 perf = to_perf_data(data);
350 cmd.addr.io.port = perf->control_register.address; 354 cmd.addr.io.port = perf->control_register.address;
351 cmd.addr.io.bit_width = perf->control_register.bit_width; 355 cmd.addr.io.bit_width = perf->control_register.bit_width;
352 break; 356 break;
@@ -364,19 +368,24 @@ static u32 get_cur_val(const struct cpumask *mask)
364 368
365static unsigned int get_cur_freq_on_cpu(unsigned int cpu) 369static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
366{ 370{
367 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu); 371 struct acpi_cpufreq_data *data;
372 struct cpufreq_policy *policy;
368 unsigned int freq; 373 unsigned int freq;
369 unsigned int cached_freq; 374 unsigned int cached_freq;
370 375
371 pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); 376 pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
372 377
373 if (unlikely(data == NULL || 378 policy = cpufreq_cpu_get(cpu);
374 data->acpi_data == NULL || data->freq_table == NULL)) { 379 if (unlikely(!policy))
375 return 0; 380 return 0;
376 }
377 381
378 cached_freq = data->freq_table[data->acpi_data->state].frequency; 382 data = policy->driver_data;
379 freq = extract_freq(get_cur_val(cpumask_of(cpu)), data); 383 cpufreq_cpu_put(policy);
384 if (unlikely(!data || !data->freq_table))
385 return 0;
386
387 cached_freq = data->freq_table[to_perf_data(data)->state].frequency;
388 freq = extract_freq(get_cur_val(cpumask_of(cpu), data), data);
380 if (freq != cached_freq) { 389 if (freq != cached_freq) {
381 /* 390 /*
382 * The dreaded BIOS frequency change behind our back. 391 * The dreaded BIOS frequency change behind our back.
@@ -397,7 +406,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
397 unsigned int i; 406 unsigned int i;
398 407
399 for (i = 0; i < 100; i++) { 408 for (i = 0; i < 100; i++) {
400 cur_freq = extract_freq(get_cur_val(mask), data); 409 cur_freq = extract_freq(get_cur_val(mask, data), data);
401 if (cur_freq == freq) 410 if (cur_freq == freq)
402 return 1; 411 return 1;
403 udelay(10); 412 udelay(10);
@@ -408,18 +417,17 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
408static int acpi_cpufreq_target(struct cpufreq_policy *policy, 417static int acpi_cpufreq_target(struct cpufreq_policy *policy,
409 unsigned int index) 418 unsigned int index)
410{ 419{
411 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); 420 struct acpi_cpufreq_data *data = policy->driver_data;
412 struct acpi_processor_performance *perf; 421 struct acpi_processor_performance *perf;
413 struct drv_cmd cmd; 422 struct drv_cmd cmd;
414 unsigned int next_perf_state = 0; /* Index into perf table */ 423 unsigned int next_perf_state = 0; /* Index into perf table */
415 int result = 0; 424 int result = 0;
416 425
417 if (unlikely(data == NULL || 426 if (unlikely(data == NULL || data->freq_table == NULL)) {
418 data->acpi_data == NULL || data->freq_table == NULL)) {
419 return -ENODEV; 427 return -ENODEV;
420 } 428 }
421 429
422 perf = data->acpi_data; 430 perf = to_perf_data(data);
423 next_perf_state = data->freq_table[index].driver_data; 431 next_perf_state = data->freq_table[index].driver_data;
424 if (perf->state == next_perf_state) { 432 if (perf->state == next_perf_state) {
425 if (unlikely(data->resume)) { 433 if (unlikely(data->resume)) {
@@ -482,8 +490,9 @@ out:
482static unsigned long 490static unsigned long
483acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) 491acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
484{ 492{
485 struct acpi_processor_performance *perf = data->acpi_data; 493 struct acpi_processor_performance *perf;
486 494
495 perf = to_perf_data(data);
487 if (cpu_khz) { 496 if (cpu_khz) {
488 /* search the closest match to cpu_khz */ 497 /* search the closest match to cpu_khz */
489 unsigned int i; 498 unsigned int i;
@@ -672,17 +681,17 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
672 goto err_free; 681 goto err_free;
673 } 682 }
674 683
675 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); 684 perf = per_cpu_ptr(acpi_perf_data, cpu);
676 per_cpu(acfreq_data, cpu) = data; 685 data->acpi_perf_cpu = cpu;
686 policy->driver_data = data;
677 687
678 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) 688 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
679 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; 689 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
680 690
681 result = acpi_processor_register_performance(data->acpi_data, cpu); 691 result = acpi_processor_register_performance(perf, cpu);
682 if (result) 692 if (result)
683 goto err_free_mask; 693 goto err_free_mask;
684 694
685 perf = data->acpi_data;
686 policy->shared_type = perf->shared_type; 695 policy->shared_type = perf->shared_type;
687 696
688 /* 697 /*
@@ -838,26 +847,25 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
838err_freqfree: 847err_freqfree:
839 kfree(data->freq_table); 848 kfree(data->freq_table);
840err_unreg: 849err_unreg:
841 acpi_processor_unregister_performance(perf, cpu); 850 acpi_processor_unregister_performance(cpu);
842err_free_mask: 851err_free_mask:
843 free_cpumask_var(data->freqdomain_cpus); 852 free_cpumask_var(data->freqdomain_cpus);
844err_free: 853err_free:
845 kfree(data); 854 kfree(data);
846 per_cpu(acfreq_data, cpu) = NULL; 855 policy->driver_data = NULL;
847 856
848 return result; 857 return result;
849} 858}
850 859
851static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) 860static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
852{ 861{
853 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); 862 struct acpi_cpufreq_data *data = policy->driver_data;
854 863
855 pr_debug("acpi_cpufreq_cpu_exit\n"); 864 pr_debug("acpi_cpufreq_cpu_exit\n");
856 865
857 if (data) { 866 if (data) {
858 per_cpu(acfreq_data, policy->cpu) = NULL; 867 policy->driver_data = NULL;
859 acpi_processor_unregister_performance(data->acpi_data, 868 acpi_processor_unregister_performance(data->acpi_perf_cpu);
860 policy->cpu);
861 free_cpumask_var(data->freqdomain_cpus); 869 free_cpumask_var(data->freqdomain_cpus);
862 kfree(data->freq_table); 870 kfree(data->freq_table);
863 kfree(data); 871 kfree(data);
@@ -868,7 +876,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
868 876
869static int acpi_cpufreq_resume(struct cpufreq_policy *policy) 877static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
870{ 878{
871 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); 879 struct acpi_cpufreq_data *data = policy->driver_data;
872 880
873 pr_debug("acpi_cpufreq_resume\n"); 881 pr_debug("acpi_cpufreq_resume\n");
874 882
@@ -880,7 +888,9 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
880static struct freq_attr *acpi_cpufreq_attr[] = { 888static struct freq_attr *acpi_cpufreq_attr[] = {
881 &cpufreq_freq_attr_scaling_available_freqs, 889 &cpufreq_freq_attr_scaling_available_freqs,
882 &freqdomain_cpus, 890 &freqdomain_cpus,
883 NULL, /* this is a placeholder for cpb, do not remove */ 891#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
892 &cpb,
893#endif
884 NULL, 894 NULL,
885}; 895};
886 896
@@ -953,17 +963,16 @@ static int __init acpi_cpufreq_init(void)
953 * only if configured. This is considered legacy code, which 963 * only if configured. This is considered legacy code, which
954 * will probably be removed at some point in the future. 964 * will probably be removed at some point in the future.
955 */ 965 */
956 if (check_amd_hwpstate_cpu(0)) { 966 if (!check_amd_hwpstate_cpu(0)) {
957 struct freq_attr **iter; 967 struct freq_attr **attr;
958
959 pr_debug("adding sysfs entry for cpb\n");
960 968
961 for (iter = acpi_cpufreq_attr; *iter != NULL; iter++) 969 pr_debug("CPB unsupported, do not expose it\n");
962 ;
963 970
964 /* make sure there is a terminator behind it */ 971 for (attr = acpi_cpufreq_attr; *attr; attr++)
965 if (iter[1] == NULL) 972 if (*attr == &cpb) {
966 *iter = &cpb; 973 *attr = NULL;
974 break;
975 }
967 } 976 }
968#endif 977#endif
969 acpi_cpufreq_boost_init(); 978 acpi_cpufreq_boost_init();
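The acpi-cpufreq rework above replaces the acfreq_data per-CPU pointer with per-policy data hung off policy->driver_data. Any cpufreq driver can follow the same pattern; a rough sketch with a hypothetical foo_driver_data (foo_read_current_khz() is likewise made up):

struct foo_driver_data {
	struct cpufreq_frequency_table *freq_table;
	unsigned int perf_cpu;
};

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	struct foo_driver_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	data->perf_cpu = policy->cpu;
	policy->driver_data = data;	/* one instance per policy */
	return 0;
}

static int foo_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct foo_driver_data *data = policy->driver_data;

	policy->driver_data = NULL;
	kfree(data);
	return 0;
}

static unsigned int foo_cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct foo_driver_data *data;

	if (!policy)
		return 0;

	data = policy->driver_data;
	cpufreq_cpu_put(policy);

	return data ? foo_read_current_khz(data) : 0;
}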
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 528a82bf5038..c3583cdfadbd 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -36,6 +36,12 @@ struct private_data {
36 unsigned int voltage_tolerance; /* in percentage */ 36 unsigned int voltage_tolerance; /* in percentage */
37}; 37};
38 38
39static struct freq_attr *cpufreq_dt_attr[] = {
40 &cpufreq_freq_attr_scaling_available_freqs,
41 NULL, /* Extra space for boost-attr if required */
42 NULL,
43};
44
39static int set_target(struct cpufreq_policy *policy, unsigned int index) 45static int set_target(struct cpufreq_policy *policy, unsigned int index)
40{ 46{
41 struct dev_pm_opp *opp; 47 struct dev_pm_opp *opp;
@@ -184,7 +190,6 @@ try_again:
184 190
185static int cpufreq_init(struct cpufreq_policy *policy) 191static int cpufreq_init(struct cpufreq_policy *policy)
186{ 192{
187 struct cpufreq_dt_platform_data *pd;
188 struct cpufreq_frequency_table *freq_table; 193 struct cpufreq_frequency_table *freq_table;
189 struct device_node *np; 194 struct device_node *np;
190 struct private_data *priv; 195 struct private_data *priv;
@@ -193,6 +198,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
193 struct clk *cpu_clk; 198 struct clk *cpu_clk;
194 unsigned long min_uV = ~0, max_uV = 0; 199 unsigned long min_uV = ~0, max_uV = 0;
195 unsigned int transition_latency; 200 unsigned int transition_latency;
201 bool need_update = false;
196 int ret; 202 int ret;
197 203
198 ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk); 204 ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
@@ -208,8 +214,47 @@ static int cpufreq_init(struct cpufreq_policy *policy)
208 goto out_put_reg_clk; 214 goto out_put_reg_clk;
209 } 215 }
210 216
211 /* OPPs might be populated at runtime, don't check for error here */ 217 /* Get OPP-sharing information from "operating-points-v2" bindings */
212 of_init_opp_table(cpu_dev); 218 ret = of_get_cpus_sharing_opps(cpu_dev, policy->cpus);
219 if (ret) {
220 /*
221 * operating-points-v2 not supported, fallback to old method of
222 * finding shared-OPPs for backward compatibility.
223 */
224 if (ret == -ENOENT)
225 need_update = true;
226 else
227 goto out_node_put;
228 }
229
230 /*
231 * Initialize OPP tables for all policy->cpus. They will be shared by
232 * all CPUs which have marked their CPUs shared with OPP bindings.
233 *
234 * For platforms not using operating-points-v2 bindings, we do this
235 * before updating policy->cpus. Otherwise, we will end up creating
236 * duplicate OPPs for policy->cpus.
237 *
238 * OPPs might be populated at runtime, don't check for error here
239 */
240 of_cpumask_init_opp_table(policy->cpus);
241
242 if (need_update) {
243 struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
244
245 if (!pd || !pd->independent_clocks)
246 cpumask_setall(policy->cpus);
247
248 /*
249 * OPP tables are initialized only for policy->cpu, do it for
250 * others as well.
251 */
252 set_cpus_sharing_opps(cpu_dev, policy->cpus);
253
254 of_property_read_u32(np, "clock-latency", &transition_latency);
255 } else {
256 transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
257 }
213 258
214 /* 259 /*
215 * But we need OPP table to function so if it is not there let's 260 * But we need OPP table to function so if it is not there let's
@@ -230,7 +275,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
230 275
231 of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance); 276 of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
232 277
233 if (of_property_read_u32(np, "clock-latency", &transition_latency)) 278 if (!transition_latency)
234 transition_latency = CPUFREQ_ETERNAL; 279 transition_latency = CPUFREQ_ETERNAL;
235 280
236 if (!IS_ERR(cpu_reg)) { 281 if (!IS_ERR(cpu_reg)) {
@@ -291,11 +336,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
291 goto out_free_cpufreq_table; 336 goto out_free_cpufreq_table;
292 } 337 }
293 338
294 policy->cpuinfo.transition_latency = transition_latency; 339 /* Support turbo/boost mode */
340 if (policy_has_boost_freq(policy)) {
341 /* This gets disabled by core on driver unregister */
342 ret = cpufreq_enable_boost_support();
343 if (ret)
344 goto out_free_cpufreq_table;
345 cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
346 }
295 347
296 pd = cpufreq_get_driver_data(); 348 policy->cpuinfo.transition_latency = transition_latency;
297 if (!pd || !pd->independent_clocks)
298 cpumask_setall(policy->cpus);
299 349
300 of_node_put(np); 350 of_node_put(np);
301 351
@@ -306,7 +356,8 @@ out_free_cpufreq_table:
306out_free_priv: 356out_free_priv:
307 kfree(priv); 357 kfree(priv);
308out_free_opp: 358out_free_opp:
309 of_free_opp_table(cpu_dev); 359 of_cpumask_free_opp_table(policy->cpus);
360out_node_put:
310 of_node_put(np); 361 of_node_put(np);
311out_put_reg_clk: 362out_put_reg_clk:
312 clk_put(cpu_clk); 363 clk_put(cpu_clk);
@@ -322,7 +373,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
322 373
323 cpufreq_cooling_unregister(priv->cdev); 374 cpufreq_cooling_unregister(priv->cdev);
324 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 375 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
325 of_free_opp_table(priv->cpu_dev); 376 of_cpumask_free_opp_table(policy->related_cpus);
326 clk_put(policy->clk); 377 clk_put(policy->clk);
327 if (!IS_ERR(priv->cpu_reg)) 378 if (!IS_ERR(priv->cpu_reg))
328 regulator_put(priv->cpu_reg); 379 regulator_put(priv->cpu_reg);
@@ -367,7 +418,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
367 .exit = cpufreq_exit, 418 .exit = cpufreq_exit,
368 .ready = cpufreq_ready, 419 .ready = cpufreq_ready,
369 .name = "cpufreq-dt", 420 .name = "cpufreq-dt",
370 .attr = cpufreq_generic_attr, 421 .attr = cpufreq_dt_attr,
371}; 422};
372 423
373static int dt_cpufreq_probe(struct platform_device *pdev) 424static int dt_cpufreq_probe(struct platform_device *pdev)
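Together with cpufreq_enable_boost_support() added to the core below, the cpufreq-dt changes above show the common way for a driver to opt in to boost frequencies found in its table. A hedged sketch of the pattern (foo_* names are hypothetical; policy_has_boost_freq() and cpufreq_freq_attr_scaling_boost_freqs come from the boost patches in this series):

static struct freq_attr *foo_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,	/* slot for the boost-frequencies attribute */
	NULL,
};

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	int ret;

	/* policy->freq_table is assumed to be set up at this point */

	if (policy_has_boost_freq(policy)) {
		/* Adds the global "boost" knob; removed on driver unregister */
		ret = cpufreq_enable_boost_support();
		if (ret)
			return ret;
		foo_cpufreq_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
	}

	return 0;
}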
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 9bb09ce98d04..b3d9368339af 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -112,12 +112,6 @@ static inline bool has_target(void)
112 return cpufreq_driver->target_index || cpufreq_driver->target; 112 return cpufreq_driver->target_index || cpufreq_driver->target;
113} 113}
114 114
115/*
116 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
117 * sections
118 */
119static DECLARE_RWSEM(cpufreq_rwsem);
120
121/* internal prototypes */ 115/* internal prototypes */
122static int __cpufreq_governor(struct cpufreq_policy *policy, 116static int __cpufreq_governor(struct cpufreq_policy *policy,
123 unsigned int event); 117 unsigned int event);
@@ -277,10 +271,6 @@ EXPORT_SYMBOL_GPL(cpufreq_generic_get);
277 * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be 271 * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
278 * freed as that depends on the kobj count. 272 * freed as that depends on the kobj count.
279 * 273 *
280 * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
281 * valid policy is found. This is done to make sure the driver doesn't get
282 * unregistered while the policy is being used.
283 *
284 * Return: A valid policy on success, otherwise NULL on failure. 274 * Return: A valid policy on success, otherwise NULL on failure.
285 */ 275 */
286struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) 276struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
@@ -291,9 +281,6 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
291 if (WARN_ON(cpu >= nr_cpu_ids)) 281 if (WARN_ON(cpu >= nr_cpu_ids))
292 return NULL; 282 return NULL;
293 283
294 if (!down_read_trylock(&cpufreq_rwsem))
295 return NULL;
296
297 /* get the cpufreq driver */ 284 /* get the cpufreq driver */
298 read_lock_irqsave(&cpufreq_driver_lock, flags); 285 read_lock_irqsave(&cpufreq_driver_lock, flags);
299 286
@@ -306,9 +293,6 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
306 293
307 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 294 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
308 295
309 if (!policy)
310 up_read(&cpufreq_rwsem);
311
312 return policy; 296 return policy;
313} 297}
314EXPORT_SYMBOL_GPL(cpufreq_cpu_get); 298EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
@@ -320,13 +304,10 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
320 * 304 *
321 * This decrements the kobject reference count incremented earlier by calling 305 * This decrements the kobject reference count incremented earlier by calling
322 * cpufreq_cpu_get(). 306 * cpufreq_cpu_get().
323 *
324 * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
325 */ 307 */
326void cpufreq_cpu_put(struct cpufreq_policy *policy) 308void cpufreq_cpu_put(struct cpufreq_policy *policy)
327{ 309{
328 kobject_put(&policy->kobj); 310 kobject_put(&policy->kobj);
329 up_read(&cpufreq_rwsem);
330} 311}
331EXPORT_SYMBOL_GPL(cpufreq_cpu_put); 312EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
332 313
@@ -539,9 +520,6 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
539{ 520{
540 int err = -EINVAL; 521 int err = -EINVAL;
541 522
542 if (!cpufreq_driver)
543 goto out;
544
545 if (cpufreq_driver->setpolicy) { 523 if (cpufreq_driver->setpolicy) {
546 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { 524 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
547 *policy = CPUFREQ_POLICY_PERFORMANCE; 525 *policy = CPUFREQ_POLICY_PERFORMANCE;
@@ -576,7 +554,6 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
576 554
577 mutex_unlock(&cpufreq_governor_mutex); 555 mutex_unlock(&cpufreq_governor_mutex);
578 } 556 }
579out:
580 return err; 557 return err;
581} 558}
582 559
@@ -625,9 +602,7 @@ static ssize_t store_##file_name \
625 int ret, temp; \ 602 int ret, temp; \
626 struct cpufreq_policy new_policy; \ 603 struct cpufreq_policy new_policy; \
627 \ 604 \
628 ret = cpufreq_get_policy(&new_policy, policy->cpu); \ 605 memcpy(&new_policy, policy, sizeof(*policy)); \
629 if (ret) \
630 return -EINVAL; \
631 \ 606 \
632 ret = sscanf(buf, "%u", &new_policy.object); \ 607 ret = sscanf(buf, "%u", &new_policy.object); \
633 if (ret != 1) \ 608 if (ret != 1) \
@@ -681,9 +656,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
681 char str_governor[16]; 656 char str_governor[16];
682 struct cpufreq_policy new_policy; 657 struct cpufreq_policy new_policy;
683 658
684 ret = cpufreq_get_policy(&new_policy, policy->cpu); 659 memcpy(&new_policy, policy, sizeof(*policy));
685 if (ret)
686 return ret;
687 660
688 ret = sscanf(buf, "%15s", str_governor); 661 ret = sscanf(buf, "%15s", str_governor);
689 if (ret != 1) 662 if (ret != 1)
@@ -694,14 +667,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
694 return -EINVAL; 667 return -EINVAL;
695 668
696 ret = cpufreq_set_policy(policy, &new_policy); 669 ret = cpufreq_set_policy(policy, &new_policy);
697 670 return ret ? ret : count;
698 policy->user_policy.policy = policy->policy;
699 policy->user_policy.governor = policy->governor;
700
701 if (ret)
702 return ret;
703 else
704 return count;
705} 671}
706 672
707/** 673/**
@@ -851,9 +817,6 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
851 struct freq_attr *fattr = to_attr(attr); 817 struct freq_attr *fattr = to_attr(attr);
852 ssize_t ret; 818 ssize_t ret;
853 819
854 if (!down_read_trylock(&cpufreq_rwsem))
855 return -EINVAL;
856
857 down_read(&policy->rwsem); 820 down_read(&policy->rwsem);
858 821
859 if (fattr->show) 822 if (fattr->show)
@@ -862,7 +825,6 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
862 ret = -EIO; 825 ret = -EIO;
863 826
864 up_read(&policy->rwsem); 827 up_read(&policy->rwsem);
865 up_read(&cpufreq_rwsem);
866 828
867 return ret; 829 return ret;
868} 830}
@@ -879,9 +841,6 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
879 if (!cpu_online(policy->cpu)) 841 if (!cpu_online(policy->cpu))
880 goto unlock; 842 goto unlock;
881 843
882 if (!down_read_trylock(&cpufreq_rwsem))
883 goto unlock;
884
885 down_write(&policy->rwsem); 844 down_write(&policy->rwsem);
886 845
887 /* Updating inactive policies is invalid, so avoid doing that. */ 846 /* Updating inactive policies is invalid, so avoid doing that. */
@@ -897,8 +856,6 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
897 856
898unlock_policy_rwsem: 857unlock_policy_rwsem:
899 up_write(&policy->rwsem); 858 up_write(&policy->rwsem);
900
901 up_read(&cpufreq_rwsem);
902unlock: 859unlock:
903 put_online_cpus(); 860 put_online_cpus();
904 861
@@ -1027,8 +984,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
1027 } 984 }
1028} 985}
1029 986
1030static int cpufreq_add_dev_interface(struct cpufreq_policy *policy, 987static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
1031 struct device *dev)
1032{ 988{
1033 struct freq_attr **drv_attr; 989 struct freq_attr **drv_attr;
1034 int ret = 0; 990 int ret = 0;
@@ -1060,11 +1016,10 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
1060 return cpufreq_add_dev_symlink(policy); 1016 return cpufreq_add_dev_symlink(policy);
1061} 1017}
1062 1018
1063static void cpufreq_init_policy(struct cpufreq_policy *policy) 1019static int cpufreq_init_policy(struct cpufreq_policy *policy)
1064{ 1020{
1065 struct cpufreq_governor *gov = NULL; 1021 struct cpufreq_governor *gov = NULL;
1066 struct cpufreq_policy new_policy; 1022 struct cpufreq_policy new_policy;
1067 int ret = 0;
1068 1023
1069 memcpy(&new_policy, policy, sizeof(*policy)); 1024 memcpy(&new_policy, policy, sizeof(*policy));
1070 1025
@@ -1083,16 +1038,10 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
1083 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL); 1038 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
1084 1039
1085 /* set default policy */ 1040 /* set default policy */
1086 ret = cpufreq_set_policy(policy, &new_policy); 1041 return cpufreq_set_policy(policy, &new_policy);
1087 if (ret) {
1088 pr_debug("setting policy failed\n");
1089 if (cpufreq_driver->exit)
1090 cpufreq_driver->exit(policy);
1091 }
1092} 1042}
1093 1043
1094static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, 1044static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1095 unsigned int cpu, struct device *dev)
1096{ 1045{
1097 int ret = 0; 1046 int ret = 0;
1098 1047
@@ -1126,33 +1075,15 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
1126 return 0; 1075 return 0;
1127} 1076}
1128 1077
1129static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu) 1078static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1130{
1131 struct cpufreq_policy *policy;
1132 unsigned long flags;
1133
1134 read_lock_irqsave(&cpufreq_driver_lock, flags);
1135 policy = per_cpu(cpufreq_cpu_data, cpu);
1136 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1137
1138 if (likely(policy)) {
1139 /* Policy should be inactive here */
1140 WARN_ON(!policy_is_inactive(policy));
1141
1142 down_write(&policy->rwsem);
1143 policy->cpu = cpu;
1144 policy->governor = NULL;
1145 up_write(&policy->rwsem);
1146 }
1147
1148 return policy;
1149}
1150
1151static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
1152{ 1079{
1080 struct device *dev = get_cpu_device(cpu);
1153 struct cpufreq_policy *policy; 1081 struct cpufreq_policy *policy;
1154 int ret; 1082 int ret;
1155 1083
1084 if (WARN_ON(!dev))
1085 return NULL;
1086
1156 policy = kzalloc(sizeof(*policy), GFP_KERNEL); 1087 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1157 if (!policy) 1088 if (!policy)
1158 return NULL; 1089 return NULL;
@@ -1180,10 +1111,10 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
1180 init_completion(&policy->kobj_unregister); 1111 init_completion(&policy->kobj_unregister);
1181 INIT_WORK(&policy->update, handle_update); 1112 INIT_WORK(&policy->update, handle_update);
1182 1113
1183 policy->cpu = dev->id; 1114 policy->cpu = cpu;
1184 1115
1185 /* Set this once on allocation */ 1116 /* Set this once on allocation */
1186 policy->kobj_cpu = dev->id; 1117 policy->kobj_cpu = cpu;
1187 1118
1188 return policy; 1119 return policy;
1189 1120
@@ -1245,59 +1176,34 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
1245 kfree(policy); 1176 kfree(policy);
1246} 1177}
1247 1178
1248/** 1179static int cpufreq_online(unsigned int cpu)
1249 * cpufreq_add_dev - add a CPU device
1250 *
1251 * Adds the cpufreq interface for a CPU device.
1252 *
1253 * The Oracle says: try running cpufreq registration/unregistration concurrently
1254 * with with cpu hotplugging and all hell will break loose. Tried to clean this
1255 * mess up, but more thorough testing is needed. - Mathieu
1256 */
1257static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1258{ 1180{
1259 unsigned int j, cpu = dev->id;
1260 int ret = -ENOMEM;
1261 struct cpufreq_policy *policy; 1181 struct cpufreq_policy *policy;
1182 bool new_policy;
1262 unsigned long flags; 1183 unsigned long flags;
1263 bool recover_policy = !sif; 1184 unsigned int j;
1264 1185 int ret;
1265 pr_debug("adding CPU %u\n", cpu);
1266
1267 if (cpu_is_offline(cpu)) {
1268 /*
1269 * Only possible if we are here from the subsys_interface add
1270 * callback. A hotplug notifier will follow and we will handle
1271 * it as CPU online then. For now, just create the sysfs link,
1272 * unless there is no policy or the link is already present.
1273 */
1274 policy = per_cpu(cpufreq_cpu_data, cpu);
1275 return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
1276 ? add_cpu_dev_symlink(policy, cpu) : 0;
1277 }
1278 1186
1279 if (!down_read_trylock(&cpufreq_rwsem)) 1187 pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1280 return 0;
1281 1188
1282 /* Check if this CPU already has a policy to manage it */ 1189 /* Check if this CPU already has a policy to manage it */
1283 policy = per_cpu(cpufreq_cpu_data, cpu); 1190 policy = per_cpu(cpufreq_cpu_data, cpu);
1284 if (policy && !policy_is_inactive(policy)) { 1191 if (policy) {
1285 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus)); 1192 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1286 ret = cpufreq_add_policy_cpu(policy, cpu, dev); 1193 if (!policy_is_inactive(policy))
1287 up_read(&cpufreq_rwsem); 1194 return cpufreq_add_policy_cpu(policy, cpu);
1288 return ret;
1289 }
1290 1195
1291 /* 1196 /* This is the only online CPU for the policy. Start over. */
1292 * Restore the saved policy when doing light-weight init and fall back 1197 new_policy = false;
1293 * to the full init if that fails. 1198 down_write(&policy->rwsem);
1294 */ 1199 policy->cpu = cpu;
1295 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL; 1200 policy->governor = NULL;
1296 if (!policy) { 1201 up_write(&policy->rwsem);
1297 recover_policy = false; 1202 } else {
1298 policy = cpufreq_policy_alloc(dev); 1203 new_policy = true;
1204 policy = cpufreq_policy_alloc(cpu);
1299 if (!policy) 1205 if (!policy)
1300 goto nomem_out; 1206 return -ENOMEM;
1301 } 1207 }
1302 1208
1303 cpumask_copy(policy->cpus, cpumask_of(cpu)); 1209 cpumask_copy(policy->cpus, cpumask_of(cpu));
@@ -1308,17 +1214,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1308 ret = cpufreq_driver->init(policy); 1214 ret = cpufreq_driver->init(policy);
1309 if (ret) { 1215 if (ret) {
1310 pr_debug("initialization failed\n"); 1216 pr_debug("initialization failed\n");
1311 goto err_set_policy_cpu; 1217 goto out_free_policy;
1312 } 1218 }
1313 1219
1314 down_write(&policy->rwsem); 1220 down_write(&policy->rwsem);
1315 1221
1316 /* related cpus should atleast have policy->cpus */ 1222 if (new_policy) {
1317 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); 1223 /* related_cpus should at least include policy->cpus. */
1318 1224 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1319 /* Remember which CPUs have been present at the policy creation time. */ 1225 /* Remember CPUs present at the policy creation time. */
1320 if (!recover_policy)
1321 cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask); 1226 cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
1227 }
1322 1228
1323 /* 1229 /*
1324 * affected cpus must always be the one, which are online. We aren't 1230 * affected cpus must always be the one, which are online. We aren't
@@ -1326,7 +1232,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1326 */ 1232 */
1327 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); 1233 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1328 1234
1329 if (!recover_policy) { 1235 if (new_policy) {
1330 policy->user_policy.min = policy->min; 1236 policy->user_policy.min = policy->min;
1331 policy->user_policy.max = policy->max; 1237 policy->user_policy.max = policy->max;
1332 1238
@@ -1340,7 +1246,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1340 policy->cur = cpufreq_driver->get(policy->cpu); 1246 policy->cur = cpufreq_driver->get(policy->cpu);
1341 if (!policy->cur) { 1247 if (!policy->cur) {
1342 pr_err("%s: ->get() failed\n", __func__); 1248 pr_err("%s: ->get() failed\n", __func__);
1343 goto err_get_freq; 1249 goto out_exit_policy;
1344 } 1250 }
1345 } 1251 }
1346 1252
@@ -1387,10 +1293,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1387 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1293 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1388 CPUFREQ_START, policy); 1294 CPUFREQ_START, policy);
1389 1295
1390 if (!recover_policy) { 1296 if (new_policy) {
1391 ret = cpufreq_add_dev_interface(policy, dev); 1297 ret = cpufreq_add_dev_interface(policy);
1392 if (ret) 1298 if (ret)
1393 goto err_out_unregister; 1299 goto out_exit_policy;
1394 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1300 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1395 CPUFREQ_CREATE_POLICY, policy); 1301 CPUFREQ_CREATE_POLICY, policy);
1396 1302
@@ -1399,18 +1305,19 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1399 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1305 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1400 } 1306 }
1401 1307
1402 cpufreq_init_policy(policy); 1308 ret = cpufreq_init_policy(policy);
1403 1309 if (ret) {
1404 if (!recover_policy) { 1310 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1405 policy->user_policy.policy = policy->policy; 1311 __func__, cpu, ret);
1406 policy->user_policy.governor = policy->governor; 1312 /* cpufreq_policy_free() will notify based on this */
1313 new_policy = false;
1314 goto out_exit_policy;
1407 } 1315 }
1316
1408 up_write(&policy->rwsem); 1317 up_write(&policy->rwsem);
1409 1318
1410 kobject_uevent(&policy->kobj, KOBJ_ADD); 1319 kobject_uevent(&policy->kobj, KOBJ_ADD);
1411 1320
1412 up_read(&cpufreq_rwsem);
1413
1414 /* Callback for handling stuff after policy is ready */ 1321 /* Callback for handling stuff after policy is ready */
1415 if (cpufreq_driver->ready) 1322 if (cpufreq_driver->ready)
1416 cpufreq_driver->ready(policy); 1323 cpufreq_driver->ready(policy);
@@ -1419,24 +1326,47 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1419 1326
1420 return 0; 1327 return 0;
1421 1328
1422err_out_unregister: 1329out_exit_policy:
1423err_get_freq:
1424 up_write(&policy->rwsem); 1330 up_write(&policy->rwsem);
1425 1331
1426 if (cpufreq_driver->exit) 1332 if (cpufreq_driver->exit)
1427 cpufreq_driver->exit(policy); 1333 cpufreq_driver->exit(policy);
1428err_set_policy_cpu: 1334out_free_policy:
1429 cpufreq_policy_free(policy, recover_policy); 1335 cpufreq_policy_free(policy, !new_policy);
1430nomem_out: 1336 return ret;
1431 up_read(&cpufreq_rwsem); 1337}
1338
1339/**
1340 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1341 * @dev: CPU device.
1342 * @sif: Subsystem interface structure pointer (not used)
1343 */
1344static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1345{
1346 unsigned cpu = dev->id;
1347 int ret;
1348
1349 dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1350
1351 if (cpu_online(cpu)) {
1352 ret = cpufreq_online(cpu);
1353 } else {
1354 /*
1355 * A hotplug notifier will follow and we will handle it as CPU
1356 * online then. For now, just create the sysfs link, unless
1357 * there is no policy or the link is already present.
1358 */
1359 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1360
1361 ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
1362 ? add_cpu_dev_symlink(policy, cpu) : 0;
1363 }
1432 1364
1433 return ret; 1365 return ret;
1434} 1366}
1435 1367
1436static int __cpufreq_remove_dev_prepare(struct device *dev) 1368static void cpufreq_offline_prepare(unsigned int cpu)
1437{ 1369{
1438 unsigned int cpu = dev->id;
1439 int ret = 0;
1440 struct cpufreq_policy *policy; 1370 struct cpufreq_policy *policy;
1441 1371
1442 pr_debug("%s: unregistering CPU %u\n", __func__, cpu); 1372 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
@@ -1444,11 +1374,11 @@ static int __cpufreq_remove_dev_prepare(struct device *dev)
1444 policy = cpufreq_cpu_get_raw(cpu); 1374 policy = cpufreq_cpu_get_raw(cpu);
1445 if (!policy) { 1375 if (!policy) {
1446 pr_debug("%s: No cpu_data found\n", __func__); 1376 pr_debug("%s: No cpu_data found\n", __func__);
1447 return -EINVAL; 1377 return;
1448 } 1378 }
1449 1379
1450 if (has_target()) { 1380 if (has_target()) {
1451 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 1381 int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1452 if (ret) 1382 if (ret)
1453 pr_err("%s: Failed to stop governor\n", __func__); 1383 pr_err("%s: Failed to stop governor\n", __func__);
1454 } 1384 }
@@ -1469,7 +1399,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev)
1469 /* Start governor again for active policy */ 1399 /* Start governor again for active policy */
1470 if (!policy_is_inactive(policy)) { 1400 if (!policy_is_inactive(policy)) {
1471 if (has_target()) { 1401 if (has_target()) {
1472 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); 1402 int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1473 if (!ret) 1403 if (!ret)
1474 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 1404 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1475 1405
@@ -1479,28 +1409,24 @@ static int __cpufreq_remove_dev_prepare(struct device *dev)
1479 } else if (cpufreq_driver->stop_cpu) { 1409 } else if (cpufreq_driver->stop_cpu) {
1480 cpufreq_driver->stop_cpu(policy); 1410 cpufreq_driver->stop_cpu(policy);
1481 } 1411 }
1482
1483 return ret;
1484} 1412}
1485 1413
1486static int __cpufreq_remove_dev_finish(struct device *dev) 1414static void cpufreq_offline_finish(unsigned int cpu)
1487{ 1415{
1488 unsigned int cpu = dev->id;
1489 int ret;
1490 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 1416 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1491 1417
1492 if (!policy) { 1418 if (!policy) {
1493 pr_debug("%s: No cpu_data found\n", __func__); 1419 pr_debug("%s: No cpu_data found\n", __func__);
1494 return -EINVAL; 1420 return;
1495 } 1421 }
1496 1422
1497 /* Only proceed for inactive policies */ 1423 /* Only proceed for inactive policies */
1498 if (!policy_is_inactive(policy)) 1424 if (!policy_is_inactive(policy))
1499 return 0; 1425 return;
1500 1426
1501 /* If cpu is last user of policy, free policy */ 1427 /* If cpu is last user of policy, free policy */
1502 if (has_target()) { 1428 if (has_target()) {
1503 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 1429 int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
1504 if (ret) 1430 if (ret)
1505 pr_err("%s: Failed to exit governor\n", __func__); 1431 pr_err("%s: Failed to exit governor\n", __func__);
1506 } 1432 }
@@ -1512,8 +1438,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev)
1512 */ 1438 */
1513 if (cpufreq_driver->exit) 1439 if (cpufreq_driver->exit)
1514 cpufreq_driver->exit(policy); 1440 cpufreq_driver->exit(policy);
1515
1516 return 0;
1517} 1441}
1518 1442
1519/** 1443/**
@@ -1530,8 +1454,8 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1530 return; 1454 return;
1531 1455
1532 if (cpu_online(cpu)) { 1456 if (cpu_online(cpu)) {
1533 __cpufreq_remove_dev_prepare(dev); 1457 cpufreq_offline_prepare(cpu);
1534 __cpufreq_remove_dev_finish(dev); 1458 cpufreq_offline_finish(cpu);
1535 } 1459 }
1536 1460
1537 cpumask_clear_cpu(cpu, policy->real_cpus); 1461 cpumask_clear_cpu(cpu, policy->real_cpus);
@@ -2245,7 +2169,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
2245 2169
2246 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo)); 2170 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2247 2171
2248 if (new_policy->min > policy->max || new_policy->max < policy->min) 2172 /*
2173 * This check works well when we store new min/max freq attributes,
2174 * because new_policy is a copy of policy with one field updated.
2175 */
2176 if (new_policy->min > new_policy->max)
2249 return -EINVAL; 2177 return -EINVAL;
2250 2178
2251 /* verify the cpu speed can be set within this limit */ 2179 /* verify the cpu speed can be set within this limit */
@@ -2257,10 +2185,6 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
2257 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 2185 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2258 CPUFREQ_ADJUST, new_policy); 2186 CPUFREQ_ADJUST, new_policy);
2259 2187
2260 /* adjust if necessary - hardware incompatibility*/
2261 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2262 CPUFREQ_INCOMPATIBLE, new_policy);
2263
2264 /* 2188 /*
2265 * verify the cpu speed can be set within this limit, which might be 2189 * verify the cpu speed can be set within this limit, which might be
2266 * different to the first one 2190 * different to the first one
@@ -2294,16 +2218,31 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
2294 old_gov = policy->governor; 2218 old_gov = policy->governor;
2295 /* end old governor */ 2219 /* end old governor */
2296 if (old_gov) { 2220 if (old_gov) {
2297 __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 2221 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2222 if (ret) {
2223 /* This can happen due to race with other operations */
2224 pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
2225 __func__, old_gov->name, ret);
2226 return ret;
2227 }
2228
2298 up_write(&policy->rwsem); 2229 up_write(&policy->rwsem);
2299 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 2230 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2300 down_write(&policy->rwsem); 2231 down_write(&policy->rwsem);
2232
2233 if (ret) {
2234 pr_err("%s: Failed to Exit Governor: %s (%d)\n",
2235 __func__, old_gov->name, ret);
2236 return ret;
2237 }
2301 } 2238 }
2302 2239
2303 /* start new governor */ 2240 /* start new governor */
2304 policy->governor = new_policy->governor; 2241 policy->governor = new_policy->governor;
2305 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) { 2242 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2306 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) 2243 if (!ret) {
2244 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
2245 if (!ret)
2307 goto out; 2246 goto out;
2308 2247
2309 up_write(&policy->rwsem); 2248 up_write(&policy->rwsem);
@@ -2315,11 +2254,13 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
2315 pr_debug("starting governor %s failed\n", policy->governor->name); 2254 pr_debug("starting governor %s failed\n", policy->governor->name);
2316 if (old_gov) { 2255 if (old_gov) {
2317 policy->governor = old_gov; 2256 policy->governor = old_gov;
2318 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT); 2257 if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
2319 __cpufreq_governor(policy, CPUFREQ_GOV_START); 2258 policy->governor = NULL;
2259 else
2260 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2320 } 2261 }
2321 2262
2322 return -EINVAL; 2263 return ret;
2323 2264
2324 out: 2265 out:
2325 pr_debug("governor: change or update limits\n"); 2266 pr_debug("governor: change or update limits\n");
@@ -2348,8 +2289,6 @@ int cpufreq_update_policy(unsigned int cpu)
2348 memcpy(&new_policy, policy, sizeof(*policy)); 2289 memcpy(&new_policy, policy, sizeof(*policy));
2349 new_policy.min = policy->user_policy.min; 2290 new_policy.min = policy->user_policy.min;
2350 new_policy.max = policy->user_policy.max; 2291 new_policy.max = policy->user_policy.max;
2351 new_policy.policy = policy->user_policy.policy;
2352 new_policy.governor = policy->user_policy.governor;
2353 2292
2354 /* 2293 /*
2355 * BIOS might change freq behind our back 2294 * BIOS might change freq behind our back
@@ -2385,27 +2324,23 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
2385 unsigned long action, void *hcpu) 2324 unsigned long action, void *hcpu)
2386{ 2325{
2387 unsigned int cpu = (unsigned long)hcpu; 2326 unsigned int cpu = (unsigned long)hcpu;
2388 struct device *dev;
2389 2327
2390 dev = get_cpu_device(cpu); 2328 switch (action & ~CPU_TASKS_FROZEN) {
2391 if (dev) { 2329 case CPU_ONLINE:
2392 switch (action & ~CPU_TASKS_FROZEN) { 2330 cpufreq_online(cpu);
2393 case CPU_ONLINE: 2331 break;
2394 cpufreq_add_dev(dev, NULL);
2395 break;
2396 2332
2397 case CPU_DOWN_PREPARE: 2333 case CPU_DOWN_PREPARE:
2398 __cpufreq_remove_dev_prepare(dev); 2334 cpufreq_offline_prepare(cpu);
2399 break; 2335 break;
2400 2336
2401 case CPU_POST_DEAD: 2337 case CPU_POST_DEAD:
2402 __cpufreq_remove_dev_finish(dev); 2338 cpufreq_offline_finish(cpu);
2403 break; 2339 break;
2404 2340
2405 case CPU_DOWN_FAILED: 2341 case CPU_DOWN_FAILED:
2406 cpufreq_add_dev(dev, NULL); 2342 cpufreq_online(cpu);
2407 break; 2343 break;
2408 }
2409 } 2344 }
2410 return NOTIFY_OK; 2345 return NOTIFY_OK;
2411} 2346}
@@ -2475,6 +2410,49 @@ int cpufreq_boost_supported(void)
2475} 2410}
2476EXPORT_SYMBOL_GPL(cpufreq_boost_supported); 2411EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2477 2412
2413static int create_boost_sysfs_file(void)
2414{
2415 int ret;
2416
2417 if (!cpufreq_boost_supported())
2418 return 0;
2419
2420 /*
2421 * Check if driver provides function to enable boost -
2422 * if not, use cpufreq_boost_set_sw as default
2423 */
2424 if (!cpufreq_driver->set_boost)
2425 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2426
2427 ret = cpufreq_sysfs_create_file(&boost.attr);
2428 if (ret)
2429 pr_err("%s: cannot register global BOOST sysfs file\n",
2430 __func__);
2431
2432 return ret;
2433}
2434
2435static void remove_boost_sysfs_file(void)
2436{
2437 if (cpufreq_boost_supported())
2438 cpufreq_sysfs_remove_file(&boost.attr);
2439}
2440
2441int cpufreq_enable_boost_support(void)
2442{
2443 if (!cpufreq_driver)
2444 return -EINVAL;
2445
2446 if (cpufreq_boost_supported())
2447 return 0;
2448
2449 cpufreq_driver->boost_supported = true;
2450
2451 /* This will get removed on driver unregister */
2452 return create_boost_sysfs_file();
2453}
2454EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
2455
2478int cpufreq_boost_enabled(void) 2456int cpufreq_boost_enabled(void)
2479{ 2457{
2480 return cpufreq_driver->boost_enabled; 2458 return cpufreq_driver->boost_enabled;
@@ -2513,10 +2491,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2513 2491
2514 pr_debug("trying to register driver %s\n", driver_data->name); 2492 pr_debug("trying to register driver %s\n", driver_data->name);
2515 2493
2494 /* Protect against concurrent CPU online/offline. */
2495 get_online_cpus();
2496
2516 write_lock_irqsave(&cpufreq_driver_lock, flags); 2497 write_lock_irqsave(&cpufreq_driver_lock, flags);
2517 if (cpufreq_driver) { 2498 if (cpufreq_driver) {
2518 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2499 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2519 return -EEXIST; 2500 ret = -EEXIST;
2501 goto out;
2520 } 2502 }
2521 cpufreq_driver = driver_data; 2503 cpufreq_driver = driver_data;
2522 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2504 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -2524,21 +2506,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2524 if (driver_data->setpolicy) 2506 if (driver_data->setpolicy)
2525 driver_data->flags |= CPUFREQ_CONST_LOOPS; 2507 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2526 2508
2527 if (cpufreq_boost_supported()) { 2509 ret = create_boost_sysfs_file();
2528 /* 2510 if (ret)
2529 * Check if driver provides function to enable boost - 2511 goto err_null_driver;
2530 * if not, use cpufreq_boost_set_sw as default
2531 */
2532 if (!cpufreq_driver->set_boost)
2533 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2534
2535 ret = cpufreq_sysfs_create_file(&boost.attr);
2536 if (ret) {
2537 pr_err("%s: cannot register global BOOST sysfs file\n",
2538 __func__);
2539 goto err_null_driver;
2540 }
2541 }
2542 2512
2543 ret = subsys_interface_register(&cpufreq_interface); 2513 ret = subsys_interface_register(&cpufreq_interface);
2544 if (ret) 2514 if (ret)
@@ -2555,17 +2525,19 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2555 register_hotcpu_notifier(&cpufreq_cpu_notifier); 2525 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2556 pr_debug("driver %s up and running\n", driver_data->name); 2526 pr_debug("driver %s up and running\n", driver_data->name);
2557 2527
2558 return 0; 2528out:
2529 put_online_cpus();
2530 return ret;
2531
2559err_if_unreg: 2532err_if_unreg:
2560 subsys_interface_unregister(&cpufreq_interface); 2533 subsys_interface_unregister(&cpufreq_interface);
2561err_boost_unreg: 2534err_boost_unreg:
2562 if (cpufreq_boost_supported()) 2535 remove_boost_sysfs_file();
2563 cpufreq_sysfs_remove_file(&boost.attr);
2564err_null_driver: 2536err_null_driver:
2565 write_lock_irqsave(&cpufreq_driver_lock, flags); 2537 write_lock_irqsave(&cpufreq_driver_lock, flags);
2566 cpufreq_driver = NULL; 2538 cpufreq_driver = NULL;
2567 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2539 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2568 return ret; 2540 goto out;
2569} 2541}
2570EXPORT_SYMBOL_GPL(cpufreq_register_driver); 2542EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2571 2543
@@ -2586,19 +2558,18 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2586 2558
2587 pr_debug("unregistering driver %s\n", driver->name); 2559 pr_debug("unregistering driver %s\n", driver->name);
2588 2560
2561 /* Protect against concurrent cpu hotplug */
2562 get_online_cpus();
2589 subsys_interface_unregister(&cpufreq_interface); 2563 subsys_interface_unregister(&cpufreq_interface);
2590 if (cpufreq_boost_supported()) 2564 remove_boost_sysfs_file();
2591 cpufreq_sysfs_remove_file(&boost.attr);
2592
2593 unregister_hotcpu_notifier(&cpufreq_cpu_notifier); 2565 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2594 2566
2595 down_write(&cpufreq_rwsem);
2596 write_lock_irqsave(&cpufreq_driver_lock, flags); 2567 write_lock_irqsave(&cpufreq_driver_lock, flags);
2597 2568
2598 cpufreq_driver = NULL; 2569 cpufreq_driver = NULL;
2599 2570
2600 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2571 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2601 up_write(&cpufreq_rwsem); 2572 put_online_cpus();
2602 2573
2603 return 0; 2574 return 0;
2604} 2575}
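
The substantive change in this part of cpufreq.c is that cpufreq_set_policy() now checks the return value of every __cpufreq_governor() call and falls back to the old governor when a switch fails. A condensed sketch of that ordering follows; it is an illustration only (locking, notifiers and the GOV_LIMITS path are omitted, and the helper name is made up):

static int switch_governor(struct cpufreq_policy *policy,
			   struct cpufreq_governor *new_gov)
{
	struct cpufreq_governor *old_gov = policy->governor;
	int ret;

	if (old_gov) {
		/* Stop and exit the old governor; either step may fail. */
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret)
			return ret;
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		if (ret)
			return ret;
	}

	/* Start the new governor. */
	policy->governor = new_gov;
	ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
	if (!ret) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			return 0;
	}

	/* Roll back to the old governor; if even its INIT fails, leave
	 * policy->governor NULL rather than pointing at a dead governor. */
	if (old_gov) {
		policy->governor = old_gov;
		if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
			policy->governor = NULL;
		else
			__cpufreq_governor(policy, CPUFREQ_GOV_START);
	}
	return ret;
}
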
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c86a10c30912..84a1506950a7 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -47,7 +47,7 @@ static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
47static void cs_check_cpu(int cpu, unsigned int load) 47static void cs_check_cpu(int cpu, unsigned int load)
48{ 48{
49 struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); 49 struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
50 struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; 50 struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
51 struct dbs_data *dbs_data = policy->governor_data; 51 struct dbs_data *dbs_data = policy->governor_data;
52 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; 52 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
53 53
@@ -102,26 +102,15 @@ static void cs_check_cpu(int cpu, unsigned int load)
102 } 102 }
103} 103}
104 104
105static void cs_dbs_timer(struct work_struct *work) 105static unsigned int cs_dbs_timer(struct cpu_dbs_info *cdbs,
106 struct dbs_data *dbs_data, bool modify_all)
106{ 107{
107 struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
108 struct cs_cpu_dbs_info_s, cdbs.work.work);
109 unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
110 struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
111 cpu);
112 struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
113 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; 108 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
114 int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
115 bool modify_all = true;
116 109
117 mutex_lock(&core_dbs_info->cdbs.timer_mutex); 110 if (modify_all)
118 if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate)) 111 dbs_check_cpu(dbs_data, cdbs->shared->policy->cpu);
119 modify_all = false;
120 else
121 dbs_check_cpu(dbs_data, cpu);
122 112
123 gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all); 113 return delay_for_sampling_rate(cs_tuners->sampling_rate);
124 mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
125} 114}
126 115
127static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, 116static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@ -135,7 +124,7 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
135 if (!dbs_info->enable) 124 if (!dbs_info->enable)
136 return 0; 125 return 0;
137 126
138 policy = dbs_info->cdbs.cur_policy; 127 policy = dbs_info->cdbs.shared->policy;
139 128
140 /* 129 /*
141 * we only care if our internally tracked freq moves outside the 'valid' 130 * we only care if our internally tracked freq moves outside the 'valid'
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 57a39f8a92b7..939197ffa4ac 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -32,10 +32,10 @@ static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
32 32
33void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) 33void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
34{ 34{
35 struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); 35 struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
36 struct od_dbs_tuners *od_tuners = dbs_data->tuners; 36 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
37 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; 37 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
38 struct cpufreq_policy *policy; 38 struct cpufreq_policy *policy = cdbs->shared->policy;
39 unsigned int sampling_rate; 39 unsigned int sampling_rate;
40 unsigned int max_load = 0; 40 unsigned int max_load = 0;
41 unsigned int ignore_nice; 41 unsigned int ignore_nice;
@@ -60,11 +60,9 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
60 ignore_nice = cs_tuners->ignore_nice_load; 60 ignore_nice = cs_tuners->ignore_nice_load;
61 } 61 }
62 62
63 policy = cdbs->cur_policy;
64
65 /* Get Absolute Load */ 63 /* Get Absolute Load */
66 for_each_cpu(j, policy->cpus) { 64 for_each_cpu(j, policy->cpus) {
67 struct cpu_dbs_common_info *j_cdbs; 65 struct cpu_dbs_info *j_cdbs;
68 u64 cur_wall_time, cur_idle_time; 66 u64 cur_wall_time, cur_idle_time;
69 unsigned int idle_time, wall_time; 67 unsigned int idle_time, wall_time;
70 unsigned int load; 68 unsigned int load;
@@ -163,9 +161,9 @@ EXPORT_SYMBOL_GPL(dbs_check_cpu);
163static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data, 161static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
164 unsigned int delay) 162 unsigned int delay)
165{ 163{
166 struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); 164 struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
167 165
168 mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay); 166 mod_delayed_work_on(cpu, system_wq, &cdbs->dwork, delay);
169} 167}
170 168
171void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy, 169void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
@@ -199,33 +197,63 @@ EXPORT_SYMBOL_GPL(gov_queue_work);
199static inline void gov_cancel_work(struct dbs_data *dbs_data, 197static inline void gov_cancel_work(struct dbs_data *dbs_data,
200 struct cpufreq_policy *policy) 198 struct cpufreq_policy *policy)
201{ 199{
202 struct cpu_dbs_common_info *cdbs; 200 struct cpu_dbs_info *cdbs;
203 int i; 201 int i;
204 202
205 for_each_cpu(i, policy->cpus) { 203 for_each_cpu(i, policy->cpus) {
206 cdbs = dbs_data->cdata->get_cpu_cdbs(i); 204 cdbs = dbs_data->cdata->get_cpu_cdbs(i);
207 cancel_delayed_work_sync(&cdbs->work); 205 cancel_delayed_work_sync(&cdbs->dwork);
208 } 206 }
209} 207}
210 208
211/* Will return if we need to evaluate cpu load again or not */ 209/* Will return if we need to evaluate cpu load again or not */
212bool need_load_eval(struct cpu_dbs_common_info *cdbs, 210static bool need_load_eval(struct cpu_common_dbs_info *shared,
213 unsigned int sampling_rate) 211 unsigned int sampling_rate)
214{ 212{
215 if (policy_is_shared(cdbs->cur_policy)) { 213 if (policy_is_shared(shared->policy)) {
216 ktime_t time_now = ktime_get(); 214 ktime_t time_now = ktime_get();
217 s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp); 215 s64 delta_us = ktime_us_delta(time_now, shared->time_stamp);
218 216
219 /* Do nothing if we recently have sampled */ 217 /* Do nothing if we recently have sampled */
220 if (delta_us < (s64)(sampling_rate / 2)) 218 if (delta_us < (s64)(sampling_rate / 2))
221 return false; 219 return false;
222 else 220 else
223 cdbs->time_stamp = time_now; 221 shared->time_stamp = time_now;
224 } 222 }
225 223
226 return true; 224 return true;
227} 225}
228EXPORT_SYMBOL_GPL(need_load_eval); 226
227static void dbs_timer(struct work_struct *work)
228{
229 struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info,
230 dwork.work);
231 struct cpu_common_dbs_info *shared = cdbs->shared;
232 struct cpufreq_policy *policy = shared->policy;
233 struct dbs_data *dbs_data = policy->governor_data;
234 unsigned int sampling_rate, delay;
235 bool modify_all = true;
236
237 mutex_lock(&shared->timer_mutex);
238
239 if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
240 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
241
242 sampling_rate = cs_tuners->sampling_rate;
243 } else {
244 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
245
246 sampling_rate = od_tuners->sampling_rate;
247 }
248
249 if (!need_load_eval(cdbs->shared, sampling_rate))
250 modify_all = false;
251
252 delay = dbs_data->cdata->gov_dbs_timer(cdbs, dbs_data, modify_all);
253 gov_queue_work(dbs_data, policy, delay, modify_all);
254
255 mutex_unlock(&shared->timer_mutex);
256}
229 257
230static void set_sampling_rate(struct dbs_data *dbs_data, 258static void set_sampling_rate(struct dbs_data *dbs_data,
231 unsigned int sampling_rate) 259 unsigned int sampling_rate)
@@ -239,6 +267,37 @@ static void set_sampling_rate(struct dbs_data *dbs_data,
239 } 267 }
240} 268}
241 269
270static int alloc_common_dbs_info(struct cpufreq_policy *policy,
271 struct common_dbs_data *cdata)
272{
273 struct cpu_common_dbs_info *shared;
274 int j;
275
276 /* Allocate memory for the common information for policy->cpus */
277 shared = kzalloc(sizeof(*shared), GFP_KERNEL);
278 if (!shared)
279 return -ENOMEM;
280
281 /* Set shared for all CPUs, online+offline */
282 for_each_cpu(j, policy->related_cpus)
283 cdata->get_cpu_cdbs(j)->shared = shared;
284
285 return 0;
286}
287
288static void free_common_dbs_info(struct cpufreq_policy *policy,
289 struct common_dbs_data *cdata)
290{
291 struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
292 struct cpu_common_dbs_info *shared = cdbs->shared;
293 int j;
294
295 for_each_cpu(j, policy->cpus)
296 cdata->get_cpu_cdbs(j)->shared = NULL;
297
298 kfree(shared);
299}
300
242static int cpufreq_governor_init(struct cpufreq_policy *policy, 301static int cpufreq_governor_init(struct cpufreq_policy *policy,
243 struct dbs_data *dbs_data, 302 struct dbs_data *dbs_data,
244 struct common_dbs_data *cdata) 303 struct common_dbs_data *cdata)
@@ -246,9 +305,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
246 unsigned int latency; 305 unsigned int latency;
247 int ret; 306 int ret;
248 307
308 /* State should be equivalent to EXIT */
309 if (policy->governor_data)
310 return -EBUSY;
311
249 if (dbs_data) { 312 if (dbs_data) {
250 if (WARN_ON(have_governor_per_policy())) 313 if (WARN_ON(have_governor_per_policy()))
251 return -EINVAL; 314 return -EINVAL;
315
316 ret = alloc_common_dbs_info(policy, cdata);
317 if (ret)
318 return ret;
319
252 dbs_data->usage_count++; 320 dbs_data->usage_count++;
253 policy->governor_data = dbs_data; 321 policy->governor_data = dbs_data;
254 return 0; 322 return 0;
@@ -258,12 +326,16 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
258 if (!dbs_data) 326 if (!dbs_data)
259 return -ENOMEM; 327 return -ENOMEM;
260 328
329 ret = alloc_common_dbs_info(policy, cdata);
330 if (ret)
331 goto free_dbs_data;
332
261 dbs_data->cdata = cdata; 333 dbs_data->cdata = cdata;
262 dbs_data->usage_count = 1; 334 dbs_data->usage_count = 1;
263 335
264 ret = cdata->init(dbs_data, !policy->governor->initialized); 336 ret = cdata->init(dbs_data, !policy->governor->initialized);
265 if (ret) 337 if (ret)
266 goto free_dbs_data; 338 goto free_common_dbs_info;
267 339
268 /* policy latency is in ns. Convert it to us first */ 340 /* policy latency is in ns. Convert it to us first */
269 latency = policy->cpuinfo.transition_latency / 1000; 341 latency = policy->cpuinfo.transition_latency / 1000;
@@ -300,15 +372,22 @@ put_kobj:
300 } 372 }
301cdata_exit: 373cdata_exit:
302 cdata->exit(dbs_data, !policy->governor->initialized); 374 cdata->exit(dbs_data, !policy->governor->initialized);
375free_common_dbs_info:
376 free_common_dbs_info(policy, cdata);
303free_dbs_data: 377free_dbs_data:
304 kfree(dbs_data); 378 kfree(dbs_data);
305 return ret; 379 return ret;
306} 380}
307 381
308static void cpufreq_governor_exit(struct cpufreq_policy *policy, 382static int cpufreq_governor_exit(struct cpufreq_policy *policy,
309 struct dbs_data *dbs_data) 383 struct dbs_data *dbs_data)
310{ 384{
311 struct common_dbs_data *cdata = dbs_data->cdata; 385 struct common_dbs_data *cdata = dbs_data->cdata;
386 struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
387
388 /* State should be equivalent to INIT */
389 if (!cdbs->shared || cdbs->shared->policy)
390 return -EBUSY;
312 391
313 policy->governor_data = NULL; 392 policy->governor_data = NULL;
314 if (!--dbs_data->usage_count) { 393 if (!--dbs_data->usage_count) {
@@ -323,6 +402,9 @@ static void cpufreq_governor_exit(struct cpufreq_policy *policy,
323 cdata->exit(dbs_data, policy->governor->initialized == 1); 402 cdata->exit(dbs_data, policy->governor->initialized == 1);
324 kfree(dbs_data); 403 kfree(dbs_data);
325 } 404 }
405
406 free_common_dbs_info(policy, cdata);
407 return 0;
326} 408}
327 409
328static int cpufreq_governor_start(struct cpufreq_policy *policy, 410static int cpufreq_governor_start(struct cpufreq_policy *policy,
@@ -330,12 +412,17 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
330{ 412{
331 struct common_dbs_data *cdata = dbs_data->cdata; 413 struct common_dbs_data *cdata = dbs_data->cdata;
332 unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu; 414 unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
333 struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu); 415 struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
416 struct cpu_common_dbs_info *shared = cdbs->shared;
334 int io_busy = 0; 417 int io_busy = 0;
335 418
336 if (!policy->cur) 419 if (!policy->cur)
337 return -EINVAL; 420 return -EINVAL;
338 421
422 /* State should be equivalent to INIT */
423 if (!shared || shared->policy)
424 return -EBUSY;
425
339 if (cdata->governor == GOV_CONSERVATIVE) { 426 if (cdata->governor == GOV_CONSERVATIVE) {
340 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; 427 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
341 428
@@ -349,12 +436,14 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
349 io_busy = od_tuners->io_is_busy; 436 io_busy = od_tuners->io_is_busy;
350 } 437 }
351 438
439 shared->policy = policy;
440 shared->time_stamp = ktime_get();
441 mutex_init(&shared->timer_mutex);
442
352 for_each_cpu(j, policy->cpus) { 443 for_each_cpu(j, policy->cpus) {
353 struct cpu_dbs_common_info *j_cdbs = cdata->get_cpu_cdbs(j); 444 struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
354 unsigned int prev_load; 445 unsigned int prev_load;
355 446
356 j_cdbs->cpu = j;
357 j_cdbs->cur_policy = policy;
358 j_cdbs->prev_cpu_idle = 447 j_cdbs->prev_cpu_idle =
359 get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy); 448 get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
360 449
@@ -366,8 +455,7 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
366 if (ignore_nice) 455 if (ignore_nice)
367 j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; 456 j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
368 457
369 mutex_init(&j_cdbs->timer_mutex); 458 INIT_DEFERRABLE_WORK(&j_cdbs->dwork, dbs_timer);
370 INIT_DEFERRABLE_WORK(&j_cdbs->work, cdata->gov_dbs_timer);
371 } 459 }
372 460
373 if (cdata->governor == GOV_CONSERVATIVE) { 461 if (cdata->governor == GOV_CONSERVATIVE) {
@@ -386,20 +474,24 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
386 od_ops->powersave_bias_init_cpu(cpu); 474 od_ops->powersave_bias_init_cpu(cpu);
387 } 475 }
388 476
389 /* Initiate timer time stamp */
390 cpu_cdbs->time_stamp = ktime_get();
391
392 gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate), 477 gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate),
393 true); 478 true);
394 return 0; 479 return 0;
395} 480}
396 481
397static void cpufreq_governor_stop(struct cpufreq_policy *policy, 482static int cpufreq_governor_stop(struct cpufreq_policy *policy,
398 struct dbs_data *dbs_data) 483 struct dbs_data *dbs_data)
399{ 484{
400 struct common_dbs_data *cdata = dbs_data->cdata; 485 struct common_dbs_data *cdata = dbs_data->cdata;
401 unsigned int cpu = policy->cpu; 486 unsigned int cpu = policy->cpu;
402 struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu); 487 struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
488 struct cpu_common_dbs_info *shared = cdbs->shared;
489
490 /* State should be equivalent to START */
491 if (!shared || !shared->policy)
492 return -EBUSY;
493
494 gov_cancel_work(dbs_data, policy);
403 495
404 if (cdata->governor == GOV_CONSERVATIVE) { 496 if (cdata->governor == GOV_CONSERVATIVE) {
405 struct cs_cpu_dbs_info_s *cs_dbs_info = 497 struct cs_cpu_dbs_info_s *cs_dbs_info =
@@ -408,38 +500,40 @@ static void cpufreq_governor_stop(struct cpufreq_policy *policy,
408 cs_dbs_info->enable = 0; 500 cs_dbs_info->enable = 0;
409 } 501 }
410 502
411 gov_cancel_work(dbs_data, policy); 503 shared->policy = NULL;
412 504 mutex_destroy(&shared->timer_mutex);
413 mutex_destroy(&cpu_cdbs->timer_mutex); 505 return 0;
414 cpu_cdbs->cur_policy = NULL;
415} 506}
416 507
417static void cpufreq_governor_limits(struct cpufreq_policy *policy, 508static int cpufreq_governor_limits(struct cpufreq_policy *policy,
418 struct dbs_data *dbs_data) 509 struct dbs_data *dbs_data)
419{ 510{
420 struct common_dbs_data *cdata = dbs_data->cdata; 511 struct common_dbs_data *cdata = dbs_data->cdata;
421 unsigned int cpu = policy->cpu; 512 unsigned int cpu = policy->cpu;
422 struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu); 513 struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
423 514
424 if (!cpu_cdbs->cur_policy) 515 /* State should be equivalent to START */
425 return; 516 if (!cdbs->shared || !cdbs->shared->policy)
517 return -EBUSY;
426 518
427 mutex_lock(&cpu_cdbs->timer_mutex); 519 mutex_lock(&cdbs->shared->timer_mutex);
428 if (policy->max < cpu_cdbs->cur_policy->cur) 520 if (policy->max < cdbs->shared->policy->cur)
429 __cpufreq_driver_target(cpu_cdbs->cur_policy, policy->max, 521 __cpufreq_driver_target(cdbs->shared->policy, policy->max,
430 CPUFREQ_RELATION_H); 522 CPUFREQ_RELATION_H);
431 else if (policy->min > cpu_cdbs->cur_policy->cur) 523 else if (policy->min > cdbs->shared->policy->cur)
432 __cpufreq_driver_target(cpu_cdbs->cur_policy, policy->min, 524 __cpufreq_driver_target(cdbs->shared->policy, policy->min,
433 CPUFREQ_RELATION_L); 525 CPUFREQ_RELATION_L);
434 dbs_check_cpu(dbs_data, cpu); 526 dbs_check_cpu(dbs_data, cpu);
435 mutex_unlock(&cpu_cdbs->timer_mutex); 527 mutex_unlock(&cdbs->shared->timer_mutex);
528
529 return 0;
436} 530}
437 531
438int cpufreq_governor_dbs(struct cpufreq_policy *policy, 532int cpufreq_governor_dbs(struct cpufreq_policy *policy,
439 struct common_dbs_data *cdata, unsigned int event) 533 struct common_dbs_data *cdata, unsigned int event)
440{ 534{
441 struct dbs_data *dbs_data; 535 struct dbs_data *dbs_data;
442 int ret = 0; 536 int ret;
443 537
444 /* Lock governor to block concurrent initialization of governor */ 538 /* Lock governor to block concurrent initialization of governor */
445 mutex_lock(&cdata->mutex); 539 mutex_lock(&cdata->mutex);
@@ -449,7 +543,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
449 else 543 else
450 dbs_data = cdata->gdbs_data; 544 dbs_data = cdata->gdbs_data;
451 545
452 if (WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT))) { 546 if (!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)) {
453 ret = -EINVAL; 547 ret = -EINVAL;
454 goto unlock; 548 goto unlock;
455 } 549 }
@@ -459,17 +553,19 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
459 ret = cpufreq_governor_init(policy, dbs_data, cdata); 553 ret = cpufreq_governor_init(policy, dbs_data, cdata);
460 break; 554 break;
461 case CPUFREQ_GOV_POLICY_EXIT: 555 case CPUFREQ_GOV_POLICY_EXIT:
462 cpufreq_governor_exit(policy, dbs_data); 556 ret = cpufreq_governor_exit(policy, dbs_data);
463 break; 557 break;
464 case CPUFREQ_GOV_START: 558 case CPUFREQ_GOV_START:
465 ret = cpufreq_governor_start(policy, dbs_data); 559 ret = cpufreq_governor_start(policy, dbs_data);
466 break; 560 break;
467 case CPUFREQ_GOV_STOP: 561 case CPUFREQ_GOV_STOP:
468 cpufreq_governor_stop(policy, dbs_data); 562 ret = cpufreq_governor_stop(policy, dbs_data);
469 break; 563 break;
470 case CPUFREQ_GOV_LIMITS: 564 case CPUFREQ_GOV_LIMITS:
471 cpufreq_governor_limits(policy, dbs_data); 565 ret = cpufreq_governor_limits(policy, dbs_data);
472 break; 566 break;
567 default:
568 ret = -EINVAL;
473 } 569 }
474 570
475unlock: 571unlock:
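
Taken together, the init/exit/start/stop/limits changes above turn the governor core into a small state machine: events issued out of order now fail with -EBUSY instead of corrupting state. A rough restatement of the per-event preconditions, written as a hypothetical helper for illustration (the checks actually live inline in the functions above; "shared" is the policy-wide cpu_common_dbs_info added in cpufreq_governor.h below):

static bool dbs_event_allowed(struct cpufreq_policy *policy,
			      struct cpu_common_dbs_info *shared,
			      unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		return !policy->governor_data;		/* must be EXITed */
	case CPUFREQ_GOV_POLICY_EXIT:
	case CPUFREQ_GOV_START:
		return shared && !shared->policy;	/* INITed but not STARTed */
	case CPUFREQ_GOV_STOP:
	case CPUFREQ_GOV_LIMITS:
		return shared && shared->policy;	/* STARTed */
	}
	return false;
}
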
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 34736f5e869d..50f171796632 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -109,7 +109,7 @@ store_one(_gov, file_name)
109 109
110/* create helper routines */ 110/* create helper routines */
111#define define_get_cpu_dbs_routines(_dbs_info) \ 111#define define_get_cpu_dbs_routines(_dbs_info) \
112static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu) \ 112static struct cpu_dbs_info *get_cpu_cdbs(int cpu) \
113{ \ 113{ \
114 return &per_cpu(_dbs_info, cpu).cdbs; \ 114 return &per_cpu(_dbs_info, cpu).cdbs; \
115} \ 115} \
@@ -128,9 +128,20 @@ static void *get_cpu_dbs_info_s(int cpu) \
128 * cs_*: Conservative governor 128 * cs_*: Conservative governor
129 */ 129 */
130 130
131/* Common to all CPUs of a policy */
132struct cpu_common_dbs_info {
133 struct cpufreq_policy *policy;
134 /*
135 * percpu mutex that serializes governor limit change with dbs_timer
136 * invocation. We do not want dbs_timer to run when user is changing
137 * the governor or limits.
138 */
139 struct mutex timer_mutex;
140 ktime_t time_stamp;
141};
142
131/* Per cpu structures */ 143/* Per cpu structures */
132struct cpu_dbs_common_info { 144struct cpu_dbs_info {
133 int cpu;
134 u64 prev_cpu_idle; 145 u64 prev_cpu_idle;
135 u64 prev_cpu_wall; 146 u64 prev_cpu_wall;
136 u64 prev_cpu_nice; 147 u64 prev_cpu_nice;
@@ -141,19 +152,12 @@ struct cpu_dbs_common_info {
141 * wake-up from idle. 152 * wake-up from idle.
142 */ 153 */
143 unsigned int prev_load; 154 unsigned int prev_load;
144 struct cpufreq_policy *cur_policy; 155 struct delayed_work dwork;
145 struct delayed_work work; 156 struct cpu_common_dbs_info *shared;
146 /*
147 * percpu mutex that serializes governor limit change with gov_dbs_timer
148 * invocation. We do not want gov_dbs_timer to run when user is changing
149 * the governor or limits.
150 */
151 struct mutex timer_mutex;
152 ktime_t time_stamp;
153}; 157};
154 158
155struct od_cpu_dbs_info_s { 159struct od_cpu_dbs_info_s {
156 struct cpu_dbs_common_info cdbs; 160 struct cpu_dbs_info cdbs;
157 struct cpufreq_frequency_table *freq_table; 161 struct cpufreq_frequency_table *freq_table;
158 unsigned int freq_lo; 162 unsigned int freq_lo;
159 unsigned int freq_lo_jiffies; 163 unsigned int freq_lo_jiffies;
@@ -163,7 +167,7 @@ struct od_cpu_dbs_info_s {
163}; 167};
164 168
165struct cs_cpu_dbs_info_s { 169struct cs_cpu_dbs_info_s {
166 struct cpu_dbs_common_info cdbs; 170 struct cpu_dbs_info cdbs;
167 unsigned int down_skip; 171 unsigned int down_skip;
168 unsigned int requested_freq; 172 unsigned int requested_freq;
169 unsigned int enable:1; 173 unsigned int enable:1;
@@ -204,9 +208,11 @@ struct common_dbs_data {
204 */ 208 */
205 struct dbs_data *gdbs_data; 209 struct dbs_data *gdbs_data;
206 210
207 struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu); 211 struct cpu_dbs_info *(*get_cpu_cdbs)(int cpu);
208 void *(*get_cpu_dbs_info_s)(int cpu); 212 void *(*get_cpu_dbs_info_s)(int cpu);
209 void (*gov_dbs_timer)(struct work_struct *work); 213 unsigned int (*gov_dbs_timer)(struct cpu_dbs_info *cdbs,
214 struct dbs_data *dbs_data,
215 bool modify_all);
210 void (*gov_check_cpu)(int cpu, unsigned int load); 216 void (*gov_check_cpu)(int cpu, unsigned int load);
211 int (*init)(struct dbs_data *dbs_data, bool notify); 217 int (*init)(struct dbs_data *dbs_data, bool notify);
212 void (*exit)(struct dbs_data *dbs_data, bool notify); 218 void (*exit)(struct dbs_data *dbs_data, bool notify);
@@ -265,8 +271,6 @@ static ssize_t show_sampling_rate_min_gov_pol \
265extern struct mutex cpufreq_governor_lock; 271extern struct mutex cpufreq_governor_lock;
266 272
267void dbs_check_cpu(struct dbs_data *dbs_data, int cpu); 273void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
268bool need_load_eval(struct cpu_dbs_common_info *cdbs,
269 unsigned int sampling_rate);
270int cpufreq_governor_dbs(struct cpufreq_policy *policy, 274int cpufreq_governor_dbs(struct cpufreq_policy *policy,
271 struct common_dbs_data *cdata, unsigned int event); 275 struct common_dbs_data *cdata, unsigned int event);
272void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy, 276void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
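
With the work item, timer_mutex and policy pointer moved out of cpu_dbs_info, the per-governor timer hook shrinks to a pure "compute the next delay" callback: the common dbs_timer() in cpufreq_governor.c takes shared->timer_mutex, decides modify_all via need_load_eval() and requeues the work itself. A minimal callback under the new prototype would look roughly like this (modelled on cs_dbs_timer above; the governor name is hypothetical):

static unsigned int sample_gov_dbs_timer(struct cpu_dbs_info *cdbs,
					 struct dbs_data *dbs_data,
					 bool modify_all)
{
	struct cs_dbs_tuners *tuners = dbs_data->tuners;

	/* Only re-evaluate load when the common code says a full sample is due. */
	if (modify_all)
		dbs_check_cpu(dbs_data, cdbs->shared->policy->cpu);

	/* Return the delay; dbs_timer() queues the next run. */
	return delay_for_sampling_rate(tuners->sampling_rate);
}
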
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 3c1e10f2304c..1fa9088c84a8 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -155,7 +155,7 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
155static void od_check_cpu(int cpu, unsigned int load) 155static void od_check_cpu(int cpu, unsigned int load)
156{ 156{
157 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); 157 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
158 struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; 158 struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
159 struct dbs_data *dbs_data = policy->governor_data; 159 struct dbs_data *dbs_data = policy->governor_data;
160 struct od_dbs_tuners *od_tuners = dbs_data->tuners; 160 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
161 161
@@ -191,46 +191,40 @@ static void od_check_cpu(int cpu, unsigned int load)
191 } 191 }
192} 192}
193 193
194static void od_dbs_timer(struct work_struct *work) 194static unsigned int od_dbs_timer(struct cpu_dbs_info *cdbs,
195 struct dbs_data *dbs_data, bool modify_all)
195{ 196{
196 struct od_cpu_dbs_info_s *dbs_info = 197 struct cpufreq_policy *policy = cdbs->shared->policy;
197 container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work); 198 unsigned int cpu = policy->cpu;
198 unsigned int cpu = dbs_info->cdbs.cur_policy->cpu; 199 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
199 struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
200 cpu); 200 cpu);
201 struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
202 struct od_dbs_tuners *od_tuners = dbs_data->tuners; 201 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
203 int delay = 0, sample_type = core_dbs_info->sample_type; 202 int delay = 0, sample_type = dbs_info->sample_type;
204 bool modify_all = true;
205 203
206 mutex_lock(&core_dbs_info->cdbs.timer_mutex); 204 if (!modify_all)
207 if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
208 modify_all = false;
209 goto max_delay; 205 goto max_delay;
210 }
211 206
212 /* Common NORMAL_SAMPLE setup */ 207 /* Common NORMAL_SAMPLE setup */
213 core_dbs_info->sample_type = OD_NORMAL_SAMPLE; 208 dbs_info->sample_type = OD_NORMAL_SAMPLE;
214 if (sample_type == OD_SUB_SAMPLE) { 209 if (sample_type == OD_SUB_SAMPLE) {
215 delay = core_dbs_info->freq_lo_jiffies; 210 delay = dbs_info->freq_lo_jiffies;
216 __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy, 211 __cpufreq_driver_target(policy, dbs_info->freq_lo,
217 core_dbs_info->freq_lo, CPUFREQ_RELATION_H); 212 CPUFREQ_RELATION_H);
218 } else { 213 } else {
219 dbs_check_cpu(dbs_data, cpu); 214 dbs_check_cpu(dbs_data, cpu);
220 if (core_dbs_info->freq_lo) { 215 if (dbs_info->freq_lo) {
221 /* Setup timer for SUB_SAMPLE */ 216 /* Setup timer for SUB_SAMPLE */
222 core_dbs_info->sample_type = OD_SUB_SAMPLE; 217 dbs_info->sample_type = OD_SUB_SAMPLE;
223 delay = core_dbs_info->freq_hi_jiffies; 218 delay = dbs_info->freq_hi_jiffies;
224 } 219 }
225 } 220 }
226 221
227max_delay: 222max_delay:
228 if (!delay) 223 if (!delay)
229 delay = delay_for_sampling_rate(od_tuners->sampling_rate 224 delay = delay_for_sampling_rate(od_tuners->sampling_rate
230 * core_dbs_info->rate_mult); 225 * dbs_info->rate_mult);
231 226
232 gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all); 227 return delay;
233 mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
234} 228}
235 229
236/************************** sysfs interface ************************/ 230/************************** sysfs interface ************************/
@@ -273,27 +267,27 @@ static void update_sampling_rate(struct dbs_data *dbs_data,
273 dbs_info = &per_cpu(od_cpu_dbs_info, cpu); 267 dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
274 cpufreq_cpu_put(policy); 268 cpufreq_cpu_put(policy);
275 269
276 mutex_lock(&dbs_info->cdbs.timer_mutex); 270 mutex_lock(&dbs_info->cdbs.shared->timer_mutex);
277 271
278 if (!delayed_work_pending(&dbs_info->cdbs.work)) { 272 if (!delayed_work_pending(&dbs_info->cdbs.dwork)) {
279 mutex_unlock(&dbs_info->cdbs.timer_mutex); 273 mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
280 continue; 274 continue;
281 } 275 }
282 276
283 next_sampling = jiffies + usecs_to_jiffies(new_rate); 277 next_sampling = jiffies + usecs_to_jiffies(new_rate);
284 appointed_at = dbs_info->cdbs.work.timer.expires; 278 appointed_at = dbs_info->cdbs.dwork.timer.expires;
285 279
286 if (time_before(next_sampling, appointed_at)) { 280 if (time_before(next_sampling, appointed_at)) {
287 281
288 mutex_unlock(&dbs_info->cdbs.timer_mutex); 282 mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
289 cancel_delayed_work_sync(&dbs_info->cdbs.work); 283 cancel_delayed_work_sync(&dbs_info->cdbs.dwork);
290 mutex_lock(&dbs_info->cdbs.timer_mutex); 284 mutex_lock(&dbs_info->cdbs.shared->timer_mutex);
291 285
292 gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, 286 gov_queue_work(dbs_data, policy,
293 usecs_to_jiffies(new_rate), true); 287 usecs_to_jiffies(new_rate), true);
294 288
295 } 289 }
296 mutex_unlock(&dbs_info->cdbs.timer_mutex); 290 mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
297 } 291 }
298} 292}
299 293
@@ -556,13 +550,16 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
556 550
557 get_online_cpus(); 551 get_online_cpus();
558 for_each_online_cpu(cpu) { 552 for_each_online_cpu(cpu) {
553 struct cpu_common_dbs_info *shared;
554
559 if (cpumask_test_cpu(cpu, &done)) 555 if (cpumask_test_cpu(cpu, &done))
560 continue; 556 continue;
561 557
562 policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy; 558 shared = per_cpu(od_cpu_dbs_info, cpu).cdbs.shared;
563 if (!policy) 559 if (!shared)
564 continue; 560 continue;
565 561
562 policy = shared->policy;
566 cpumask_or(&done, &done, policy->cpus); 563 cpumask_or(&done, &done, policy->cpus);
567 564
568 if (policy->governor != &cpufreq_gov_ondemand) 565 if (policy->governor != &cpufreq_gov_ondemand)
diff --git a/drivers/cpufreq/cpufreq_opp.c b/drivers/cpufreq/cpufreq_opp.c
index 773bcde893c0..0f5e6d5f6da0 100644
--- a/drivers/cpufreq/cpufreq_opp.c
+++ b/drivers/cpufreq/cpufreq_opp.c
@@ -75,6 +75,10 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
75 } 75 }
76 freq_table[i].driver_data = i; 76 freq_table[i].driver_data = i;
77 freq_table[i].frequency = rate / 1000; 77 freq_table[i].frequency = rate / 1000;
78
79 /* Is Boost/turbo opp ? */
80 if (dev_pm_opp_is_turbo(opp))
81 freq_table[i].flags = CPUFREQ_BOOST_FREQ;
78 } 82 }
79 83
80 freq_table[i].driver_data = i; 84 freq_table[i].driver_data = i;
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index a0d2a423cea9..4085244c8a67 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -78,7 +78,7 @@ static int eps_acpi_init(void)
78static int eps_acpi_exit(struct cpufreq_policy *policy) 78static int eps_acpi_exit(struct cpufreq_policy *policy)
79{ 79{
80 if (eps_acpi_cpu_perf) { 80 if (eps_acpi_cpu_perf) {
81 acpi_processor_unregister_performance(eps_acpi_cpu_perf, 0); 81 acpi_processor_unregister_performance(0);
82 free_cpumask_var(eps_acpi_cpu_perf->shared_cpu_map); 82 free_cpumask_var(eps_acpi_cpu_perf->shared_cpu_map);
83 kfree(eps_acpi_cpu_perf); 83 kfree(eps_acpi_cpu_perf);
84 eps_acpi_cpu_perf = NULL; 84 eps_acpi_cpu_perf = NULL;
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index dfbbf981ed56..a8f1daffc9bc 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -18,6 +18,21 @@
18 * FREQUENCY TABLE HELPERS * 18 * FREQUENCY TABLE HELPERS *
19 *********************************************************************/ 19 *********************************************************************/
20 20
21bool policy_has_boost_freq(struct cpufreq_policy *policy)
22{
23 struct cpufreq_frequency_table *pos, *table = policy->freq_table;
24
25 if (!table)
26 return false;
27
28 cpufreq_for_each_valid_entry(pos, table)
29 if (pos->flags & CPUFREQ_BOOST_FREQ)
30 return true;
31
32 return false;
33}
34EXPORT_SYMBOL_GPL(policy_has_boost_freq);
35
21int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, 36int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
22 struct cpufreq_frequency_table *table) 37 struct cpufreq_frequency_table *table)
23{ 38{
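
policy_has_boost_freq() closes the loop started in cpufreq_opp.c above, where turbo OPPs are tagged with CPUFREQ_BOOST_FREQ. A driver that builds its frequency table from OPPs could combine it with cpufreq_enable_boost_support() roughly as follows (a sketch only; the driver and its ->init() callback are hypothetical):

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *freq_table;
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	/* Turbo OPPs come back flagged CPUFREQ_BOOST_FREQ. */
	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret)
		return ret;

	ret = cpufreq_table_validate_and_show(policy, freq_table);
	if (ret)
		goto out_free_table;

	/* Expose the global boost knob only if the table has a boost entry. */
	if (policy_has_boost_freq(policy)) {
		ret = cpufreq_enable_boost_support();
		if (ret)
			goto out_free_table;
	}

	return 0;

out_free_table:
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
	return ret;
}
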
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index c30aaa6a54e8..0202429f1c5b 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -29,7 +29,6 @@ MODULE_LICENSE("GPL");
29 29
30struct cpufreq_acpi_io { 30struct cpufreq_acpi_io {
31 struct acpi_processor_performance acpi_data; 31 struct acpi_processor_performance acpi_data;
32 struct cpufreq_frequency_table *freq_table;
33 unsigned int resume; 32 unsigned int resume;
34}; 33};
35 34
@@ -221,6 +220,7 @@ acpi_cpufreq_cpu_init (
221 unsigned int cpu = policy->cpu; 220 unsigned int cpu = policy->cpu;
222 struct cpufreq_acpi_io *data; 221 struct cpufreq_acpi_io *data;
223 unsigned int result = 0; 222 unsigned int result = 0;
223 struct cpufreq_frequency_table *freq_table;
224 224
225 pr_debug("acpi_cpufreq_cpu_init\n"); 225 pr_debug("acpi_cpufreq_cpu_init\n");
226 226
@@ -254,10 +254,10 @@ acpi_cpufreq_cpu_init (
254 } 254 }
255 255
256 /* alloc freq_table */ 256 /* alloc freq_table */
257 data->freq_table = kzalloc(sizeof(*data->freq_table) * 257 freq_table = kzalloc(sizeof(*freq_table) *
258 (data->acpi_data.state_count + 1), 258 (data->acpi_data.state_count + 1),
259 GFP_KERNEL); 259 GFP_KERNEL);
260 if (!data->freq_table) { 260 if (!freq_table) {
261 result = -ENOMEM; 261 result = -ENOMEM;
262 goto err_unreg; 262 goto err_unreg;
263 } 263 }
@@ -276,14 +276,14 @@ acpi_cpufreq_cpu_init (
276 for (i = 0; i <= data->acpi_data.state_count; i++) 276 for (i = 0; i <= data->acpi_data.state_count; i++)
277 { 277 {
278 if (i < data->acpi_data.state_count) { 278 if (i < data->acpi_data.state_count) {
279 data->freq_table[i].frequency = 279 freq_table[i].frequency =
280 data->acpi_data.states[i].core_frequency * 1000; 280 data->acpi_data.states[i].core_frequency * 1000;
281 } else { 281 } else {
282 data->freq_table[i].frequency = CPUFREQ_TABLE_END; 282 freq_table[i].frequency = CPUFREQ_TABLE_END;
283 } 283 }
284 } 284 }
285 285
286 result = cpufreq_table_validate_and_show(policy, data->freq_table); 286 result = cpufreq_table_validate_and_show(policy, freq_table);
287 if (result) { 287 if (result) {
288 goto err_freqfree; 288 goto err_freqfree;
289 } 289 }
@@ -311,9 +311,9 @@ acpi_cpufreq_cpu_init (
311 return (result); 311 return (result);
312 312
313 err_freqfree: 313 err_freqfree:
314 kfree(data->freq_table); 314 kfree(freq_table);
315 err_unreg: 315 err_unreg:
316 acpi_processor_unregister_performance(&data->acpi_data, cpu); 316 acpi_processor_unregister_performance(cpu);
317 err_free: 317 err_free:
318 kfree(data); 318 kfree(data);
319 acpi_io_data[cpu] = NULL; 319 acpi_io_data[cpu] = NULL;
@@ -332,8 +332,8 @@ acpi_cpufreq_cpu_exit (
332 332
333 if (data) { 333 if (data) {
334 acpi_io_data[policy->cpu] = NULL; 334 acpi_io_data[policy->cpu] = NULL;
335 acpi_processor_unregister_performance(&data->acpi_data, 335 acpi_processor_unregister_performance(policy->cpu);
336 policy->cpu); 336 kfree(policy->freq_table);
337 kfree(data); 337 kfree(data);
338 } 338 }
339 339
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index 129e266f7621..2faa4216bf2a 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -98,11 +98,10 @@ static int integrator_set_target(struct cpufreq_policy *policy,
98 /* get current setting */ 98 /* get current setting */
99 cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET); 99 cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
100 100
101 if (machine_is_integrator()) { 101 if (machine_is_integrator())
102 vco.s = (cm_osc >> 8) & 7; 102 vco.s = (cm_osc >> 8) & 7;
103 } else if (machine_is_cintegrator()) { 103 else if (machine_is_cintegrator())
104 vco.s = 1; 104 vco.s = 1;
105 }
106 vco.v = cm_osc & 255; 105 vco.v = cm_osc & 255;
107 vco.r = 22; 106 vco.r = 22;
108 freqs.old = icst_hz(&cclk_params, vco) / 1000; 107 freqs.old = icst_hz(&cclk_params, vco) / 1000;
@@ -163,11 +162,10 @@ static unsigned int integrator_get(unsigned int cpu)
163 /* detect memory etc. */ 162 /* detect memory etc. */
164 cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET); 163 cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
165 164
166 if (machine_is_integrator()) { 165 if (machine_is_integrator())
167 vco.s = (cm_osc >> 8) & 7; 166 vco.s = (cm_osc >> 8) & 7;
168 } else { 167 else
169 vco.s = 1; 168 vco.s = 1;
170 }
171 vco.v = cm_osc & 255; 169 vco.v = cm_osc & 255;
172 vco.r = 22; 170 vco.r = 22;
173 171
@@ -203,7 +201,7 @@ static int __init integrator_cpufreq_probe(struct platform_device *pdev)
203 struct resource *res; 201 struct resource *res;
204 202
205 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 203 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
206 if (!res) 204 if (!res)
207 return -ENODEV; 205 return -ENODEV;
208 206
209 cm_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); 207 cm_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
@@ -234,6 +232,6 @@ static struct platform_driver integrator_cpufreq_driver = {
234module_platform_driver_probe(integrator_cpufreq_driver, 232module_platform_driver_probe(integrator_cpufreq_driver,
235 integrator_cpufreq_probe); 233 integrator_cpufreq_probe);
236 234
237MODULE_AUTHOR ("Russell M. King"); 235MODULE_AUTHOR("Russell M. King");
238MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs"); 236MODULE_DESCRIPTION("cpufreq driver for ARM Integrator CPUs");
239MODULE_LICENSE ("GPL"); 237MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 7898de054f4e..cddc61939a86 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -484,12 +484,11 @@ static void __init intel_pstate_sysfs_expose_params(void)
484} 484}
485/************************** sysfs end ************************/ 485/************************** sysfs end ************************/
486 486
487static void intel_pstate_hwp_enable(void) 487static void intel_pstate_hwp_enable(struct cpudata *cpudata)
488{ 488{
489 hwp_active++;
490 pr_info("intel_pstate: HWP enabled\n"); 489 pr_info("intel_pstate: HWP enabled\n");
491 490
492 wrmsrl( MSR_PM_ENABLE, 0x1); 491 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
493} 492}
494 493
495static int byt_get_min_pstate(void) 494static int byt_get_min_pstate(void)
@@ -522,7 +521,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
522 int32_t vid_fp; 521 int32_t vid_fp;
523 u32 vid; 522 u32 vid;
524 523
525 val = pstate << 8; 524 val = (u64)pstate << 8;
526 if (limits.no_turbo && !limits.turbo_disabled) 525 if (limits.no_turbo && !limits.turbo_disabled)
527 val |= (u64)1 << 32; 526 val |= (u64)1 << 32;
528 527
@@ -611,7 +610,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
611{ 610{
612 u64 val; 611 u64 val;
613 612
614 val = pstate << 8; 613 val = (u64)pstate << 8;
615 if (limits.no_turbo && !limits.turbo_disabled) 614 if (limits.no_turbo && !limits.turbo_disabled)
616 val |= (u64)1 << 32; 615 val |= (u64)1 << 32;
617 616
@@ -909,6 +908,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
909 ICPU(0x4c, byt_params), 908 ICPU(0x4c, byt_params),
910 ICPU(0x4e, core_params), 909 ICPU(0x4e, core_params),
911 ICPU(0x4f, core_params), 910 ICPU(0x4f, core_params),
911 ICPU(0x5e, core_params),
912 ICPU(0x56, core_params), 912 ICPU(0x56, core_params),
913 ICPU(0x57, knl_params), 913 ICPU(0x57, knl_params),
914 {} 914 {}
@@ -933,6 +933,10 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
933 cpu = all_cpu_data[cpunum]; 933 cpu = all_cpu_data[cpunum];
934 934
935 cpu->cpu = cpunum; 935 cpu->cpu = cpunum;
936
937 if (hwp_active)
938 intel_pstate_hwp_enable(cpu);
939
936 intel_pstate_get_cpu_pstates(cpu); 940 intel_pstate_get_cpu_pstates(cpu);
937 941
938 init_timer_deferrable(&cpu->timer); 942 init_timer_deferrable(&cpu->timer);
@@ -1170,6 +1174,10 @@ static struct hw_vendor_info vendor_info[] = {
1170 {1, "ORACLE", "X4270M3 ", PPC}, 1174 {1, "ORACLE", "X4270M3 ", PPC},
1171 {1, "ORACLE", "X4270M2 ", PPC}, 1175 {1, "ORACLE", "X4270M2 ", PPC},
1172 {1, "ORACLE", "X4170M2 ", PPC}, 1176 {1, "ORACLE", "X4170M2 ", PPC},
1177 {1, "ORACLE", "X4170 M3", PPC},
1178 {1, "ORACLE", "X4275 M3", PPC},
1179 {1, "ORACLE", "X6-2 ", PPC},
1180 {1, "ORACLE", "Sudbury ", PPC},
1173 {0, "", ""}, 1181 {0, "", ""},
1174}; 1182};
1175 1183
@@ -1246,7 +1254,7 @@ static int __init intel_pstate_init(void)
1246 return -ENOMEM; 1254 return -ENOMEM;
1247 1255
1248 if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) 1256 if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
1249 intel_pstate_hwp_enable(); 1257 hwp_active++;
1250 1258
1251 if (!hwp_active && hwp_only) 1259 if (!hwp_active && hwp_only)
1252 goto out; 1260 goto out;
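
The two casts in byt_set_pstate() and core_set_pstate() are easy to miss: the shift used to be evaluated in 32-bit signed arithmetic and only then widened, so a sufficiently large pstate value could overflow before it reaches the 64-bit MSR value; doing the shift in u64 removes that hazard and keeps the expression's width consistent with the (u64)1 << 32 turbo bit it is combined with. A standalone illustration in plain C (not kernel code; the names are made up):

#include <stdint.h>

static uint64_t pack_perf_ctl(int pstate, int disable_turbo)
{
	/* Widen before shifting so the whole expression is 64-bit. */
	uint64_t val = (uint64_t)pstate << 8;

	if (disable_turbo)
		val |= (uint64_t)1 << 32;	/* turbo disengage bit */

	return val;
}
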
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c
new file mode 100644
index 000000000000..49caed293a3b
--- /dev/null
+++ b/drivers/cpufreq/mt8173-cpufreq.c
@@ -0,0 +1,527 @@
1/*
2 * Copyright (c) 2015 Linaro Ltd.
3 * Author: Pi-Cheng Chen <pi-cheng.chen@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/clk.h>
16#include <linux/cpu.h>
17#include <linux/cpu_cooling.h>
18#include <linux/cpufreq.h>
19#include <linux/cpumask.h>
20#include <linux/of.h>
21#include <linux/platform_device.h>
22#include <linux/pm_opp.h>
23#include <linux/regulator/consumer.h>
24#include <linux/slab.h>
25#include <linux/thermal.h>
26
27#define MIN_VOLT_SHIFT (100000)
28#define MAX_VOLT_SHIFT (200000)
29#define MAX_VOLT_LIMIT (1150000)
30#define VOLT_TOL (10000)
31
32/*
33 * The struct mtk_cpu_dvfs_info holds necessary information for doing CPU DVFS
34 * on each CPU power/clock domain of Mediatek SoCs. Each CPU cluster in
35 * Mediatek SoCs has two voltage inputs, Vproc and Vsram. In some cases the two
36 * voltage inputs need to be controlled under a hardware limitation:
37 * 100mV < Vsram - Vproc < 200mV
38 *
39 * When scaling the clock frequency of a CPU clock domain, the clock source
40 * needs to be switched to another stable PLL clock temporarily until
41 * the original PLL becomes stable at target frequency.
42 */
43struct mtk_cpu_dvfs_info {
44 struct device *cpu_dev;
45 struct regulator *proc_reg;
46 struct regulator *sram_reg;
47 struct clk *cpu_clk;
48 struct clk *inter_clk;
49 struct thermal_cooling_device *cdev;
50 int intermediate_voltage;
51 bool need_voltage_tracking;
52};
53
54static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
55 int new_vproc)
56{
57 struct regulator *proc_reg = info->proc_reg;
58 struct regulator *sram_reg = info->sram_reg;
59 int old_vproc, old_vsram, new_vsram, vsram, vproc, ret;
60
61 old_vproc = regulator_get_voltage(proc_reg);
62 old_vsram = regulator_get_voltage(sram_reg);
63 /* Vsram should not exceed the maximum allowed voltage of SoC. */
64 new_vsram = min(new_vproc + MIN_VOLT_SHIFT, MAX_VOLT_LIMIT);
65
66 if (old_vproc < new_vproc) {
67 /*
68 * When scaling up voltages, Vsram and Vproc scale up step
69 * by step. At each step, set Vsram to (Vproc + 200mV) first,
70 * then set Vproc to (Vsram - 100mV).
71 * Keep doing it until Vsram and Vproc hit target voltages.
72 */
73 do {
74 old_vsram = regulator_get_voltage(sram_reg);
75 old_vproc = regulator_get_voltage(proc_reg);
76
77 vsram = min(new_vsram, old_vproc + MAX_VOLT_SHIFT);
78
79 if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
80 vsram = MAX_VOLT_LIMIT;
81
82 /*
83 * If the target Vsram hits the maximum voltage,
84 * try to set the exact voltage value first.
85 */
86 ret = regulator_set_voltage(sram_reg, vsram,
87 vsram);
88 if (ret)
89 ret = regulator_set_voltage(sram_reg,
90 vsram - VOLT_TOL,
91 vsram);
92
93 vproc = new_vproc;
94 } else {
95 ret = regulator_set_voltage(sram_reg, vsram,
96 vsram + VOLT_TOL);
97
98 vproc = vsram - MIN_VOLT_SHIFT;
99 }
100 if (ret)
101 return ret;
102
103 ret = regulator_set_voltage(proc_reg, vproc,
104 vproc + VOLT_TOL);
105 if (ret) {
106 regulator_set_voltage(sram_reg, old_vsram,
107 old_vsram);
108 return ret;
109 }
110 } while (vproc < new_vproc || vsram < new_vsram);
111 } else if (old_vproc > new_vproc) {
112 /*
113 * When scaling down voltages, Vsram and Vproc scale down step
114 * by step. At each step, set Vproc to (Vsram - 200mV) first,
 115 * then set Vsram to (Vproc + 100mV).
116 * Keep doing it until Vsram and Vproc hit target voltages.
117 */
118 do {
119 old_vproc = regulator_get_voltage(proc_reg);
120 old_vsram = regulator_get_voltage(sram_reg);
121
122 vproc = max(new_vproc, old_vsram - MAX_VOLT_SHIFT);
123 ret = regulator_set_voltage(proc_reg, vproc,
124 vproc + VOLT_TOL);
125 if (ret)
126 return ret;
127
128 if (vproc == new_vproc)
129 vsram = new_vsram;
130 else
131 vsram = max(new_vsram, vproc + MIN_VOLT_SHIFT);
132
133 if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
134 vsram = MAX_VOLT_LIMIT;
135
136 /*
137 * If the target Vsram hits the maximum voltage,
138 * try to set the exact voltage value first.
139 */
140 ret = regulator_set_voltage(sram_reg, vsram,
141 vsram);
142 if (ret)
143 ret = regulator_set_voltage(sram_reg,
144 vsram - VOLT_TOL,
145 vsram);
146 } else {
147 ret = regulator_set_voltage(sram_reg, vsram,
148 vsram + VOLT_TOL);
149 }
150
151 if (ret) {
152 regulator_set_voltage(proc_reg, old_vproc,
153 old_vproc);
154 return ret;
155 }
156 } while (vproc > new_vproc + VOLT_TOL ||
157 vsram > new_vsram + VOLT_TOL);
158 }
159
160 return 0;
161}
162
163static int mtk_cpufreq_set_voltage(struct mtk_cpu_dvfs_info *info, int vproc)
164{
165 if (info->need_voltage_tracking)
166 return mtk_cpufreq_voltage_tracking(info, vproc);
167 else
168 return regulator_set_voltage(info->proc_reg, vproc,
169 vproc + VOLT_TOL);
170}
171
172static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
173 unsigned int index)
174{
175 struct cpufreq_frequency_table *freq_table = policy->freq_table;
176 struct clk *cpu_clk = policy->clk;
177 struct clk *armpll = clk_get_parent(cpu_clk);
178 struct mtk_cpu_dvfs_info *info = policy->driver_data;
179 struct device *cpu_dev = info->cpu_dev;
180 struct dev_pm_opp *opp;
181 long freq_hz, old_freq_hz;
182 int vproc, old_vproc, inter_vproc, target_vproc, ret;
183
184 inter_vproc = info->intermediate_voltage;
185
186 old_freq_hz = clk_get_rate(cpu_clk);
187 old_vproc = regulator_get_voltage(info->proc_reg);
188
189 freq_hz = freq_table[index].frequency * 1000;
190
191 rcu_read_lock();
192 opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
193 if (IS_ERR(opp)) {
194 rcu_read_unlock();
195 pr_err("cpu%d: failed to find OPP for %ld\n",
196 policy->cpu, freq_hz);
197 return PTR_ERR(opp);
198 }
199 vproc = dev_pm_opp_get_voltage(opp);
200 rcu_read_unlock();
201
202 /*
203 * If the new voltage or the intermediate voltage is higher than the
204 * current voltage, scale up voltage first.
205 */
206 target_vproc = (inter_vproc > vproc) ? inter_vproc : vproc;
207 if (old_vproc < target_vproc) {
208 ret = mtk_cpufreq_set_voltage(info, target_vproc);
209 if (ret) {
210 pr_err("cpu%d: failed to scale up voltage!\n",
211 policy->cpu);
212 mtk_cpufreq_set_voltage(info, old_vproc);
213 return ret;
214 }
215 }
216
217 /* Reparent the CPU clock to intermediate clock. */
218 ret = clk_set_parent(cpu_clk, info->inter_clk);
219 if (ret) {
220 pr_err("cpu%d: failed to re-parent cpu clock!\n",
221 policy->cpu);
222 mtk_cpufreq_set_voltage(info, old_vproc);
223 WARN_ON(1);
224 return ret;
225 }
226
227 /* Set the original PLL to target rate. */
228 ret = clk_set_rate(armpll, freq_hz);
229 if (ret) {
230 pr_err("cpu%d: failed to scale cpu clock rate!\n",
231 policy->cpu);
232 clk_set_parent(cpu_clk, armpll);
233 mtk_cpufreq_set_voltage(info, old_vproc);
234 return ret;
235 }
236
237 /* Set parent of CPU clock back to the original PLL. */
238 ret = clk_set_parent(cpu_clk, armpll);
239 if (ret) {
240 pr_err("cpu%d: failed to re-parent cpu clock!\n",
241 policy->cpu);
242 mtk_cpufreq_set_voltage(info, inter_vproc);
243 WARN_ON(1);
244 return ret;
245 }
246
247 /*
248 * If the new voltage is lower than the intermediate voltage or the
249 * original voltage, scale down to the new voltage.
250 */
251 if (vproc < inter_vproc || vproc < old_vproc) {
252 ret = mtk_cpufreq_set_voltage(info, vproc);
253 if (ret) {
254 pr_err("cpu%d: failed to scale down voltage!\n",
255 policy->cpu);
256 clk_set_parent(cpu_clk, info->inter_clk);
257 clk_set_rate(armpll, old_freq_hz);
258 clk_set_parent(cpu_clk, armpll);
259 return ret;
260 }
261 }
262
263 return 0;
264}
265
266static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
267{
268 struct mtk_cpu_dvfs_info *info = policy->driver_data;
269 struct device_node *np = of_node_get(info->cpu_dev->of_node);
270
271 if (WARN_ON(!np))
272 return;
273
274 if (of_find_property(np, "#cooling-cells", NULL)) {
275 info->cdev = of_cpufreq_cooling_register(np,
276 policy->related_cpus);
277
278 if (IS_ERR(info->cdev)) {
279 dev_err(info->cpu_dev,
280 "running cpufreq without cooling device: %ld\n",
281 PTR_ERR(info->cdev));
282
283 info->cdev = NULL;
284 }
285 }
286
287 of_node_put(np);
288}
289
290static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
291{
292 struct device *cpu_dev;
293 struct regulator *proc_reg = ERR_PTR(-ENODEV);
294 struct regulator *sram_reg = ERR_PTR(-ENODEV);
295 struct clk *cpu_clk = ERR_PTR(-ENODEV);
296 struct clk *inter_clk = ERR_PTR(-ENODEV);
297 struct dev_pm_opp *opp;
298 unsigned long rate;
299 int ret;
300
301 cpu_dev = get_cpu_device(cpu);
302 if (!cpu_dev) {
303 pr_err("failed to get cpu%d device\n", cpu);
304 return -ENODEV;
305 }
306
307 cpu_clk = clk_get(cpu_dev, "cpu");
308 if (IS_ERR(cpu_clk)) {
309 if (PTR_ERR(cpu_clk) == -EPROBE_DEFER)
310 pr_warn("cpu clk for cpu%d not ready, retry.\n", cpu);
311 else
312 pr_err("failed to get cpu clk for cpu%d\n", cpu);
313
314 ret = PTR_ERR(cpu_clk);
315 return ret;
316 }
317
318 inter_clk = clk_get(cpu_dev, "intermediate");
319 if (IS_ERR(inter_clk)) {
320 if (PTR_ERR(inter_clk) == -EPROBE_DEFER)
321 pr_warn("intermediate clk for cpu%d not ready, retry.\n",
322 cpu);
323 else
324 pr_err("failed to get intermediate clk for cpu%d\n",
325 cpu);
326
327 ret = PTR_ERR(inter_clk);
328 goto out_free_resources;
329 }
330
331 proc_reg = regulator_get_exclusive(cpu_dev, "proc");
332 if (IS_ERR(proc_reg)) {
333 if (PTR_ERR(proc_reg) == -EPROBE_DEFER)
334 pr_warn("proc regulator for cpu%d not ready, retry.\n",
335 cpu);
336 else
337 pr_err("failed to get proc regulator for cpu%d\n",
338 cpu);
339
340 ret = PTR_ERR(proc_reg);
341 goto out_free_resources;
342 }
343
344 /* Both presence and absence of sram regulator are valid cases. */
345 sram_reg = regulator_get_exclusive(cpu_dev, "sram");
346
347 ret = of_init_opp_table(cpu_dev);
348 if (ret) {
349 pr_warn("no OPP table for cpu%d\n", cpu);
350 goto out_free_resources;
351 }
352
353 /* Search a safe voltage for intermediate frequency. */
354 rate = clk_get_rate(inter_clk);
355 rcu_read_lock();
356 opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
357 if (IS_ERR(opp)) {
358 rcu_read_unlock();
359 pr_err("failed to get intermediate opp for cpu%d\n", cpu);
360 ret = PTR_ERR(opp);
361 goto out_free_opp_table;
362 }
363 info->intermediate_voltage = dev_pm_opp_get_voltage(opp);
364 rcu_read_unlock();
365
366 info->cpu_dev = cpu_dev;
367 info->proc_reg = proc_reg;
368 info->sram_reg = IS_ERR(sram_reg) ? NULL : sram_reg;
369 info->cpu_clk = cpu_clk;
370 info->inter_clk = inter_clk;
371
372 /*
373 * If SRAM regulator is present, software "voltage tracking" is needed
374 * for this CPU power domain.
375 */
376 info->need_voltage_tracking = !IS_ERR(sram_reg);
377
378 return 0;
379
380out_free_opp_table:
381 of_free_opp_table(cpu_dev);
382
383out_free_resources:
384 if (!IS_ERR(proc_reg))
385 regulator_put(proc_reg);
386 if (!IS_ERR(sram_reg))
387 regulator_put(sram_reg);
388 if (!IS_ERR(cpu_clk))
389 clk_put(cpu_clk);
390 if (!IS_ERR(inter_clk))
391 clk_put(inter_clk);
392
393 return ret;
394}
395
396static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
397{
398 if (!IS_ERR(info->proc_reg))
399 regulator_put(info->proc_reg);
400 if (!IS_ERR(info->sram_reg))
401 regulator_put(info->sram_reg);
402 if (!IS_ERR(info->cpu_clk))
403 clk_put(info->cpu_clk);
404 if (!IS_ERR(info->inter_clk))
405 clk_put(info->inter_clk);
406
407 of_free_opp_table(info->cpu_dev);
408}
409
410static int mtk_cpufreq_init(struct cpufreq_policy *policy)
411{
412 struct mtk_cpu_dvfs_info *info;
413 struct cpufreq_frequency_table *freq_table;
414 int ret;
415
416 info = kzalloc(sizeof(*info), GFP_KERNEL);
417 if (!info)
418 return -ENOMEM;
419
420 ret = mtk_cpu_dvfs_info_init(info, policy->cpu);
421 if (ret) {
422 pr_err("%s failed to initialize dvfs info for cpu%d\n",
423 __func__, policy->cpu);
424 goto out_free_dvfs_info;
425 }
426
427 ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table);
428 if (ret) {
429 pr_err("failed to init cpufreq table for cpu%d: %d\n",
430 policy->cpu, ret);
431 goto out_release_dvfs_info;
432 }
433
434 ret = cpufreq_table_validate_and_show(policy, freq_table);
435 if (ret) {
436 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
437 goto out_free_cpufreq_table;
438 }
439
440 /* CPUs in the same cluster share a clock and power domain. */
441 cpumask_copy(policy->cpus, &cpu_topology[policy->cpu].core_sibling);
442 policy->driver_data = info;
443 policy->clk = info->cpu_clk;
444
445 return 0;
446
447out_free_cpufreq_table:
448 dev_pm_opp_free_cpufreq_table(info->cpu_dev, &freq_table);
449
450out_release_dvfs_info:
451 mtk_cpu_dvfs_info_release(info);
452
453out_free_dvfs_info:
454 kfree(info);
455
456 return ret;
457}
458
459static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
460{
461 struct mtk_cpu_dvfs_info *info = policy->driver_data;
462
463 cpufreq_cooling_unregister(info->cdev);
464 dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);
465 mtk_cpu_dvfs_info_release(info);
466 kfree(info);
467
468 return 0;
469}
470
471static struct cpufreq_driver mt8173_cpufreq_driver = {
472 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
473 .verify = cpufreq_generic_frequency_table_verify,
474 .target_index = mtk_cpufreq_set_target,
475 .get = cpufreq_generic_get,
476 .init = mtk_cpufreq_init,
477 .exit = mtk_cpufreq_exit,
478 .ready = mtk_cpufreq_ready,
479 .name = "mtk-cpufreq",
480 .attr = cpufreq_generic_attr,
481};
482
483static int mt8173_cpufreq_probe(struct platform_device *pdev)
484{
485 int ret;
486
487 ret = cpufreq_register_driver(&mt8173_cpufreq_driver);
488 if (ret)
489 pr_err("failed to register mtk cpufreq driver\n");
490
491 return ret;
492}
493
494static struct platform_driver mt8173_cpufreq_platdrv = {
495 .driver = {
496 .name = "mt8173-cpufreq",
497 },
498 .probe = mt8173_cpufreq_probe,
499};
500
501static int mt8173_cpufreq_driver_init(void)
502{
503 struct platform_device *pdev;
504 int err;
505
506 if (!of_machine_is_compatible("mediatek,mt8173"))
507 return -ENODEV;
508
509 err = platform_driver_register(&mt8173_cpufreq_platdrv);
510 if (err)
511 return err;
512
513 /*
514 * Since there's no place to hold device registration code and no
515 * device tree based way to match the cpufreq driver yet, both the driver
516 * and the device registration code are put here to handle deferred
517 * probing.
518 */
519 pdev = platform_device_register_simple("mt8173-cpufreq", -1, NULL, 0);
520 if (IS_ERR(pdev)) {
521 pr_err("failed to register mtk-cpufreq platform device\n");
522 return PTR_ERR(pdev);
523 }
524
525 return 0;
526}
527device_initcall(mt8173_cpufreq_driver_init);
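
Both mtk_cpufreq_set_target() and mtk_cpu_dvfs_info_init() turn a clock rate into a voltage by asking the OPP library for the ceiling entry (dev_pm_opp_find_freq_ceil() under rcu_read_lock(), then dev_pm_opp_get_voltage()). A self-contained model of that lookup over a toy table follows; find_freq_ceil() is a hypothetical stand-in and the frequency/voltage pairs are illustrative, not the MT8173's real OPPs.

#include <stdio.h>

struct opp { long freq_hz; int volt_uv; };

static const struct opp table[] = {        /* illustrative values only */
        {  507000000,  859000 },
        { 1001000000,  908000 },
        { 1300000000, 1009000 },
        { 1703000000, 1125000 },
};

/* Mimics dev_pm_opp_find_freq_ceil(): smallest entry with freq >= *freq_hz. */
static const struct opp *find_freq_ceil(long *freq_hz)
{
        for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (table[i].freq_hz >= *freq_hz) {
                        *freq_hz = table[i].freq_hz;
                        return &table[i];
                }
        return NULL;                       /* no OPP at or above the requested rate */
}

int main(void)
{
        long hz = 1299000 * 1000L;         /* 1299000 kHz from the cpufreq table, in Hz */
        const struct opp *opp = find_freq_ceil(&hz);

        if (opp)
                printf("ceil OPP: %ld Hz at %d uV\n", hz, opp->volt_uv);
        return 0;
}

As in the driver, the requested rate is rounded up to the nearest table entry and the caller then reads the voltage that entry carries.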
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 37c5742482d8..c1ae1999770a 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -421,7 +421,7 @@ static int powernow_acpi_init(void)
421 return 0; 421 return 0;
422 422
423err2: 423err2:
424 acpi_processor_unregister_performance(acpi_processor_perf, 0); 424 acpi_processor_unregister_performance(0);
425err1: 425err1:
426 free_cpumask_var(acpi_processor_perf->shared_cpu_map); 426 free_cpumask_var(acpi_processor_perf->shared_cpu_map);
427err05: 427err05:
@@ -661,7 +661,7 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy)
661{ 661{
662#ifdef CONFIG_X86_POWERNOW_K7_ACPI 662#ifdef CONFIG_X86_POWERNOW_K7_ACPI
663 if (acpi_processor_perf) { 663 if (acpi_processor_perf) {
664 acpi_processor_unregister_performance(acpi_processor_perf, 0); 664 acpi_processor_unregister_performance(0);
665 free_cpumask_var(acpi_processor_perf->shared_cpu_map); 665 free_cpumask_var(acpi_processor_perf->shared_cpu_map);
666 kfree(acpi_processor_perf); 666 kfree(acpi_processor_perf);
667 } 667 }
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 5c035d04d827..0b5bf135b090 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -795,7 +795,7 @@ err_out_mem:
795 kfree(powernow_table); 795 kfree(powernow_table);
796 796
797err_out: 797err_out:
798 acpi_processor_unregister_performance(&data->acpi_data, data->cpu); 798 acpi_processor_unregister_performance(data->cpu);
799 799
800 /* data->acpi_data.state_count informs us at ->exit() 800 /* data->acpi_data.state_count informs us at ->exit()
801 * whether ACPI was used */ 801 * whether ACPI was used */
@@ -863,8 +863,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
863static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) 863static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
864{ 864{
865 if (data->acpi_data.state_count) 865 if (data->acpi_data.state_count)
866 acpi_processor_unregister_performance(&data->acpi_data, 866 acpi_processor_unregister_performance(data->cpu);
867 data->cpu);
868 free_cpumask_var(data->acpi_data.shared_cpu_map); 867 free_cpumask_var(data->acpi_data.shared_cpu_map);
869} 868}
870 869
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index ebef0d8279c7..64994e10638e 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -27,20 +27,31 @@
27#include <linux/smp.h> 27#include <linux/smp.h>
28#include <linux/of.h> 28#include <linux/of.h>
29#include <linux/reboot.h> 29#include <linux/reboot.h>
30#include <linux/slab.h>
30 31
31#include <asm/cputhreads.h> 32#include <asm/cputhreads.h>
32#include <asm/firmware.h> 33#include <asm/firmware.h>
33#include <asm/reg.h> 34#include <asm/reg.h>
34#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */ 35#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
36#include <asm/opal.h>
35 37
36#define POWERNV_MAX_PSTATES 256 38#define POWERNV_MAX_PSTATES 256
37#define PMSR_PSAFE_ENABLE (1UL << 30) 39#define PMSR_PSAFE_ENABLE (1UL << 30)
38#define PMSR_SPR_EM_DISABLE (1UL << 31) 40#define PMSR_SPR_EM_DISABLE (1UL << 31)
39#define PMSR_MAX(x) ((x >> 32) & 0xFF) 41#define PMSR_MAX(x) ((x >> 32) & 0xFF)
40#define PMSR_LP(x) ((x >> 48) & 0xFF)
41 42
42static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1]; 43static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
43static bool rebooting, throttled; 44static bool rebooting, throttled, occ_reset;
45
46static struct chip {
47 unsigned int id;
48 bool throttled;
49 cpumask_t mask;
50 struct work_struct throttle;
51 bool restore;
52} *chips;
53
54static int nr_chips;
44 55
45/* 56/*
46 * Note: The set of pstates consists of contiguous integers, the 57 * Note: The set of pstates consists of contiguous integers, the
@@ -298,28 +309,35 @@ static inline unsigned int get_nominal_index(void)
298 return powernv_pstate_info.max - powernv_pstate_info.nominal; 309 return powernv_pstate_info.max - powernv_pstate_info.nominal;
299} 310}
300 311
301static void powernv_cpufreq_throttle_check(unsigned int cpu) 312static void powernv_cpufreq_throttle_check(void *data)
302{ 313{
314 unsigned int cpu = smp_processor_id();
303 unsigned long pmsr; 315 unsigned long pmsr;
304 int pmsr_pmax, pmsr_lp; 316 int pmsr_pmax, i;
305 317
306 pmsr = get_pmspr(SPRN_PMSR); 318 pmsr = get_pmspr(SPRN_PMSR);
307 319
320 for (i = 0; i < nr_chips; i++)
321 if (chips[i].id == cpu_to_chip_id(cpu))
322 break;
323
308 /* Check for Pmax Capping */ 324 /* Check for Pmax Capping */
309 pmsr_pmax = (s8)PMSR_MAX(pmsr); 325 pmsr_pmax = (s8)PMSR_MAX(pmsr);
310 if (pmsr_pmax != powernv_pstate_info.max) { 326 if (pmsr_pmax != powernv_pstate_info.max) {
311 throttled = true; 327 if (chips[i].throttled)
312 pr_info("CPU %d Pmax is reduced to %d\n", cpu, pmsr_pmax); 328 goto next;
313 pr_info("Max allowed Pstate is capped\n"); 329 chips[i].throttled = true;
330 pr_info("CPU %d on Chip %u has Pmax reduced to %d\n", cpu,
331 chips[i].id, pmsr_pmax);
332 } else if (chips[i].throttled) {
333 chips[i].throttled = false;
334 pr_info("CPU %d on Chip %u has Pmax restored to %d\n", cpu,
335 chips[i].id, pmsr_pmax);
314 } 336 }
315 337
316 /* 338 /* Check if Psafe_mode_active is set in PMSR. */
317 * Check for Psafe by reading LocalPstate 339next:
318 * or check if Psafe_mode_active is set in PMSR. 340 if (pmsr & PMSR_PSAFE_ENABLE) {
319 */
320 pmsr_lp = (s8)PMSR_LP(pmsr);
321 if ((pmsr_lp < powernv_pstate_info.min) ||
322 (pmsr & PMSR_PSAFE_ENABLE)) {
323 throttled = true; 341 throttled = true;
324 pr_info("Pstate set to safe frequency\n"); 342 pr_info("Pstate set to safe frequency\n");
325 } 343 }
@@ -350,7 +368,7 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
350 return 0; 368 return 0;
351 369
352 if (!throttled) 370 if (!throttled)
353 powernv_cpufreq_throttle_check(smp_processor_id()); 371 powernv_cpufreq_throttle_check(NULL);
354 372
355 freq_data.pstate_id = powernv_freqs[new_index].driver_data; 373 freq_data.pstate_id = powernv_freqs[new_index].driver_data;
356 374
@@ -395,6 +413,119 @@ static struct notifier_block powernv_cpufreq_reboot_nb = {
395 .notifier_call = powernv_cpufreq_reboot_notifier, 413 .notifier_call = powernv_cpufreq_reboot_notifier,
396}; 414};
397 415
416void powernv_cpufreq_work_fn(struct work_struct *work)
417{
418 struct chip *chip = container_of(work, struct chip, throttle);
419 unsigned int cpu;
420 cpumask_var_t mask;
421
422 smp_call_function_any(&chip->mask,
423 powernv_cpufreq_throttle_check, NULL, 0);
424
425 if (!chip->restore)
426 return;
427
428 chip->restore = false;
429 cpumask_copy(mask, &chip->mask);
430 for_each_cpu_and(cpu, mask, cpu_online_mask) {
431 int index, tcpu;
432 struct cpufreq_policy policy;
433
434 cpufreq_get_policy(&policy, cpu);
435 cpufreq_frequency_table_target(&policy, policy.freq_table,
436 policy.cur,
437 CPUFREQ_RELATION_C, &index);
438 powernv_cpufreq_target_index(&policy, index);
439 for_each_cpu(tcpu, policy.cpus)
440 cpumask_clear_cpu(tcpu, mask);
441 }
442}
443
444static char throttle_reason[][30] = {
445 "No throttling",
446 "Power Cap",
447 "Processor Over Temperature",
448 "Power Supply Failure",
449 "Over Current",
450 "OCC Reset"
451 };
452
453static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
454 unsigned long msg_type, void *_msg)
455{
456 struct opal_msg *msg = _msg;
457 struct opal_occ_msg omsg;
458 int i;
459
460 if (msg_type != OPAL_MSG_OCC)
461 return 0;
462
463 omsg.type = be64_to_cpu(msg->params[0]);
464
465 switch (omsg.type) {
466 case OCC_RESET:
467 occ_reset = true;
468 pr_info("OCC (On Chip Controller - enforces hard thermal/power limits) Resetting\n");
469 /*
470 * powernv_cpufreq_throttle_check() is called in
471 * target() callback which can detect the throttle state
472 * for governors like ondemand.
473 * But static governors will not call target() often thus
474 * report throttling here.
475 */
476 if (!throttled) {
477 throttled = true;
478 pr_crit("CPU frequency is throttled for duration\n");
479 }
480
481 break;
482 case OCC_LOAD:
483 pr_info("OCC Loading, CPU frequency is throttled until OCC is started\n");
484 break;
485 case OCC_THROTTLE:
486 omsg.chip = be64_to_cpu(msg->params[1]);
487 omsg.throttle_status = be64_to_cpu(msg->params[2]);
488
489 if (occ_reset) {
490 occ_reset = false;
491 throttled = false;
492 pr_info("OCC Active, CPU frequency is no longer throttled\n");
493
494 for (i = 0; i < nr_chips; i++) {
495 chips[i].restore = true;
496 schedule_work(&chips[i].throttle);
497 }
498
499 return 0;
500 }
501
502 if (omsg.throttle_status &&
503 omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS)
504 pr_info("OCC: Chip %u Pmax reduced due to %s\n",
505 (unsigned int)omsg.chip,
506 throttle_reason[omsg.throttle_status]);
507 else if (!omsg.throttle_status)
508 pr_info("OCC: Chip %u %s\n", (unsigned int)omsg.chip,
509 throttle_reason[omsg.throttle_status]);
510 else
511 return 0;
512
513 for (i = 0; i < nr_chips; i++)
514 if (chips[i].id == omsg.chip) {
515 if (!omsg.throttle_status)
516 chips[i].restore = true;
517 schedule_work(&chips[i].throttle);
518 }
519 }
520 return 0;
521}
522
523static struct notifier_block powernv_cpufreq_opal_nb = {
524 .notifier_call = powernv_cpufreq_occ_msg,
525 .next = NULL,
526 .priority = 0,
527};
528
398static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy) 529static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
399{ 530{
400 struct powernv_smp_call_data freq_data; 531 struct powernv_smp_call_data freq_data;
@@ -414,6 +545,36 @@ static struct cpufreq_driver powernv_cpufreq_driver = {
414 .attr = powernv_cpu_freq_attr, 545 .attr = powernv_cpu_freq_attr,
415}; 546};
416 547
548static int init_chip_info(void)
549{
550 unsigned int chip[256];
551 unsigned int cpu, i;
552 unsigned int prev_chip_id = UINT_MAX;
553
554 for_each_possible_cpu(cpu) {
555 unsigned int id = cpu_to_chip_id(cpu);
556
557 if (prev_chip_id != id) {
558 prev_chip_id = id;
559 chip[nr_chips++] = id;
560 }
561 }
562
563 chips = kmalloc_array(nr_chips, sizeof(struct chip), GFP_KERNEL);
564 if (!chips)
565 return -ENOMEM;
566
567 for (i = 0; i < nr_chips; i++) {
568 chips[i].id = chip[i];
569 chips[i].throttled = false;
570 cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
571 INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
572 chips[i].restore = false;
573 }
574
575 return 0;
576}
577
417static int __init powernv_cpufreq_init(void) 578static int __init powernv_cpufreq_init(void)
418{ 579{
419 int rc = 0; 580 int rc = 0;
@@ -429,7 +590,13 @@ static int __init powernv_cpufreq_init(void)
429 return rc; 590 return rc;
430 } 591 }
431 592
593 /* Populate chip info */
594 rc = init_chip_info();
595 if (rc)
596 return rc;
597
432 register_reboot_notifier(&powernv_cpufreq_reboot_nb); 598 register_reboot_notifier(&powernv_cpufreq_reboot_nb);
599 opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
433 return cpufreq_register_driver(&powernv_cpufreq_driver); 600 return cpufreq_register_driver(&powernv_cpufreq_driver);
434} 601}
435module_init(powernv_cpufreq_init); 602module_init(powernv_cpufreq_init);
@@ -437,6 +604,8 @@ module_init(powernv_cpufreq_init);
437static void __exit powernv_cpufreq_exit(void) 604static void __exit powernv_cpufreq_exit(void)
438{ 605{
439 unregister_reboot_notifier(&powernv_cpufreq_reboot_nb); 606 unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
607 opal_message_notifier_unregister(OPAL_MSG_OCC,
608 &powernv_cpufreq_opal_nb);
440 cpufreq_unregister_driver(&powernv_cpufreq_driver); 609 cpufreq_unregister_driver(&powernv_cpufreq_driver);
441} 610}
442module_exit(powernv_cpufreq_exit); 611module_exit(powernv_cpufreq_exit);
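
init_chip_info() in the powernv hunk above builds one struct chip per physical chip by walking the possible CPUs and starting a new entry whenever the chip id differs from the previous CPU's, which assumes CPUs of the same chip are numbered contiguously. A user-space sketch of that scan, with cpu_chip_id[] as a hypothetical stand-in for cpu_to_chip_id():

#include <stdio.h>

#define NR_CPUS 8

/* Hypothetical mapping: two chips, four CPUs each, numbered contiguously. */
static const unsigned int cpu_chip_id[NR_CPUS] = { 0, 0, 0, 0, 8, 8, 8, 8 };

int main(void)
{
        unsigned int chip[NR_CPUS];
        unsigned int prev = (unsigned int)-1;
        int nr_chips = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                unsigned int id = cpu_chip_id[cpu];

                if (id != prev) {               /* first CPU of a new chip */
                        prev = id;
                        chip[nr_chips++] = id;
                }
        }

        for (int i = 0; i < nr_chips; i++)
                printf("chip[%d] = %u\n", i, chip[i]);
        return 0;
}

Each resulting entry then carries its own throttled flag and work item, so an OCC_THROTTLE message only schedules work for the chip it names.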
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
index d29e8da396a0..7969f7690498 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -97,8 +97,8 @@ static int pmi_notifier(struct notifier_block *nb,
97 struct cpufreq_frequency_table *cbe_freqs; 97 struct cpufreq_frequency_table *cbe_freqs;
98 u8 node; 98 u8 node;
99 99
100 /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE 100 /* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY
101 * and CPUFREQ_NOTIFY policy events?) 101 * policy events?)
102 */ 102 */
103 if (event == CPUFREQ_START) 103 if (event == CPUFREQ_START)
104 return 0; 104 return 0;
diff --git a/drivers/cpufreq/sfi-cpufreq.c b/drivers/cpufreq/sfi-cpufreq.c
index ffa3389e535b..992ce6f9abec 100644
--- a/drivers/cpufreq/sfi-cpufreq.c
+++ b/drivers/cpufreq/sfi-cpufreq.c
@@ -45,12 +45,10 @@ static int sfi_parse_freq(struct sfi_table_header *table)
45 pentry = (struct sfi_freq_table_entry *)sb->pentry; 45 pentry = (struct sfi_freq_table_entry *)sb->pentry;
46 totallen = num_freq_table_entries * sizeof(*pentry); 46 totallen = num_freq_table_entries * sizeof(*pentry);
47 47
48 sfi_cpufreq_array = kzalloc(totallen, GFP_KERNEL); 48 sfi_cpufreq_array = kmemdup(pentry, totallen, GFP_KERNEL);
49 if (!sfi_cpufreq_array) 49 if (!sfi_cpufreq_array)
50 return -ENOMEM; 50 return -ENOMEM;
51 51
52 memcpy(sfi_cpufreq_array, pentry, totallen);
53
54 return 0; 52 return 0;
55} 53}
56 54
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 4ab7a2156672..15d3214aaa00 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -386,7 +386,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
386 unsigned int prev_speed; 386 unsigned int prev_speed;
387 unsigned int ret = 0; 387 unsigned int ret = 0;
388 unsigned long flags; 388 unsigned long flags;
389 struct timeval tv1, tv2; 389 ktime_t tv1, tv2;
390 390
391 if ((!processor) || (!low_speed) || (!high_speed) || (!set_state)) 391 if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
392 return -EINVAL; 392 return -EINVAL;
@@ -415,14 +415,14 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
415 415
416 /* start latency measurement */ 416 /* start latency measurement */
417 if (transition_latency) 417 if (transition_latency)
418 do_gettimeofday(&tv1); 418 tv1 = ktime_get();
419 419
420 /* switch to high state */ 420 /* switch to high state */
421 set_state(SPEEDSTEP_HIGH); 421 set_state(SPEEDSTEP_HIGH);
422 422
423 /* end latency measurement */ 423 /* end latency measurement */
424 if (transition_latency) 424 if (transition_latency)
425 do_gettimeofday(&tv2); 425 tv2 = ktime_get();
426 426
427 *high_speed = speedstep_get_frequency(processor); 427 *high_speed = speedstep_get_frequency(processor);
428 if (!*high_speed) { 428 if (!*high_speed) {
@@ -442,8 +442,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
442 set_state(SPEEDSTEP_LOW); 442 set_state(SPEEDSTEP_LOW);
443 443
444 if (transition_latency) { 444 if (transition_latency) {
445 *transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC + 445 *transition_latency = ktime_to_us(ktime_sub(tv2, tv1));
446 tv2.tv_usec - tv1.tv_usec;
447 pr_debug("transition latency is %u uSec\n", *transition_latency); 446 pr_debug("transition latency is %u uSec\n", *transition_latency);
448 447
449 /* convert uSec to nSec and add 20% for safety reasons */ 448 /* convert uSec to nSec and add 20% for safety reasons */
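
The speedstep-lib change replaces the do_gettimeofday() pair with ktime_get()/ktime_sub()/ktime_to_us(), so the measured transition latency comes from a monotonic clock and cannot go negative across a time-of-day adjustment. A user-space analogue of the same measurement using CLOCK_MONOTONIC (not kernel code, just the equivalent arithmetic):

#include <stdio.h>
#include <time.h>

/* Microseconds between two monotonic timestamps, analogous to
 * ktime_to_us(ktime_sub(tv2, tv1)). */
static long elapsed_us(const struct timespec *t1, const struct timespec *t2)
{
        return (t2->tv_sec - t1->tv_sec) * 1000000L +
               (t2->tv_nsec - t1->tv_nsec) / 1000;
}

int main(void)
{
        struct timespec tv1, tv2;

        clock_gettime(CLOCK_MONOTONIC, &tv1);
        /* ... the state transition being measured would go here ... */
        clock_gettime(CLOCK_MONOTONIC, &tv2);

        printf("transition latency is %ld uSec\n", elapsed_us(&tv1, &tv2));
        return 0;
}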
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 7936dce4b878..1523e2d745eb 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -176,14 +176,12 @@ void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
176 176
177/** 177/**
178 * cpuidle_state_is_coupled - check if a state is part of a coupled set 178 * cpuidle_state_is_coupled - check if a state is part of a coupled set
179 * @dev: struct cpuidle_device for the current cpu
180 * @drv: struct cpuidle_driver for the platform 179 * @drv: struct cpuidle_driver for the platform
181 * @state: index of the target state in drv->states 180 * @state: index of the target state in drv->states
182 * 181 *
183 * Returns true if the target state is coupled with cpus besides this one 182 * Returns true if the target state is coupled with cpus besides this one
184 */ 183 */
185bool cpuidle_state_is_coupled(struct cpuidle_device *dev, 184bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
186 struct cpuidle_driver *drv, int state)
187{ 185{
188 return drv->states[state].flags & CPUIDLE_FLAG_COUPLED; 186 return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
189} 187}
@@ -473,7 +471,7 @@ int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
473 return entered_state; 471 return entered_state;
474 } 472 }
475 entered_state = cpuidle_enter_state(dev, drv, 473 entered_state = cpuidle_enter_state(dev, drv,
476 dev->safe_state_index); 474 drv->safe_state_index);
477 local_irq_disable(); 475 local_irq_disable();
478 } 476 }
479 477
@@ -521,7 +519,7 @@ retry:
521 } 519 }
522 520
523 entered_state = cpuidle_enter_state(dev, drv, 521 entered_state = cpuidle_enter_state(dev, drv,
524 dev->safe_state_index); 522 drv->safe_state_index);
525 local_irq_disable(); 523 local_irq_disable();
526 } 524 }
527 525
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 33253930247f..17a6dc0e2111 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -214,7 +214,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
214 tick_broadcast_exit(); 214 tick_broadcast_exit();
215 } 215 }
216 216
217 if (!cpuidle_state_is_coupled(dev, drv, entered_state)) 217 if (!cpuidle_state_is_coupled(drv, entered_state))
218 local_irq_enable(); 218 local_irq_enable();
219 219
220 diff = ktime_to_us(ktime_sub(time_end, time_start)); 220 diff = ktime_to_us(ktime_sub(time_end, time_start));
@@ -263,7 +263,7 @@ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
263int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, 263int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
264 int index) 264 int index)
265{ 265{
266 if (cpuidle_state_is_coupled(dev, drv, index)) 266 if (cpuidle_state_is_coupled(drv, index))
267 return cpuidle_enter_state_coupled(dev, drv, index); 267 return cpuidle_enter_state_coupled(dev, drv, index);
268 return cpuidle_enter_state(dev, drv, index); 268 return cpuidle_enter_state(dev, drv, index);
269} 269}
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index ee97e9672ecf..178c5ad3d568 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -34,15 +34,14 @@ extern int cpuidle_add_sysfs(struct cpuidle_device *dev);
34extern void cpuidle_remove_sysfs(struct cpuidle_device *dev); 34extern void cpuidle_remove_sysfs(struct cpuidle_device *dev);
35 35
36#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED 36#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
37bool cpuidle_state_is_coupled(struct cpuidle_device *dev, 37bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state);
38 struct cpuidle_driver *drv, int state);
39int cpuidle_enter_state_coupled(struct cpuidle_device *dev, 38int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
40 struct cpuidle_driver *drv, int next_state); 39 struct cpuidle_driver *drv, int next_state);
41int cpuidle_coupled_register_device(struct cpuidle_device *dev); 40int cpuidle_coupled_register_device(struct cpuidle_device *dev);
42void cpuidle_coupled_unregister_device(struct cpuidle_device *dev); 41void cpuidle_coupled_unregister_device(struct cpuidle_device *dev);
43#else 42#else
44static inline bool cpuidle_state_is_coupled(struct cpuidle_device *dev, 43static inline
45 struct cpuidle_driver *drv, int state) 44bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
46{ 45{
47 return false; 46 return false;
48} 47}
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 7d99d13bacd8..f9901f52a225 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support 2 * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
3 * 3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd. 4 * Copyright (c) 2014-2015 Samsung Electronics Co., Ltd.
5 * Author : Chanwoo Choi <cw00.choi@samsung.com> 5 * Author : Chanwoo Choi <cw00.choi@samsung.com>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -82,6 +82,15 @@ struct __exynos_ppmu_events {
82 PPMU_EVENT(mscl), 82 PPMU_EVENT(mscl),
83 PPMU_EVENT(fimd0x), 83 PPMU_EVENT(fimd0x),
84 PPMU_EVENT(fimd1x), 84 PPMU_EVENT(fimd1x),
85
86 /* Only for Exynos5433 SoCs */
87 PPMU_EVENT(d0-cpu),
88 PPMU_EVENT(d0-general),
89 PPMU_EVENT(d0-rt),
90 PPMU_EVENT(d1-cpu),
91 PPMU_EVENT(d1-general),
92 PPMU_EVENT(d1-rt),
93
85 { /* sentinel */ }, 94 { /* sentinel */ },
86}; 95};
87 96
@@ -96,6 +105,9 @@ static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
96 return -EINVAL; 105 return -EINVAL;
97} 106}
98 107
108/*
109 * The devfreq-event ops structure for PPMU v1.1
110 */
99static int exynos_ppmu_disable(struct devfreq_event_dev *edev) 111static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
100{ 112{
101 struct exynos_ppmu *info = devfreq_event_get_drvdata(edev); 113 struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
@@ -200,10 +212,158 @@ static const struct devfreq_event_ops exynos_ppmu_ops = {
200 .get_event = exynos_ppmu_get_event, 212 .get_event = exynos_ppmu_get_event,
201}; 213};
202 214
215/*
216 * The devfreq-event ops structure for PPMU v2.0
217 */
218static int exynos_ppmu_v2_disable(struct devfreq_event_dev *edev)
219{
220 struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
221 u32 pmnc, clear;
222
223 /* Disable all counters */
224 clear = (PPMU_CCNT_MASK | PPMU_PMCNT0_MASK | PPMU_PMCNT1_MASK
225 | PPMU_PMCNT2_MASK | PPMU_PMCNT3_MASK);
226
227 __raw_writel(clear, info->ppmu.base + PPMU_V2_FLAG);
228 __raw_writel(clear, info->ppmu.base + PPMU_V2_INTENC);
229 __raw_writel(clear, info->ppmu.base + PPMU_V2_CNTENC);
230 __raw_writel(clear, info->ppmu.base + PPMU_V2_CNT_RESET);
231
232 __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG0);
233 __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG1);
234 __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG2);
235 __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_RESULT);
236 __raw_writel(0x0, info->ppmu.base + PPMU_V2_CNT_AUTO);
237 __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV0_TYPE);
238 __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV1_TYPE);
239 __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV2_TYPE);
240 __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV3_TYPE);
241 __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_V);
242 __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_A);
243 __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_V);
244 __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_A);
245 __raw_writel(0x0, info->ppmu.base + PPMU_V2_INTERRUPT_RESET);
246
247 /* Disable PPMU */
248 pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
249 pmnc &= ~PPMU_PMNC_ENABLE_MASK;
250 __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
251
252 return 0;
253}
254
255static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev)
256{
257 struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
258 int id = exynos_ppmu_find_ppmu_id(edev);
259 u32 pmnc, cntens;
260
261 /* Enable all counters */
262 cntens = __raw_readl(info->ppmu.base + PPMU_V2_CNTENS);
263 cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
264 __raw_writel(cntens, info->ppmu.base + PPMU_V2_CNTENS);
265
266 /* Set the event of Read/Write data count */
267 switch (id) {
268 case PPMU_PMNCNT0:
269 case PPMU_PMNCNT1:
270 case PPMU_PMNCNT2:
271 __raw_writel(PPMU_V2_RO_DATA_CNT | PPMU_V2_WO_DATA_CNT,
272 info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
273 break;
274 case PPMU_PMNCNT3:
275 __raw_writel(PPMU_V2_EVT3_RW_DATA_CNT,
276 info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
277 break;
278 }
279
280 /* Reset cycle counter/performance counter and enable PPMU */
281 pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
282 pmnc &= ~(PPMU_PMNC_ENABLE_MASK
283 | PPMU_PMNC_COUNTER_RESET_MASK
284 | PPMU_PMNC_CC_RESET_MASK
285 | PPMU_PMNC_CC_DIVIDER_MASK
286 | PPMU_V2_PMNC_START_MODE_MASK);
287 pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
288 pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
289 pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
290 pmnc |= (PPMU_V2_MODE_MANUAL << PPMU_V2_PMNC_START_MODE_SHIFT);
291 __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
292
293 return 0;
294}
295
296static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev,
297 struct devfreq_event_data *edata)
298{
299 struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
300 int id = exynos_ppmu_find_ppmu_id(edev);
301 u32 pmnc, cntenc;
302 u32 pmcnt_high, pmcnt_low;
303 u64 load_count = 0;
304
305 /* Disable PPMU */
306 pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
307 pmnc &= ~PPMU_PMNC_ENABLE_MASK;
308 __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
309
310 /* Read cycle count and performance count */
311 edata->total_count = __raw_readl(info->ppmu.base + PPMU_V2_CCNT);
312
313 switch (id) {
314 case PPMU_PMNCNT0:
315 case PPMU_PMNCNT1:
316 case PPMU_PMNCNT2:
317 load_count = __raw_readl(info->ppmu.base + PPMU_V2_PMNCT(id));
318 break;
319 case PPMU_PMNCNT3:
320 pmcnt_high = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_HIGH);
321 pmcnt_low = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_LOW);
322 load_count = ((u64)(pmcnt_high & 0xff) << 32) + (u64)pmcnt_low;
323 break;
324 }
325 edata->load_count = load_count;
326
327 /* Disable all counters */
328 cntenc = __raw_readl(info->ppmu.base + PPMU_V2_CNTENC);
329 cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
330 __raw_writel(cntenc, info->ppmu.base + PPMU_V2_CNTENC);
331
332 dev_dbg(&edev->dev, "%25s (load: %ld / %ld)\n", edev->desc->name,
333 edata->load_count, edata->total_count);
334 return 0;
335}
336
337static const struct devfreq_event_ops exynos_ppmu_v2_ops = {
338 .disable = exynos_ppmu_v2_disable,
339 .set_event = exynos_ppmu_v2_set_event,
340 .get_event = exynos_ppmu_v2_get_event,
341};
342
343static const struct of_device_id exynos_ppmu_id_match[] = {
344 {
345 .compatible = "samsung,exynos-ppmu",
346 .data = (void *)&exynos_ppmu_ops,
347 }, {
348 .compatible = "samsung,exynos-ppmu-v2",
349 .data = (void *)&exynos_ppmu_v2_ops,
350 },
351 { /* sentinel */ },
352};
353
354static struct devfreq_event_ops *exynos_bus_get_ops(struct device_node *np)
355{
356 const struct of_device_id *match;
357
358 match = of_match_node(exynos_ppmu_id_match, np);
359 return (struct devfreq_event_ops *)match->data;
360}
361
203static int of_get_devfreq_events(struct device_node *np, 362static int of_get_devfreq_events(struct device_node *np,
204 struct exynos_ppmu *info) 363 struct exynos_ppmu *info)
205{ 364{
206 struct devfreq_event_desc *desc; 365 struct devfreq_event_desc *desc;
366 struct devfreq_event_ops *event_ops;
207 struct device *dev = info->dev; 367 struct device *dev = info->dev;
208 struct device_node *events_np, *node; 368 struct device_node *events_np, *node;
209 int i, j, count; 369 int i, j, count;
@@ -214,6 +374,7 @@ static int of_get_devfreq_events(struct device_node *np,
214 "failed to get child node of devfreq-event devices\n"); 374 "failed to get child node of devfreq-event devices\n");
215 return -EINVAL; 375 return -EINVAL;
216 } 376 }
377 event_ops = exynos_bus_get_ops(np);
217 378
218 count = of_get_child_count(events_np); 379 count = of_get_child_count(events_np);
219 desc = devm_kzalloc(dev, sizeof(*desc) * count, GFP_KERNEL); 380 desc = devm_kzalloc(dev, sizeof(*desc) * count, GFP_KERNEL);
@@ -238,7 +399,7 @@ static int of_get_devfreq_events(struct device_node *np,
238 continue; 399 continue;
239 } 400 }
240 401
241 desc[j].ops = &exynos_ppmu_ops; 402 desc[j].ops = event_ops;
242 desc[j].driver_data = info; 403 desc[j].driver_data = info;
243 404
244 of_property_read_string(node, "event-name", &desc[j].name); 405 of_property_read_string(node, "event-name", &desc[j].name);
@@ -354,11 +515,6 @@ static int exynos_ppmu_remove(struct platform_device *pdev)
354 return 0; 515 return 0;
355} 516}
356 517
357static struct of_device_id exynos_ppmu_id_match[] = {
358 { .compatible = "samsung,exynos-ppmu", },
359 { /* sentinel */ },
360};
361
362static struct platform_driver exynos_ppmu_driver = { 518static struct platform_driver exynos_ppmu_driver = {
363 .probe = exynos_ppmu_probe, 519 .probe = exynos_ppmu_probe,
364 .remove = exynos_ppmu_remove, 520 .remove = exynos_ppmu_remove,
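
exynos_ppmu_v2_get_event() reassembles the 64-bit PMCNT3 counter from a 32-bit low word and an 8-bit high word. For that kind of combine the high half must be widened to 64 bits before it is shifted, since shifting a 32-bit value left by 32 is undefined in C. A minimal sketch, with combine_pmcnt3() as a hypothetical helper and the 8-valid-high-bit layout taken from the register definitions in the header hunk below:

#include <stdint.h>
#include <stdio.h>

/* Combine a split counter: 32 low bits plus 8 valid high bits. */
static uint64_t combine_pmcnt3(uint32_t high, uint32_t low)
{
        return ((uint64_t)(high & 0xff) << 32) | low;   /* widen before shifting */
}

int main(void)
{
        printf("0x%llx\n", (unsigned long long)combine_pmcnt3(0x12, 0xdeadbeef));
        return 0;
}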
diff --git a/drivers/devfreq/event/exynos-ppmu.h b/drivers/devfreq/event/exynos-ppmu.h
index 4e831d48c138..05774c449137 100644
--- a/drivers/devfreq/event/exynos-ppmu.h
+++ b/drivers/devfreq/event/exynos-ppmu.h
@@ -26,6 +26,9 @@ enum ppmu_counter {
26 PPMU_PMNCNT_MAX, 26 PPMU_PMNCNT_MAX,
27}; 27};
28 28
29/***
30 * PPMUv1.1 Definitions
31 */
29enum ppmu_event_type { 32enum ppmu_event_type {
30 PPMU_RO_BUSY_CYCLE_CNT = 0x0, 33 PPMU_RO_BUSY_CYCLE_CNT = 0x0,
31 PPMU_WO_BUSY_CYCLE_CNT = 0x1, 34 PPMU_WO_BUSY_CYCLE_CNT = 0x1,
@@ -90,4 +93,71 @@ enum ppmu_reg {
90#define PPMU_PMNCT(x) (PPMU_PMCNT0 + (0x10 * x)) 93#define PPMU_PMNCT(x) (PPMU_PMCNT0 + (0x10 * x))
91#define PPMU_BEVTxSEL(x) (PPMU_BEVT0SEL + (0x100 * x)) 94#define PPMU_BEVTxSEL(x) (PPMU_BEVT0SEL + (0x100 * x))
92 95
96/***
97 * PPMU_V2.0 definitions
98 */
99enum ppmu_v2_mode {
100 PPMU_V2_MODE_MANUAL = 0,
101 PPMU_V2_MODE_AUTO = 1,
102 PPMU_V2_MODE_CIG = 2, /* CIG (Conditional Interrupt Generation) */
103};
104
105enum ppmu_v2_event_type {
106 PPMU_V2_RO_DATA_CNT = 0x4,
107 PPMU_V2_WO_DATA_CNT = 0x5,
108
109 PPMU_V2_EVT3_RW_DATA_CNT = 0x22, /* Only for Event3 */
110};
111
112enum ppmu_V2_reg {
113 /* PPC control register */
114 PPMU_V2_PMNC = 0x04,
115 PPMU_V2_CNTENS = 0x08,
116 PPMU_V2_CNTENC = 0x0c,
117 PPMU_V2_INTENS = 0x10,
118 PPMU_V2_INTENC = 0x14,
119 PPMU_V2_FLAG = 0x18,
120
121 /* Cycle Counter and Performance Event Counter Register */
122 PPMU_V2_CCNT = 0x48,
123 PPMU_V2_PMCNT0 = 0x34,
124 PPMU_V2_PMCNT1 = 0x38,
125 PPMU_V2_PMCNT2 = 0x3c,
126 PPMU_V2_PMCNT3_LOW = 0x40,
127 PPMU_V2_PMCNT3_HIGH = 0x44,
128
129 /* Bus Event Generator */
130 PPMU_V2_CIG_CFG0 = 0x1c,
131 PPMU_V2_CIG_CFG1 = 0x20,
132 PPMU_V2_CIG_CFG2 = 0x24,
133 PPMU_V2_CIG_RESULT = 0x28,
134 PPMU_V2_CNT_RESET = 0x2c,
135 PPMU_V2_CNT_AUTO = 0x30,
136 PPMU_V2_CH_EV0_TYPE = 0x200,
137 PPMU_V2_CH_EV1_TYPE = 0x204,
138 PPMU_V2_CH_EV2_TYPE = 0x208,
139 PPMU_V2_CH_EV3_TYPE = 0x20c,
140 PPMU_V2_SM_ID_V = 0x220,
141 PPMU_V2_SM_ID_A = 0x224,
142 PPMU_V2_SM_OTHERS_V = 0x228,
143 PPMU_V2_SM_OTHERS_A = 0x22c,
144 PPMU_V2_INTERRUPT_RESET = 0x260,
145};
146
147/* PMNC register */
148#define PPMU_V2_PMNC_START_MODE_SHIFT 20
149#define PPMU_V2_PMNC_START_MODE_MASK (0x3 << PPMU_V2_PMNC_START_MODE_SHIFT)
150
151#define PPMU_PMNC_CC_RESET_SHIFT 2
152#define PPMU_PMNC_COUNTER_RESET_SHIFT 1
153#define PPMU_PMNC_ENABLE_SHIFT 0
154#define PPMU_PMNC_START_MODE_MASK BIT(16)
155#define PPMU_PMNC_CC_DIVIDER_MASK BIT(3)
156#define PPMU_PMNC_CC_RESET_MASK BIT(2)
157#define PPMU_PMNC_COUNTER_RESET_MASK BIT(1)
158#define PPMU_PMNC_ENABLE_MASK BIT(0)
159
160#define PPMU_V2_PMNCT(x) (PPMU_V2_PMCNT0 + (0x4 * x))
161#define PPMU_V2_CH_EVx_TYPE(x) (PPMU_V2_CH_EV0_TYPE + (0x4 * x))
162
93#endif /* __EXYNOS_PPMU_H__ */ 163#endif /* __EXYNOS_PPMU_H__ */
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 88d474b78076..bdbbe5bcfb83 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -85,6 +85,14 @@ config INTEL_IOP_ADMA
85 help 85 help
86 Enable support for the Intel(R) IOP Series RAID engines. 86 Enable support for the Intel(R) IOP Series RAID engines.
87 87
88config IDMA64
89 tristate "Intel integrated DMA 64-bit support"
90 select DMA_ENGINE
91 select DMA_VIRTUAL_CHANNELS
92 help
93 Enable DMA support for Intel Low Power Subsystem such as found on
94 Intel Skylake PCH.
95
88source "drivers/dma/dw/Kconfig" 96source "drivers/dma/dw/Kconfig"
89 97
90config AT_HDMAC 98config AT_HDMAC
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 6a4d6f2827da..56ff8c705c00 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_HSU_DMA) += hsu/
14obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o 14obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
15obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ 15obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
16obj-$(CONFIG_MV_XOR) += mv_xor.o 16obj-$(CONFIG_MV_XOR) += mv_xor.o
17obj-$(CONFIG_IDMA64) += idma64.o
17obj-$(CONFIG_DW_DMAC_CORE) += dw/ 18obj-$(CONFIG_DW_DMAC_CORE) += dw/
18obj-$(CONFIG_AT_HDMAC) += at_hdmac.o 19obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
19obj-$(CONFIG_AT_XDMAC) += at_xdmac.o 20obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
new file mode 100644
index 000000000000..18c14e1f1414
--- /dev/null
+++ b/drivers/dma/idma64.c
@@ -0,0 +1,710 @@
1/*
2 * Core driver for the Intel integrated DMA 64-bit
3 *
4 * Copyright (C) 2015 Intel Corporation
5 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/bitops.h>
13#include <linux/delay.h>
14#include <linux/dmaengine.h>
15#include <linux/dma-mapping.h>
16#include <linux/dmapool.h>
17#include <linux/init.h>
18#include <linux/module.h>
19#include <linux/platform_device.h>
20#include <linux/slab.h>
21
22#include "idma64.h"
23
24/* Platform driver name */
25#define DRV_NAME "idma64"
26
27/* For now we support only two channels */
28#define IDMA64_NR_CHAN 2
29
30/* ---------------------------------------------------------------------- */
31
32static struct device *chan2dev(struct dma_chan *chan)
33{
34 return &chan->dev->device;
35}
36
37/* ---------------------------------------------------------------------- */
38
39static void idma64_off(struct idma64 *idma64)
40{
41 unsigned short count = 100;
42
43 dma_writel(idma64, CFG, 0);
44
45 channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
46 channel_clear_bit(idma64, MASK(BLOCK), idma64->all_chan_mask);
47 channel_clear_bit(idma64, MASK(SRC_TRAN), idma64->all_chan_mask);
48 channel_clear_bit(idma64, MASK(DST_TRAN), idma64->all_chan_mask);
49 channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
50
51 do {
52 cpu_relax();
53 } while (dma_readl(idma64, CFG) & IDMA64_CFG_DMA_EN && --count);
54}
55
56static void idma64_on(struct idma64 *idma64)
57{
58 dma_writel(idma64, CFG, IDMA64_CFG_DMA_EN);
59}
60
61/* ---------------------------------------------------------------------- */
62
63static void idma64_chan_init(struct idma64 *idma64, struct idma64_chan *idma64c)
64{
65 u32 cfghi = IDMA64C_CFGH_SRC_PER(1) | IDMA64C_CFGH_DST_PER(0);
66 u32 cfglo = 0;
67
68 /* Enforce FIFO drain when channel is suspended */
69 cfglo |= IDMA64C_CFGL_CH_DRAIN;
70
71 /* Set default burst alignment */
72 cfglo |= IDMA64C_CFGL_DST_BURST_ALIGN | IDMA64C_CFGL_SRC_BURST_ALIGN;
73
74 channel_writel(idma64c, CFG_LO, cfglo);
75 channel_writel(idma64c, CFG_HI, cfghi);
76
77 /* Enable interrupts */
78 channel_set_bit(idma64, MASK(XFER), idma64c->mask);
79 channel_set_bit(idma64, MASK(ERROR), idma64c->mask);
80
81 /*
82 * Enforce the controller to be turned on.
83 *
84 * The iDMA is turned off in ->probe() and loses context during system
85 * suspend / resume cycle. That's why we have to enable it each time we
86 * use it.
87 */
88 idma64_on(idma64);
89}
90
91static void idma64_chan_stop(struct idma64 *idma64, struct idma64_chan *idma64c)
92{
93 channel_clear_bit(idma64, CH_EN, idma64c->mask);
94}
95
96static void idma64_chan_start(struct idma64 *idma64, struct idma64_chan *idma64c)
97{
98 struct idma64_desc *desc = idma64c->desc;
99 struct idma64_hw_desc *hw = &desc->hw[0];
100
101 channel_writeq(idma64c, SAR, 0);
102 channel_writeq(idma64c, DAR, 0);
103
104 channel_writel(idma64c, CTL_HI, IDMA64C_CTLH_BLOCK_TS(~0UL));
105 channel_writel(idma64c, CTL_LO, IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN);
106
107 channel_writeq(idma64c, LLP, hw->llp);
108
109 channel_set_bit(idma64, CH_EN, idma64c->mask);
110}
111
112static void idma64_stop_transfer(struct idma64_chan *idma64c)
113{
114 struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);
115
116 idma64_chan_stop(idma64, idma64c);
117}
118
119static void idma64_start_transfer(struct idma64_chan *idma64c)
120{
121 struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);
122 struct virt_dma_desc *vdesc;
123
124 /* Get the next descriptor */
125 vdesc = vchan_next_desc(&idma64c->vchan);
126 if (!vdesc) {
127 idma64c->desc = NULL;
128 return;
129 }
130
131 list_del(&vdesc->node);
132 idma64c->desc = to_idma64_desc(vdesc);
133
134 /* Configure the channel */
135 idma64_chan_init(idma64, idma64c);
136
137 /* Start the channel with a new descriptor */
138 idma64_chan_start(idma64, idma64c);
139}
140
141/* ---------------------------------------------------------------------- */
142
143static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
144 u32 status_err, u32 status_xfer)
145{
146 struct idma64_chan *idma64c = &idma64->chan[c];
147 struct idma64_desc *desc;
148 unsigned long flags;
149
150 spin_lock_irqsave(&idma64c->vchan.lock, flags);
151 desc = idma64c->desc;
152 if (desc) {
153 if (status_err & (1 << c)) {
154 dma_writel(idma64, CLEAR(ERROR), idma64c->mask);
155 desc->status = DMA_ERROR;
156 } else if (status_xfer & (1 << c)) {
157 dma_writel(idma64, CLEAR(XFER), idma64c->mask);
158 desc->status = DMA_COMPLETE;
159 vchan_cookie_complete(&desc->vdesc);
160 idma64_start_transfer(idma64c);
161 }
162
163 /* idma64_start_transfer() updates idma64c->desc */
164 if (idma64c->desc == NULL || desc->status == DMA_ERROR)
165 idma64_stop_transfer(idma64c);
166 }
167 spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
168}
169
170static irqreturn_t idma64_irq(int irq, void *dev)
171{
172 struct idma64 *idma64 = dev;
173 u32 status = dma_readl(idma64, STATUS_INT);
174 u32 status_xfer;
175 u32 status_err;
176 unsigned short i;
177
178 dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
179
180 /* Check if we have any interrupt from the DMA controller */
181 if (!status)
182 return IRQ_NONE;
183
184 /* Disable interrupts */
185 channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
186 channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
187
188 status_xfer = dma_readl(idma64, RAW(XFER));
189 status_err = dma_readl(idma64, RAW(ERROR));
190
191 for (i = 0; i < idma64->dma.chancnt; i++)
192 idma64_chan_irq(idma64, i, status_err, status_xfer);
193
194 /* Re-enable interrupts */
195 channel_set_bit(idma64, MASK(XFER), idma64->all_chan_mask);
196 channel_set_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
197
198 return IRQ_HANDLED;
199}
200
201/* ---------------------------------------------------------------------- */
202
203static struct idma64_desc *idma64_alloc_desc(unsigned int ndesc)
204{
205 struct idma64_desc *desc;
206
207 desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
208 if (!desc)
209 return NULL;
210
211 desc->hw = kcalloc(ndesc, sizeof(*desc->hw), GFP_NOWAIT);
212 if (!desc->hw) {
213 kfree(desc);
214 return NULL;
215 }
216
217 return desc;
218}
219
220static void idma64_desc_free(struct idma64_chan *idma64c,
221 struct idma64_desc *desc)
222{
223 struct idma64_hw_desc *hw;
224
225 if (desc->ndesc) {
226 unsigned int i = desc->ndesc;
227
228 do {
229 hw = &desc->hw[--i];
230 dma_pool_free(idma64c->pool, hw->lli, hw->llp);
231 } while (i);
232 }
233
234 kfree(desc->hw);
235 kfree(desc);
236}
237
238static void idma64_vdesc_free(struct virt_dma_desc *vdesc)
239{
240 struct idma64_chan *idma64c = to_idma64_chan(vdesc->tx.chan);
241
242 idma64_desc_free(idma64c, to_idma64_desc(vdesc));
243}
244
245static u64 idma64_hw_desc_fill(struct idma64_hw_desc *hw,
246 struct dma_slave_config *config,
247 enum dma_transfer_direction direction, u64 llp)
248{
249 struct idma64_lli *lli = hw->lli;
250 u64 sar, dar;
251 u32 ctlhi = IDMA64C_CTLH_BLOCK_TS(hw->len);
252 u32 ctllo = IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN;
253 u32 src_width, dst_width;
254
255 if (direction == DMA_MEM_TO_DEV) {
256 sar = hw->phys;
257 dar = config->dst_addr;
258 ctllo |= IDMA64C_CTLL_DST_FIX | IDMA64C_CTLL_SRC_INC |
259 IDMA64C_CTLL_FC_M2P;
260 src_width = min_t(u32, 2, __fls(sar | hw->len));
261 dst_width = __fls(config->dst_addr_width);
262 } else { /* DMA_DEV_TO_MEM */
263 sar = config->src_addr;
264 dar = hw->phys;
265 ctllo |= IDMA64C_CTLL_DST_INC | IDMA64C_CTLL_SRC_FIX |
266 IDMA64C_CTLL_FC_P2M;
267 src_width = __fls(config->src_addr_width);
268 dst_width = min_t(u32, 2, __fls(dar | hw->len));
269 }
270
271 lli->sar = sar;
272 lli->dar = dar;
273
274 lli->ctlhi = ctlhi;
275 lli->ctllo = ctllo |
276 IDMA64C_CTLL_SRC_MSIZE(config->src_maxburst) |
277 IDMA64C_CTLL_DST_MSIZE(config->dst_maxburst) |
278 IDMA64C_CTLL_DST_WIDTH(dst_width) |
279 IDMA64C_CTLL_SRC_WIDTH(src_width);
280
281 lli->llp = llp;
282 return hw->llp;
283}
284
285static void idma64_desc_fill(struct idma64_chan *idma64c,
286 struct idma64_desc *desc)
287{
288 struct dma_slave_config *config = &idma64c->config;
289 struct idma64_hw_desc *hw = &desc->hw[desc->ndesc - 1];
290 struct idma64_lli *lli = hw->lli;
291 u64 llp = 0;
292 unsigned int i = desc->ndesc;
293
294 /* Fill the hardware descriptors and link them to a list */
295 do {
296 hw = &desc->hw[--i];
297 llp = idma64_hw_desc_fill(hw, config, desc->direction, llp);
298 desc->length += hw->len;
299 } while (i);
300
301 /* Trigger interrupt after last block */
302 lli->ctllo |= IDMA64C_CTLL_INT_EN;
303}
304
305static struct dma_async_tx_descriptor *idma64_prep_slave_sg(
306 struct dma_chan *chan, struct scatterlist *sgl,
307 unsigned int sg_len, enum dma_transfer_direction direction,
308 unsigned long flags, void *context)
309{
310 struct idma64_chan *idma64c = to_idma64_chan(chan);
311 struct idma64_desc *desc;
312 struct scatterlist *sg;
313 unsigned int i;
314
315 desc = idma64_alloc_desc(sg_len);
316 if (!desc)
317 return NULL;
318
319 for_each_sg(sgl, sg, sg_len, i) {
320 struct idma64_hw_desc *hw = &desc->hw[i];
321
322 /* Allocate DMA capable memory for hardware descriptor */
323 hw->lli = dma_pool_alloc(idma64c->pool, GFP_NOWAIT, &hw->llp);
324 if (!hw->lli) {
325 desc->ndesc = i;
326 idma64_desc_free(idma64c, desc);
327 return NULL;
328 }
329
330 hw->phys = sg_dma_address(sg);
331 hw->len = sg_dma_len(sg);
332 }
333
334 desc->ndesc = sg_len;
335 desc->direction = direction;
336 desc->status = DMA_IN_PROGRESS;
337
338 idma64_desc_fill(idma64c, desc);
339 return vchan_tx_prep(&idma64c->vchan, &desc->vdesc, flags);
340}
341
342static void idma64_issue_pending(struct dma_chan *chan)
343{
344 struct idma64_chan *idma64c = to_idma64_chan(chan);
345 unsigned long flags;
346
347 spin_lock_irqsave(&idma64c->vchan.lock, flags);
348 if (vchan_issue_pending(&idma64c->vchan) && !idma64c->desc)
349 idma64_start_transfer(idma64c);
350 spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
351}
352
353static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
354{
355 struct idma64_desc *desc = idma64c->desc;
356 struct idma64_hw_desc *hw;
357 size_t bytes = desc->length;
358 u64 llp;
359 u32 ctlhi;
360 unsigned int i = 0;
361
362 llp = channel_readq(idma64c, LLP);
363 do {
364 hw = &desc->hw[i];
365 } while ((hw->llp != llp) && (++i < desc->ndesc));
366
367 if (!i)
368 return bytes;
369
370 do {
371 bytes -= desc->hw[--i].len;
372 } while (i);
373
374 ctlhi = channel_readl(idma64c, CTL_HI);
375 return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
376}
377
378static enum dma_status idma64_tx_status(struct dma_chan *chan,
379 dma_cookie_t cookie, struct dma_tx_state *state)
380{
381 struct idma64_chan *idma64c = to_idma64_chan(chan);
382 struct virt_dma_desc *vdesc;
383 enum dma_status status;
384 size_t bytes;
385 unsigned long flags;
386
387 status = dma_cookie_status(chan, cookie, state);
388 if (status == DMA_COMPLETE)
389 return status;
390
391 spin_lock_irqsave(&idma64c->vchan.lock, flags);
392 vdesc = vchan_find_desc(&idma64c->vchan, cookie);
393 if (idma64c->desc && cookie == idma64c->desc->vdesc.tx.cookie) {
394 bytes = idma64_active_desc_size(idma64c);
395 dma_set_residue(state, bytes);
396 status = idma64c->desc->status;
397 } else if (vdesc) {
398 bytes = to_idma64_desc(vdesc)->length;
399 dma_set_residue(state, bytes);
400 }
401 spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
402
403 return status;
404}
405
406static void convert_burst(u32 *maxburst)
407{
408 if (*maxburst)
409 *maxburst = __fls(*maxburst);
410 else
411 *maxburst = 0;
412}
413
414static int idma64_slave_config(struct dma_chan *chan,
415 struct dma_slave_config *config)
416{
417 struct idma64_chan *idma64c = to_idma64_chan(chan);
418
419 /* Check if chan will be configured for slave transfers */
420 if (!is_slave_direction(config->direction))
421 return -EINVAL;
422
423 memcpy(&idma64c->config, config, sizeof(idma64c->config));
424
425 convert_burst(&idma64c->config.src_maxburst);
426 convert_burst(&idma64c->config.dst_maxburst);
427
428 return 0;
429}
430
431static void idma64_chan_deactivate(struct idma64_chan *idma64c)
432{
433 unsigned short count = 100;
434 u32 cfglo;
435
436 cfglo = channel_readl(idma64c, CFG_LO);
437 channel_writel(idma64c, CFG_LO, cfglo | IDMA64C_CFGL_CH_SUSP);
438 do {
439 udelay(1);
440 cfglo = channel_readl(idma64c, CFG_LO);
441 } while (!(cfglo & IDMA64C_CFGL_FIFO_EMPTY) && --count);
442}
443
444static void idma64_chan_activate(struct idma64_chan *idma64c)
445{
446 u32 cfglo;
447
448 cfglo = channel_readl(idma64c, CFG_LO);
449 channel_writel(idma64c, CFG_LO, cfglo & ~IDMA64C_CFGL_CH_SUSP);
450}
451
452static int idma64_pause(struct dma_chan *chan)
453{
454 struct idma64_chan *idma64c = to_idma64_chan(chan);
455 unsigned long flags;
456
457 spin_lock_irqsave(&idma64c->vchan.lock, flags);
458 if (idma64c->desc && idma64c->desc->status == DMA_IN_PROGRESS) {
459 idma64_chan_deactivate(idma64c);
460 idma64c->desc->status = DMA_PAUSED;
461 }
462 spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
463
464 return 0;
465}
466
467static int idma64_resume(struct dma_chan *chan)
468{
469 struct idma64_chan *idma64c = to_idma64_chan(chan);
470 unsigned long flags;
471
472 spin_lock_irqsave(&idma64c->vchan.lock, flags);
473 if (idma64c->desc && idma64c->desc->status == DMA_PAUSED) {
474 idma64c->desc->status = DMA_IN_PROGRESS;
475 idma64_chan_activate(idma64c);
476 }
477 spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
478
479 return 0;
480}
481
482static int idma64_terminate_all(struct dma_chan *chan)
483{
484 struct idma64_chan *idma64c = to_idma64_chan(chan);
485 unsigned long flags;
486 LIST_HEAD(head);
487
488 spin_lock_irqsave(&idma64c->vchan.lock, flags);
489 idma64_chan_deactivate(idma64c);
490 idma64_stop_transfer(idma64c);
491 if (idma64c->desc) {
492 idma64_vdesc_free(&idma64c->desc->vdesc);
493 idma64c->desc = NULL;
494 }
495 vchan_get_all_descriptors(&idma64c->vchan, &head);
496 spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
497
498 vchan_dma_desc_free_list(&idma64c->vchan, &head);
499 return 0;
500}
501
502static int idma64_alloc_chan_resources(struct dma_chan *chan)
503{
504 struct idma64_chan *idma64c = to_idma64_chan(chan);
505
506 /* Create a pool of consistent memory blocks for hardware descriptors */
507 idma64c->pool = dma_pool_create(dev_name(chan2dev(chan)),
508 chan->device->dev,
509 sizeof(struct idma64_lli), 8, 0);
510 if (!idma64c->pool) {
511 dev_err(chan2dev(chan), "No memory for descriptors\n");
512 return -ENOMEM;
513 }
514
515 return 0;
516}
517
518static void idma64_free_chan_resources(struct dma_chan *chan)
519{
520 struct idma64_chan *idma64c = to_idma64_chan(chan);
521
522 vchan_free_chan_resources(to_virt_chan(chan));
523 dma_pool_destroy(idma64c->pool);
524 idma64c->pool = NULL;
525}
526
527/* ---------------------------------------------------------------------- */
528
529#define IDMA64_BUSWIDTHS \
530 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
531 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
532 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
533
534static int idma64_probe(struct idma64_chip *chip)
535{
536 struct idma64 *idma64;
537 unsigned short nr_chan = IDMA64_NR_CHAN;
538 unsigned short i;
539 int ret;
540
541 idma64 = devm_kzalloc(chip->dev, sizeof(*idma64), GFP_KERNEL);
542 if (!idma64)
543 return -ENOMEM;
544
545 idma64->regs = chip->regs;
546 chip->idma64 = idma64;
547
548 idma64->chan = devm_kcalloc(chip->dev, nr_chan, sizeof(*idma64->chan),
549 GFP_KERNEL);
550 if (!idma64->chan)
551 return -ENOMEM;
552
553 idma64->all_chan_mask = (1 << nr_chan) - 1;
554
555 /* Turn off iDMA controller */
556 idma64_off(idma64);
557
558 ret = devm_request_irq(chip->dev, chip->irq, idma64_irq, IRQF_SHARED,
559 dev_name(chip->dev), idma64);
560 if (ret)
561 return ret;
562
563 INIT_LIST_HEAD(&idma64->dma.channels);
564 for (i = 0; i < nr_chan; i++) {
565 struct idma64_chan *idma64c = &idma64->chan[i];
566
567 idma64c->vchan.desc_free = idma64_vdesc_free;
568 vchan_init(&idma64c->vchan, &idma64->dma);
569
570 idma64c->regs = idma64->regs + i * IDMA64_CH_LENGTH;
571 idma64c->mask = BIT(i);
572 }
573
574 dma_cap_set(DMA_SLAVE, idma64->dma.cap_mask);
575 dma_cap_set(DMA_PRIVATE, idma64->dma.cap_mask);
576
577 idma64->dma.device_alloc_chan_resources = idma64_alloc_chan_resources;
578 idma64->dma.device_free_chan_resources = idma64_free_chan_resources;
579
580 idma64->dma.device_prep_slave_sg = idma64_prep_slave_sg;
581
582 idma64->dma.device_issue_pending = idma64_issue_pending;
583 idma64->dma.device_tx_status = idma64_tx_status;
584
585 idma64->dma.device_config = idma64_slave_config;
586 idma64->dma.device_pause = idma64_pause;
587 idma64->dma.device_resume = idma64_resume;
588 idma64->dma.device_terminate_all = idma64_terminate_all;
589
590 idma64->dma.src_addr_widths = IDMA64_BUSWIDTHS;
591 idma64->dma.dst_addr_widths = IDMA64_BUSWIDTHS;
592 idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
593 idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
594
595 idma64->dma.dev = chip->dev;
596
597 ret = dma_async_device_register(&idma64->dma);
598 if (ret)
599 return ret;
600
601 dev_info(chip->dev, "Found Intel integrated DMA 64-bit\n");
602 return 0;
603}
604
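With these callbacks registered, a consumer drives the controller purely through the dmaengine API. A hedged end-to-end sketch; the FIFO address, burst size and buffer are illustrative assumptions, not values taken from this driver:

static int example_tx(struct dma_chan *chan, dma_addr_t buf, size_t len,
		      dma_addr_t fifo)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 8,
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);	/* -> idma64_slave_config() */
	if (ret)
		return ret;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc)					/* -> idma64_prep_slave_sg() */
		return -ENOMEM;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);			/* -> idma64_issue_pending() */
	return 0;
}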
605static int idma64_remove(struct idma64_chip *chip)
606{
607 struct idma64 *idma64 = chip->idma64;
608 unsigned short i;
609
610 dma_async_device_unregister(&idma64->dma);
611
612 /*
613 * Explicitly call devm_free_irq() here instead of relying on devres,
614 * so the interrupt handler can no longer schedule the tasklets that
615 * are killed below.
615 */
616 devm_free_irq(chip->dev, chip->irq, idma64);
617
618 for (i = 0; i < idma64->dma.chancnt; i++) {
619 struct idma64_chan *idma64c = &idma64->chan[i];
620
621 tasklet_kill(&idma64c->vchan.task);
622 }
623
624 return 0;
625}
626
627/* ---------------------------------------------------------------------- */
628
629static int idma64_platform_probe(struct platform_device *pdev)
630{
631 struct idma64_chip *chip;
632 struct device *dev = &pdev->dev;
633 struct resource *mem;
634 int ret;
635
636 chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
637 if (!chip)
638 return -ENOMEM;
639
640 chip->irq = platform_get_irq(pdev, 0);
641 if (chip->irq < 0)
642 return chip->irq;
643
644 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
645 chip->regs = devm_ioremap_resource(dev, mem);
646 if (IS_ERR(chip->regs))
647 return PTR_ERR(chip->regs);
648
649 ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
650 if (ret)
651 return ret;
652
653 chip->dev = dev;
654
655 ret = idma64_probe(chip);
656 if (ret)
657 return ret;
658
659 platform_set_drvdata(pdev, chip);
660 return 0;
661}
662
663static int idma64_platform_remove(struct platform_device *pdev)
664{
665 struct idma64_chip *chip = platform_get_drvdata(pdev);
666
667 return idma64_remove(chip);
668}
669
670#ifdef CONFIG_PM_SLEEP
671
672static int idma64_pm_suspend(struct device *dev)
673{
674 struct platform_device *pdev = to_platform_device(dev);
675 struct idma64_chip *chip = platform_get_drvdata(pdev);
676
677 idma64_off(chip->idma64);
678 return 0;
679}
680
681static int idma64_pm_resume(struct device *dev)
682{
683 struct platform_device *pdev = to_platform_device(dev);
684 struct idma64_chip *chip = platform_get_drvdata(pdev);
685
686 idma64_on(chip->idma64);
687 return 0;
688}
689
690#endif /* CONFIG_PM_SLEEP */
691
692static const struct dev_pm_ops idma64_dev_pm_ops = {
693 SET_SYSTEM_SLEEP_PM_OPS(idma64_pm_suspend, idma64_pm_resume)
694};
695
696static struct platform_driver idma64_platform_driver = {
697 .probe = idma64_platform_probe,
698 .remove = idma64_platform_remove,
699 .driver = {
700 .name = DRV_NAME,
701 .pm = &idma64_dev_pm_ops,
702 },
703};
704
705module_platform_driver(idma64_platform_driver);
706
707MODULE_LICENSE("GPL v2");
708MODULE_DESCRIPTION("iDMA64 core driver");
709MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
710MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h
new file mode 100644
index 000000000000..a4d99685a7c4
--- /dev/null
+++ b/drivers/dma/idma64.h
@@ -0,0 +1,233 @@
1/*
2 * Driver for the Intel integrated DMA 64-bit
3 *
4 * Copyright (C) 2015 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __DMA_IDMA64_H__
12#define __DMA_IDMA64_H__
13
14#include <linux/device.h>
15#include <linux/io.h>
16#include <linux/spinlock.h>
17#include <linux/types.h>
18
19#include "virt-dma.h"
20
21/* Channel registers */
22
23#define IDMA64_CH_SAR 0x00 /* Source Address Register */
24#define IDMA64_CH_DAR 0x08 /* Destination Address Register */
25#define IDMA64_CH_LLP 0x10 /* Linked List Pointer */
26#define IDMA64_CH_CTL_LO 0x18 /* Control Register Low */
27#define IDMA64_CH_CTL_HI 0x1c /* Control Register High */
28#define IDMA64_CH_SSTAT 0x20
29#define IDMA64_CH_DSTAT 0x28
30#define IDMA64_CH_SSTATAR 0x30
31#define IDMA64_CH_DSTATAR 0x38
32#define IDMA64_CH_CFG_LO 0x40 /* Configuration Register Low */
33#define IDMA64_CH_CFG_HI 0x44 /* Configuration Register High */
34#define IDMA64_CH_SGR 0x48
35#define IDMA64_CH_DSR 0x50
36
37#define IDMA64_CH_LENGTH 0x58
38
39/* Bitfields in CTL_LO */
40#define IDMA64C_CTLL_INT_EN (1 << 0) /* irqs enabled? */
41#define IDMA64C_CTLL_DST_WIDTH(x) ((x) << 1) /* bytes per element */
42#define IDMA64C_CTLL_SRC_WIDTH(x) ((x) << 4)
43#define IDMA64C_CTLL_DST_INC (0 << 8) /* DAR update/not */
44#define IDMA64C_CTLL_DST_FIX (1 << 8)
45#define IDMA64C_CTLL_SRC_INC (0 << 10) /* SAR update/not */
46#define IDMA64C_CTLL_SRC_FIX (1 << 10)
47#define IDMA64C_CTLL_DST_MSIZE(x) ((x) << 11) /* burst, #elements */
48#define IDMA64C_CTLL_SRC_MSIZE(x) ((x) << 14)
49#define IDMA64C_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
50#define IDMA64C_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
51#define IDMA64C_CTLL_LLP_D_EN (1 << 27) /* dest block chain */
52#define IDMA64C_CTLL_LLP_S_EN (1 << 28) /* src block chain */
53
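To make the encoding concrete, a hypothetical CTL_LO word for one memory-to-peripheral block; the log2-coded width/burst arguments follow the usual DesignWare convention and are an assumption here, not something this header spells out:

	u32 ctllo = IDMA64C_CTLL_INT_EN |
		    IDMA64C_CTLL_SRC_WIDTH(2) | IDMA64C_CTLL_DST_WIDTH(2) |	/* 4-byte elements */
		    IDMA64C_CTLL_SRC_INC | IDMA64C_CTLL_DST_FIX |		/* memory walks, FIFO stays put */
		    IDMA64C_CTLL_SRC_MSIZE(3) | IDMA64C_CTLL_DST_MSIZE(3) |	/* burst size */
		    IDMA64C_CTLL_FC_M2P |					/* mem-to-periph flow control */
		    IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN;		/* fetch next block via LLP */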
54/* Bitfields in CTL_HI */
55#define IDMA64C_CTLH_BLOCK_TS(x) ((x) & ((1 << 17) - 1))
56#define IDMA64C_CTLH_DONE (1 << 17)
57
58/* Bitfields in CFG_LO */
59#define IDMA64C_CFGL_DST_BURST_ALIGN (1 << 0) /* dst burst align */
60#define IDMA64C_CFGL_SRC_BURST_ALIGN (1 << 1) /* src burst align */
61#define IDMA64C_CFGL_CH_SUSP (1 << 8)
62#define IDMA64C_CFGL_FIFO_EMPTY (1 << 9)
63#define IDMA64C_CFGL_CH_DRAIN (1 << 10) /* drain FIFO */
64#define IDMA64C_CFGL_DST_OPT_BL (1 << 20) /* optimize dst burst length */
65#define IDMA64C_CFGL_SRC_OPT_BL (1 << 21) /* optimize src burst length */
66
67/* Bitfields in CFG_HI */
68#define IDMA64C_CFGH_SRC_PER(x) ((x) << 0) /* src peripheral */
69#define IDMA64C_CFGH_DST_PER(x) ((x) << 4) /* dst peripheral */
70#define IDMA64C_CFGH_RD_ISSUE_THD(x) ((x) << 8)
71#define IDMA64C_CFGH_RW_ISSUE_THD(x) ((x) << 18)
72
73/* Interrupt registers */
74
75#define IDMA64_INT_XFER 0x00
76#define IDMA64_INT_BLOCK 0x08
77#define IDMA64_INT_SRC_TRAN 0x10
78#define IDMA64_INT_DST_TRAN 0x18
79#define IDMA64_INT_ERROR 0x20
80
81#define IDMA64_RAW(x) (0x2c0 + IDMA64_INT_##x) /* r */
82#define IDMA64_STATUS(x) (0x2e8 + IDMA64_INT_##x) /* r (raw & mask) */
83#define IDMA64_MASK(x) (0x310 + IDMA64_INT_##x) /* rw (set = irq enabled) */
84#define IDMA64_CLEAR(x) (0x338 + IDMA64_INT_##x) /* w (ack, affects "raw") */
85
86/* Common registers */
87
88#define IDMA64_STATUS_INT 0x360 /* r */
89#define IDMA64_CFG 0x398
90#define IDMA64_CH_EN 0x3a0
91
92/* Bitfields in CFG */
93#define IDMA64_CFG_DMA_EN (1 << 0)
94
95/* Hardware descriptor for Linked List transfers */
96struct idma64_lli {
97 u64 sar;
98 u64 dar;
99 u64 llp;
100 u32 ctllo;
101 u32 ctlhi;
102 u32 sstat;
103 u32 dstat;
104};
105
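The llp member is what strings hardware descriptors together. A sketch of a two-block chain, assuming (as the driver appears to) that a zero llp marks the final block:

static void example_chain(struct idma64_lli *first, dma_addr_t second_phys,
			  struct idma64_lli *second)
{
	first->llp = second_phys;	/* controller fetches this block next */
	second->llp = 0;		/* no further blocks */
}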
106struct idma64_hw_desc {
107 struct idma64_lli *lli;
108 dma_addr_t llp;
109 dma_addr_t phys;
110 unsigned int len;
111};
112
113struct idma64_desc {
114 struct virt_dma_desc vdesc;
115 enum dma_transfer_direction direction;
116 struct idma64_hw_desc *hw;
117 unsigned int ndesc;
118 size_t length;
119 enum dma_status status;
120};
121
122static inline struct idma64_desc *to_idma64_desc(struct virt_dma_desc *vdesc)
123{
124 return container_of(vdesc, struct idma64_desc, vdesc);
125}
126
127struct idma64_chan {
128 struct virt_dma_chan vchan;
129
130 void __iomem *regs;
131
132 /* hardware configuration */
133 enum dma_transfer_direction direction;
134 unsigned int mask;
135 struct dma_slave_config config;
136
137 void *pool;
138 struct idma64_desc *desc;
139};
140
141static inline struct idma64_chan *to_idma64_chan(struct dma_chan *chan)
142{
143 return container_of(chan, struct idma64_chan, vchan.chan);
144}
145
146#define channel_set_bit(idma64, reg, mask) \
147 dma_writel(idma64, reg, ((mask) << 8) | (mask))
148#define channel_clear_bit(idma64, reg, mask) \
149 dma_writel(idma64, reg, ((mask) << 8) | 0)
150
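A worked example of the masked write these macros produce; that the upper byte acts as a write-enable for the lower byte is the usual DesignWare register layout and is assumed here:

	channel_set_bit(idma64, CH_EN, BIT(2));		/* writes 0x0404 */
	channel_clear_bit(idma64, CH_EN, BIT(2));	/* writes 0x0400 */

Only channel 2 is touched in both cases; the enable bits of the other channels keep their state because their write-enable bits stay zero.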
151static inline u32 idma64c_readl(struct idma64_chan *idma64c, int offset)
152{
153 return readl(idma64c->regs + offset);
154}
155
156static inline void idma64c_writel(struct idma64_chan *idma64c, int offset,
157 u32 value)
158{
159 writel(value, idma64c->regs + offset);
160}
161
162#define channel_readl(idma64c, reg) \
163 idma64c_readl(idma64c, IDMA64_CH_##reg)
164#define channel_writel(idma64c, reg, value) \
165 idma64c_writel(idma64c, IDMA64_CH_##reg, (value))
166
167static inline u64 idma64c_readq(struct idma64_chan *idma64c, int offset)
168{
169 u64 l, h;
170
171 l = idma64c_readl(idma64c, offset);
172 h = idma64c_readl(idma64c, offset + 4);
173
174 return l | (h << 32);
175}
176
177static inline void idma64c_writeq(struct idma64_chan *idma64c, int offset,
178 u64 value)
179{
180 idma64c_writel(idma64c, offset, value);
181 idma64c_writel(idma64c, offset + 4, value >> 32);
182}
183
184#define channel_readq(idma64c, reg) \
185 idma64c_readq(idma64c, IDMA64_CH_##reg)
186#define channel_writeq(idma64c, reg, value) \
187 idma64c_writeq(idma64c, IDMA64_CH_##reg, (value))
188
189struct idma64 {
190 struct dma_device dma;
191
192 void __iomem *regs;
193
194 /* channels */
195 unsigned short all_chan_mask;
196 struct idma64_chan *chan;
197};
198
199static inline struct idma64 *to_idma64(struct dma_device *ddev)
200{
201 return container_of(ddev, struct idma64, dma);
202}
203
204static inline u32 idma64_readl(struct idma64 *idma64, int offset)
205{
206 return readl(idma64->regs + offset);
207}
208
209static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value)
210{
211 writel(value, idma64->regs + offset);
212}
213
214#define dma_readl(idma64, reg) \
215 idma64_readl(idma64, IDMA64_##reg)
216#define dma_writel(idma64, reg, value) \
217 idma64_writel(idma64, IDMA64_##reg, (value))
218
219/**
220 * struct idma64_chip - representation of DesignWare DMA controller hardware
221 * @dev: struct device of the DMA controller
222 * @irq: irq line
223 * @regs: memory mapped I/O space
224 * @idma64: struct idma64 that is filled in by idma64_probe()
225 */
226struct idma64_chip {
227 struct device *dev;
228 int irq;
229 void __iomem *regs;
230 struct idma64 *idma64;
231};
232
233#endif /* __DMA_IDMA64_H__ */
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 2a36a95d95cf..3a3738fe016b 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -591,6 +591,67 @@ static struct cpuidle_state bdw_cstates[] = {
591 .enter = NULL } 591 .enter = NULL }
592}; 592};
593 593
594static struct cpuidle_state skl_cstates[] = {
595 {
596 .name = "C1-SKL",
597 .desc = "MWAIT 0x00",
598 .flags = MWAIT2flg(0x00),
599 .exit_latency = 2,
600 .target_residency = 2,
601 .enter = &intel_idle,
602 .enter_freeze = intel_idle_freeze, },
603 {
604 .name = "C1E-SKL",
605 .desc = "MWAIT 0x01",
606 .flags = MWAIT2flg(0x01),
607 .exit_latency = 10,
608 .target_residency = 20,
609 .enter = &intel_idle,
610 .enter_freeze = intel_idle_freeze, },
611 {
612 .name = "C3-SKL",
613 .desc = "MWAIT 0x10",
614 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
615 .exit_latency = 70,
616 .target_residency = 100,
617 .enter = &intel_idle,
618 .enter_freeze = intel_idle_freeze, },
619 {
620 .name = "C6-SKL",
621 .desc = "MWAIT 0x20",
622 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
623 .exit_latency = 75,
624 .target_residency = 200,
625 .enter = &intel_idle,
626 .enter_freeze = intel_idle_freeze, },
627 {
628 .name = "C7s-SKL",
629 .desc = "MWAIT 0x33",
630 .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED,
631 .exit_latency = 124,
632 .target_residency = 800,
633 .enter = &intel_idle,
634 .enter_freeze = intel_idle_freeze, },
635 {
636 .name = "C8-SKL",
637 .desc = "MWAIT 0x40",
638 .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
639 .exit_latency = 174,
640 .target_residency = 800,
641 .enter = &intel_idle,
642 .enter_freeze = intel_idle_freeze, },
643 {
644 .name = "C10-SKL",
645 .desc = "MWAIT 0x60",
646 .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
647 .exit_latency = 890,
648 .target_residency = 5000,
649 .enter = &intel_idle,
650 .enter_freeze = intel_idle_freeze, },
651 {
652 .enter = NULL }
653};
654
594static struct cpuidle_state atom_cstates[] = { 655static struct cpuidle_state atom_cstates[] = {
595 { 656 {
596 .name = "C1E-ATM", 657 .name = "C1E-ATM",
@@ -810,6 +871,12 @@ static const struct idle_cpu idle_cpu_bdw = {
810 .disable_promotion_to_c1e = true, 871 .disable_promotion_to_c1e = true,
811}; 872};
812 873
874static const struct idle_cpu idle_cpu_skl = {
875 .state_table = skl_cstates,
876 .disable_promotion_to_c1e = true,
877};
878
879
813static const struct idle_cpu idle_cpu_avn = { 880static const struct idle_cpu idle_cpu_avn = {
814 .state_table = avn_cstates, 881 .state_table = avn_cstates,
815 .disable_promotion_to_c1e = true, 882 .disable_promotion_to_c1e = true,
@@ -844,6 +911,8 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
844 ICPU(0x47, idle_cpu_bdw), 911 ICPU(0x47, idle_cpu_bdw),
845 ICPU(0x4f, idle_cpu_bdw), 912 ICPU(0x4f, idle_cpu_bdw),
846 ICPU(0x56, idle_cpu_bdw), 913 ICPU(0x56, idle_cpu_bdw),
914 ICPU(0x4e, idle_cpu_skl),
915 ICPU(0x5e, idle_cpu_skl),
847 {} 916 {}
848}; 917};
849MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); 918MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -965,7 +1034,8 @@ static int __init intel_idle_cpuidle_driver_init(void)
965 for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) { 1034 for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
966 int num_substates, mwait_hint, mwait_cstate; 1035 int num_substates, mwait_hint, mwait_cstate;
967 1036
968 if (cpuidle_state_table[cstate].enter == NULL) 1037 if ((cpuidle_state_table[cstate].enter == NULL) &&
1038 (cpuidle_state_table[cstate].enter_freeze == NULL))
969 break; 1039 break;
970 1040
971 if (cstate + 1 > max_cstate) { 1041 if (cstate + 1 > max_cstate) {
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index e269f084497d..bbec5009cdc2 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -46,6 +46,7 @@ config OMAP_MBOX_KFIFO_SIZE
46config PCC 46config PCC
47 bool "Platform Communication Channel Driver" 47 bool "Platform Communication Channel Driver"
48 depends on ACPI 48 depends on ACPI
49 default n
49 help 50 help
50 ACPI 5.0+ spec defines a generic mode of communication 51 ACPI 5.0+ spec defines a generic mode of communication
51 between the OS and a platform such as the BMC. This medium 52 between the OS and a platform such as the BMC. This medium
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 26d121d1d501..68885a82e704 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -352,4 +352,10 @@ static int __init pcc_init(void)
352 352
353 return 0; 353 return 0;
354} 354}
355device_initcall(pcc_init); 355
356/*
357 * Make PCC init postcore so that users of this mailbox
358 * such as the ACPI Processor driver have it available
359 * at their init.
360 */
361postcore_initcall(pcc_init);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 3f68dd251ce8..076f593f90d3 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -328,6 +328,29 @@ config INTEL_SOC_PMIC
328 thermal, charger and related power management functions 328 thermal, charger and related power management functions
329 on these systems. 329 on these systems.
330 330
331config MFD_INTEL_LPSS
332 tristate
333 select COMMON_CLK
334 select MFD_CORE
335
336config MFD_INTEL_LPSS_ACPI
337 tristate "Intel Low Power Subsystem support in ACPI mode"
338 select MFD_INTEL_LPSS
339 depends on X86 && ACPI
340 help
341 This driver supports Intel Low Power Subsystem (LPSS) devices such as
342 I2C, SPI and HS-UART starting from Intel Sunrisepoint (Intel Skylake
343 PCH) in ACPI mode.
344
345config MFD_INTEL_LPSS_PCI
346 tristate "Intel Low Power Subsystem support in PCI mode"
347 select MFD_INTEL_LPSS
348 depends on X86 && PCI
349 help
350 This driver supports Intel Low Power Subsystem (LPSS) devices such as
351 I2C, SPI and HS-UART starting from Intel Sunrisepoint (Intel Skylake
352 PCH) in PCI mode.
353
331config MFD_INTEL_MSIC 354config MFD_INTEL_MSIC
332 bool "Intel MSIC" 355 bool "Intel MSIC"
333 depends on INTEL_SCU_IPC 356 depends on INTEL_SCU_IPC
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index ea40e076cb61..9d730a2d1878 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -161,6 +161,9 @@ obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
161obj-$(CONFIG_MFD_TPS65090) += tps65090.o 161obj-$(CONFIG_MFD_TPS65090) += tps65090.o
162obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o 162obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
163obj-$(CONFIG_MFD_ATMEL_HLCDC) += atmel-hlcdc.o 163obj-$(CONFIG_MFD_ATMEL_HLCDC) += atmel-hlcdc.o
164obj-$(CONFIG_MFD_INTEL_LPSS) += intel-lpss.o
165obj-$(CONFIG_MFD_INTEL_LPSS_PCI) += intel-lpss-pci.o
166obj-$(CONFIG_MFD_INTEL_LPSS_ACPI) += intel-lpss-acpi.o
164obj-$(CONFIG_MFD_INTEL_MSIC) += intel_msic.o 167obj-$(CONFIG_MFD_INTEL_MSIC) += intel_msic.o
165obj-$(CONFIG_MFD_PALMAS) += palmas.o 168obj-$(CONFIG_MFD_PALMAS) += palmas.o
166obj-$(CONFIG_MFD_VIPERBOARD) += viperboard.o 169obj-$(CONFIG_MFD_VIPERBOARD) += viperboard.o
diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c
new file mode 100644
index 000000000000..0d92d73bfa0e
--- /dev/null
+++ b/drivers/mfd/intel-lpss-acpi.c
@@ -0,0 +1,84 @@
1/*
2 * Intel LPSS ACPI support.
3 *
4 * Copyright (C) 2015, Intel Corporation
5 *
6 * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
7 * Mika Westerberg <mika.westerberg@linux.intel.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/acpi.h>
15#include <linux/ioport.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/pm.h>
19#include <linux/pm_runtime.h>
20#include <linux/platform_device.h>
21
22#include "intel-lpss.h"
23
24static const struct intel_lpss_platform_info spt_info = {
25 .clk_rate = 120000000,
26};
27
28static const struct acpi_device_id intel_lpss_acpi_ids[] = {
29 /* SPT */
30 { "INT3446", (kernel_ulong_t)&spt_info },
31 { "INT3447", (kernel_ulong_t)&spt_info },
32 { }
33};
34MODULE_DEVICE_TABLE(acpi, intel_lpss_acpi_ids);
35
36static int intel_lpss_acpi_probe(struct platform_device *pdev)
37{
38 struct intel_lpss_platform_info *info;
39 const struct acpi_device_id *id;
40
41 id = acpi_match_device(intel_lpss_acpi_ids, &pdev->dev);
42 if (!id)
43 return -ENODEV;
44
45 info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info),
46 GFP_KERNEL);
47 if (!info)
48 return -ENOMEM;
49
50 info->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
51 info->irq = platform_get_irq(pdev, 0);
52
53 pm_runtime_set_active(&pdev->dev);
54 pm_runtime_enable(&pdev->dev);
55
56 return intel_lpss_probe(&pdev->dev, info);
57}
58
59static int intel_lpss_acpi_remove(struct platform_device *pdev)
60{
61 intel_lpss_remove(&pdev->dev);
62 pm_runtime_disable(&pdev->dev);
63
64 return 0;
65}
66
67static INTEL_LPSS_PM_OPS(intel_lpss_acpi_pm_ops);
68
69static struct platform_driver intel_lpss_acpi_driver = {
70 .probe = intel_lpss_acpi_probe,
71 .remove = intel_lpss_acpi_remove,
72 .driver = {
73 .name = "intel-lpss",
74 .acpi_match_table = intel_lpss_acpi_ids,
75 .pm = &intel_lpss_acpi_pm_ops,
76 },
77};
78
79module_platform_driver(intel_lpss_acpi_driver);
80
81MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
82MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
83MODULE_DESCRIPTION("Intel LPSS ACPI driver");
84MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
new file mode 100644
index 000000000000..9236dffeb4d6
--- /dev/null
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -0,0 +1,113 @@
1/*
2 * Intel LPSS PCI support.
3 *
4 * Copyright (C) 2015, Intel Corporation
5 *
6 * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
7 * Mika Westerberg <mika.westerberg@linux.intel.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/ioport.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/pci.h>
18#include <linux/pm.h>
19#include <linux/pm_runtime.h>
20
21#include "intel-lpss.h"
22
23static int intel_lpss_pci_probe(struct pci_dev *pdev,
24 const struct pci_device_id *id)
25{
26 struct intel_lpss_platform_info *info;
27 int ret;
28
29 ret = pcim_enable_device(pdev);
30 if (ret)
31 return ret;
32
33 info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info),
34 GFP_KERNEL);
35 if (!info)
36 return -ENOMEM;
37
38 info->mem = &pdev->resource[0];
39 info->irq = pdev->irq;
40
41 /* Probably it is enough to set this for iDMA capable devices only */
42 pci_set_master(pdev);
43
44 ret = intel_lpss_probe(&pdev->dev, info);
45 if (ret)
46 return ret;
47
48 pm_runtime_put(&pdev->dev);
49 pm_runtime_allow(&pdev->dev);
50
51 return 0;
52}
53
54static void intel_lpss_pci_remove(struct pci_dev *pdev)
55{
56 pm_runtime_forbid(&pdev->dev);
57 pm_runtime_get_sync(&pdev->dev);
58
59 intel_lpss_remove(&pdev->dev);
60}
61
62static INTEL_LPSS_PM_OPS(intel_lpss_pci_pm_ops);
63
64static const struct intel_lpss_platform_info spt_info = {
65 .clk_rate = 120000000,
66};
67
68static const struct intel_lpss_platform_info spt_uart_info = {
69 .clk_rate = 120000000,
70 .clk_con_id = "baudclk",
71};
72
73static const struct pci_device_id intel_lpss_pci_ids[] = {
74 /* SPT-LP */
75 { PCI_VDEVICE(INTEL, 0x9d27), (kernel_ulong_t)&spt_uart_info },
76 { PCI_VDEVICE(INTEL, 0x9d28), (kernel_ulong_t)&spt_uart_info },
77 { PCI_VDEVICE(INTEL, 0x9d29), (kernel_ulong_t)&spt_info },
78 { PCI_VDEVICE(INTEL, 0x9d2a), (kernel_ulong_t)&spt_info },
79 { PCI_VDEVICE(INTEL, 0x9d60), (kernel_ulong_t)&spt_info },
80 { PCI_VDEVICE(INTEL, 0x9d61), (kernel_ulong_t)&spt_info },
81 { PCI_VDEVICE(INTEL, 0x9d62), (kernel_ulong_t)&spt_info },
82 { PCI_VDEVICE(INTEL, 0x9d63), (kernel_ulong_t)&spt_info },
83 { PCI_VDEVICE(INTEL, 0x9d64), (kernel_ulong_t)&spt_info },
84 { PCI_VDEVICE(INTEL, 0x9d65), (kernel_ulong_t)&spt_info },
85 { PCI_VDEVICE(INTEL, 0x9d66), (kernel_ulong_t)&spt_uart_info },
86 /* SPT-H */
87 { PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info },
88 { PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info },
89 { PCI_VDEVICE(INTEL, 0xa129), (kernel_ulong_t)&spt_info },
90 { PCI_VDEVICE(INTEL, 0xa12a), (kernel_ulong_t)&spt_info },
91 { PCI_VDEVICE(INTEL, 0xa160), (kernel_ulong_t)&spt_info },
92 { PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_info },
93 { PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info },
94 { }
95};
96MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
97
98static struct pci_driver intel_lpss_pci_driver = {
99 .name = "intel-lpss",
100 .id_table = intel_lpss_pci_ids,
101 .probe = intel_lpss_pci_probe,
102 .remove = intel_lpss_pci_remove,
103 .driver = {
104 .pm = &intel_lpss_pci_pm_ops,
105 },
106};
107
108module_pci_driver(intel_lpss_pci_driver);
109
110MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
111MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
112MODULE_DESCRIPTION("Intel LPSS PCI driver");
113MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
new file mode 100644
index 000000000000..fdf4d5c1add2
--- /dev/null
+++ b/drivers/mfd/intel-lpss.c
@@ -0,0 +1,524 @@
1/*
2 * Intel Sunrisepoint LPSS core support.
3 *
4 * Copyright (C) 2015, Intel Corporation
5 *
6 * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
7 * Mika Westerberg <mika.westerberg@linux.intel.com>
8 * Heikki Krogerus <heikki.krogerus@linux.intel.com>
9 * Jarkko Nikula <jarkko.nikula@linux.intel.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/clk.h>
17#include <linux/clkdev.h>
18#include <linux/clk-provider.h>
19#include <linux/debugfs.h>
20#include <linux/idr.h>
21#include <linux/ioport.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/mfd/core.h>
25#include <linux/pm_qos.h>
26#include <linux/pm_runtime.h>
27#include <linux/seq_file.h>
28
29#include "intel-lpss.h"
30
31#define LPSS_DEV_OFFSET 0x000
32#define LPSS_DEV_SIZE 0x200
33#define LPSS_PRIV_OFFSET 0x200
34#define LPSS_PRIV_SIZE 0x100
35#define LPSS_IDMA64_OFFSET 0x800
36#define LPSS_IDMA64_SIZE 0x800
37
38/* Offsets from lpss->priv */
39#define LPSS_PRIV_RESETS 0x04
40#define LPSS_PRIV_RESETS_FUNC BIT(2)
41#define LPSS_PRIV_RESETS_IDMA 0x3
42
43#define LPSS_PRIV_ACTIVELTR 0x10
44#define LPSS_PRIV_IDLELTR 0x14
45
46#define LPSS_PRIV_LTR_REQ BIT(15)
47#define LPSS_PRIV_LTR_SCALE_MASK 0xc00
48#define LPSS_PRIV_LTR_SCALE_1US 0x800
49#define LPSS_PRIV_LTR_SCALE_32US 0xc00
50#define LPSS_PRIV_LTR_VALUE_MASK 0x3ff
51
52#define LPSS_PRIV_SSP_REG 0x20
53#define LPSS_PRIV_SSP_REG_DIS_DMA_FIN BIT(0)
54
55#define LPSS_PRIV_REMAP_ADDR_LO 0x40
56#define LPSS_PRIV_REMAP_ADDR_HI 0x44
57
58#define LPSS_PRIV_CAPS 0xfc
59#define LPSS_PRIV_CAPS_NO_IDMA BIT(8)
60#define LPSS_PRIV_CAPS_TYPE_SHIFT 4
61#define LPSS_PRIV_CAPS_TYPE_MASK (0xf << LPSS_PRIV_CAPS_TYPE_SHIFT)
62
63/* This matches the type field in CAPS register */
64enum intel_lpss_dev_type {
65 LPSS_DEV_I2C = 0,
66 LPSS_DEV_UART,
67 LPSS_DEV_SPI,
68};
69
70struct intel_lpss {
71 const struct intel_lpss_platform_info *info;
72 enum intel_lpss_dev_type type;
73 struct clk *clk;
74 struct clk_lookup *clock;
75 const struct mfd_cell *cell;
76 struct device *dev;
77 void __iomem *priv;
78 int devid;
79 u32 caps;
80 u32 active_ltr;
81 u32 idle_ltr;
82 struct dentry *debugfs;
83};
84
85static const struct resource intel_lpss_dev_resources[] = {
86 DEFINE_RES_MEM_NAMED(LPSS_DEV_OFFSET, LPSS_DEV_SIZE, "lpss_dev"),
87 DEFINE_RES_MEM_NAMED(LPSS_PRIV_OFFSET, LPSS_PRIV_SIZE, "lpss_priv"),
88 DEFINE_RES_IRQ(0),
89};
90
91static const struct resource intel_lpss_idma64_resources[] = {
92 DEFINE_RES_MEM(LPSS_IDMA64_OFFSET, LPSS_IDMA64_SIZE),
93 DEFINE_RES_IRQ(0),
94};
95
96#define LPSS_IDMA64_DRIVER_NAME "idma64"
97
98/*
99 * Cells need to be ordered so that the iDMA is created first. This is
100 * because we need to be sure the DMA is available when the host controller
101 * driver is probed.
102 */
103static const struct mfd_cell intel_lpss_idma64_cell = {
104 .name = LPSS_IDMA64_DRIVER_NAME,
105 .num_resources = ARRAY_SIZE(intel_lpss_idma64_resources),
106 .resources = intel_lpss_idma64_resources,
107};
108
109static const struct mfd_cell intel_lpss_i2c_cell = {
110 .name = "i2c_designware",
111 .num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
112 .resources = intel_lpss_dev_resources,
113};
114
115static const struct mfd_cell intel_lpss_uart_cell = {
116 .name = "dw-apb-uart",
117 .num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
118 .resources = intel_lpss_dev_resources,
119};
120
121static const struct mfd_cell intel_lpss_spi_cell = {
122 .name = "pxa2xx-spi",
123 .num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
124 .resources = intel_lpss_dev_resources,
125};
126
127static DEFINE_IDA(intel_lpss_devid_ida);
128static struct dentry *intel_lpss_debugfs;
129
130static int intel_lpss_request_dma_module(const char *name)
131{
132 static bool intel_lpss_dma_requested;
133
134 if (intel_lpss_dma_requested)
135 return 0;
136
137 intel_lpss_dma_requested = true;
138 return request_module("%s", name);
139}
140
141static void intel_lpss_cache_ltr(struct intel_lpss *lpss)
142{
143 lpss->active_ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR);
144 lpss->idle_ltr = readl(lpss->priv + LPSS_PRIV_IDLELTR);
145}
146
147static int intel_lpss_debugfs_add(struct intel_lpss *lpss)
148{
149 struct dentry *dir;
150
151 dir = debugfs_create_dir(dev_name(lpss->dev), intel_lpss_debugfs);
152 if (IS_ERR(dir))
153 return PTR_ERR(dir);
154
155 /* Cache the values into lpss structure */
156 intel_lpss_cache_ltr(lpss);
157
158 debugfs_create_x32("capabilities", S_IRUGO, dir, &lpss->caps);
159 debugfs_create_x32("active_ltr", S_IRUGO, dir, &lpss->active_ltr);
160 debugfs_create_x32("idle_ltr", S_IRUGO, dir, &lpss->idle_ltr);
161
162 lpss->debugfs = dir;
163 return 0;
164}
165
166static void intel_lpss_debugfs_remove(struct intel_lpss *lpss)
167{
168 debugfs_remove_recursive(lpss->debugfs);
169}
170
171static void intel_lpss_ltr_set(struct device *dev, s32 val)
172{
173 struct intel_lpss *lpss = dev_get_drvdata(dev);
174 u32 ltr;
175
176 /*
177 * Program the latency tolerance (LTR) according to what the PM QoS
178 * layer has asked for, or disable it if we were passed a negative
179 * value or PM_QOS_LATENCY_ANY.
180 */
181 ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR);
182
183 if (val == PM_QOS_LATENCY_ANY || val < 0) {
184 ltr &= ~LPSS_PRIV_LTR_REQ;
185 } else {
186 ltr |= LPSS_PRIV_LTR_REQ;
187 ltr &= ~LPSS_PRIV_LTR_SCALE_MASK;
188 ltr &= ~LPSS_PRIV_LTR_VALUE_MASK;
189
190 if (val > LPSS_PRIV_LTR_VALUE_MASK)
191 ltr |= LPSS_PRIV_LTR_SCALE_32US | val >> 5;
192 else
193 ltr |= LPSS_PRIV_LTR_SCALE_1US | val;
194 }
195
196 if (ltr == lpss->active_ltr)
197 return;
198
199 writel(ltr, lpss->priv + LPSS_PRIV_ACTIVELTR);
200 writel(ltr, lpss->priv + LPSS_PRIV_IDLELTR);
201
202 /* Cache the values into lpss structure */
203 intel_lpss_cache_ltr(lpss);
204}
205
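A worked example of the scaling above, assuming the PM QoS latency tolerance value is expressed in microseconds:

	val = 200  : fits in the 10-bit field -> 1 us scale, value 200
	val = 3000 : exceeds 0x3ff            -> 32 us scale, value 3000 >> 5 = 93,
	             i.e. an effective tolerance of 93 * 32 = 2976 us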
206static void intel_lpss_ltr_expose(struct intel_lpss *lpss)
207{
208 lpss->dev->power.set_latency_tolerance = intel_lpss_ltr_set;
209 dev_pm_qos_expose_latency_tolerance(lpss->dev);
210}
211
212static void intel_lpss_ltr_hide(struct intel_lpss *lpss)
213{
214 dev_pm_qos_hide_latency_tolerance(lpss->dev);
215 lpss->dev->power.set_latency_tolerance = NULL;
216}
217
218static int intel_lpss_assign_devs(struct intel_lpss *lpss)
219{
220 unsigned int type;
221
222 type = lpss->caps & LPSS_PRIV_CAPS_TYPE_MASK;
223 type >>= LPSS_PRIV_CAPS_TYPE_SHIFT;
224
225 switch (type) {
226 case LPSS_DEV_I2C:
227 lpss->cell = &intel_lpss_i2c_cell;
228 break;
229 case LPSS_DEV_UART:
230 lpss->cell = &intel_lpss_uart_cell;
231 break;
232 case LPSS_DEV_SPI:
233 lpss->cell = &intel_lpss_spi_cell;
234 break;
235 default:
236 return -ENODEV;
237 }
238
239 lpss->type = type;
240
241 return 0;
242}
243
244static bool intel_lpss_has_idma(const struct intel_lpss *lpss)
245{
246 return (lpss->caps & LPSS_PRIV_CAPS_NO_IDMA) == 0;
247}
248
249static void intel_lpss_set_remap_addr(const struct intel_lpss *lpss)
250{
251 resource_size_t addr = lpss->info->mem->start;
252
253 writel(addr, lpss->priv + LPSS_PRIV_REMAP_ADDR_LO);
254#if BITS_PER_LONG > 32
255 writel(addr >> 32, lpss->priv + LPSS_PRIV_REMAP_ADDR_HI);
256#else
257 writel(0, lpss->priv + LPSS_PRIV_REMAP_ADDR_HI);
258#endif
259}
260
261static void intel_lpss_deassert_reset(const struct intel_lpss *lpss)
262{
263 u32 value = LPSS_PRIV_RESETS_FUNC | LPSS_PRIV_RESETS_IDMA;
264
265 /* Bring out the device from reset */
266 writel(value, lpss->priv + LPSS_PRIV_RESETS);
267}
268
269static void intel_lpss_init_dev(const struct intel_lpss *lpss)
270{
271 u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN;
272
273 intel_lpss_deassert_reset(lpss);
274
275 if (!intel_lpss_has_idma(lpss))
276 return;
277
278 intel_lpss_set_remap_addr(lpss);
279
280 /* Make sure that SPI multiblock DMA transfers are re-enabled */
281 if (lpss->type == LPSS_DEV_SPI)
282 writel(value, lpss->priv + LPSS_PRIV_SSP_REG);
283}
284
285static void intel_lpss_unregister_clock_tree(struct clk *clk)
286{
287 struct clk *parent;
288
289 while (clk) {
290 parent = clk_get_parent(clk);
291 clk_unregister(clk);
292 clk = parent;
293 }
294}
295
296static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
297 const char *devname,
298 struct clk **clk)
299{
300 char name[32];
301 struct clk *tmp = *clk;
302
303 snprintf(name, sizeof(name), "%s-enable", devname);
304 tmp = clk_register_gate(NULL, name, __clk_get_name(tmp), 0,
305 lpss->priv, 0, 0, NULL);
306 if (IS_ERR(tmp))
307 return PTR_ERR(tmp);
308
309 snprintf(name, sizeof(name), "%s-div", devname);
310 tmp = clk_register_fractional_divider(NULL, name, __clk_get_name(tmp),
311 0, lpss->priv, 1, 15, 16, 15, 0,
312 NULL);
313 if (IS_ERR(tmp))
314 return PTR_ERR(tmp);
315 *clk = tmp;
316
317 snprintf(name, sizeof(name), "%s-update", devname);
318 tmp = clk_register_gate(NULL, name, __clk_get_name(tmp),
319 CLK_SET_RATE_PARENT, lpss->priv, 31, 0, NULL);
320 if (IS_ERR(tmp))
321 return PTR_ERR(tmp);
322 *clk = tmp;
323
324 return 0;
325}
326
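The net effect of the registration above is a small per-device clock chain; roughly (bit positions taken from the calls above; that the backing register is the first word of the private space is an assumption):

	fixed-rate root (info->clk_rate, e.g. 120 MHz on Sunrisepoint)
	  -> "<dev>-enable"  gate, bit 0
	  -> "<dev>-div"     fractional divider, m in bits 1..15, n in bits 16..30
	  -> "<dev>-update"  gate, bit 31, handed to the host controller via clkdev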
327static int intel_lpss_register_clock(struct intel_lpss *lpss)
328{
329 const struct mfd_cell *cell = lpss->cell;
330 struct clk *clk;
331 char devname[24];
332 int ret;
333
334 if (!lpss->info->clk_rate)
335 return 0;
336
337 /* Root clock */
338 clk = clk_register_fixed_rate(NULL, dev_name(lpss->dev), NULL,
339 CLK_IS_ROOT, lpss->info->clk_rate);
340 if (IS_ERR(clk))
341 return PTR_ERR(clk);
342
343 snprintf(devname, sizeof(devname), "%s.%d", cell->name, lpss->devid);
344
345 /*
346 * Support the clock divider only if it has some preset value;
347 * otherwise we assume that the divider is not used.
348 */
349 if (lpss->type != LPSS_DEV_I2C) {
350 ret = intel_lpss_register_clock_divider(lpss, devname, &clk);
351 if (ret)
352 goto err_clk_register;
353 }
354
355 ret = -ENOMEM;
356
357 /* Clock for the host controller */
358 lpss->clock = clkdev_create(clk, lpss->info->clk_con_id, "%s", devname);
359 if (!lpss->clock)
360 goto err_clk_register;
361
362 lpss->clk = clk;
363
364 return 0;
365
366err_clk_register:
367 intel_lpss_unregister_clock_tree(clk);
368
369 return ret;
370}
371
372static void intel_lpss_unregister_clock(struct intel_lpss *lpss)
373{
374 if (IS_ERR_OR_NULL(lpss->clk))
375 return;
376
377 clkdev_drop(lpss->clock);
378 intel_lpss_unregister_clock_tree(lpss->clk);
379}
380
381int intel_lpss_probe(struct device *dev,
382 const struct intel_lpss_platform_info *info)
383{
384 struct intel_lpss *lpss;
385 int ret;
386
387 if (!info || !info->mem || info->irq <= 0)
388 return -EINVAL;
389
390 lpss = devm_kzalloc(dev, sizeof(*lpss), GFP_KERNEL);
391 if (!lpss)
392 return -ENOMEM;
393
394 lpss->priv = devm_ioremap(dev, info->mem->start + LPSS_PRIV_OFFSET,
395 LPSS_PRIV_SIZE);
396 if (!lpss->priv)
397 return -ENOMEM;
398
399 lpss->info = info;
400 lpss->dev = dev;
401 lpss->caps = readl(lpss->priv + LPSS_PRIV_CAPS);
402
403 dev_set_drvdata(dev, lpss);
404
405 ret = intel_lpss_assign_devs(lpss);
406 if (ret)
407 return ret;
408
409 intel_lpss_init_dev(lpss);
410
411 lpss->devid = ida_simple_get(&intel_lpss_devid_ida, 0, 0, GFP_KERNEL);
412 if (lpss->devid < 0)
413 return lpss->devid;
414
415 ret = intel_lpss_register_clock(lpss);
416 if (ret)
417 goto err_clk_register;
418
419 intel_lpss_ltr_expose(lpss);
420
421 ret = intel_lpss_debugfs_add(lpss);
422 if (ret)
423 dev_warn(dev, "Failed to create debugfs entries\n");
424
425 if (intel_lpss_has_idma(lpss)) {
426 /*
427 * Ensure the DMA driver is loaded before the host
428 * controller device appears, so that the host controller
429 * driver can request its DMA channels as early as
430 * possible.
431 *
432 * If the DMA module is not there, that's OK as well.
433 */
434 intel_lpss_request_dma_module(LPSS_IDMA64_DRIVER_NAME);
435
436 ret = mfd_add_devices(dev, lpss->devid, &intel_lpss_idma64_cell,
437 1, info->mem, info->irq, NULL);
438 if (ret)
439 dev_warn(dev, "Failed to add %s, fallback to PIO\n",
440 LPSS_IDMA64_DRIVER_NAME);
441 }
442
443 ret = mfd_add_devices(dev, lpss->devid, lpss->cell,
444 1, info->mem, info->irq, NULL);
445 if (ret)
446 goto err_remove_ltr;
447
448 return 0;
449
450err_remove_ltr:
451 intel_lpss_debugfs_remove(lpss);
452 intel_lpss_ltr_hide(lpss);
453
454err_clk_register:
455 ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
456
457 return ret;
458}
459EXPORT_SYMBOL_GPL(intel_lpss_probe);
460
461void intel_lpss_remove(struct device *dev)
462{
463 struct intel_lpss *lpss = dev_get_drvdata(dev);
464
465 mfd_remove_devices(dev);
466 intel_lpss_debugfs_remove(lpss);
467 intel_lpss_ltr_hide(lpss);
468 intel_lpss_unregister_clock(lpss);
469 ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
470}
471EXPORT_SYMBOL_GPL(intel_lpss_remove);
472
473static int resume_lpss_device(struct device *dev, void *data)
474{
475 pm_runtime_resume(dev);
476 return 0;
477}
478
479int intel_lpss_prepare(struct device *dev)
480{
481 /*
482 * Resume both child devices before entering system sleep. This
483 * ensures that they are in a proper state before they get suspended.
484 */
485 device_for_each_child_reverse(dev, NULL, resume_lpss_device);
486 return 0;
487}
488EXPORT_SYMBOL_GPL(intel_lpss_prepare);
489
490int intel_lpss_suspend(struct device *dev)
491{
492 return 0;
493}
494EXPORT_SYMBOL_GPL(intel_lpss_suspend);
495
496int intel_lpss_resume(struct device *dev)
497{
498 struct intel_lpss *lpss = dev_get_drvdata(dev);
499
500 intel_lpss_init_dev(lpss);
501
502 return 0;
503}
504EXPORT_SYMBOL_GPL(intel_lpss_resume);
505
506static int __init intel_lpss_init(void)
507{
508 intel_lpss_debugfs = debugfs_create_dir("intel_lpss", NULL);
509 return 0;
510}
511module_init(intel_lpss_init);
512
513static void __exit intel_lpss_exit(void)
514{
515 debugfs_remove(intel_lpss_debugfs);
516}
517module_exit(intel_lpss_exit);
518
519MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
520MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
521MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
522MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
523MODULE_DESCRIPTION("Intel LPSS core driver");
524MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h
new file mode 100644
index 000000000000..f28cb28a62f8
--- /dev/null
+++ b/drivers/mfd/intel-lpss.h
@@ -0,0 +1,62 @@
1/*
2 * Intel LPSS core support.
3 *
4 * Copyright (C) 2015, Intel Corporation
5 *
6 * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
7 * Mika Westerberg <mika.westerberg@linux.intel.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#ifndef __MFD_INTEL_LPSS_H
15#define __MFD_INTEL_LPSS_H
16
17struct device;
18struct resource;
19
20struct intel_lpss_platform_info {
21 struct resource *mem;
22 int irq;
23 unsigned long clk_rate;
24 const char *clk_con_id;
25};
26
27int intel_lpss_probe(struct device *dev,
28 const struct intel_lpss_platform_info *info);
29void intel_lpss_remove(struct device *dev);
30
31#ifdef CONFIG_PM
32int intel_lpss_prepare(struct device *dev);
33int intel_lpss_suspend(struct device *dev);
34int intel_lpss_resume(struct device *dev);
35
36#ifdef CONFIG_PM_SLEEP
37#define INTEL_LPSS_SLEEP_PM_OPS \
38 .prepare = intel_lpss_prepare, \
39 .suspend = intel_lpss_suspend, \
40 .resume = intel_lpss_resume, \
41 .freeze = intel_lpss_suspend, \
42 .thaw = intel_lpss_resume, \
43 .poweroff = intel_lpss_suspend, \
44 .restore = intel_lpss_resume,
45#endif
46
47#define INTEL_LPSS_RUNTIME_PM_OPS \
48 .runtime_suspend = intel_lpss_suspend, \
49 .runtime_resume = intel_lpss_resume,
50
51#else /* !CONFIG_PM */
52#define INTEL_LPSS_SLEEP_PM_OPS
53#define INTEL_LPSS_RUNTIME_PM_OPS
54#endif /* CONFIG_PM */
55
56#define INTEL_LPSS_PM_OPS(name) \
57const struct dev_pm_ops name = { \
58 INTEL_LPSS_SLEEP_PM_OPS \
59 INTEL_LPSS_RUNTIME_PM_OPS \
60}
61
62#endif /* __MFD_INTEL_LPSS_H */
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 14fd5cbcf0f2..c17635d3e504 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -302,7 +302,7 @@ void mfd_remove_devices(struct device *parent)
302{ 302{
303 atomic_t *cnts = NULL; 303 atomic_t *cnts = NULL;
304 304
305 device_for_each_child(parent, &cnts, mfd_remove_devices_fn); 305 device_for_each_child_reverse(parent, &cnts, mfd_remove_devices_fn);
306 kfree(cnts); 306 kfree(cnts);
307} 307}
308EXPORT_SYMBOL(mfd_remove_devices); 308EXPORT_SYMBOL(mfd_remove_devices);
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
index 7f3d389bd601..a67eeace6a89 100644
--- a/drivers/power/avs/Kconfig
+++ b/drivers/power/avs/Kconfig
@@ -13,7 +13,7 @@ menuconfig POWER_AVS
13 13
14config ROCKCHIP_IODOMAIN 14config ROCKCHIP_IODOMAIN
15 tristate "Rockchip IO domain support" 15 tristate "Rockchip IO domain support"
16 depends on ARCH_ROCKCHIP && OF 16 depends on POWER_AVS && ARCH_ROCKCHIP && OF
17 help 17 help
18 Say y here to enable support io domains on Rockchip SoCs. It is 18 Say y here to enable support io domains on Rockchip SoCs. It is
19 necessary for the io domain setting of the SoC to match the 19 necessary for the io domain setting of the SoC to match the
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
index 3ae35d0590d2..2e300028f0f7 100644
--- a/drivers/power/avs/rockchip-io-domain.c
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -43,6 +43,10 @@
43#define RK3288_SOC_CON2_FLASH0 BIT(7) 43#define RK3288_SOC_CON2_FLASH0 BIT(7)
44#define RK3288_SOC_FLASH_SUPPLY_NUM 2 44#define RK3288_SOC_FLASH_SUPPLY_NUM 2
45 45
46#define RK3368_SOC_CON15 0x43c
47#define RK3368_SOC_CON15_FLASH0 BIT(14)
48#define RK3368_SOC_FLASH_SUPPLY_NUM 2
49
46struct rockchip_iodomain; 50struct rockchip_iodomain;
47 51
48/** 52/**
@@ -158,6 +162,25 @@ static void rk3288_iodomain_init(struct rockchip_iodomain *iod)
158 dev_warn(iod->dev, "couldn't update flash0 ctrl\n"); 162 dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
159} 163}
160 164
165static void rk3368_iodomain_init(struct rockchip_iodomain *iod)
166{
167 int ret;
168 u32 val;
169
170 /* if no flash supply we should leave things alone */
171 if (!iod->supplies[RK3368_SOC_FLASH_SUPPLY_NUM].reg)
172 return;
173
174 /*
175 * set flash0 iodomain to also use this framework
176 * instead of a special gpio.
177 */
178 val = RK3368_SOC_CON15_FLASH0 | (RK3368_SOC_CON15_FLASH0 << 16);
179 ret = regmap_write(iod->grf, RK3368_SOC_CON15, val);
180 if (ret < 0)
181 dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
182}
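For clarity, the value written just above uses the Rockchip GRF convention where the upper 16 bits are a write-enable mask for the lower 16 bits:

	val = RK3368_SOC_CON15_FLASH0 | (RK3368_SOC_CON15_FLASH0 << 16)
	    = 0x4000 | 0x40000000
	    = 0x40004000

so only bit 14 of SOC_CON15 is modified and every other bit keeps its current value.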
183
161/* 184/*
162 * On the rk3188 the io-domains are handled by a shared register with the 185 * On the rk3188 the io-domains are handled by a shared register with the
163 * lower 8 bits still containing the drive-strength settings. 186
@@ -201,6 +224,34 @@ static const struct rockchip_iodomain_soc_data soc_data_rk3288 = {
201 .init = rk3288_iodomain_init, 224 .init = rk3288_iodomain_init,
202}; 225};
203 226
227static const struct rockchip_iodomain_soc_data soc_data_rk3368 = {
228 .grf_offset = 0x900,
229 .supply_names = {
230 NULL, /* reserved */
231 "dvp", /* DVPIO_VDD */
232 "flash0", /* FLASH0_VDD (emmc) */
233 "wifi", /* APIO2_VDD (sdio0) */
234 NULL,
235 "audio", /* APIO3_VDD */
236 "sdcard", /* SDMMC0_VDD (sdmmc) */
237 "gpio30", /* APIO1_VDD */
238 "gpio1830", /* APIO4_VDD (gpujtag) */
239 },
240 .init = rk3368_iodomain_init,
241};
242
243static const struct rockchip_iodomain_soc_data soc_data_rk3368_pmu = {
244 .grf_offset = 0x100,
245 .supply_names = {
246 NULL,
247 NULL,
248 NULL,
249 NULL,
250 "pmu", /*PMU IO domain*/
251 "vop", /*LCDC IO domain*/
252 },
253};
254
204static const struct of_device_id rockchip_iodomain_match[] = { 255static const struct of_device_id rockchip_iodomain_match[] = {
205 { 256 {
206 .compatible = "rockchip,rk3188-io-voltage-domain", 257 .compatible = "rockchip,rk3188-io-voltage-domain",
@@ -210,6 +261,14 @@ static const struct of_device_id rockchip_iodomain_match[] = {
210 .compatible = "rockchip,rk3288-io-voltage-domain", 261 .compatible = "rockchip,rk3288-io-voltage-domain",
211 .data = (void *)&soc_data_rk3288 262 .data = (void *)&soc_data_rk3288
212 }, 263 },
264 {
265 .compatible = "rockchip,rk3368-io-voltage-domain",
266 .data = (void *)&soc_data_rk3368
267 },
268 {
269 .compatible = "rockchip,rk3368-pmu-io-voltage-domain",
270 .data = (void *)&soc_data_rk3368_pmu
271 },
213 { /* sentinel */ }, 272 { /* sentinel */ },
214}; 273};
215 274
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 482b22ddc7b2..5efacd050c7d 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1096,11 +1096,13 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
1096 RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */ 1096 RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
1097 RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */ 1097 RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
1098 RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */ 1098 RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
1099 RAPL_CPU(0x47, rapl_defaults_core),/* Broadwell-H */
1099 RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */ 1100 RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */
1100 RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */ 1101 RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */
1101 RAPL_CPU(0x4A, rapl_defaults_tng),/* Tangier */ 1102 RAPL_CPU(0x4A, rapl_defaults_tng),/* Tangier */
1102 RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */ 1103 RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
1103 RAPL_CPU(0x5A, rapl_defaults_ann),/* Annidale */ 1104 RAPL_CPU(0x5A, rapl_defaults_ann),/* Annidale */
1105 RAPL_CPU(0x5E, rapl_defaults_core),/* Skylake-H/S */
1104 RAPL_CPU(0x57, rapl_defaults_hsw_server),/* Knights Landing */ 1106 RAPL_CPU(0x57, rapl_defaults_hsw_server),/* Knights Landing */
1105 {} 1107 {}
1106}; 1108};
@@ -1145,9 +1147,11 @@ static int rapl_unregister_powercap(void)
1145 pr_debug("remove package, undo power limit on %d: %s\n", 1147 pr_debug("remove package, undo power limit on %d: %s\n",
1146 rp->id, rd->name); 1148 rp->id, rd->name);
1147 rapl_write_data_raw(rd, PL1_ENABLE, 0); 1149 rapl_write_data_raw(rd, PL1_ENABLE, 0);
1148 rapl_write_data_raw(rd, PL2_ENABLE, 0);
1149 rapl_write_data_raw(rd, PL1_CLAMP, 0); 1150 rapl_write_data_raw(rd, PL1_CLAMP, 0);
1150 rapl_write_data_raw(rd, PL2_CLAMP, 0); 1151 if (find_nr_power_limit(rd) > 1) {
1152 rapl_write_data_raw(rd, PL2_ENABLE, 0);
1153 rapl_write_data_raw(rd, PL2_CLAMP, 0);
1154 }
1151 if (rd->id == RAPL_DOMAIN_PACKAGE) { 1155 if (rd->id == RAPL_DOMAIN_PACKAGE) {
1152 rd_package = rd; 1156 rd_package = rd;
1153 continue; 1157 continue;
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 7245611ec963..94813af97f09 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -1668,7 +1668,6 @@ pxafb_freq_policy(struct notifier_block *nb, unsigned long val, void *data)
1668 1668
1669 switch (val) { 1669 switch (val) {
1670 case CPUFREQ_ADJUST: 1670 case CPUFREQ_ADJUST:
1671 case CPUFREQ_INCOMPATIBLE:
1672 pr_debug("min dma period: %d ps, " 1671 pr_debug("min dma period: %d ps, "
1673 "new clock %d kHz\n", pxafb_display_dma_period(var), 1672 "new clock %d kHz\n", pxafb_display_dma_period(var),
1674 policy->max); 1673 policy->max);
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index 89dd7e02197f..dcf774c15889 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -1042,7 +1042,6 @@ sa1100fb_freq_policy(struct notifier_block *nb, unsigned long val,
1042 1042
1043 switch (val) { 1043 switch (val) {
1044 case CPUFREQ_ADJUST: 1044 case CPUFREQ_ADJUST:
1045 case CPUFREQ_INCOMPATIBLE:
1046 dev_dbg(fbi->dev, "min dma period: %d ps, " 1045 dev_dbg(fbi->dev, "min dma period: %d ps, "
1047 "new clock %d kHz\n", sa1100fb_min_dma_period(fbi), 1046 "new clock %d kHz\n", sa1100fb_min_dma_period(fbi),
1048 policy->max); 1047 policy->max);
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 59fc190f1e92..70fa438000af 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -560,11 +560,9 @@ static int __init xen_acpi_processor_init(void)
560 560
561 return 0; 561 return 0;
562err_unregister: 562err_unregister:
563 for_each_possible_cpu(i) { 563 for_each_possible_cpu(i)
564 struct acpi_processor_performance *perf; 564 acpi_processor_unregister_performance(i);
565 perf = per_cpu_ptr(acpi_perf_data, i); 565
566 acpi_processor_unregister_performance(perf, i);
567 }
568err_out: 566err_out:
569 /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */ 567 /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
570 free_acpi_perf_data(); 568 free_acpi_perf_data();
@@ -579,11 +577,9 @@ static void __exit xen_acpi_processor_exit(void)
579 kfree(acpi_ids_done); 577 kfree(acpi_ids_done);
580 kfree(acpi_id_present); 578 kfree(acpi_id_present);
581 kfree(acpi_id_cst_present); 579 kfree(acpi_id_cst_present);
582 for_each_possible_cpu(i) { 580 for_each_possible_cpu(i)
583 struct acpi_processor_performance *perf; 581 acpi_processor_unregister_performance(i);
584 perf = per_cpu_ptr(acpi_perf_data, i); 582
585 acpi_processor_unregister_performance(perf, i);
586 }
587 free_acpi_perf_data(); 583 free_acpi_perf_data();
588} 584}
589 585
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index 6b040f4ddfab..fcf9080eae85 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -147,6 +147,7 @@ struct acpi_pld_info {
147 * (Intended for BIOS use only) 147 * (Intended for BIOS use only)
148 */ 148 */
149#define ACPI_PLD_REV1_BUFFER_SIZE 16 /* For Revision 1 of the buffer (From ACPI spec) */ 149#define ACPI_PLD_REV1_BUFFER_SIZE 16 /* For Revision 1 of the buffer (From ACPI spec) */
150#define ACPI_PLD_REV2_BUFFER_SIZE 20 /* For Revision 2 of the buffer (From ACPI spec) */
150#define ACPI_PLD_BUFFER_SIZE 20 /* For Revision 2 of the buffer (From ACPI spec) */ 151#define ACPI_PLD_BUFFER_SIZE 20 /* For Revision 2 of the buffer (From ACPI spec) */
151 152
152/* First 32-bit dword, bits 0:32 */ 153/* First 32-bit dword, bits 0:32 */
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 03aacfb3e98b..e11611ca72a4 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -136,10 +136,6 @@
136 136
137#define ACPI_ROOT_TABLE_SIZE_INCREMENT 4 137#define ACPI_ROOT_TABLE_SIZE_INCREMENT 4
138 138
139/* Maximum number of While() loop iterations before forced abort */
140
141#define ACPI_MAX_LOOP_ITERATIONS 0xFFFF
142
143/* Maximum sleep allowed via Sleep() operator */ 139/* Maximum sleep allowed via Sleep() operator */
144 140
145#define ACPI_MAX_SLEEP 2000 /* 2000 millisec == two seconds */ 141#define ACPI_MAX_SLEEP 2000 /* 2000 millisec == two seconds */
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 11c3a011dcbf..9f20eb4acaa6 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -192,8 +192,9 @@ struct acpi_exception_info {
192#define AE_AML_BAD_RESOURCE_LENGTH EXCEP_AML (0x001F) 192#define AE_AML_BAD_RESOURCE_LENGTH EXCEP_AML (0x001F)
193#define AE_AML_ILLEGAL_ADDRESS EXCEP_AML (0x0020) 193#define AE_AML_ILLEGAL_ADDRESS EXCEP_AML (0x0020)
194#define AE_AML_INFINITE_LOOP EXCEP_AML (0x0021) 194#define AE_AML_INFINITE_LOOP EXCEP_AML (0x0021)
195#define AE_AML_UNINITIALIZED_NODE EXCEP_AML (0x0022)
195 196
196#define AE_CODE_AML_MAX 0x0021 197#define AE_CODE_AML_MAX 0x0022
197 198
198/* 199/*
199 * Internal exceptions used for control 200 * Internal exceptions used for control
@@ -355,7 +356,9 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = {
355 EXCEP_TXT("AE_AML_ILLEGAL_ADDRESS", 356 EXCEP_TXT("AE_AML_ILLEGAL_ADDRESS",
356 "A memory, I/O, or PCI configuration address is invalid"), 357 "A memory, I/O, or PCI configuration address is invalid"),
357 EXCEP_TXT("AE_AML_INFINITE_LOOP", 358 EXCEP_TXT("AE_AML_INFINITE_LOOP",
358 "An apparent infinite AML While loop, method was aborted") 359 "An apparent infinite AML While loop, method was aborted"),
360 EXCEP_TXT("AE_AML_UNINITIALIZED_NODE",
361 "A namespace node is uninitialized or unresolved")
359}; 362};
360 363
361static const struct acpi_exception_info acpi_gbl_exception_names_ctrl[] = { 364static const struct acpi_exception_info acpi_gbl_exception_names_ctrl[] = {
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index f56de8c5d844..908d4f9c348c 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -88,7 +88,8 @@
88#define ACPI_LV_DEBUG_OBJECT 0x00000002 88#define ACPI_LV_DEBUG_OBJECT 0x00000002
89#define ACPI_LV_INFO 0x00000004 89#define ACPI_LV_INFO 0x00000004
90#define ACPI_LV_REPAIR 0x00000008 90#define ACPI_LV_REPAIR 0x00000008
91#define ACPI_LV_ALL_EXCEPTIONS 0x0000000F 91#define ACPI_LV_TRACE_POINT 0x00000010
92#define ACPI_LV_ALL_EXCEPTIONS 0x0000001F
92 93
93/* Trace verbosity level 1 [Standard Trace Level] */ 94/* Trace verbosity level 1 [Standard Trace Level] */
94 95
@@ -147,6 +148,7 @@
147#define ACPI_DB_DEBUG_OBJECT ACPI_DEBUG_LEVEL (ACPI_LV_DEBUG_OBJECT) 148#define ACPI_DB_DEBUG_OBJECT ACPI_DEBUG_LEVEL (ACPI_LV_DEBUG_OBJECT)
148#define ACPI_DB_INFO ACPI_DEBUG_LEVEL (ACPI_LV_INFO) 149#define ACPI_DB_INFO ACPI_DEBUG_LEVEL (ACPI_LV_INFO)
149#define ACPI_DB_REPAIR ACPI_DEBUG_LEVEL (ACPI_LV_REPAIR) 150#define ACPI_DB_REPAIR ACPI_DEBUG_LEVEL (ACPI_LV_REPAIR)
151#define ACPI_DB_TRACE_POINT ACPI_DEBUG_LEVEL (ACPI_LV_TRACE_POINT)
150#define ACPI_DB_ALL_EXCEPTIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALL_EXCEPTIONS) 152#define ACPI_DB_ALL_EXCEPTIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALL_EXCEPTIONS)
151 153
152/* Trace level -- also used in the global "DebugLevel" */ 154/* Trace level -- also used in the global "DebugLevel" */
@@ -182,6 +184,20 @@
182#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR) 184#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR)
183#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL) 185#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
184 186
187/*
188 * Global trace flags
189 */
190#define ACPI_TRACE_ENABLED ((u32) 4)
191#define ACPI_TRACE_ONESHOT ((u32) 2)
192#define ACPI_TRACE_OPCODE ((u32) 1)
193
194/* Defaults for trace debugging level/layer */
195
196#define ACPI_TRACE_LEVEL_ALL ACPI_LV_ALL
197#define ACPI_TRACE_LAYER_ALL 0x000001FF
198#define ACPI_TRACE_LEVEL_DEFAULT ACPI_LV_TRACE_POINT
199#define ACPI_TRACE_LAYER_DEFAULT ACPI_EXECUTER
200
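A hedged usage sketch of these flags and defaults through acpi_debug_trace(), whose updated prototype appears further down in this pull; the method path is made up for illustration:

	static acpi_status example_arm_trace(void)
	{
		/* One-shot trace of a single control method */
		return acpi_debug_trace("\\_SB.PCI0._INI",
					ACPI_TRACE_LEVEL_DEFAULT,
					ACPI_TRACE_LAYER_DEFAULT,
					ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT);
	}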
185#if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES) 201#if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES)
186/* 202/*
187 * The module name is used primarily for error and debug messages. 203 * The module name is used primarily for error and debug messages.
@@ -432,6 +448,8 @@
432#define ACPI_DUMP_PATHNAME(a, b, c, d) acpi_ns_dump_pathname(a, b, c, d) 448#define ACPI_DUMP_PATHNAME(a, b, c, d) acpi_ns_dump_pathname(a, b, c, d)
433#define ACPI_DUMP_BUFFER(a, b) acpi_ut_debug_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT) 449#define ACPI_DUMP_BUFFER(a, b) acpi_ut_debug_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT)
434 450
451#define ACPI_TRACE_POINT(a, b, c, d) acpi_trace_point (a, b, c, d)
452
435#else /* ACPI_DEBUG_OUTPUT */ 453#else /* ACPI_DEBUG_OUTPUT */
436/* 454/*
437 * This is the non-debug case -- make everything go away, 455 * This is the non-debug case -- make everything go away,
@@ -453,6 +471,7 @@
453#define ACPI_DUMP_PATHNAME(a, b, c, d) 471#define ACPI_DUMP_PATHNAME(a, b, c, d)
454#define ACPI_DUMP_BUFFER(a, b) 472#define ACPI_DUMP_BUFFER(a, b)
455#define ACPI_IS_DEBUG_ENABLED(level, component) 0 473#define ACPI_IS_DEBUG_ENABLED(level, component) 0
474#define ACPI_TRACE_POINT(a, b, c, d)
456 475
457/* Return macros must have a return statement at the minimum */ 476/* Return macros must have a return statement at the minimum */
458 477
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 83061cac719b..5ba8fb64f664 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -16,10 +16,6 @@
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details. 17 * General Public License for more details.
18 * 18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */ 20 */
25 21
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index ea6428b7dacb..29c691265b49 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -16,10 +16,6 @@
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details. 17 * General Public License for more details.
18 * 18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */ 20 */
25 21
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index d02df0a49d98..a54ad1cc990c 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -430,4 +430,10 @@ long acpi_os_get_file_offset(ACPI_FILE file);
430acpi_status acpi_os_set_file_offset(ACPI_FILE file, long offset, u8 from); 430acpi_status acpi_os_set_file_offset(ACPI_FILE file, long offset, u8 from);
431#endif 431#endif
432 432
433#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_trace_point
434void
435acpi_os_trace_point(acpi_trace_event_type type,
436 u8 begin, u8 *aml, char *pathname);
437#endif
438
433#endif /* __ACPIOSXF_H__ */ 439#endif /* __ACPIOSXF_H__ */
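The new acpi_os_trace_point() OSL hook gives the host a chance to forward AML trace events (method entry/exit, opcode and region accesses) to its own tracing facility. Below is a minimal sketch of a host-side stub that only logs the event; it is purely illustrative and not the kernel's actual implementation.

/* Illustrative OSL stub only -- a real host would hook this into its own
 * tracer.  'begin' distinguishes entry (non-zero) from exit events. */
void acpi_os_trace_point(acpi_trace_event_type type, u8 begin,
                         u8 *aml, char *pathname)
{
        switch (type) {
        case ACPI_TRACE_AML_METHOD:
                pr_debug("AML method %s: %s\n",
                         pathname ? pathname : "<unknown>",
                         begin ? "begin" : "end");
                break;
        case ACPI_TRACE_AML_OPCODE:
        case ACPI_TRACE_AML_REGION:
        default:
                pr_debug("AML trace event %d at %p\n", type, aml);
                break;
        }
}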
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index e8ec18a4a634..c33eeabde160 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
46 46
47/* Current ACPICA subsystem version in YYYYMMDD format */ 47/* Current ACPICA subsystem version in YYYYMMDD format */
48 48
49#define ACPI_CA_VERSION 0x20150619 49#define ACPI_CA_VERSION 0x20150818
50 50
51#include <acpi/acconfig.h> 51#include <acpi/acconfig.h>
52#include <acpi/actypes.h> 52#include <acpi/actypes.h>
@@ -251,7 +251,9 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
251 * traced each time it is executed. 251 * traced each time it is executed.
252 */ 252 */
253ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_flags, 0); 253ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_flags, 0);
254ACPI_INIT_GLOBAL(acpi_name, acpi_gbl_trace_method_name, 0); 254ACPI_INIT_GLOBAL(const char *, acpi_gbl_trace_method_name, NULL);
255ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_dbg_level, ACPI_TRACE_LEVEL_DEFAULT);
256ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_dbg_layer, ACPI_TRACE_LAYER_DEFAULT);
255 257
256/* 258/*
257 * Runtime configuration of debug output control masks. We want the debug 259 * Runtime configuration of debug output control masks. We want the debug
@@ -504,7 +506,7 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
504 acpi_object_handler handler, 506 acpi_object_handler handler,
505 void **data)) 507 void **data))
506ACPI_EXTERNAL_RETURN_STATUS(acpi_status 508ACPI_EXTERNAL_RETURN_STATUS(acpi_status
507 acpi_debug_trace(char *name, u32 debug_level, 509 acpi_debug_trace(const char *name, u32 debug_level,
508 u32 debug_layer, u32 flags)) 510 u32 debug_layer, u32 flags))
509 511
510/* 512/*
@@ -907,9 +909,17 @@ ACPI_DBG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(6)
907 const char *module_name, 909 const char *module_name,
908 u32 component_id, 910 u32 component_id,
909 const char *format, ...)) 911 const char *format, ...))
912
913ACPI_DBG_DEPENDENT_RETURN_VOID(void
914 acpi_trace_point(acpi_trace_event_type type,
915 u8 begin,
916 u8 *aml, char *pathname))
910ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1) 917ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
911 void ACPI_INTERNAL_VAR_XFACE 918 void ACPI_INTERNAL_VAR_XFACE
912 acpi_log_error(const char *format, ...)) 919 acpi_log_error(const char *format, ...))
920 acpi_status acpi_initialize_debugger(void);
921
922void acpi_terminate_debugger(void);
913 923
914/* 924/*
915 * Divergences 925 * Divergences
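With acpi_gbl_trace_method_name now a plain string and the trace level/layer defaults added above, a caller can arm method tracing through acpi_debug_trace(). A hedged sketch follows; the method path is only a placeholder, not a method guaranteed to exist on any given machine.

/* Sketch: arm one-shot tracing of a single control method.  Assumes
 * <acpi/acpi.h> is available; the path below is an example only. */
static int example_arm_method_trace(void)
{
        acpi_status status;

        status = acpi_debug_trace("\\_SB.PCI0._INI",
                                  ACPI_TRACE_LEVEL_DEFAULT,
                                  ACPI_TRACE_LAYER_DEFAULT,
                                  ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT);
        return ACPI_FAILURE(status) ? -EINVAL : 0;
}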
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index a948fc586b9b..6e28f544b7b2 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -1186,20 +1186,29 @@ enum acpi_spmi_interface_types {
1186 * December 19, 2014 1186 * December 19, 2014
1187 * 1187 *
1188 * NOTE: There are two versions of the table with the same signature -- 1188 * NOTE: There are two versions of the table with the same signature --
1189 * the client version and the server version. 1189 * the client version and the server version. The common platform_class
1190 * field is used to differentiate the two types of tables.
1190 * 1191 *
1191 ******************************************************************************/ 1192 ******************************************************************************/
1192 1193
1193struct acpi_table_tcpa_client { 1194struct acpi_table_tcpa_hdr {
1194 struct acpi_table_header header; /* Common ACPI table header */ 1195 struct acpi_table_header header; /* Common ACPI table header */
1195 u16 platform_class; 1196 u16 platform_class;
1197};
1198
1199/*
1200 * Values for platform_class above.
1201 * This is how the client and server subtables are differentiated
1202 */
1203#define ACPI_TCPA_CLIENT_TABLE 0
1204#define ACPI_TCPA_SERVER_TABLE 1
1205
1206struct acpi_table_tcpa_client {
1196 u32 minimum_log_length; /* Minimum length for the event log area */ 1207 u32 minimum_log_length; /* Minimum length for the event log area */
1197 u64 log_address; /* Address of the event log area */ 1208 u64 log_address; /* Address of the event log area */
1198}; 1209};
1199 1210
1200struct acpi_table_tcpa_server { 1211struct acpi_table_tcpa_server {
1201 struct acpi_table_header header; /* Common ACPI table header */
1202 u16 platform_class;
1203 u16 reserved; 1212 u16 reserved;
1204 u64 minimum_log_length; /* Minimum length for the event log area */ 1213 u64 minimum_log_length; /* Minimum length for the event log area */
1205 u64 log_address; /* Address of the event log area */ 1214 u64 log_address; /* Address of the event log area */
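Because the common header now carries platform_class, a consumer can pick the client or server layout that follows it. A sketch is below, assuming the usual byte-packed layout of ACPICA table structures; it is not taken from any in-tree driver.

/* Sketch only: return the event-log address for either TCPA flavour.
 * Relies on the client/server body immediately following the packed header. */
static u64 example_tcpa_log_address(struct acpi_table_tcpa_hdr *tcpa)
{
        if (tcpa->platform_class == ACPI_TCPA_CLIENT_TABLE) {
                struct acpi_table_tcpa_client *c =
                        (struct acpi_table_tcpa_client *)(tcpa + 1);
                return c->log_address;
        } else {                /* ACPI_TCPA_SERVER_TABLE */
                struct acpi_table_tcpa_server *s =
                        (struct acpi_table_tcpa_server *)(tcpa + 1);
                return s->log_address;
        }
}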
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index c2a41d223162..f914958c4adb 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -662,6 +662,7 @@ typedef u32 acpi_object_type;
662#define ACPI_TYPE_DEBUG_OBJECT 0x10 662#define ACPI_TYPE_DEBUG_OBJECT 0x10
663 663
664#define ACPI_TYPE_EXTERNAL_MAX 0x10 664#define ACPI_TYPE_EXTERNAL_MAX 0x10
665#define ACPI_NUM_TYPES (ACPI_TYPE_EXTERNAL_MAX + 1)
665 666
666/* 667/*
667 * These are object types that do not map directly to the ACPI 668 * These are object types that do not map directly to the ACPI
@@ -683,6 +684,7 @@ typedef u32 acpi_object_type;
683#define ACPI_TYPE_LOCAL_SCOPE 0x1B /* 1 Name, multiple object_list Nodes */ 684#define ACPI_TYPE_LOCAL_SCOPE 0x1B /* 1 Name, multiple object_list Nodes */
684 685
685#define ACPI_TYPE_NS_NODE_MAX 0x1B /* Last typecode used within a NS Node */ 686#define ACPI_TYPE_NS_NODE_MAX 0x1B /* Last typecode used within a NS Node */
687#define ACPI_TOTAL_TYPES (ACPI_TYPE_NS_NODE_MAX + 1)
686 688
687/* 689/*
688 * These are special object types that never appear in 690 * These are special object types that never appear in
@@ -985,7 +987,8 @@ struct acpi_buffer {
985 */ 987 */
986#define ACPI_FULL_PATHNAME 0 988#define ACPI_FULL_PATHNAME 0
987#define ACPI_SINGLE_NAME 1 989#define ACPI_SINGLE_NAME 1
988#define ACPI_NAME_TYPE_MAX 1 990#define ACPI_FULL_PATHNAME_NO_TRAILING 2
991#define ACPI_NAME_TYPE_MAX 2
989 992
990/* 993/*
991 * Predefined Namespace items 994 * Predefined Namespace items
@@ -1246,6 +1249,14 @@ struct acpi_memory_list {
1246#endif 1249#endif
1247}; 1250};
1248 1251
1252/* Definitions of trace event types */
1253
1254typedef enum {
1255 ACPI_TRACE_AML_METHOD,
1256 ACPI_TRACE_AML_OPCODE,
1257 ACPI_TRACE_AML_REGION
1258} acpi_trace_event_type;
1259
1249/* Definitions of _OSI support */ 1260/* Definitions of _OSI support */
1250 1261
1251#define ACPI_VENDOR_STRINGS 0x01 1262#define ACPI_VENDOR_STRINGS 0x01
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 3cedd43943f4..ec00e2bb029e 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -70,13 +70,14 @@
70 70
71#ifdef ACPI_ASL_COMPILER 71#ifdef ACPI_ASL_COMPILER
72#define ACPI_APPLICATION 72#define ACPI_APPLICATION
73#define ACPI_DISASSEMBLER
74#define ACPI_DEBUG_OUTPUT 73#define ACPI_DEBUG_OUTPUT
75#define ACPI_CONSTANT_EVAL_ONLY 74#define ACPI_CONSTANT_EVAL_ONLY
76#define ACPI_LARGE_NAMESPACE_NODE 75#define ACPI_LARGE_NAMESPACE_NODE
77#define ACPI_DATA_TABLE_DISASSEMBLY 76#define ACPI_DATA_TABLE_DISASSEMBLY
78#define ACPI_SINGLE_THREADED 77#define ACPI_SINGLE_THREADED
79#define ACPI_32BIT_PHYSICAL_ADDRESS 78#define ACPI_32BIT_PHYSICAL_ADDRESS
79
80#define ACPI_DISASSEMBLER 1
80#endif 81#endif
81 82
82/* acpi_exec configuration. Multithreaded with full AML debugger */ 83/* acpi_exec configuration. Multithreaded with full AML debugger */
@@ -89,8 +90,8 @@
89#endif 90#endif
90 91
91/* 92/*
92 * acpi_bin/acpi_dump/acpi_help/acpi_names/acpi_src/acpi_xtract/Example configuration. 93 * acpi_bin/acpi_dump/acpi_help/acpi_names/acpi_src/acpi_xtract/Example
93 * All single threaded. 94 * configuration. All single threaded.
94 */ 95 */
95#if (defined ACPI_BIN_APP) || \ 96#if (defined ACPI_BIN_APP) || \
96 (defined ACPI_DUMP_APP) || \ 97 (defined ACPI_DUMP_APP) || \
@@ -123,7 +124,7 @@
123#define ACPI_USE_NATIVE_RSDP_POINTER 124#define ACPI_USE_NATIVE_RSDP_POINTER
124#endif 125#endif
125 126
126/* acpi_dump configuration. Native mapping used if provied by OSPMs */ 127/* acpi_dump configuration. Native mapping used if provided by the host */
127 128
128#ifdef ACPI_DUMP_APP 129#ifdef ACPI_DUMP_APP
129#define ACPI_USE_NATIVE_MEMORY_MAPPING 130#define ACPI_USE_NATIVE_MEMORY_MAPPING
@@ -151,12 +152,12 @@
151#define ACPI_USE_LOCAL_CACHE 152#define ACPI_USE_LOCAL_CACHE
152#endif 153#endif
153 154
154/* Common debug support */ 155/* Common debug/disassembler support */
155 156
156#ifdef ACPI_FULL_DEBUG 157#ifdef ACPI_FULL_DEBUG
157#define ACPI_DEBUGGER
158#define ACPI_DEBUG_OUTPUT 158#define ACPI_DEBUG_OUTPUT
159#define ACPI_DISASSEMBLER 159#define ACPI_DEBUGGER 1
160#define ACPI_DISASSEMBLER 1
160#endif 161#endif
161 162
162 163
@@ -323,8 +324,8 @@
323 * ACPI_USE_STANDARD_HEADERS - Define this if linking to a C library and 324 * ACPI_USE_STANDARD_HEADERS - Define this if linking to a C library and
324 * the standard header files may be used. 325 * the standard header files may be used.
325 * 326 *
326 * The ACPICA subsystem only uses low level C library functions that do not call 327 * The ACPICA subsystem only uses low level C library functions that do not
327 * operating system services and may therefore be inlined in the code. 328 * call operating system services and may therefore be inlined in the code.
328 * 329 *
329 * It may be necessary to tailor these include files to the target 330 * It may be necessary to tailor these include files to the target
330 * generation environment. 331 * generation environment.
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 0a7dc8e583b1..2f296cb5f7e2 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -56,6 +56,9 @@
56#if defined(_LINUX) || defined(__linux__) 56#if defined(_LINUX) || defined(__linux__)
57#include <acpi/platform/aclinuxex.h> 57#include <acpi/platform/aclinuxex.h>
58 58
59#elif defined(WIN32)
60#include "acwinex.h"
61
59#elif defined(_AED_EFI) 62#elif defined(_AED_EFI)
60#include "acefiex.h" 63#include "acefiex.h"
61 64
diff --git a/include/acpi/platform/acmsvcex.h b/include/acpi/platform/acmsvcex.h
new file mode 100644
index 000000000000..b64797488775
--- /dev/null
+++ b/include/acpi/platform/acmsvcex.h
@@ -0,0 +1,54 @@
1/******************************************************************************
2 *
3 * Name: acmsvcex.h - Extra VC specific defines, etc.
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#ifndef __ACMSVCEX_H__
45#define __ACMSVCEX_H__
46
47/* Debug support. */
48
49#ifdef _DEBUG
50#define _CRTDBG_MAP_ALLOC /* Enables specific file/lineno for leaks */
51#include <crtdbg.h>
52#endif
53
54#endif /* __ACMSVCEX_H__ */
diff --git a/include/acpi/platform/acwinex.h b/include/acpi/platform/acwinex.h
new file mode 100644
index 000000000000..6ed1d713509b
--- /dev/null
+++ b/include/acpi/platform/acwinex.h
@@ -0,0 +1,49 @@
1/******************************************************************************
2 *
3 * Name: acwinex.h - Extra OS specific defines, etc.
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#ifndef __ACWINEX_H__
45#define __ACWINEX_H__
46
47/* Windows uses VC */
48
49#endif /* __ACWINEX_H__ */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 4188a4d3b597..ff5f135f16b1 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -228,10 +228,7 @@ extern int acpi_processor_preregister_performance(struct
228 228
229extern int acpi_processor_register_performance(struct acpi_processor_performance 229extern int acpi_processor_register_performance(struct acpi_processor_performance
230 *performance, unsigned int cpu); 230 *performance, unsigned int cpu);
231extern void acpi_processor_unregister_performance(struct 231extern void acpi_processor_unregister_performance(unsigned int cpu);
232 acpi_processor_performance
233 *performance,
234 unsigned int cpu);
235 232
236/* note: this locks both the calling module and the processor module 233/* note: this locks both the calling module and the processor module
237 if a _PPC object exists, rmmod is disallowed then */ 234 if a _PPC object exists, rmmod is disallowed then */
@@ -318,6 +315,7 @@ int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
318void acpi_processor_set_pdc(acpi_handle handle); 315void acpi_processor_set_pdc(acpi_handle handle);
319 316
320/* in processor_throttling.c */ 317/* in processor_throttling.c */
318#ifdef CONFIG_ACPI_CPU_FREQ_PSS
321int acpi_processor_tstate_has_changed(struct acpi_processor *pr); 319int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
322int acpi_processor_get_throttling_info(struct acpi_processor *pr); 320int acpi_processor_get_throttling_info(struct acpi_processor *pr);
323extern int acpi_processor_set_throttling(struct acpi_processor *pr, 321extern int acpi_processor_set_throttling(struct acpi_processor *pr,
@@ -330,14 +328,59 @@ extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
330 unsigned long action); 328 unsigned long action);
331extern const struct file_operations acpi_processor_throttling_fops; 329extern const struct file_operations acpi_processor_throttling_fops;
332extern void acpi_processor_throttling_init(void); 330extern void acpi_processor_throttling_init(void);
331#else
332static inline int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
333{
334 return 0;
335}
336
337static inline int acpi_processor_get_throttling_info(struct acpi_processor *pr)
338{
339 return -ENODEV;
340}
341
342static inline int acpi_processor_set_throttling(struct acpi_processor *pr,
343 int state, bool force)
344{
345 return -ENODEV;
346}
347
348static inline void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
349 unsigned long action) {}
350
351static inline void acpi_processor_throttling_init(void) {}
352#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
353
333/* in processor_idle.c */ 354/* in processor_idle.c */
355extern struct cpuidle_driver acpi_idle_driver;
356#ifdef CONFIG_ACPI_PROCESSOR_IDLE
334int acpi_processor_power_init(struct acpi_processor *pr); 357int acpi_processor_power_init(struct acpi_processor *pr);
335int acpi_processor_power_exit(struct acpi_processor *pr); 358int acpi_processor_power_exit(struct acpi_processor *pr);
336int acpi_processor_cst_has_changed(struct acpi_processor *pr); 359int acpi_processor_cst_has_changed(struct acpi_processor *pr);
337int acpi_processor_hotplug(struct acpi_processor *pr); 360int acpi_processor_hotplug(struct acpi_processor *pr);
338extern struct cpuidle_driver acpi_idle_driver; 361#else
362static inline int acpi_processor_power_init(struct acpi_processor *pr)
363{
364 return -ENODEV;
365}
366
367static inline int acpi_processor_power_exit(struct acpi_processor *pr)
368{
369 return -ENODEV;
370}
371
372static inline int acpi_processor_cst_has_changed(struct acpi_processor *pr)
373{
374 return -ENODEV;
375}
339 376
340#ifdef CONFIG_PM_SLEEP 377static inline int acpi_processor_hotplug(struct acpi_processor *pr)
378{
379 return -ENODEV;
380}
381#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
382
383#if defined(CONFIG_PM_SLEEP) & defined(CONFIG_ACPI_PROCESSOR_IDLE)
341void acpi_processor_syscore_init(void); 384void acpi_processor_syscore_init(void);
342void acpi_processor_syscore_exit(void); 385void acpi_processor_syscore_exit(void);
343#else 386#else
@@ -348,7 +391,7 @@ static inline void acpi_processor_syscore_exit(void) {}
348/* in processor_thermal.c */ 391/* in processor_thermal.c */
349int acpi_processor_get_limit_info(struct acpi_processor *pr); 392int acpi_processor_get_limit_info(struct acpi_processor *pr);
350extern const struct thermal_cooling_device_ops processor_cooling_ops; 393extern const struct thermal_cooling_device_ops processor_cooling_ops;
351#ifdef CONFIG_CPU_FREQ 394#if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ)
352void acpi_thermal_cpufreq_init(void); 395void acpi_thermal_cpufreq_init(void);
353void acpi_thermal_cpufreq_exit(void); 396void acpi_thermal_cpufreq_exit(void);
354#else 397#else
@@ -360,6 +403,6 @@ static inline void acpi_thermal_cpufreq_exit(void)
360{ 403{
361 return; 404 return;
362} 405}
363#endif 406#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
364 407
365#endif 408#endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index d2445fa9999f..7235c4851460 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -15,10 +15,6 @@
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 */ 19 */
24 20
@@ -221,7 +217,7 @@ struct pci_dev;
221 217
222int acpi_pci_irq_enable (struct pci_dev *dev); 218int acpi_pci_irq_enable (struct pci_dev *dev);
223void acpi_penalize_isa_irq(int irq, int active); 219void acpi_penalize_isa_irq(int irq, int active);
224 220void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
225void acpi_pci_irq_disable (struct pci_dev *dev); 221void acpi_pci_irq_disable (struct pci_dev *dev);
226 222
227extern int ec_read(u8 addr, u8 *val); 223extern int ec_read(u8 addr, u8 *val);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index bde1e567b3a9..430efcbea48e 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -51,11 +51,9 @@ struct cpufreq_cpuinfo {
51 unsigned int transition_latency; 51 unsigned int transition_latency;
52}; 52};
53 53
54struct cpufreq_real_policy { 54struct cpufreq_user_policy {
55 unsigned int min; /* in kHz */ 55 unsigned int min; /* in kHz */
56 unsigned int max; /* in kHz */ 56 unsigned int max; /* in kHz */
57 unsigned int policy; /* see above */
58 struct cpufreq_governor *governor; /* see below */
59}; 57};
60 58
61struct cpufreq_policy { 59struct cpufreq_policy {
@@ -88,7 +86,7 @@ struct cpufreq_policy {
88 struct work_struct update; /* if update_policy() needs to be 86 struct work_struct update; /* if update_policy() needs to be
89 * called, but you're in IRQ context */ 87 * called, but you're in IRQ context */
90 88
91 struct cpufreq_real_policy user_policy; 89 struct cpufreq_user_policy user_policy;
92 struct cpufreq_frequency_table *freq_table; 90 struct cpufreq_frequency_table *freq_table;
93 91
94 struct list_head policy_list; 92 struct list_head policy_list;
@@ -369,11 +367,10 @@ static inline void cpufreq_resume(void) {}
369 367
370/* Policy Notifiers */ 368/* Policy Notifiers */
371#define CPUFREQ_ADJUST (0) 369#define CPUFREQ_ADJUST (0)
372#define CPUFREQ_INCOMPATIBLE (1) 370#define CPUFREQ_NOTIFY (1)
373#define CPUFREQ_NOTIFY (2) 371#define CPUFREQ_START (2)
374#define CPUFREQ_START (3) 372#define CPUFREQ_CREATE_POLICY (3)
375#define CPUFREQ_CREATE_POLICY (4) 373#define CPUFREQ_REMOVE_POLICY (4)
376#define CPUFREQ_REMOVE_POLICY (5)
377 374
378#ifdef CONFIG_CPU_FREQ 375#ifdef CONFIG_CPU_FREQ
379int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); 376int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
@@ -578,6 +575,8 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
578int cpufreq_boost_trigger_state(int state); 575int cpufreq_boost_trigger_state(int state);
579int cpufreq_boost_supported(void); 576int cpufreq_boost_supported(void);
580int cpufreq_boost_enabled(void); 577int cpufreq_boost_enabled(void);
578int cpufreq_enable_boost_support(void);
579bool policy_has_boost_freq(struct cpufreq_policy *policy);
581#else 580#else
582static inline int cpufreq_boost_trigger_state(int state) 581static inline int cpufreq_boost_trigger_state(int state)
583{ 582{
@@ -591,12 +590,23 @@ static inline int cpufreq_boost_enabled(void)
591{ 590{
592 return 0; 591 return 0;
593} 592}
593
594static inline int cpufreq_enable_boost_support(void)
595{
596 return -EINVAL;
597}
598
599static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
600{
601 return false;
602}
594#endif 603#endif
595/* the following funtion is for cpufreq core use only */ 604/* the following funtion is for cpufreq core use only */
596struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); 605struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
597 606
598/* the following are really really optional */ 607/* the following are really really optional */
599extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; 608extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
609extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
600extern struct freq_attr *cpufreq_generic_attr[]; 610extern struct freq_attr *cpufreq_generic_attr[];
601int cpufreq_table_validate_and_show(struct cpufreq_policy *policy, 611int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
602 struct cpufreq_frequency_table *table); 612 struct cpufreq_frequency_table *table);
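The boost additions let a driver advertise boost frequencies only when its frequency table actually contains them. A sketch of how a driver's ->init() might use the new helpers, loosely following the cpufreq-dt pattern; the names here are illustrative.

static struct freq_attr *example_cpufreq_attrs[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,   /* filled with scaling_boost_freqs when boost is usable */
        NULL,
};

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
        int ret;

        /* ... set up policy->freq_table, clocks, etc. ... */

        if (policy_has_boost_freq(policy)) {
                ret = cpufreq_enable_boost_support();
                if (ret)
                        return ret;
                example_cpufreq_attrs[1] =
                        &cpufreq_freq_attr_scaling_boost_freqs;
        }
        return 0;
}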
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index d075d34279df..786ad32631a6 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -84,7 +84,6 @@ struct cpuidle_device {
84 struct list_head device_list; 84 struct list_head device_list;
85 85
86#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED 86#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
87 int safe_state_index;
88 cpumask_t coupled_cpus; 87 cpumask_t coupled_cpus;
89 struct cpuidle_coupled *coupled; 88 struct cpuidle_coupled *coupled;
90#endif 89#endif
diff --git a/include/linux/device.h b/include/linux/device.h
index 49fdcc4b8adf..5d7bc6349930 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -983,6 +983,8 @@ extern int __must_check device_add(struct device *dev);
983extern void device_del(struct device *dev); 983extern void device_del(struct device *dev);
984extern int device_for_each_child(struct device *dev, void *data, 984extern int device_for_each_child(struct device *dev, void *data,
985 int (*fn)(struct device *dev, void *data)); 985 int (*fn)(struct device *dev, void *data));
986extern int device_for_each_child_reverse(struct device *dev, void *data,
987 int (*fn)(struct device *dev, void *data));
986extern struct device *device_find_child(struct device *dev, void *data, 988extern struct device *device_find_child(struct device *dev, void *data,
987 int (*match)(struct device *dev, void *data)); 989 int (*match)(struct device *dev, void *data));
988extern int device_rename(struct device *dev, const char *new_name); 990extern int device_rename(struct device *dev, const char *new_name);
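device_for_each_child_reverse() walks a device's children in the opposite order to device_for_each_child(), which is convenient for teardown paths. A minimal sketch with a hypothetical callback:

/* Hypothetical callback -- returning non-zero stops the iteration. */
static int example_report_child(struct device *child, void *data)
{
        dev_info(child, "visited in reverse creation order\n");
        return 0;
}

/* ... somewhere with a valid 'parent' device: */
device_for_each_child_reverse(parent, NULL, example_report_child);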
diff --git a/include/linux/klist.h b/include/linux/klist.h
index 61e5b723ae73..953f283f8451 100644
--- a/include/linux/klist.h
+++ b/include/linux/klist.h
@@ -63,6 +63,7 @@ extern void klist_iter_init(struct klist *k, struct klist_iter *i);
63extern void klist_iter_init_node(struct klist *k, struct klist_iter *i, 63extern void klist_iter_init_node(struct klist *k, struct klist_iter *i,
64 struct klist_node *n); 64 struct klist_node *n);
65extern void klist_iter_exit(struct klist_iter *i); 65extern void klist_iter_exit(struct klist_iter *i);
66extern struct klist_node *klist_prev(struct klist_iter *i);
66extern struct klist_node *klist_next(struct klist_iter *i); 67extern struct klist_node *klist_next(struct klist_iter *i);
67 68
68#endif 69#endif
diff --git a/include/linux/of.h b/include/linux/of.h
index edc068d19c79..2194b8ca41f9 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -136,7 +136,8 @@ static inline bool is_of_node(struct fwnode_handle *fwnode)
136 136
137static inline struct device_node *to_of_node(struct fwnode_handle *fwnode) 137static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
138{ 138{
139 return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL; 139 return is_of_node(fwnode) ?
140 container_of(fwnode, struct device_node, fwnode) : NULL;
140} 141}
141 142
142static inline bool of_have_populated_dt(void) 143static inline bool of_have_populated_dt(void)
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 681ccb053f72..b1cf7e797892 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -22,9 +22,6 @@
22 22
23enum gpd_status { 23enum gpd_status {
24 GPD_STATE_ACTIVE = 0, /* PM domain is active */ 24 GPD_STATE_ACTIVE = 0, /* PM domain is active */
25 GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */
26 GPD_STATE_BUSY, /* Something is happening to the PM domain */
27 GPD_STATE_REPEAT, /* Power off in progress, to be repeated */
28 GPD_STATE_POWER_OFF, /* PM domain is off */ 25 GPD_STATE_POWER_OFF, /* PM domain is off */
29}; 26};
30 27
@@ -59,9 +56,6 @@ struct generic_pm_domain {
59 unsigned int in_progress; /* Number of devices being suspended now */ 56 unsigned int in_progress; /* Number of devices being suspended now */
60 atomic_t sd_count; /* Number of subdomains with power "on" */ 57 atomic_t sd_count; /* Number of subdomains with power "on" */
61 enum gpd_status status; /* Current state of the domain */ 58 enum gpd_status status; /* Current state of the domain */
62 wait_queue_head_t status_wait_queue;
63 struct task_struct *poweroff_task; /* Powering off task */
64 unsigned int resume_count; /* Number of devices being resumed */
65 unsigned int device_count; /* Number of devices */ 59 unsigned int device_count; /* Number of devices */
66 unsigned int suspended_count; /* System suspend device counter */ 60 unsigned int suspended_count; /* System suspend device counter */
67 unsigned int prepared_count; /* Suspend counter of prepared devices */ 61 unsigned int prepared_count; /* Suspend counter of prepared devices */
@@ -113,7 +107,6 @@ struct generic_pm_domain_data {
113 struct pm_domain_data base; 107 struct pm_domain_data base;
114 struct gpd_timing_data td; 108 struct gpd_timing_data td;
115 struct notifier_block nb; 109 struct notifier_block nb;
116 int need_restore;
117}; 110};
118 111
119#ifdef CONFIG_PM_GENERIC_DOMAINS 112#ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -228,8 +221,6 @@ static inline int pm_genpd_name_poweron(const char *domain_name)
228 return -ENOSYS; 221 return -ENOSYS;
229} 222}
230static inline void pm_genpd_poweroff_unused(void) {} 223static inline void pm_genpd_poweroff_unused(void) {}
231#define simple_qos_governor NULL
232#define pm_domain_always_on_gov NULL
233#endif 224#endif
234 225
235static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, 226static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index cec2d4540914..cab7ba55bedb 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -30,7 +30,10 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
30 30
31unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); 31unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
32 32
33bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
34
33int dev_pm_opp_get_opp_count(struct device *dev); 35int dev_pm_opp_get_opp_count(struct device *dev);
36unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
34 37
35struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, 38struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
36 unsigned long freq, 39 unsigned long freq,
@@ -62,11 +65,21 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
62 return 0; 65 return 0;
63} 66}
64 67
68static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
69{
70 return false;
71}
72
65static inline int dev_pm_opp_get_opp_count(struct device *dev) 73static inline int dev_pm_opp_get_opp_count(struct device *dev)
66{ 74{
67 return 0; 75 return 0;
68} 76}
69 77
78static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
79{
80 return 0;
81}
82
70static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, 83static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
71 unsigned long freq, bool available) 84 unsigned long freq, bool available)
72{ 85{
@@ -115,6 +128,10 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
115#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) 128#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
116int of_init_opp_table(struct device *dev); 129int of_init_opp_table(struct device *dev);
117void of_free_opp_table(struct device *dev); 130void of_free_opp_table(struct device *dev);
131int of_cpumask_init_opp_table(cpumask_var_t cpumask);
132void of_cpumask_free_opp_table(cpumask_var_t cpumask);
133int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask);
134int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask);
118#else 135#else
119static inline int of_init_opp_table(struct device *dev) 136static inline int of_init_opp_table(struct device *dev)
120{ 137{
@@ -124,6 +141,25 @@ static inline int of_init_opp_table(struct device *dev)
124static inline void of_free_opp_table(struct device *dev) 141static inline void of_free_opp_table(struct device *dev)
125{ 142{
126} 143}
144
145static inline int of_cpumask_init_opp_table(cpumask_var_t cpumask)
146{
147 return -ENOSYS;
148}
149
150static inline void of_cpumask_free_opp_table(cpumask_var_t cpumask)
151{
152}
153
154static inline int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
155{
156 return -ENOSYS;
157}
158
159static inline int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
160{
161 return -ENOSYS;
162}
127#endif 163#endif
128 164
129#endif /* __LINUX_OPP_H__ */ 165#endif /* __LINUX_OPP_H__ */
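These cpumask helpers let a cpufreq driver build one OPP table from DT and mark which CPUs share it. A rough sketch of probe-time usage follows; the flow is assumed rather than prescriptive, and error handling is trimmed.

/* Sketch: cpu_dev is the struct device of the policy CPU, 'cpus' the mask of
 * CPUs expected to share its OPPs. */
ret = of_cpumask_init_opp_table(cpus);
if (ret)
        return ret;

ret = set_cpus_sharing_opps(cpu_dev, cpus);
if (ret) {
        of_cpumask_free_opp_table(cpus);
        return ret;
}

transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);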
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 7b3ae0cffc05..0f65d36c2a75 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -161,6 +161,8 @@ void dev_pm_qos_hide_flags(struct device *dev);
161int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); 161int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
162s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); 162s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
163int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); 163int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
164int dev_pm_qos_expose_latency_tolerance(struct device *dev);
165void dev_pm_qos_hide_latency_tolerance(struct device *dev);
164 166
165static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) 167static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
166{ 168{
@@ -229,6 +231,9 @@ static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
229 { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; } 231 { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
230static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) 232static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
231 { return 0; } 233 { return 0; }
234static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev)
235 { return 0; }
236static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}
232 237
233static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; } 238static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
234static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } 239static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
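The two new helpers let a driver expose the per-device latency-tolerance attribute in sysfs and hide it again on removal. A short, hedged sketch with illustrative probe/remove names:

/* Sketch: expose the latency-tolerance knob at probe, hide it on remove. */
static int example_probe(struct device *dev)
{
        return dev_pm_qos_expose_latency_tolerance(dev);
}

static void example_remove(struct device *dev)
{
        dev_pm_qos_hide_latency_tolerance(dev);
}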
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 30e84d48bfea..3bdbb4189780 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -98,11 +98,6 @@ static inline bool pm_runtime_status_suspended(struct device *dev)
98 return dev->power.runtime_status == RPM_SUSPENDED; 98 return dev->power.runtime_status == RPM_SUSPENDED;
99} 99}
100 100
101static inline bool pm_runtime_suspended_if_enabled(struct device *dev)
102{
103 return pm_runtime_status_suspended(dev) && dev->power.disable_depth == 1;
104}
105
106static inline bool pm_runtime_enabled(struct device *dev) 101static inline bool pm_runtime_enabled(struct device *dev)
107{ 102{
108 return !dev->power.disable_depth; 103 return !dev->power.disable_depth;
@@ -164,7 +159,6 @@ static inline void device_set_run_wake(struct device *dev, bool enable) {}
164static inline bool pm_runtime_suspended(struct device *dev) { return false; } 159static inline bool pm_runtime_suspended(struct device *dev) { return false; }
165static inline bool pm_runtime_active(struct device *dev) { return true; } 160static inline bool pm_runtime_active(struct device *dev) { return true; }
166static inline bool pm_runtime_status_suspended(struct device *dev) { return false; } 161static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
167static inline bool pm_runtime_suspended_if_enabled(struct device *dev) { return false; }
168static inline bool pm_runtime_enabled(struct device *dev) { return false; } 162static inline bool pm_runtime_enabled(struct device *dev) { return false; }
169 163
170static inline void pm_runtime_no_callbacks(struct device *dev) {} 164static inline void pm_runtime_no_callbacks(struct device *dev) {}
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 9e302315e33d..02e8dfaa1ce2 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -18,6 +18,16 @@ config SUSPEND_FREEZER
18 18
19 Turning OFF this setting is NOT recommended! If in doubt, say Y. 19 Turning OFF this setting is NOT recommended! If in doubt, say Y.
20 20
21config SUSPEND_SKIP_SYNC
22 bool "Skip kernel's sys_sync() on suspend to RAM/standby"
23 depends on SUSPEND
24 depends on EXPERT
25 help
26 Skip the kernel sys_sync() before freezing user processes.
27 Some systems prefer not to pay this cost on every invocation
28 of suspend, or they are content with invoking sync() from
29 user-space before invoking suspend. Say Y if that's your case.
30
21config HIBERNATE_CALLBACKS 31config HIBERNATE_CALLBACKS
22 bool 32 bool
23 33
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 53266b729fd9..7e4cda4a8dd9 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -484,11 +484,13 @@ static int enter_state(suspend_state_t state)
484 if (state == PM_SUSPEND_FREEZE) 484 if (state == PM_SUSPEND_FREEZE)
485 freeze_begin(); 485 freeze_begin();
486 486
487#ifndef CONFIG_SUSPEND_SKIP_SYNC
487 trace_suspend_resume(TPS("sync_filesystems"), 0, true); 488 trace_suspend_resume(TPS("sync_filesystems"), 0, true);
488 printk(KERN_INFO "PM: Syncing filesystems ... "); 489 printk(KERN_INFO "PM: Syncing filesystems ... ");
489 sys_sync(); 490 sys_sync();
490 printk("done.\n"); 491 printk("done.\n");
491 trace_suspend_resume(TPS("sync_filesystems"), 0, false); 492 trace_suspend_resume(TPS("sync_filesystems"), 0, false);
493#endif
492 494
493 pr_debug("PM: Preparing system for sleep (%s)\n", pm_states[state]); 495 pr_debug("PM: Preparing system for sleep (%s)\n", pm_states[state]);
494 error = suspend_prepare(state); 496 error = suspend_prepare(state);
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
index 019069c84ff6..1896386e16bb 100644
--- a/kernel/power/wakelock.c
+++ b/kernel/power/wakelock.c
@@ -17,6 +17,7 @@
17#include <linux/list.h> 17#include <linux/list.h>
18#include <linux/rbtree.h> 18#include <linux/rbtree.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/workqueue.h>
20 21
21#include "power.h" 22#include "power.h"
22 23
@@ -83,7 +84,9 @@ static inline void decrement_wakelocks_number(void) {}
83#define WL_GC_COUNT_MAX 100 84#define WL_GC_COUNT_MAX 100
84#define WL_GC_TIME_SEC 300 85#define WL_GC_TIME_SEC 300
85 86
87static void __wakelocks_gc(struct work_struct *work);
86static LIST_HEAD(wakelocks_lru_list); 88static LIST_HEAD(wakelocks_lru_list);
89static DECLARE_WORK(wakelock_work, __wakelocks_gc);
87static unsigned int wakelocks_gc_count; 90static unsigned int wakelocks_gc_count;
88 91
89static inline void wakelocks_lru_add(struct wakelock *wl) 92static inline void wakelocks_lru_add(struct wakelock *wl)
@@ -96,13 +99,12 @@ static inline void wakelocks_lru_most_recent(struct wakelock *wl)
96 list_move(&wl->lru, &wakelocks_lru_list); 99 list_move(&wl->lru, &wakelocks_lru_list);
97} 100}
98 101
99static void wakelocks_gc(void) 102static void __wakelocks_gc(struct work_struct *work)
100{ 103{
101 struct wakelock *wl, *aux; 104 struct wakelock *wl, *aux;
102 ktime_t now; 105 ktime_t now;
103 106
104 if (++wakelocks_gc_count <= WL_GC_COUNT_MAX) 107 mutex_lock(&wakelocks_lock);
105 return;
106 108
107 now = ktime_get(); 109 now = ktime_get();
108 list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) { 110 list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) {
@@ -127,6 +129,16 @@ static void wakelocks_gc(void)
127 } 129 }
128 } 130 }
129 wakelocks_gc_count = 0; 131 wakelocks_gc_count = 0;
132
133 mutex_unlock(&wakelocks_lock);
134}
135
136static void wakelocks_gc(void)
137{
138 if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
139 return;
140
141 schedule_work(&wakelock_work);
130} 142}
131#else /* !CONFIG_PM_WAKELOCKS_GC */ 143#else /* !CONFIG_PM_WAKELOCKS_GC */
132static inline void wakelocks_lru_add(struct wakelock *wl) {} 144static inline void wakelocks_lru_add(struct wakelock *wl) {}
diff --git a/lib/klist.c b/lib/klist.c
index 89b485a2a58d..d74cf7a29afd 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -324,6 +324,47 @@ static struct klist_node *to_klist_node(struct list_head *n)
324} 324}
325 325
326/** 326/**
327 * klist_prev - Ante up prev node in list.
328 * @i: Iterator structure.
329 *
330 * First grab list lock. Decrement the reference count of the previous
331 * node, if there was one. Grab the prev node, increment its reference
332 * count, drop the lock, and return that prev node.
333 */
334struct klist_node *klist_prev(struct klist_iter *i)
335{
336 void (*put)(struct klist_node *) = i->i_klist->put;
337 struct klist_node *last = i->i_cur;
338 struct klist_node *prev;
339
340 spin_lock(&i->i_klist->k_lock);
341
342 if (last) {
343 prev = to_klist_node(last->n_node.prev);
344 if (!klist_dec_and_del(last))
345 put = NULL;
346 } else
347 prev = to_klist_node(i->i_klist->k_list.prev);
348
349 i->i_cur = NULL;
350 while (prev != to_klist_node(&i->i_klist->k_list)) {
351 if (likely(!knode_dead(prev))) {
352 kref_get(&prev->n_ref);
353 i->i_cur = prev;
354 break;
355 }
356 prev = to_klist_node(prev->n_node.prev);
357 }
358
359 spin_unlock(&i->i_klist->k_lock);
360
361 if (put && last)
362 put(last);
363 return i->i_cur;
364}
365EXPORT_SYMBOL_GPL(klist_prev);
366
367/**
327 * klist_next - Ante up next node in list. 368 * klist_next - Ante up next node in list.
328 * @i: Iterator structure. 369 * @i: Iterator structure.
329 * 370 *
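klist_prev() mirrors klist_next(): starting from a fresh iterator it returns the tail node first and then walks toward the head, skipping dead nodes. A sketch of a reverse walk, where handle_node() is an illustrative helper, not a real API:

/* Sketch: reverse iteration over an initialized 'struct klist k'. */
struct klist_iter iter;
struct klist_node *n;

klist_iter_init(&k, &iter);
while ((n = klist_prev(&iter)))
        handle_node(n);         /* hypothetical per-node handler */
klist_iter_exit(&iter);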
diff --git a/tools/power/acpi/Makefile b/tools/power/acpi/Makefile
index 3d1537b93c64..e882c8320135 100644
--- a/tools/power/acpi/Makefile
+++ b/tools/power/acpi/Makefile
@@ -8,154 +8,20 @@
8# as published by the Free Software Foundation; version 2 8# as published by the Free Software Foundation; version 2
9# of the License. 9# of the License.
10 10
11OUTPUT=./ 11include ../../scripts/Makefile.include
12ifeq ("$(origin O)", "command line") 12
13 OUTPUT := $(O)/ 13all: acpidump ec
14endif 14clean: acpidump_clean ec_clean
15 15install: acpidump_install ec_install
16ifneq ($(OUTPUT),) 16uninstall: acpidump_uninstall ec_uninstall
17# check that the output directory actually exists 17
18OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd) 18acpidump ec: FORCE
19$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist)) 19 $(call descend,tools/$@,all)
20endif 20acpidump_clean ec_clean:
21 21 $(call descend,tools/$(@:_clean=),clean)
22SUBDIRS = tools/ec 22acpidump_install ec_install:
23 23 $(call descend,tools/$(@:_install=),install)
24# --- CONFIGURATION BEGIN --- 24acpidump_uninstall ec_uninstall:
25 25 $(call descend,tools/$(@:_uninstall=),uninstall)
26# Set the following to `true' to make a unstripped, unoptimized 26
27# binary. Leave this set to `false' for production use. 27.PHONY: FORCE
28DEBUG ?= true
29
30# make the build silent. Set this to something else to make it noisy again.
31V ?= false
32
33# Prefix to the directories we're installing to
34DESTDIR ?=
35
36# --- CONFIGURATION END ---
37
38# Directory definitions. These are default and most probably
39# do not need to be changed. Please note that DESTDIR is
40# added in front of any of them
41
42bindir ?= /usr/bin
43sbindir ?= /usr/sbin
44mandir ?= /usr/man
45
46# Toolchain: what tools do we use, and what options do they need:
47
48INSTALL = /usr/bin/install -c
49INSTALL_PROGRAM = ${INSTALL}
50INSTALL_DATA = ${INSTALL} -m 644
51INSTALL_SCRIPT = ${INSTALL_PROGRAM}
52
53# If you are running a cross compiler, you may want to set this
54# to something more interesting, like "arm-linux-". If you want
55# to compile vs uClibc, that can be done here as well.
56CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
57CC = $(CROSS)gcc
58LD = $(CROSS)gcc
59STRIP = $(CROSS)strip
60HOSTCC = gcc
61
62# check if compiler option is supported
63cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
64
65# use '-Os' optimization if available, else use -O2
66OPTIMIZATION := $(call cc-supports,-Os,-O2)
67
68WARNINGS := -Wall
69WARNINGS += $(call cc-supports,-Wstrict-prototypes)
70WARNINGS += $(call cc-supports,-Wdeclaration-after-statement)
71
72KERNEL_INCLUDE := ../../../include
73ACPICA_INCLUDE := ../../../drivers/acpi/acpica
74CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE)
75CFLAGS += $(WARNINGS)
76
77ifeq ($(strip $(V)),false)
78 QUIET=@
79 ECHO=@echo
80else
81 QUIET=
82 ECHO=@\#
83endif
84export QUIET ECHO
85
86# if DEBUG is enabled, then we do not strip or optimize
87ifeq ($(strip $(DEBUG)),true)
88 CFLAGS += -O1 -g -DDEBUG
89 STRIPCMD = /bin/true -Since_we_are_debugging
90else
91 CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer
92 STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment
93endif
94
95# --- ACPIDUMP BEGIN ---
96
97vpath %.c \
98 ../../../drivers/acpi/acpica\
99 tools/acpidump\
100 common\
101 os_specific/service_layers
102
103CFLAGS += -DACPI_DUMP_APP -Itools/acpidump
104
105DUMP_OBJS = \
106 apdump.o\
107 apfiles.o\
108 apmain.o\
109 osunixdir.o\
110 osunixmap.o\
111 osunixxf.o\
112 tbprint.o\
113 tbxfroot.o\
114 utbuffer.o\
115 utdebug.o\
116 utexcep.o\
117 utglobal.o\
118 utmath.o\
119 utprint.o\
120 utstring.o\
121 utxferror.o\
122 oslibcfs.o\
123 oslinuxtbl.o\
124 cmfsize.o\
125 getopt.o
126
127DUMP_OBJS := $(addprefix $(OUTPUT)tools/acpidump/,$(DUMP_OBJS))
128
129$(OUTPUT)acpidump: $(DUMP_OBJS)
130 $(ECHO) " LD " $@
131 $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(DUMP_OBJS) -L$(OUTPUT) -o $@
132 $(QUIET) $(STRIPCMD) $@
133
134$(OUTPUT)tools/acpidump/%.o: %.c
135 $(ECHO) " CC " $@
136 $(QUIET) $(CC) -c $(CFLAGS) -o $@ $<
137
138# --- ACPIDUMP END ---
139
140all: $(OUTPUT)acpidump
141 echo $(OUTPUT)
142
143clean:
144 -find $(OUTPUT) \( -not -type d \) -and \( -name '*~' -o -name '*.[oas]' \) -type f -print \
145 | xargs rm -f
146 -rm -f $(OUTPUT)acpidump
147
148install-tools:
149 $(INSTALL) -d $(DESTDIR)${sbindir}
150 $(INSTALL_PROGRAM) $(OUTPUT)acpidump $(DESTDIR)${sbindir}
151
152install-man:
153 $(INSTALL_DATA) -D man/acpidump.8 $(DESTDIR)${mandir}/man8/acpidump.8
154
155install: all install-tools install-man
156
157uninstall:
158 - rm -f $(DESTDIR)${sbindir}/acpidump
159 - rm -f $(DESTDIR)${mandir}/man8/acpidump.8
160
161.PHONY: all utils install-tools install-man install uninstall clean
diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
new file mode 100644
index 000000000000..552af68d5414
--- /dev/null
+++ b/tools/power/acpi/Makefile.config
@@ -0,0 +1,92 @@
1# tools/power/acpi/Makefile.config - ACPI tool Makefile
2#
3# Copyright (c) 2015, Intel Corporation
4# Author: Lv Zheng <lv.zheng@intel.com>
5#
6# This program is free software; you can redistribute it and/or
7# modify it under the terms of the GNU General Public License
8# as published by the Free Software Foundation; version 2
9# of the License.
10
11include ../../../../scripts/Makefile.include
12
13OUTPUT=./
14ifeq ("$(origin O)", "command line")
15 OUTPUT := $(O)/
16endif
17
18ifneq ($(OUTPUT),)
19# check that the output directory actually exists
20OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
21$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
22endif
23
24# --- CONFIGURATION BEGIN ---
25
26# Set the following to `true' to make a unstripped, unoptimized
27# binary. Leave this set to `false' for production use.
28DEBUG ?= true
29
30# make the build silent. Set this to something else to make it noisy again.
31V ?= false
32
33# Prefix to the directories we're installing to
34DESTDIR ?=
35
36# --- CONFIGURATION END ---
37
38# Directory definitions. These are default and most probably
39# do not need to be changed. Please note that DESTDIR is
40# added in front of any of them
41
42bindir ?= /usr/bin
43sbindir ?= /usr/sbin
44mandir ?= /usr/man
45
46# Toolchain: what tools do we use, and what options do they need:
47
48INSTALL = /usr/bin/install -c
49INSTALL_PROGRAM = ${INSTALL}
50INSTALL_DATA = ${INSTALL} -m 644
51INSTALL_SCRIPT = ${INSTALL_PROGRAM}
52
53# If you are running a cross compiler, you may want to set this
54# to something more interesting, like "arm-linux-". If you want
55# to compile vs uClibc, that can be done here as well.
56CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
57CC = $(CROSS)gcc
58LD = $(CROSS)gcc
59STRIP = $(CROSS)strip
60HOSTCC = gcc
61
62# check if compiler option is supported
63cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
64
65# use '-Os' optimization if available, else use -O2
66OPTIMIZATION := $(call cc-supports,-Os,-O2)
67
68WARNINGS := -Wall
69WARNINGS += $(call cc-supports,-Wstrict-prototypes)
70WARNINGS += $(call cc-supports,-Wdeclaration-after-statement)
71
72KERNEL_INCLUDE := ../../../include
73ACPICA_INCLUDE := ../../../drivers/acpi/acpica
74CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE)
75CFLAGS += $(WARNINGS)
76
77ifeq ($(strip $(V)),false)
78 QUIET=@
79 ECHO=@echo
80else
81 QUIET=
82 ECHO=@\#
83endif
84
85# if DEBUG is enabled, then we do not strip or optimize
86ifeq ($(strip $(DEBUG)),true)
87 CFLAGS += -O1 -g -DDEBUG
88 STRIPCMD = /bin/true -Since_we_are_debugging
89else
90 CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer
91 STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment
92endif
diff --git a/tools/power/acpi/Makefile.rules b/tools/power/acpi/Makefile.rules
new file mode 100644
index 000000000000..ec87a9e562c0
--- /dev/null
+++ b/tools/power/acpi/Makefile.rules
@@ -0,0 +1,37 @@
1# tools/power/acpi/Makefile.rules - ACPI tool Makefile
2#
3# Copyright (c) 2015, Intel Corporation
4# Author: Lv Zheng <lv.zheng@intel.com>
5#
6# This program is free software; you can redistribute it and/or
7# modify it under the terms of the GNU General Public License
8# as published by the Free Software Foundation; version 2
9# of the License.
10
11$(OUTPUT)$(TOOL): $(TOOL_OBJS) FORCE
12 $(ECHO) " LD " $@
13 $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(TOOL_OBJS) -L$(OUTPUT) -o $@
14 $(QUIET) $(STRIPCMD) $@
15
16$(OUTPUT)%.o: %.c
17 $(ECHO) " CC " $@
18 $(QUIET) $(CC) -c $(CFLAGS) -o $@ $<
19
20all: $(OUTPUT)$(TOOL)
21clean:
22 -find $(OUTPUT) \( -not -type d \) \
23 -and \( -name '*~' -o -name '*.[oas]' \) \
24 -type f -print \
25 | xargs rm -f
26 -rm -f $(OUTPUT)$(TOOL)
27
28install-tools:
29 $(INSTALL) -d $(DESTDIR)${sbindir}
30 $(INSTALL_PROGRAM) $(OUTPUT)$(TOOL) $(DESTDIR)${sbindir}
31uninstall-tools:
32 - rm -f $(DESTDIR)${sbindir}/$(TOOL)
33
34install: all install-tools $(EXTRA_INSTALL)
35uninstall: uninstall-tools $(EXTRA_UNINSTALL)
36
37.PHONY: FORCE
diff --git a/tools/power/acpi/tools/acpidump/Makefile b/tools/power/acpi/tools/acpidump/Makefile
new file mode 100644
index 000000000000..8d761576e91b
--- /dev/null
+++ b/tools/power/acpi/tools/acpidump/Makefile
@@ -0,0 +1,53 @@
1# tools/power/acpi/tools/acpidump/Makefile - ACPI tool Makefile
2#
3# Copyright (c) 2015, Intel Corporation
4# Author: Lv Zheng <lv.zheng@intel.com>
5#
6# This program is free software; you can redistribute it and/or
7# modify it under the terms of the GNU General Public License
8# as published by the Free Software Foundation; version 2
9# of the License.
10
11include ../../Makefile.config
12
13TOOL = acpidump
14EXTRA_INSTALL = install-man
15EXTRA_UNINSTALL = uninstall-man
16
17vpath %.c \
18 ../../../../../drivers/acpi/acpica\
19 ./\
20 ../../common\
21 ../../os_specific/service_layers
22CFLAGS += -DACPI_DUMP_APP -I.\
23 -I../../../../../drivers/acpi/acpica\
24 -I../../../../../include
25TOOL_OBJS = \
26 apdump.o\
27 apfiles.o\
28 apmain.o\
29 osunixdir.o\
30 osunixmap.o\
31 osunixxf.o\
32 tbprint.o\
33 tbxfroot.o\
34 utbuffer.o\
35 utdebug.o\
36 utexcep.o\
37 utglobal.o\
38 utmath.o\
39 utnonansi.o\
40 utprint.o\
41 utstring.o\
42 utxferror.o\
43 oslibcfs.o\
44 oslinuxtbl.o\
45 cmfsize.o\
46 getopt.o
47
48include ../../Makefile.rules
49
50install-man: ../../man/acpidump.8
51 $(INSTALL_DATA) -D $< $(DESTDIR)${mandir}/man8/acpidump.8
52uninstall-man:
53 - rm -f $(DESTDIR)${mandir}/man8/acpidump.8
diff --git a/tools/power/acpi/tools/ec/Makefile b/tools/power/acpi/tools/ec/Makefile
index b7b0b929bd32..75d8a127b6ee 100644
--- a/tools/power/acpi/tools/ec/Makefile
+++ b/tools/power/acpi/tools/ec/Makefile
@@ -1,22 +1,17 @@
1ec_access: ec_access.o 1# tools/power/acpi/tools/acpidump/Makefile - ACPI tool Makefile
2 $(ECHO) " LD " $@ 2#
3 $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $< -o $@ 3# Copyright (c) 2015, Intel Corporation
4 $(QUIET) $(STRIPCMD) $@ 4# Author: Lv Zheng <lv.zheng@intel.com>
5#
6# This program is free software; you can redistribute it and/or
7# modify it under the terms of the GNU General Public License
8# as published by the Free Software Foundation; version 2
9# of the License.
5 10
6%.o: %.c 11include ../../Makefile.config
7 $(ECHO) " CC " $@
8 $(QUIET) $(CC) -c $(CFLAGS) -o $@ $<
9 12
10all: ec_access 13TOOL = ec
14TOOL_OBJS = \
15 ec_access.o
11 16
12install: 17include ../../Makefile.rules
13 $(INSTALL) -d $(DESTDIR)${sbindir}
14 $(INSTALL_PROGRAM) ec_access $(DESTDIR)${sbindir}
15
16uninstall:
17 - rm -f $(DESTDIR)${sbindir}/ec_access
18
19clean:
20 -rm -f $(OUTPUT)ec_access
21
22.PHONY: all install uninstall
diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c
index f656e585ed45..4e213576381e 100644
--- a/tools/power/cpupower/utils/cpufreq-set.c
+++ b/tools/power/cpupower/utils/cpufreq-set.c
@@ -17,6 +17,7 @@
 
 #include "cpufreq.h"
 #include "helpers/helpers.h"
+#include "helpers/sysfs.h"
 
 #define NORM_FREQ_LEN 32
 
@@ -318,6 +319,9 @@ int cmd_freq_set(int argc, char **argv)
 		    cpufreq_cpu_exists(cpu))
 			continue;
 
+		if (sysfs_is_cpu_online(cpu) != 1)
+			continue;
+
 		printf(_("Setting cpu: %d\n"), cpu);
 		ret = do_one_cpu(cpu, &new_pol, freq, policychange);
 		if (ret) {
diff --git a/tools/power/cpupower/utils/helpers/topology.c b/tools/power/cpupower/utils/helpers/topology.c
index c13120af519b..cea398c176e7 100644
--- a/tools/power/cpupower/utils/helpers/topology.c
+++ b/tools/power/cpupower/utils/helpers/topology.c
@@ -73,6 +73,8 @@ int get_cpu_topology(struct cpupower_topology *cpu_top)
 	for (cpu = 0; cpu < cpus; cpu++) {
 		cpu_top->core_info[cpu].cpu = cpu;
 		cpu_top->core_info[cpu].is_online = sysfs_is_cpu_online(cpu);
+		if (!cpu_top->core_info[cpu].is_online)
+			continue;
 		if(sysfs_topology_read_file(
 			cpu,
 			"physical_package_id",
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index 05b8fc38dc8b..622db685b4f9 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -251,11 +251,6 @@ Although it is not guaranteed by the architecture, turbostat assumes
 that they count at TSC rate, which is true on all processors tested to date.
 
 .SH REFERENCES
-"Intel® Turbo Boost Technology
-in Intel® Core™ Microarchitecture (Nehalem) Based Processors"
-http://download.intel.com/design/processor/applnots/320354.pdf
-
-"Intel® 64 and IA-32 Architectures Software Developer's Manual
 Volume 3B: System Programming Guide"
 http://www.intel.com/products/processor/manuals/
 
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 323b65edfc97..9655cb49c7cb 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -372,7 +372,7 @@ void print_header(void)
 	if (do_rapl & RAPL_GFX)
 		outp += sprintf(outp, " GFX_J");
 	if (do_rapl & RAPL_DRAM)
-		outp += sprintf(outp, " RAM_W");
+		outp += sprintf(outp, " RAM_J");
 	if (do_rapl & RAPL_PKG_PERF_STATUS)
 		outp += sprintf(outp, " PKG_%%");
 	if (do_rapl & RAPL_DRAM_PERF_STATUS)
@@ -1157,7 +1157,7 @@ dump_nhm_platform_info(void)
 
 	get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
 
-	fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr);
+	fprintf(stderr, "cpu%d: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
 
 	ratio = (msr >> 40) & 0xFF;
 	fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n",
@@ -1168,8 +1168,8 @@ dump_nhm_platform_info(void)
 		ratio, bclk, ratio * bclk);
 
 	get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr);
-	fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
-		msr, msr & 0x2 ? "EN" : "DIS");
+	fprintf(stderr, "cpu%d: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
+		base_cpu, msr, msr & 0x2 ? "EN" : "DIS");
 
 	return;
 }
@@ -1182,7 +1182,7 @@ dump_hsw_turbo_ratio_limits(void)
 
 	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr);
 
-	fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", msr);
+	fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", base_cpu, msr);
 
 	ratio = (msr >> 8) & 0xFF;
 	if (ratio)
@@ -1204,7 +1204,7 @@ dump_ivt_turbo_ratio_limits(void)
 
 	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr);
 
-	fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", msr);
+	fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, msr);
 
 	ratio = (msr >> 56) & 0xFF;
 	if (ratio)
@@ -1256,7 +1256,7 @@ dump_nhm_turbo_ratio_limits(void)
 
 	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
 
-	fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);
+	fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr);
 
 	ratio = (msr >> 56) & 0xFF;
 	if (ratio)
@@ -1312,8 +1312,8 @@ dump_knl_turbo_ratio_limits(void)
 
 	get_msr(base_cpu, MSR_NHM_TURBO_RATIO_LIMIT, &msr);
 
-	fprintf(stderr, "cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n",
-		msr);
+	fprintf(stderr, "cpu%d: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n",
+		base_cpu, msr);
 
 	/**
 	 * Turbo encoding in KNL is as follows:
@@ -1371,7 +1371,7 @@ dump_nhm_cst_cfg(void)
 #define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
 #define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
 
-	fprintf(stderr, "cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", msr);
+	fprintf(stderr, "cpu%d: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", base_cpu, msr);
 
 	fprintf(stderr, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n",
 		(msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
@@ -1384,6 +1384,49 @@ dump_nhm_cst_cfg(void)
 	return;
 }
 
+static void
+dump_config_tdp(void)
+{
+	unsigned long long msr;
+
+	get_msr(base_cpu, MSR_CONFIG_TDP_NOMINAL, &msr);
+	fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_NOMINAL: 0x%08llx", base_cpu, msr);
+	fprintf(stderr, " (base_ratio=%d)\n", (unsigned int)msr & 0xEF);
+
+	get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_1, &msr);
+	fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_LEVEL_1: 0x%08llx (", base_cpu, msr);
+	if (msr) {
+		fprintf(stderr, "PKG_MIN_PWR_LVL1=%d ", (unsigned int)(msr >> 48) & 0xEFFF);
+		fprintf(stderr, "PKG_MAX_PWR_LVL1=%d ", (unsigned int)(msr >> 32) & 0xEFFF);
+		fprintf(stderr, "LVL1_RATIO=%d ", (unsigned int)(msr >> 16) & 0xEF);
+		fprintf(stderr, "PKG_TDP_LVL1=%d", (unsigned int)(msr) & 0xEFFF);
+	}
+	fprintf(stderr, ")\n");
+
+	get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_2, &msr);
+	fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_LEVEL_2: 0x%08llx (", base_cpu, msr);
+	if (msr) {
+		fprintf(stderr, "PKG_MIN_PWR_LVL2=%d ", (unsigned int)(msr >> 48) & 0xEFFF);
+		fprintf(stderr, "PKG_MAX_PWR_LVL2=%d ", (unsigned int)(msr >> 32) & 0xEFFF);
+		fprintf(stderr, "LVL2_RATIO=%d ", (unsigned int)(msr >> 16) & 0xEF);
+		fprintf(stderr, "PKG_TDP_LVL2=%d", (unsigned int)(msr) & 0xEFFF);
+	}
+	fprintf(stderr, ")\n");
+
+	get_msr(base_cpu, MSR_CONFIG_TDP_CONTROL, &msr);
+	fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_CONTROL: 0x%08llx (", base_cpu, msr);
+	if ((msr) & 0x3)
+		fprintf(stderr, "TDP_LEVEL=%d ", (unsigned int)(msr) & 0x3);
+	fprintf(stderr, " lock=%d", (unsigned int)(msr >> 31) & 1);
+	fprintf(stderr, ")\n");
+
+	get_msr(base_cpu, MSR_TURBO_ACTIVATION_RATIO, &msr);
+	fprintf(stderr, "cpu%d: MSR_TURBO_ACTIVATION_RATIO: 0x%08llx (", base_cpu, msr);
+	fprintf(stderr, "MAX_NON_TURBO_RATIO=%d", (unsigned int)(msr) & 0xEF);
+	fprintf(stderr, " lock=%d", (unsigned int)(msr >> 31) & 1);
+	fprintf(stderr, ")\n");
+}
+
 void free_all_buffers(void)
 {
 	CPU_FREE(cpu_present_set);
@@ -1873,6 +1916,36 @@ int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model)
 		return 0;
 	}
 }
+int has_config_tdp(unsigned int family, unsigned int model)
+{
+	if (!genuine_intel)
+		return 0;
+
+	if (family != 6)
+		return 0;
+
+	switch (model) {
+	case 0x3A: /* IVB */
+	case 0x3E: /* IVB Xeon */
+
+	case 0x3C: /* HSW */
+	case 0x3F: /* HSX */
+	case 0x45: /* HSW */
+	case 0x46: /* HSW */
+	case 0x3D: /* BDW */
+	case 0x47: /* BDW */
+	case 0x4F: /* BDX */
+	case 0x56: /* BDX-DE */
+	case 0x4E: /* SKL */
+	case 0x5E: /* SKL */
+
+	case 0x57: /* Knights Landing */
+		return 1;
+	default:
+		return 0;
+	}
+}
+
 static void
 dump_cstate_pstate_config_info(family, model)
 {
@@ -1893,6 +1966,9 @@ dump_cstate_pstate_config_info(family, model)
 	if (has_knl_turbo_ratio_limit(family, model))
 		dump_knl_turbo_ratio_limits();
 
+	if (has_config_tdp(family, model))
+		dump_config_tdp();
+
 	dump_nhm_cst_cfg();
 }
 
@@ -3014,7 +3090,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-	fprintf(stderr, "turbostat version 4.7 27-May, 2015"
+	fprintf(stderr, "turbostat version 4.7 17-June, 2015"
 		" - Len Brown <lenb@kernel.org>\n");
 }
 
@@ -3042,7 +3118,7 @@ void cmdline(int argc, char **argv)
 
 	progname = argv[0];
 
-	while ((opt = getopt_long_only(argc, argv, "C:c:Ddhi:JM:m:PpST:v",
+	while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:PpST:v",
 				long_options, &option_index)) != -1) {
 		switch (opt) {
 		case 'C':