diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-11 15:45:35 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-11 15:45:35 -0500 |
commit | bad73c5aa069f1f14cc07ce7bbae8d463635560c (patch) | |
tree | db905bb3400e6fe70be95cd20158bed79b2b2c6c | |
parent | b58ed041a360ed051fab17e4d9b0f451c6fedba7 (diff) | |
parent | f316fc56555a5c3bcf6350f3d5ac26dd2c55f4cb (diff) |
Merge tag 'pm+acpi-for-3.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull ACPI and power management updates from Rafael Wysocki:
- Introduction of device PM QoS flags.
- ACPI device power management update allowing subsystems other than
PCI to use it more easily.
- ACPI device enumeration rework allowing additional kinds of devices
to be enumerated via ACPI. From Mika Westerberg, Adrian Hunter,
Mathias Nyman, Andy Shevchenko, and Rafael J. Wysocki.
- ACPICA update to version 20121018 from Bob Moore and Lv Zheng.
- ACPI memory hotplug update from Wen Congyang and Yasuaki Ishimatsu.
- Introduction of acpi_handle_<level>() messaging macros and ACPI-based
CPU hot-remove support from Toshi Kani.
- ACPI EC updates from Feng Tang.
- cpufreq updates from Viresh Kumar, Fabio Baltieri and others.
- cpuidle changes to quickly notice governor prediction failure from
Youquan Song.
- Support for using multiple cpuidle drivers at the same time and
cpuidle cleanups from Daniel Lezcano.
- devfreq updates from Nishanth Menon and others.
- cpupower update from Thomas Renninger.
- Fixes and small cleanups all over the place.
* tag 'pm+acpi-for-3.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (196 commits)
mmc: sdhci-acpi: enable runtime-pm for device HID INT33C6
ACPI: add Haswell LPSS devices to acpi_platform_device_ids list
ACPI: add documentation about ACPI 5 enumeration
pnpacpi: fix incorrect TEST_ALPHA() test
ACPI / PM: Fix header of acpi_dev_pm_detach() in acpi.h
ACPI / video: ignore BIOS initial backlight value for HP Folio 13-2000
ACPI : do not use Lid and Sleep button for S5 wakeup
ACPI / PNP: Do not crash due to stale pointer use during system resume
ACPI / video: Add "Asus UL30VT" to ACPI video detect blacklist
ACPI: do acpisleep dmi check when CONFIG_ACPI_SLEEP is set
spi / ACPI: add ACPI enumeration support
gpio / ACPI: add ACPI support
PM / devfreq: remove compiler error with module governors (2)
cpupower: IvyBridge (0x3a and 0x3e models) support
cpupower: Provide -c param for cpupower monitor to schedule process on all cores
cpupower tools: Fix warning and a bug with the cpu package count
cpupower tools: Fix malloc of cpu_info structure
cpupower tools: Fix issues with sysfs_topology_read_file
cpupower tools: Fix minor warnings
cpupower tools: Update .gitignore for files created in the debug directories
...
233 files changed, 9470 insertions, 3326 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-devfreq b/Documentation/ABI/testing/sysfs-class-devfreq index 23d78b5aab11..0ba6ea2f89d9 100644 --- a/Documentation/ABI/testing/sysfs-class-devfreq +++ b/Documentation/ABI/testing/sysfs-class-devfreq | |||
@@ -11,7 +11,7 @@ What: /sys/class/devfreq/.../governor | |||
11 | Date: September 2011 | 11 | Date: September 2011 |
12 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> | 12 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> |
13 | Description: | 13 | Description: |
14 | The /sys/class/devfreq/.../governor shows the name of the | 14 | The /sys/class/devfreq/.../governor shows or sets the name of the |
15 | governor used by the corresponding devfreq object. | 15 | governor used by the corresponding devfreq object. |
16 | 16 | ||
17 | What: /sys/class/devfreq/.../cur_freq | 17 | What: /sys/class/devfreq/.../cur_freq |
@@ -19,15 +19,16 @@ Date: September 2011 | |||
19 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> | 19 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> |
20 | Description: | 20 | Description: |
21 | The /sys/class/devfreq/.../cur_freq shows the current | 21 | The /sys/class/devfreq/.../cur_freq shows the current |
22 | frequency of the corresponding devfreq object. | 22 | frequency of the corresponding devfreq object. Same as |
23 | target_freq when get_cur_freq() is not implemented by | ||
24 | devfreq driver. | ||
23 | 25 | ||
24 | What: /sys/class/devfreq/.../central_polling | 26 | What: /sys/class/devfreq/.../target_freq |
25 | Date: September 2011 | 27 | Date: September 2012 |
26 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> | 28 | Contact: Rajagopal Venkat <rajagopal.venkat@linaro.org> |
27 | Description: | 29 | Description: |
28 | The /sys/class/devfreq/.../central_polling shows whether | 30 | The /sys/class/devfreq/.../target_freq shows the next governor |
29 | the devfreq ojbect is using devfreq-provided central | 31 | predicted target frequency of the corresponding devfreq object. |
30 | polling mechanism or not. | ||
31 | 32 | ||
32 | What: /sys/class/devfreq/.../polling_interval | 33 | What: /sys/class/devfreq/.../polling_interval |
33 | Date: September 2011 | 34 | Date: September 2011 |
@@ -43,6 +44,17 @@ Description: | |||
43 | (/sys/class/devfreq/.../central_polling is 0), this value | 44 | (/sys/class/devfreq/.../central_polling is 0), this value |
44 | may be useless. | 45 | may be useless. |
45 | 46 | ||
47 | What: /sys/class/devfreq/.../trans_stat | ||
48 | Date: October 2012 | ||
49 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> | ||
50 | Description: | ||
51 | This ABI shows the statistics of devfreq behavior on a | ||
52 | specific device. It shows the time spent in each state and | ||
53 | the number of transitions between states. | ||
54 | In order to activate this ABI, the devfreq target device | ||
55 | driver should provide the list of available frequencies | ||
56 | with its profile. | ||
57 | |||
46 | What: /sys/class/devfreq/.../userspace/set_freq | 58 | What: /sys/class/devfreq/.../userspace/set_freq |
47 | Date: September 2011 | 59 | Date: September 2011 |
48 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> | 60 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> |
@@ -50,3 +62,19 @@ Description: | |||
50 | The /sys/class/devfreq/.../userspace/set_freq shows and | 62 | The /sys/class/devfreq/.../userspace/set_freq shows and |
51 | sets the requested frequency for the devfreq object if | 63 | sets the requested frequency for the devfreq object if |
52 | userspace governor is in effect. | 64 | userspace governor is in effect. |
65 | |||
66 | What: /sys/class/devfreq/.../available_frequencies | ||
67 | Date: October 2012 | ||
68 | Contact: Nishanth Menon <nm@ti.com> | ||
69 | Description: | ||
70 | The /sys/class/devfreq/.../available_frequencies shows | ||
71 | the available frequencies of the corresponding devfreq object. | ||
72 | This is a snapshot of available frequencies and not limited | ||
73 | by the min/max frequency restrictions. | ||
74 | |||
75 | What: /sys/class/devfreq/.../available_governors | ||
76 | Date: October 2012 | ||
77 | Contact: Nishanth Menon <nm@ti.com> | ||
78 | Description: | ||
79 | The /sys/class/devfreq/.../available_governors shows | ||
80 | currently available governors in the system. | ||
diff --git a/Documentation/ABI/testing/sysfs-devices-power b/Documentation/ABI/testing/sysfs-devices-power index 45000f0db4d4..7fc2997b23a6 100644 --- a/Documentation/ABI/testing/sysfs-devices-power +++ b/Documentation/ABI/testing/sysfs-devices-power | |||
@@ -204,3 +204,34 @@ Description: | |||
204 | 204 | ||
205 | This attribute has no effect on system-wide suspend/resume and | 205 | This attribute has no effect on system-wide suspend/resume and |
206 | hibernation. | 206 | hibernation. |
207 | |||
208 | What: /sys/devices/.../power/pm_qos_no_power_off | ||
209 | Date: September 2012 | ||
210 | Contact: Rafael J. Wysocki <rjw@sisk.pl> | ||
211 | Description: | ||
212 | The /sys/devices/.../power/pm_qos_no_power_off attribute | ||
213 | is used for manipulating the PM QoS "no power off" flag. If | ||
214 | set, this flag indicates to the kernel that power should not | ||
215 | be removed entirely from the device. | ||
216 | |||
217 | Not all drivers support this attribute. If it isn't supported, | ||
218 | it is not present. | ||
219 | |||
220 | This attribute has no effect on system-wide suspend/resume and | ||
221 | hibernation. | ||
222 | |||
223 | What: /sys/devices/.../power/pm_qos_remote_wakeup | ||
224 | Date: September 2012 | ||
225 | Contact: Rafael J. Wysocki <rjw@sisk.pl> | ||
226 | Description: | ||
227 | The /sys/devices/.../power/pm_qos_remote_wakeup attribute | ||
228 | is used for manipulating the PM QoS "remote wakeup required" | ||
229 | flag. If set, this flag indicates to the kernel that the | ||
230 | device is a source of user events that have to be signaled from | ||
231 | its low-power states. | ||
232 | |||
233 | Not all drivers support this attribute. If it isn't supported, | ||
234 | it is not present. | ||
235 | |||
236 | This attribute has no effect on system-wide suspend/resume and | ||
237 | hibernation. | ||
diff --git a/Documentation/ABI/testing/sysfs-devices-sun b/Documentation/ABI/testing/sysfs-devices-sun new file mode 100644 index 000000000000..86be9848a77e --- /dev/null +++ b/Documentation/ABI/testing/sysfs-devices-sun | |||
@@ -0,0 +1,14 @@ | |||
1 | What: /sys/devices/.../sun | ||
2 | Date: October 2012 | ||
3 | Contact: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com> | ||
4 | Description: | ||
5 | The file contains a Slot-unique ID which is provided by the _SUN | ||
6 | method in the ACPI namespace. The value is written in Advanced | ||
7 | Configuration and Power Interface Specification as follows: | ||
8 | |||
9 | "The _SUN value is required to be unique among the slots of | ||
10 | the same type. It is also recommended that this number match | ||
11 | the slot number printed on the physical slot whenever possible." | ||
12 | |||
13 | So reading the sysfs file, we can identify a physical position | ||
14 | of the slot in the system. | ||
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt new file mode 100644 index 000000000000..4f27785ca0c8 --- /dev/null +++ b/Documentation/acpi/enumeration.txt | |||
@@ -0,0 +1,227 @@ | |||
1 | ACPI based device enumeration | ||
2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
3 | ACPI 5 introduced a set of new resources (UartSerialBus, I2cSerialBus, | ||
4 | SpiSerialBus, GpioIo and GpioInt) which can be used in enumerating slave | ||
5 | devices behind serial bus controllers. | ||
6 | |||
7 | In addition we are starting to see peripherals integrated in the | ||
8 | SoC/Chipset to appear only in ACPI namespace. These are typically devices | ||
9 | that are accessed through memory-mapped registers. | ||
10 | |||
11 | In order to support this and re-use the existing drivers as much as | ||
12 | possible we decided to do following: | ||
13 | |||
14 | o Devices that have no bus connector resource are represented as | ||
15 | platform devices. | ||
16 | |||
17 | o Devices behind real busses where there is a connector resource | ||
18 | are represented as struct spi_device or struct i2c_device | ||
19 | (standard UARTs are not busses so there is no struct uart_device). | ||
20 | |||
21 | As both ACPI and Device Tree represent a tree of devices (and their | ||
22 | resources) this implementation follows the Device Tree way as much as | ||
23 | possible. | ||
24 | |||
25 | The ACPI implementation enumerates devices behind busses (platform, SPI and | ||
26 | I2C), creates the physical devices and binds them to their ACPI handle in | ||
27 | the ACPI namespace. | ||
28 | |||
29 | This means that when ACPI_HANDLE(dev) returns non-NULL the device was | ||
30 | enumerated from ACPI namespace. This handle can be used to extract other | ||
31 | device-specific configuration. There is an example of this below. | ||
32 | |||
33 | Platform bus support | ||
34 | ~~~~~~~~~~~~~~~~~~~~ | ||
35 | Since we are using platform devices to represent devices that are not | ||
36 | connected to any physical bus we only need to implement a platform driver | ||
37 | for the device and add supported ACPI IDs. If this same IP-block is used on | ||
38 | some other non-ACPI platform, the driver might work out of the box or needs | ||
39 | some minor changes. | ||
40 | |||
41 | Adding ACPI support for an existing driver should be pretty | ||
42 | straightforward. Here is the simplest example: | ||
43 | |||
44 | #ifdef CONFIG_ACPI | ||
45 | static struct acpi_device_id mydrv_acpi_match[] = { | ||
46 | /* ACPI IDs here */ | ||
47 | { } | ||
48 | }; | ||
49 | MODULE_DEVICE_TABLE(acpi, mydrv_acpi_match); | ||
50 | #endif | ||
51 | |||
52 | static struct platform_driver my_driver = { | ||
53 | ... | ||
54 | .driver = { | ||
55 | .acpi_match_table = ACPI_PTR(mydrv_acpi_match), | ||
56 | }, | ||
57 | }; | ||
58 | |||
59 | If the driver needs to perform more complex initialization like getting and | ||
60 | configuring GPIOs it can get its ACPI handle and extract this information | ||
61 | from ACPI tables. | ||
62 | |||
63 | Currently the kernel is not able to automatically determine from which ACPI | ||
64 | device it should make the corresponding platform device so we need to add | ||
65 | the ACPI device explicitly to acpi_platform_device_ids list defined in | ||
66 | drivers/acpi/scan.c. This limitation is only for the platform devices, SPI | ||
67 | and I2C devices are created automatically as described below. | ||
68 | |||
69 | SPI serial bus support | ||
70 | ~~~~~~~~~~~~~~~~~~~~~~ | ||
71 | Slave devices behind SPI bus have SpiSerialBus resource attached to them. | ||
72 | This is extracted automatically by the SPI core and the slave devices are | ||
73 | enumerated once spi_register_master() is called by the bus driver. | ||
74 | |||
75 | Here is what the ACPI namespace for a SPI slave might look like: | ||
76 | |||
77 | Device (EEP0) | ||
78 | { | ||
79 | Name (_ADR, 1) | ||
80 | Name (_CID, Package() { | ||
81 | "ATML0025", | ||
82 | "AT25", | ||
83 | }) | ||
84 | ... | ||
85 | Method (_CRS, 0, NotSerialized) | ||
86 | { | ||
87 | SPISerialBus(1, PolarityLow, FourWireMode, 8, | ||
88 | ControllerInitiated, 1000000, ClockPolarityLow, | ||
89 | ClockPhaseFirst, "\\_SB.PCI0.SPI1",) | ||
90 | } | ||
91 | ... | ||
92 | |||
93 | The SPI device drivers only need to add ACPI IDs in a similar way than with | ||
94 | the platform device drivers. Below is an example where we add ACPI support | ||
95 | to at25 SPI eeprom driver (this is meant for the above ACPI snippet): | ||
96 | |||
97 | #ifdef CONFIG_ACPI | ||
98 | static struct acpi_device_id at25_acpi_match[] = { | ||
99 | { "AT25", 0 }, | ||
100 | { }, | ||
101 | }; | ||
102 | MODULE_DEVICE_TABLE(acpi, at25_acpi_match); | ||
103 | #endif | ||
104 | |||
105 | static struct spi_driver at25_driver = { | ||
106 | .driver = { | ||
107 | ... | ||
108 | .acpi_match_table = ACPI_PTR(at25_acpi_match), | ||
109 | }, | ||
110 | }; | ||
111 | |||
112 | Note that this driver actually needs more information like page size of the | ||
113 | eeprom etc. but at the time writing this there is no standard way of | ||
114 | passing those. One idea is to return this in _DSM method like: | ||
115 | |||
116 | Device (EEP0) | ||
117 | { | ||
118 | ... | ||
119 | Method (_DSM, 4, NotSerialized) | ||
120 | { | ||
121 | Store (Package (6) | ||
122 | { | ||
123 | "byte-len", 1024, | ||
124 | "addr-mode", 2, | ||
125 | "page-size", 32 | ||
126 | }, Local0) | ||
127 | |||
128 | // Check UUIDs etc. | ||
129 | |||
130 | Return (Local0) | ||
131 | } | ||
132 | |||
133 | Then the at25 SPI driver can get this configuration by calling _DSM on its | ||
134 | ACPI handle like: | ||
135 | |||
136 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
137 | struct acpi_object_list input; | ||
138 | acpi_status status; | ||
139 | |||
140 | /* Fill in the input buffer */ | ||
141 | |||
142 | status = acpi_evaluate_object(ACPI_HANDLE(&spi->dev), "_DSM", | ||
143 | &input, &output); | ||
144 | if (ACPI_FAILURE(status)) | ||
145 | /* Handle the error */ | ||
146 | |||
147 | /* Extract the data here */ | ||
148 | |||
149 | kfree(output.pointer); | ||
150 | |||
151 | I2C serial bus support | ||
152 | ~~~~~~~~~~~~~~~~~~~~~~ | ||
153 | The slaves behind I2C bus controller only need to add the ACPI IDs like | ||
154 | with the platform and SPI drivers. However the I2C bus controller driver | ||
155 | needs to call acpi_i2c_register_devices() after it has added the adapter. | ||
156 | |||
157 | An I2C bus (controller) driver does: | ||
158 | |||
159 | ... | ||
160 | ret = i2c_add_numbered_adapter(adapter); | ||
161 | if (ret) | ||
162 | /* handle error */ | ||
163 | |||
164 | of_i2c_register_devices(adapter); | ||
165 | /* Enumerate the slave devices behind this bus via ACPI */ | ||
166 | acpi_i2c_register_devices(adapter); | ||
167 | |||
168 | Below is an example of how to add ACPI support to the existing mpu3050 | ||
169 | input driver: | ||
170 | |||
171 | #ifdef CONFIG_ACPI | ||
172 | static struct acpi_device_id mpu3050_acpi_match[] = { | ||
173 | { "MPU3050", 0 }, | ||
174 | { }, | ||
175 | }; | ||
176 | MODULE_DEVICE_TABLE(acpi, mpu3050_acpi_match); | ||
177 | #endif | ||
178 | |||
179 | static struct i2c_driver mpu3050_i2c_driver = { | ||
180 | .driver = { | ||
181 | .name = "mpu3050", | ||
182 | .owner = THIS_MODULE, | ||
183 | .pm = &mpu3050_pm, | ||
184 | .of_match_table = mpu3050_of_match, | ||
185 | .acpi_match_table = ACPI_PTR(mpu3050_acpi_match), | ||
186 | }, | ||
187 | .probe = mpu3050_probe, | ||
188 | .remove = __devexit_p(mpu3050_remove), | ||
189 | .id_table = mpu3050_ids, | ||
190 | }; | ||
191 | |||
192 | GPIO support | ||
193 | ~~~~~~~~~~~~ | ||
194 | ACPI 5 introduced two new resources to describe GPIO connections: GpioIo | ||
195 | and GpioInt. These resources can be used to pass GPIO numbers used by | ||
196 | the device to the driver. For example: | ||
197 | |||
198 | Method (_CRS, 0, NotSerialized) | ||
199 | { | ||
200 | Name (SBUF, ResourceTemplate() | ||
201 | { | ||
202 | GpioIo (Exclusive, PullDefault, 0x0000, 0x0000, | ||
203 | IoRestrictionOutputOnly, "\\_SB.PCI0.GPI0", | ||
204 | 0x00, ResourceConsumer,,) | ||
205 | { | ||
206 | // Pin List | ||
207 | 0x0055 | ||
208 | } | ||
209 | ... | ||
210 | |||
211 | Return (SBUF) | ||
212 | } | ||
213 | } | ||
214 | |||
215 | These GPIO numbers are controller relative and path "\\_SB.PCI0.GPI0" | ||
216 | specifies the path to the controller. In order to use these GPIOs in Linux | ||
217 | we need to translate them to the Linux GPIO numbers. | ||
218 | |||
219 | The driver can do this by including <linux/acpi_gpio.h> and then calling | ||
220 | acpi_get_gpio(path, gpio). This will return the Linux GPIO number or | ||
221 | negative errno if there was no translation found. | ||
222 | |||
223 | Other GpioIo parameters must be converted first by the driver to be | ||
224 | suitable to the gpiolib before passing them. | ||
225 | |||
226 | In case of GpioInt resource an additional call to gpio_to_irq() must be | ||
227 | done before calling request_irq(). | ||
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-spear.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-spear.txt new file mode 100644 index 000000000000..f3d44984d91c --- /dev/null +++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-spear.txt | |||
@@ -0,0 +1,42 @@ | |||
1 | SPEAr cpufreq driver | ||
2 | ------------------- | ||
3 | |||
4 | SPEAr SoC cpufreq driver for CPU frequency scaling. | ||
5 | It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) systems | ||
6 | which share clock across all CPUs. | ||
7 | |||
8 | Required properties: | ||
9 | - cpufreq_tbl: Table of frequencies CPU could be transitioned into, in the | ||
10 | increasing order. | ||
11 | |||
12 | Optional properties: | ||
13 | - clock-latency: Specify the possible maximum transition latency for clock, in | ||
14 | unit of nanoseconds. | ||
15 | |||
16 | Both required and optional properties listed above must be defined under node | ||
17 | /cpus/cpu@0. | ||
18 | |||
19 | Examples: | ||
20 | -------- | ||
21 | cpus { | ||
22 | |||
23 | <...> | ||
24 | |||
25 | cpu@0 { | ||
26 | compatible = "arm,cortex-a9"; | ||
27 | reg = <0>; | ||
28 | |||
29 | <...> | ||
30 | |||
31 | cpufreq_tbl = < 166000 | ||
32 | 200000 | ||
33 | 250000 | ||
34 | 300000 | ||
35 | 400000 | ||
36 | 500000 | ||
37 | 600000 >; | ||
38 | }; | ||
39 | |||
40 | <...> | ||
41 | |||
42 | }; | ||
diff --git a/Documentation/power/pm_qos_interface.txt b/Documentation/power/pm_qos_interface.txt index 17e130a80347..79a2a58425ee 100644 --- a/Documentation/power/pm_qos_interface.txt +++ b/Documentation/power/pm_qos_interface.txt | |||
@@ -99,7 +99,7 @@ reading the aggregated value does not require any locking mechanism. | |||
99 | 99 | ||
100 | From kernel mode the use of this interface is the following: | 100 | From kernel mode the use of this interface is the following: |
101 | 101 | ||
102 | int dev_pm_qos_add_request(device, handle, value): | 102 | int dev_pm_qos_add_request(device, handle, type, value): |
103 | Will insert an element into the list for that identified device with the | 103 | Will insert an element into the list for that identified device with the |
104 | target value. Upon change to this list the new target is recomputed and any | 104 | target value. Upon change to this list the new target is recomputed and any |
105 | registered notifiers are called only if the target value is now different. | 105 | registered notifiers are called only if the target value is now different. |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 67f1fdbad7f9..672e0a2d5871 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -908,6 +908,7 @@ config ARCH_NOMADIK | |||
908 | 908 | ||
909 | config PLAT_SPEAR | 909 | config PLAT_SPEAR |
910 | bool "ST SPEAr" | 910 | bool "ST SPEAr" |
911 | select ARCH_HAS_CPUFREQ | ||
911 | select ARCH_REQUIRE_GPIOLIB | 912 | select ARCH_REQUIRE_GPIOLIB |
912 | select ARM_AMBA | 913 | select ARM_AMBA |
913 | select CLKDEV_LOOKUP | 914 | select CLKDEV_LOOKUP |
diff --git a/arch/ia64/include/asm/device.h b/arch/ia64/include/asm/device.h index d05e78f6db94..f69c32ffbe6a 100644 --- a/arch/ia64/include/asm/device.h +++ b/arch/ia64/include/asm/device.h | |||
@@ -7,9 +7,6 @@ | |||
7 | #define _ASM_IA64_DEVICE_H | 7 | #define _ASM_IA64_DEVICE_H |
8 | 8 | ||
9 | struct dev_archdata { | 9 | struct dev_archdata { |
10 | #ifdef CONFIG_ACPI | ||
11 | void *acpi_handle; | ||
12 | #endif | ||
13 | #ifdef CONFIG_INTEL_IOMMU | 10 | #ifdef CONFIG_INTEL_IOMMU |
14 | void *iommu; /* hook for IOMMU specific extension */ | 11 | void *iommu; /* hook for IOMMU specific extension */ |
15 | #endif | 12 | #endif |
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 440578850ae5..e9682f5be343 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c | |||
@@ -633,6 +633,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int triggering, int polarity) | |||
633 | ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE : | 633 | ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE : |
634 | IOSAPIC_LEVEL); | 634 | IOSAPIC_LEVEL); |
635 | } | 635 | } |
636 | EXPORT_SYMBOL_GPL(acpi_register_gsi); | ||
636 | 637 | ||
637 | void acpi_unregister_gsi(u32 gsi) | 638 | void acpi_unregister_gsi(u32 gsi) |
638 | { | 639 | { |
@@ -644,6 +645,7 @@ void acpi_unregister_gsi(u32 gsi) | |||
644 | 645 | ||
645 | iosapic_unregister_intr(gsi); | 646 | iosapic_unregister_intr(gsi); |
646 | } | 647 | } |
648 | EXPORT_SYMBOL_GPL(acpi_unregister_gsi); | ||
647 | 649 | ||
648 | static int __init acpi_parse_fadt(struct acpi_table_header *table) | 650 | static int __init acpi_parse_fadt(struct acpi_table_header *table) |
649 | { | 651 | { |
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c index 45d00e5fe14d..4d806b419606 100644 --- a/arch/powerpc/platforms/pseries/processor_idle.c +++ b/arch/powerpc/platforms/pseries/processor_idle.c | |||
@@ -36,7 +36,7 @@ static struct cpuidle_state *cpuidle_state_table; | |||
36 | static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before) | 36 | static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before) |
37 | { | 37 | { |
38 | 38 | ||
39 | *kt_before = ktime_get_real(); | 39 | *kt_before = ktime_get(); |
40 | *in_purr = mfspr(SPRN_PURR); | 40 | *in_purr = mfspr(SPRN_PURR); |
41 | /* | 41 | /* |
42 | * Indicate to the HV that we are idle. Now would be | 42 | * Indicate to the HV that we are idle. Now would be |
@@ -50,7 +50,7 @@ static inline s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before) | |||
50 | get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr; | 50 | get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr; |
51 | get_lppaca()->idle = 0; | 51 | get_lppaca()->idle = 0; |
52 | 52 | ||
53 | return ktime_to_us(ktime_sub(ktime_get_real(), kt_before)); | 53 | return ktime_to_us(ktime_sub(ktime_get(), kt_before)); |
54 | } | 54 | } |
55 | 55 | ||
56 | static int snooze_loop(struct cpuidle_device *dev, | 56 | static int snooze_loop(struct cpuidle_device *dev, |
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h index 93e1c55f14ab..03dd72957d2f 100644 --- a/arch/x86/include/asm/device.h +++ b/arch/x86/include/asm/device.h | |||
@@ -2,9 +2,6 @@ | |||
2 | #define _ASM_X86_DEVICE_H | 2 | #define _ASM_X86_DEVICE_H |
3 | 3 | ||
4 | struct dev_archdata { | 4 | struct dev_archdata { |
5 | #ifdef CONFIG_ACPI | ||
6 | void *acpi_handle; | ||
7 | #endif | ||
8 | #ifdef CONFIG_X86_DEV_DMA_OPS | 5 | #ifdef CONFIG_X86_DEV_DMA_OPS |
9 | struct dma_map_ops *dma_ops; | 6 | struct dma_map_ops *dma_ops; |
10 | #endif | 7 | #endif |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index e651f7a589ac..e48cafcf92ae 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -574,6 +574,12 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) | |||
574 | 574 | ||
575 | return irq; | 575 | return irq; |
576 | } | 576 | } |
577 | EXPORT_SYMBOL_GPL(acpi_register_gsi); | ||
578 | |||
579 | void acpi_unregister_gsi(u32 gsi) | ||
580 | { | ||
581 | } | ||
582 | EXPORT_SYMBOL_GPL(acpi_unregister_gsi); | ||
577 | 583 | ||
578 | void __init acpi_set_irq_model_pic(void) | 584 | void __init acpi_set_irq_model_pic(void) |
579 | { | 585 | { |
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 11676cf65aee..d5e0d717005a 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c | |||
@@ -101,6 +101,8 @@ static int __init acpi_sleep_setup(char *str) | |||
101 | #endif | 101 | #endif |
102 | if (strncmp(str, "nonvs", 5) == 0) | 102 | if (strncmp(str, "nonvs", 5) == 0) |
103 | acpi_nvs_nosave(); | 103 | acpi_nvs_nosave(); |
104 | if (strncmp(str, "nonvs_s3", 8) == 0) | ||
105 | acpi_nvs_nosave_s3(); | ||
104 | if (strncmp(str, "old_ordering", 12) == 0) | 106 | if (strncmp(str, "old_ordering", 12) == 0) |
105 | acpi_old_suspend_ordering(); | 107 | acpi_old_suspend_ordering(); |
106 | str = strchr(str, ','); | 108 | str = strchr(str, ','); |
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 119d58db8342..0300bf612946 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
@@ -181,6 +181,12 @@ config ACPI_DOCK | |||
181 | This driver supports ACPI-controlled docking stations and removable | 181 | This driver supports ACPI-controlled docking stations and removable |
182 | drive bays such as the IBM Ultrabay and the Dell Module Bay. | 182 | drive bays such as the IBM Ultrabay and the Dell Module Bay. |
183 | 183 | ||
184 | config ACPI_I2C | ||
185 | def_tristate I2C | ||
186 | depends on I2C | ||
187 | help | ||
188 | ACPI I2C enumeration support. | ||
189 | |||
184 | config ACPI_PROCESSOR | 190 | config ACPI_PROCESSOR |
185 | tristate "Processor" | 191 | tristate "Processor" |
186 | select THERMAL | 192 | select THERMAL |
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 82422fe90f81..2a4502becd13 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile | |||
@@ -21,9 +21,10 @@ obj-y += acpi.o \ | |||
21 | acpi-y += osl.o utils.o reboot.o | 21 | acpi-y += osl.o utils.o reboot.o |
22 | acpi-y += nvs.o | 22 | acpi-y += nvs.o |
23 | 23 | ||
24 | # sleep related files | 24 | # Power management related files |
25 | acpi-y += wakeup.o | 25 | acpi-y += wakeup.o |
26 | acpi-y += sleep.o | 26 | acpi-y += sleep.o |
27 | acpi-$(CONFIG_PM) += device_pm.o | ||
27 | acpi-$(CONFIG_ACPI_SLEEP) += proc.o | 28 | acpi-$(CONFIG_ACPI_SLEEP) += proc.o |
28 | 29 | ||
29 | 30 | ||
@@ -32,10 +33,12 @@ acpi-$(CONFIG_ACPI_SLEEP) += proc.o | |||
32 | # | 33 | # |
33 | acpi-y += bus.o glue.o | 34 | acpi-y += bus.o glue.o |
34 | acpi-y += scan.o | 35 | acpi-y += scan.o |
36 | acpi-y += resource.o | ||
35 | acpi-y += processor_core.o | 37 | acpi-y += processor_core.o |
36 | acpi-y += ec.o | 38 | acpi-y += ec.o |
37 | acpi-$(CONFIG_ACPI_DOCK) += dock.o | 39 | acpi-$(CONFIG_ACPI_DOCK) += dock.o |
38 | acpi-y += pci_root.o pci_link.o pci_irq.o pci_bind.o | 40 | acpi-y += pci_root.o pci_link.o pci_irq.o pci_bind.o |
41 | acpi-y += acpi_platform.o | ||
39 | acpi-y += power.o | 42 | acpi-y += power.o |
40 | acpi-y += event.o | 43 | acpi-y += event.o |
41 | acpi-y += sysfs.o | 44 | acpi-y += sysfs.o |
@@ -67,6 +70,7 @@ obj-$(CONFIG_ACPI_HED) += hed.o | |||
67 | obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o | 70 | obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o |
68 | obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o | 71 | obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o |
69 | obj-$(CONFIG_ACPI_BGRT) += bgrt.o | 72 | obj-$(CONFIG_ACPI_BGRT) += bgrt.o |
73 | obj-$(CONFIG_ACPI_I2C) += acpi_i2c.o | ||
70 | 74 | ||
71 | # processor has its own "processor." module_param namespace | 75 | # processor has its own "processor." module_param namespace |
72 | processor-y := processor_driver.o processor_throttling.o | 76 | processor-y := processor_driver.o processor_throttling.o |
diff --git a/drivers/acpi/acpi_i2c.c b/drivers/acpi/acpi_i2c.c new file mode 100644 index 000000000000..82045e3f5cac --- /dev/null +++ b/drivers/acpi/acpi_i2c.c | |||
@@ -0,0 +1,103 @@ | |||
1 | /* | ||
2 | * ACPI I2C enumeration support | ||
3 | * | ||
4 | * Copyright (C) 2012, Intel Corporation | ||
5 | * Author: Mika Westerberg <mika.westerberg@linux.intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/acpi.h> | ||
13 | #include <linux/device.h> | ||
14 | #include <linux/export.h> | ||
15 | #include <linux/i2c.h> | ||
16 | #include <linux/ioport.h> | ||
17 | |||
18 | ACPI_MODULE_NAME("i2c"); | ||
19 | |||
20 | static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data) | ||
21 | { | ||
22 | struct i2c_board_info *info = data; | ||
23 | |||
24 | if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { | ||
25 | struct acpi_resource_i2c_serialbus *sb; | ||
26 | |||
27 | sb = &ares->data.i2c_serial_bus; | ||
28 | if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) { | ||
29 | info->addr = sb->slave_address; | ||
30 | if (sb->access_mode == ACPI_I2C_10BIT_MODE) | ||
31 | info->flags |= I2C_CLIENT_TEN; | ||
32 | } | ||
33 | } else if (info->irq < 0) { | ||
34 | struct resource r; | ||
35 | |||
36 | if (acpi_dev_resource_interrupt(ares, 0, &r)) | ||
37 | info->irq = r.start; | ||
38 | } | ||
39 | |||
40 | /* Tell the ACPI core to skip this resource */ | ||
41 | return 1; | ||
42 | } | ||
43 | |||
44 | static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level, | ||
45 | void *data, void **return_value) | ||
46 | { | ||
47 | struct i2c_adapter *adapter = data; | ||
48 | struct list_head resource_list; | ||
49 | struct i2c_board_info info; | ||
50 | struct acpi_device *adev; | ||
51 | int ret; | ||
52 | |||
53 | if (acpi_bus_get_device(handle, &adev)) | ||
54 | return AE_OK; | ||
55 | if (acpi_bus_get_status(adev) || !adev->status.present) | ||
56 | return AE_OK; | ||
57 | |||
58 | memset(&info, 0, sizeof(info)); | ||
59 | info.acpi_node.handle = handle; | ||
60 | info.irq = -1; | ||
61 | |||
62 | INIT_LIST_HEAD(&resource_list); | ||
63 | ret = acpi_dev_get_resources(adev, &resource_list, | ||
64 | acpi_i2c_add_resource, &info); | ||
65 | acpi_dev_free_resource_list(&resource_list); | ||
66 | |||
67 | if (ret < 0 || !info.addr) | ||
68 | return AE_OK; | ||
69 | |||
70 | strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type)); | ||
71 | if (!i2c_new_device(adapter, &info)) { | ||
72 | dev_err(&adapter->dev, | ||
73 | "failed to add I2C device %s from ACPI\n", | ||
74 | dev_name(&adev->dev)); | ||
75 | } | ||
76 | |||
77 | return AE_OK; | ||
78 | } | ||
79 | |||
80 | /** | ||
81 | * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter | ||
82 | * @adapter: pointer to adapter | ||
83 | * | ||
84 | * Enumerate all I2C slave devices behind this adapter by walking the ACPI | ||
85 | * namespace. When a device is found it will be added to the Linux device | ||
86 | * model and bound to the corresponding ACPI handle. | ||
87 | */ | ||
88 | void acpi_i2c_register_devices(struct i2c_adapter *adapter) | ||
89 | { | ||
90 | acpi_handle handle; | ||
91 | acpi_status status; | ||
92 | |||
93 | handle = ACPI_HANDLE(&adapter->dev); | ||
94 | if (!handle) | ||
95 | return; | ||
96 | |||
97 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, | ||
98 | acpi_i2c_add_device, NULL, | ||
99 | adapter, NULL); | ||
100 | if (ACPI_FAILURE(status)) | ||
101 | dev_warn(&adapter->dev, "failed to enumerate I2C slaves\n"); | ||
102 | } | ||
103 | EXPORT_SYMBOL_GPL(acpi_i2c_register_devices); | ||
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c index 24c807f96636..eb30e5ab4cab 100644 --- a/drivers/acpi/acpi_memhotplug.c +++ b/drivers/acpi/acpi_memhotplug.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/types.h> | 31 | #include <linux/types.h> |
32 | #include <linux/memory_hotplug.h> | 32 | #include <linux/memory_hotplug.h> |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/acpi.h> | ||
34 | #include <acpi/acpi_drivers.h> | 35 | #include <acpi/acpi_drivers.h> |
35 | 36 | ||
36 | #define ACPI_MEMORY_DEVICE_CLASS "memory" | 37 | #define ACPI_MEMORY_DEVICE_CLASS "memory" |
@@ -78,6 +79,7 @@ struct acpi_memory_info { | |||
78 | unsigned short caching; /* memory cache attribute */ | 79 | unsigned short caching; /* memory cache attribute */ |
79 | unsigned short write_protect; /* memory read/write attribute */ | 80 | unsigned short write_protect; /* memory read/write attribute */ |
80 | unsigned int enabled:1; | 81 | unsigned int enabled:1; |
82 | unsigned int failed:1; | ||
81 | }; | 83 | }; |
82 | 84 | ||
83 | struct acpi_memory_device { | 85 | struct acpi_memory_device { |
@@ -86,8 +88,6 @@ struct acpi_memory_device { | |||
86 | struct list_head res_list; | 88 | struct list_head res_list; |
87 | }; | 89 | }; |
88 | 90 | ||
89 | static int acpi_hotmem_initialized; | ||
90 | |||
91 | static acpi_status | 91 | static acpi_status |
92 | acpi_memory_get_resource(struct acpi_resource *resource, void *context) | 92 | acpi_memory_get_resource(struct acpi_resource *resource, void *context) |
93 | { | 93 | { |
@@ -125,12 +125,20 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context) | |||
125 | return AE_OK; | 125 | return AE_OK; |
126 | } | 126 | } |
127 | 127 | ||
128 | static void | ||
129 | acpi_memory_free_device_resources(struct acpi_memory_device *mem_device) | ||
130 | { | ||
131 | struct acpi_memory_info *info, *n; | ||
132 | |||
133 | list_for_each_entry_safe(info, n, &mem_device->res_list, list) | ||
134 | kfree(info); | ||
135 | INIT_LIST_HEAD(&mem_device->res_list); | ||
136 | } | ||
137 | |||
128 | static int | 138 | static int |
129 | acpi_memory_get_device_resources(struct acpi_memory_device *mem_device) | 139 | acpi_memory_get_device_resources(struct acpi_memory_device *mem_device) |
130 | { | 140 | { |
131 | acpi_status status; | 141 | acpi_status status; |
132 | struct acpi_memory_info *info, *n; | ||
133 | |||
134 | 142 | ||
135 | if (!list_empty(&mem_device->res_list)) | 143 | if (!list_empty(&mem_device->res_list)) |
136 | return 0; | 144 | return 0; |
@@ -138,9 +146,7 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device) | |||
138 | status = acpi_walk_resources(mem_device->device->handle, METHOD_NAME__CRS, | 146 | status = acpi_walk_resources(mem_device->device->handle, METHOD_NAME__CRS, |
139 | acpi_memory_get_resource, mem_device); | 147 | acpi_memory_get_resource, mem_device); |
140 | if (ACPI_FAILURE(status)) { | 148 | if (ACPI_FAILURE(status)) { |
141 | list_for_each_entry_safe(info, n, &mem_device->res_list, list) | 149 | acpi_memory_free_device_resources(mem_device); |
142 | kfree(info); | ||
143 | INIT_LIST_HEAD(&mem_device->res_list); | ||
144 | return -EINVAL; | 150 | return -EINVAL; |
145 | } | 151 | } |
146 | 152 | ||
@@ -170,7 +176,7 @@ acpi_memory_get_device(acpi_handle handle, | |||
170 | /* Get the parent device */ | 176 | /* Get the parent device */ |
171 | result = acpi_bus_get_device(phandle, &pdevice); | 177 | result = acpi_bus_get_device(phandle, &pdevice); |
172 | if (result) { | 178 | if (result) { |
173 | printk(KERN_WARNING PREFIX "Cannot get acpi bus device"); | 179 | acpi_handle_warn(phandle, "Cannot get acpi bus device\n"); |
174 | return -EINVAL; | 180 | return -EINVAL; |
175 | } | 181 | } |
176 | 182 | ||
@@ -180,14 +186,14 @@ acpi_memory_get_device(acpi_handle handle, | |||
180 | */ | 186 | */ |
181 | result = acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE); | 187 | result = acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE); |
182 | if (result) { | 188 | if (result) { |
183 | printk(KERN_WARNING PREFIX "Cannot add acpi bus"); | 189 | acpi_handle_warn(handle, "Cannot add acpi bus\n"); |
184 | return -EINVAL; | 190 | return -EINVAL; |
185 | } | 191 | } |
186 | 192 | ||
187 | end: | 193 | end: |
188 | *mem_device = acpi_driver_data(device); | 194 | *mem_device = acpi_driver_data(device); |
189 | if (!(*mem_device)) { | 195 | if (!(*mem_device)) { |
190 | printk(KERN_ERR "\n driver data not found"); | 196 | dev_err(&device->dev, "driver data not found\n"); |
191 | return -ENODEV; | 197 | return -ENODEV; |
192 | } | 198 | } |
193 | 199 | ||
@@ -224,7 +230,8 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) | |||
224 | /* Get the range from the _CRS */ | 230 | /* Get the range from the _CRS */ |
225 | result = acpi_memory_get_device_resources(mem_device); | 231 | result = acpi_memory_get_device_resources(mem_device); |
226 | if (result) { | 232 | if (result) { |
227 | printk(KERN_ERR PREFIX "get_device_resources failed\n"); | 233 | dev_err(&mem_device->device->dev, |
234 | "get_device_resources failed\n"); | ||
228 | mem_device->state = MEMORY_INVALID_STATE; | 235 | mem_device->state = MEMORY_INVALID_STATE; |
229 | return result; | 236 | return result; |
230 | } | 237 | } |
@@ -251,13 +258,27 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) | |||
251 | node = memory_add_physaddr_to_nid(info->start_addr); | 258 | node = memory_add_physaddr_to_nid(info->start_addr); |
252 | 259 | ||
253 | result = add_memory(node, info->start_addr, info->length); | 260 | result = add_memory(node, info->start_addr, info->length); |
254 | if (result) | 261 | |
262 | /* | ||
263 | * If the memory block has been used by the kernel, add_memory() | ||
264 | * returns -EEXIST. If add_memory() returns the other error, it | ||
265 | * means that this memory block is not used by the kernel. | ||
266 | */ | ||
267 | if (result && result != -EEXIST) { | ||
268 | info->failed = 1; | ||
255 | continue; | 269 | continue; |
256 | info->enabled = 1; | 270 | } |
271 | |||
272 | if (!result) | ||
273 | info->enabled = 1; | ||
274 | /* | ||
275 | * Add num_enable even if add_memory() returns -EEXIST, so the | ||
276 | * device is bound to this driver. | ||
277 | */ | ||
257 | num_enabled++; | 278 | num_enabled++; |
258 | } | 279 | } |
259 | if (!num_enabled) { | 280 | if (!num_enabled) { |
260 | printk(KERN_ERR PREFIX "add_memory failed\n"); | 281 | dev_err(&mem_device->device->dev, "add_memory failed\n"); |
261 | mem_device->state = MEMORY_INVALID_STATE; | 282 | mem_device->state = MEMORY_INVALID_STATE; |
262 | return -EINVAL; | 283 | return -EINVAL; |
263 | } | 284 | } |
@@ -272,68 +293,31 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) | |||
272 | return 0; | 293 | return 0; |
273 | } | 294 | } |
274 | 295 | ||
275 | static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device) | 296 | static int acpi_memory_remove_memory(struct acpi_memory_device *mem_device) |
276 | { | 297 | { |
277 | acpi_status status; | 298 | int result = 0; |
278 | struct acpi_object_list arg_list; | 299 | struct acpi_memory_info *info, *n; |
279 | union acpi_object arg; | ||
280 | unsigned long long current_status; | ||
281 | |||
282 | |||
283 | /* Issue the _EJ0 command */ | ||
284 | arg_list.count = 1; | ||
285 | arg_list.pointer = &arg; | ||
286 | arg.type = ACPI_TYPE_INTEGER; | ||
287 | arg.integer.value = 1; | ||
288 | status = acpi_evaluate_object(mem_device->device->handle, | ||
289 | "_EJ0", &arg_list, NULL); | ||
290 | /* Return on _EJ0 failure */ | ||
291 | if (ACPI_FAILURE(status)) { | ||
292 | ACPI_EXCEPTION((AE_INFO, status, "_EJ0 failed")); | ||
293 | return -ENODEV; | ||
294 | } | ||
295 | |||
296 | /* Evalute _STA to check if the device is disabled */ | ||
297 | status = acpi_evaluate_integer(mem_device->device->handle, "_STA", | ||
298 | NULL, ¤t_status); | ||
299 | if (ACPI_FAILURE(status)) | ||
300 | return -ENODEV; | ||
301 | |||
302 | /* Check for device status. Device should be disabled */ | ||
303 | if (current_status & ACPI_STA_DEVICE_ENABLED) | ||
304 | return -EINVAL; | ||
305 | 300 | ||
306 | return 0; | 301 | list_for_each_entry_safe(info, n, &mem_device->res_list, list) { |
307 | } | 302 | if (info->failed) |
303 | /* The kernel does not use this memory block */ | ||
304 | continue; | ||
308 | 305 | ||
309 | static int acpi_memory_disable_device(struct acpi_memory_device *mem_device) | 306 | if (!info->enabled) |
310 | { | 307 | /* |
311 | int result; | 308 | * The kernel uses this memory block, but it may be not |
312 | struct acpi_memory_info *info, *n; | 309 | * managed by us. |
310 | */ | ||
311 | return -EBUSY; | ||
313 | 312 | ||
313 | result = remove_memory(info->start_addr, info->length); | ||
314 | if (result) | ||
315 | return result; | ||
314 | 316 | ||
315 | /* | 317 | list_del(&info->list); |
316 | * Ask the VM to offline this memory range. | ||
317 | * Note: Assume that this function returns zero on success | ||
318 | */ | ||
319 | list_for_each_entry_safe(info, n, &mem_device->res_list, list) { | ||
320 | if (info->enabled) { | ||
321 | result = remove_memory(info->start_addr, info->length); | ||
322 | if (result) | ||
323 | return result; | ||
324 | } | ||
325 | kfree(info); | 318 | kfree(info); |
326 | } | 319 | } |
327 | 320 | ||
328 | /* Power-off and eject the device */ | ||
329 | result = acpi_memory_powerdown_device(mem_device); | ||
330 | if (result) { | ||
331 | /* Set the status of the device to invalid */ | ||
332 | mem_device->state = MEMORY_INVALID_STATE; | ||
333 | return result; | ||
334 | } | ||
335 | |||
336 | mem_device->state = MEMORY_POWER_OFF_STATE; | ||
337 | return result; | 321 | return result; |
338 | } | 322 | } |
339 | 323 | ||
@@ -341,6 +325,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data) | |||
341 | { | 325 | { |
342 | struct acpi_memory_device *mem_device; | 326 | struct acpi_memory_device *mem_device; |
343 | struct acpi_device *device; | 327 | struct acpi_device *device; |
328 | struct acpi_eject_event *ej_event = NULL; | ||
344 | u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */ | 329 | u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */ |
345 | 330 | ||
346 | switch (event) { | 331 | switch (event) { |
@@ -353,7 +338,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data) | |||
353 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 338 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
354 | "\nReceived DEVICE CHECK notification for device\n")); | 339 | "\nReceived DEVICE CHECK notification for device\n")); |
355 | if (acpi_memory_get_device(handle, &mem_device)) { | 340 | if (acpi_memory_get_device(handle, &mem_device)) { |
356 | printk(KERN_ERR PREFIX "Cannot find driver data\n"); | 341 | acpi_handle_err(handle, "Cannot find driver data\n"); |
357 | break; | 342 | break; |
358 | } | 343 | } |
359 | 344 | ||
@@ -361,7 +346,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data) | |||
361 | break; | 346 | break; |
362 | 347 | ||
363 | if (acpi_memory_enable_device(mem_device)) { | 348 | if (acpi_memory_enable_device(mem_device)) { |
364 | printk(KERN_ERR PREFIX "Cannot enable memory device\n"); | 349 | acpi_handle_err(handle,"Cannot enable memory device\n"); |
365 | break; | 350 | break; |
366 | } | 351 | } |
367 | 352 | ||
@@ -373,40 +358,28 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data) | |||
373 | "\nReceived EJECT REQUEST notification for device\n")); | 358 | "\nReceived EJECT REQUEST notification for device\n")); |
374 | 359 | ||
375 | if (acpi_bus_get_device(handle, &device)) { | 360 | if (acpi_bus_get_device(handle, &device)) { |
376 | printk(KERN_ERR PREFIX "Device doesn't exist\n"); | 361 | acpi_handle_err(handle, "Device doesn't exist\n"); |
377 | break; | 362 | break; |
378 | } | 363 | } |
379 | mem_device = acpi_driver_data(device); | 364 | mem_device = acpi_driver_data(device); |
380 | if (!mem_device) { | 365 | if (!mem_device) { |
381 | printk(KERN_ERR PREFIX "Driver Data is NULL\n"); | 366 | acpi_handle_err(handle, "Driver Data is NULL\n"); |
382 | break; | 367 | break; |
383 | } | 368 | } |
384 | 369 | ||
385 | /* | 370 | ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL); |
386 | * Currently disabling memory device from kernel mode | 371 | if (!ej_event) { |
387 | * TBD: Can also be disabled from user mode scripts | 372 | pr_err(PREFIX "No memory, dropping EJECT\n"); |
388 | * TBD: Can also be disabled by Callback registration | ||
389 | * with generic sysfs driver | ||
390 | */ | ||
391 | if (acpi_memory_disable_device(mem_device)) { | ||
392 | printk(KERN_ERR PREFIX "Disable memory device\n"); | ||
393 | /* | ||
394 | * If _EJ0 was called but failed, _OST is not | ||
395 | * necessary. | ||
396 | */ | ||
397 | if (mem_device->state == MEMORY_INVALID_STATE) | ||
398 | return; | ||
399 | |||
400 | break; | 373 | break; |
401 | } | 374 | } |
402 | 375 | ||
403 | /* | 376 | ej_event->handle = handle; |
404 | * TBD: Invoke acpi_bus_remove to cleanup data structures | 377 | ej_event->event = ACPI_NOTIFY_EJECT_REQUEST; |
405 | */ | 378 | acpi_os_hotplug_execute(acpi_bus_hot_remove_device, |
379 | (void *)ej_event); | ||
406 | 380 | ||
407 | /* _EJ0 succeeded; _OST is not necessary */ | 381 | /* eject is performed asynchronously */ |
408 | return; | 382 | return; |
409 | |||
410 | default: | 383 | default: |
411 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 384 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
412 | "Unsupported event [0x%x]\n", event)); | 385 | "Unsupported event [0x%x]\n", event)); |
@@ -420,6 +393,15 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data) | |||
420 | return; | 393 | return; |
421 | } | 394 | } |
422 | 395 | ||
396 | static void acpi_memory_device_free(struct acpi_memory_device *mem_device) | ||
397 | { | ||
398 | if (!mem_device) | ||
399 | return; | ||
400 | |||
401 | acpi_memory_free_device_resources(mem_device); | ||
402 | kfree(mem_device); | ||
403 | } | ||
404 | |||
423 | static int acpi_memory_device_add(struct acpi_device *device) | 405 | static int acpi_memory_device_add(struct acpi_device *device) |
424 | { | 406 | { |
425 | int result; | 407 | int result; |
@@ -449,23 +431,16 @@ static int acpi_memory_device_add(struct acpi_device *device) | |||
449 | /* Set the device state */ | 431 | /* Set the device state */ |
450 | mem_device->state = MEMORY_POWER_ON_STATE; | 432 | mem_device->state = MEMORY_POWER_ON_STATE; |
451 | 433 | ||
452 | printk(KERN_DEBUG "%s \n", acpi_device_name(device)); | 434 | pr_debug("%s\n", acpi_device_name(device)); |
453 | |||
454 | /* | ||
455 | * Early boot code has recognized memory area by EFI/E820. | ||
456 | * If DSDT shows these memory devices on boot, hotplug is not necessary | ||
457 | * for them. So, it just returns until completion of this driver's | ||
458 | * start up. | ||
459 | */ | ||
460 | if (!acpi_hotmem_initialized) | ||
461 | return 0; | ||
462 | 435 | ||
463 | if (!acpi_memory_check_device(mem_device)) { | 436 | if (!acpi_memory_check_device(mem_device)) { |
464 | /* call add_memory func */ | 437 | /* call add_memory func */ |
465 | result = acpi_memory_enable_device(mem_device); | 438 | result = acpi_memory_enable_device(mem_device); |
466 | if (result) | 439 | if (result) { |
467 | printk(KERN_ERR PREFIX | 440 | dev_err(&device->dev, |
468 | "Error in acpi_memory_enable_device\n"); | 441 | "Error in acpi_memory_enable_device\n"); |
442 | acpi_memory_device_free(mem_device); | ||
443 | } | ||
469 | } | 444 | } |
470 | return result; | 445 | return result; |
471 | } | 446 | } |
@@ -473,13 +448,18 @@ static int acpi_memory_device_add(struct acpi_device *device) | |||
473 | static int acpi_memory_device_remove(struct acpi_device *device, int type) | 448 | static int acpi_memory_device_remove(struct acpi_device *device, int type) |
474 | { | 449 | { |
475 | struct acpi_memory_device *mem_device = NULL; | 450 | struct acpi_memory_device *mem_device = NULL; |
476 | 451 | int result; | |
477 | 452 | ||
478 | if (!device || !acpi_driver_data(device)) | 453 | if (!device || !acpi_driver_data(device)) |
479 | return -EINVAL; | 454 | return -EINVAL; |
480 | 455 | ||
481 | mem_device = acpi_driver_data(device); | 456 | mem_device = acpi_driver_data(device); |
482 | kfree(mem_device); | 457 | |
458 | result = acpi_memory_remove_memory(mem_device); | ||
459 | if (result) | ||
460 | return result; | ||
461 | |||
462 | acpi_memory_device_free(mem_device); | ||
483 | 463 | ||
484 | return 0; | 464 | return 0; |
485 | } | 465 | } |
@@ -568,7 +548,6 @@ static int __init acpi_memory_device_init(void) | |||
568 | return -ENODEV; | 548 | return -ENODEV; |
569 | } | 549 | } |
570 | 550 | ||
571 | acpi_hotmem_initialized = 1; | ||
572 | return 0; | 551 | return 0; |
573 | } | 552 | } |
574 | 553 | ||
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index af4aad6ee2eb..16fa979f7180 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c | |||
@@ -286,7 +286,7 @@ static ssize_t acpi_pad_rrtime_store(struct device *dev, | |||
286 | struct device_attribute *attr, const char *buf, size_t count) | 286 | struct device_attribute *attr, const char *buf, size_t count) |
287 | { | 287 | { |
288 | unsigned long num; | 288 | unsigned long num; |
289 | if (strict_strtoul(buf, 0, &num)) | 289 | if (kstrtoul(buf, 0, &num)) |
290 | return -EINVAL; | 290 | return -EINVAL; |
291 | if (num < 1 || num >= 100) | 291 | if (num < 1 || num >= 100) |
292 | return -EINVAL; | 292 | return -EINVAL; |
@@ -309,7 +309,7 @@ static ssize_t acpi_pad_idlepct_store(struct device *dev, | |||
309 | struct device_attribute *attr, const char *buf, size_t count) | 309 | struct device_attribute *attr, const char *buf, size_t count) |
310 | { | 310 | { |
311 | unsigned long num; | 311 | unsigned long num; |
312 | if (strict_strtoul(buf, 0, &num)) | 312 | if (kstrtoul(buf, 0, &num)) |
313 | return -EINVAL; | 313 | return -EINVAL; |
314 | if (num < 1 || num >= 100) | 314 | if (num < 1 || num >= 100) |
315 | return -EINVAL; | 315 | return -EINVAL; |
@@ -332,7 +332,7 @@ static ssize_t acpi_pad_idlecpus_store(struct device *dev, | |||
332 | struct device_attribute *attr, const char *buf, size_t count) | 332 | struct device_attribute *attr, const char *buf, size_t count) |
333 | { | 333 | { |
334 | unsigned long num; | 334 | unsigned long num; |
335 | if (strict_strtoul(buf, 0, &num)) | 335 | if (kstrtoul(buf, 0, &num)) |
336 | return -EINVAL; | 336 | return -EINVAL; |
337 | mutex_lock(&isolated_cpus_lock); | 337 | mutex_lock(&isolated_cpus_lock); |
338 | acpi_pad_idle_cpus(num); | 338 | acpi_pad_idle_cpus(num); |
@@ -457,7 +457,7 @@ static void acpi_pad_notify(acpi_handle handle, u32 event, | |||
457 | dev_name(&device->dev), event, 0); | 457 | dev_name(&device->dev), event, 0); |
458 | break; | 458 | break; |
459 | default: | 459 | default: |
460 | printk(KERN_WARNING "Unsupported event [0x%x]\n", event); | 460 | pr_warn("Unsupported event [0x%x]\n", event); |
461 | break; | 461 | break; |
462 | } | 462 | } |
463 | } | 463 | } |
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c new file mode 100644 index 000000000000..db129b9f52cb --- /dev/null +++ b/drivers/acpi/acpi_platform.c | |||
@@ -0,0 +1,104 @@ | |||
1 | /* | ||
2 | * ACPI support for platform bus type. | ||
3 | * | ||
4 | * Copyright (C) 2012, Intel Corporation | ||
5 | * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> | ||
6 | * Mathias Nyman <mathias.nyman@linux.intel.com> | ||
7 | * Rafael J. Wysocki <rafael.j.wysocki@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #include <linux/acpi.h> | ||
15 | #include <linux/device.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | |||
20 | #include "internal.h" | ||
21 | |||
22 | ACPI_MODULE_NAME("platform"); | ||
23 | |||
24 | /** | ||
25 | * acpi_create_platform_device - Create platform device for ACPI device node | ||
26 | * @adev: ACPI device node to create a platform device for. | ||
27 | * | ||
28 | * Check if the given @adev can be represented as a platform device and, if | ||
29 | * that's the case, create and register a platform device, populate its common | ||
30 | * resources and returns a pointer to it. Otherwise, return %NULL. | ||
31 | * | ||
32 | * The platform device's name will be taken from the @adev's _HID and _UID. | ||
33 | */ | ||
34 | struct platform_device *acpi_create_platform_device(struct acpi_device *adev) | ||
35 | { | ||
36 | struct platform_device *pdev = NULL; | ||
37 | struct acpi_device *acpi_parent; | ||
38 | struct platform_device_info pdevinfo; | ||
39 | struct resource_list_entry *rentry; | ||
40 | struct list_head resource_list; | ||
41 | struct resource *resources; | ||
42 | int count; | ||
43 | |||
44 | /* If the ACPI node already has a physical device attached, skip it. */ | ||
45 | if (adev->physical_node_count) | ||
46 | return NULL; | ||
47 | |||
48 | INIT_LIST_HEAD(&resource_list); | ||
49 | count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); | ||
50 | if (count <= 0) | ||
51 | return NULL; | ||
52 | |||
53 | resources = kmalloc(count * sizeof(struct resource), GFP_KERNEL); | ||
54 | if (!resources) { | ||
55 | dev_err(&adev->dev, "No memory for resources\n"); | ||
56 | acpi_dev_free_resource_list(&resource_list); | ||
57 | return NULL; | ||
58 | } | ||
59 | count = 0; | ||
60 | list_for_each_entry(rentry, &resource_list, node) | ||
61 | resources[count++] = rentry->res; | ||
62 | |||
63 | acpi_dev_free_resource_list(&resource_list); | ||
64 | |||
65 | memset(&pdevinfo, 0, sizeof(pdevinfo)); | ||
66 | /* | ||
67 | * If the ACPI node has a parent and that parent has a physical device | ||
68 | * attached to it, that physical device should be the parent of the | ||
69 | * platform device we are about to create. | ||
70 | */ | ||
71 | pdevinfo.parent = NULL; | ||
72 | acpi_parent = adev->parent; | ||
73 | if (acpi_parent) { | ||
74 | struct acpi_device_physical_node *entry; | ||
75 | struct list_head *list; | ||
76 | |||
77 | mutex_lock(&acpi_parent->physical_node_lock); | ||
78 | list = &acpi_parent->physical_node_list; | ||
79 | if (!list_empty(list)) { | ||
80 | entry = list_first_entry(list, | ||
81 | struct acpi_device_physical_node, | ||
82 | node); | ||
83 | pdevinfo.parent = entry->dev; | ||
84 | } | ||
85 | mutex_unlock(&acpi_parent->physical_node_lock); | ||
86 | } | ||
87 | pdevinfo.name = dev_name(&adev->dev); | ||
88 | pdevinfo.id = -1; | ||
89 | pdevinfo.res = resources; | ||
90 | pdevinfo.num_res = count; | ||
91 | pdevinfo.acpi_node.handle = adev->handle; | ||
92 | pdev = platform_device_register_full(&pdevinfo); | ||
93 | if (IS_ERR(pdev)) { | ||
94 | dev_err(&adev->dev, "platform device creation failed: %ld\n", | ||
95 | PTR_ERR(pdev)); | ||
96 | pdev = NULL; | ||
97 | } else { | ||
98 | dev_dbg(&adev->dev, "created platform device %s\n", | ||
99 | dev_name(&pdev->dev)); | ||
100 | } | ||
101 | |||
102 | kfree(resources); | ||
103 | return pdev; | ||
104 | } | ||
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index 7f1d40797e80..c8bc24bd1f72 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile | |||
@@ -161,3 +161,6 @@ acpi-y += \ | |||
161 | utxfinit.o \ | 161 | utxfinit.o \ |
162 | utxferror.o \ | 162 | utxferror.o \ |
163 | utxfmutex.o | 163 | utxfmutex.o |
164 | |||
165 | acpi-$(ACPI_FUTURE_USAGE) += uttrack.o utcache.o utclib.o | ||
166 | |||
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h index 5e8abb07724f..432a318c9ed1 100644 --- a/drivers/acpi/acpica/acdebug.h +++ b/drivers/acpi/acpica/acdebug.h | |||
@@ -44,17 +44,28 @@ | |||
44 | #ifndef __ACDEBUG_H__ | 44 | #ifndef __ACDEBUG_H__ |
45 | #define __ACDEBUG_H__ | 45 | #define __ACDEBUG_H__ |
46 | 46 | ||
47 | #define ACPI_DEBUG_BUFFER_SIZE 4196 | 47 | #define ACPI_DEBUG_BUFFER_SIZE 0x4000 /* 16K buffer for return objects */ |
48 | 48 | ||
49 | struct command_info { | 49 | struct acpi_db_command_info { |
50 | char *name; /* Command Name */ | 50 | char *name; /* Command Name */ |
51 | u8 min_args; /* Minimum arguments required */ | 51 | u8 min_args; /* Minimum arguments required */ |
52 | }; | 52 | }; |
53 | 53 | ||
54 | struct argument_info { | 54 | struct acpi_db_command_help { |
55 | u8 line_count; /* Number of help lines */ | ||
56 | char *invocation; /* Command Invocation */ | ||
57 | char *description; /* Command Description */ | ||
58 | }; | ||
59 | |||
60 | struct acpi_db_argument_info { | ||
55 | char *name; /* Argument Name */ | 61 | char *name; /* Argument Name */ |
56 | }; | 62 | }; |
57 | 63 | ||
64 | struct acpi_db_execute_walk { | ||
65 | u32 count; | ||
66 | u32 max_count; | ||
67 | }; | ||
68 | |||
58 | #define PARAM_LIST(pl) pl | 69 | #define PARAM_LIST(pl) pl |
59 | #define DBTEST_OUTPUT_LEVEL(lvl) if (acpi_gbl_db_opt_verbose) | 70 | #define DBTEST_OUTPUT_LEVEL(lvl) if (acpi_gbl_db_opt_verbose) |
60 | #define VERBOSE_PRINT(fp) DBTEST_OUTPUT_LEVEL(lvl) {\ | 71 | #define VERBOSE_PRINT(fp) DBTEST_OUTPUT_LEVEL(lvl) {\ |
@@ -77,59 +88,71 @@ acpi_db_single_step(struct acpi_walk_state *walk_state, | |||
77 | /* | 88 | /* |
78 | * dbcmds - debug commands and output routines | 89 | * dbcmds - debug commands and output routines |
79 | */ | 90 | */ |
80 | acpi_status acpi_db_disassemble_method(char *name); | 91 | struct acpi_namespace_node *acpi_db_convert_to_node(char *in_string); |
81 | 92 | ||
82 | void acpi_db_display_table_info(char *table_arg); | 93 | void acpi_db_display_table_info(char *table_arg); |
83 | 94 | ||
84 | void acpi_db_unload_acpi_table(char *table_arg, char *instance_arg); | 95 | void acpi_db_display_template(char *buffer_arg); |
85 | 96 | ||
86 | void | 97 | void acpi_db_unload_acpi_table(char *name); |
87 | acpi_db_set_method_breakpoint(char *location, | ||
88 | struct acpi_walk_state *walk_state, | ||
89 | union acpi_parse_object *op); | ||
90 | 98 | ||
91 | void acpi_db_set_method_call_breakpoint(union acpi_parse_object *op); | 99 | void acpi_db_send_notify(char *name, u32 value); |
92 | 100 | ||
93 | void acpi_db_get_bus_info(void); | 101 | void acpi_db_display_interfaces(char *action_arg, char *interface_name_arg); |
94 | 102 | ||
95 | void acpi_db_disassemble_aml(char *statements, union acpi_parse_object *op); | 103 | acpi_status acpi_db_sleep(char *object_arg); |
96 | 104 | ||
97 | void acpi_db_dump_namespace(char *start_arg, char *depth_arg); | 105 | void acpi_db_display_locks(void); |
98 | 106 | ||
99 | void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg); | 107 | void acpi_db_display_resources(char *object_arg); |
100 | 108 | ||
101 | void acpi_db_send_notify(char *name, u32 value); | 109 | ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_display_gpes(void)) |
110 | |||
111 | void acpi_db_display_handlers(void); | ||
112 | |||
113 | ACPI_HW_DEPENDENT_RETURN_VOID(void | ||
114 | acpi_db_generate_gpe(char *gpe_arg, | ||
115 | char *block_arg)) | ||
116 | |||
117 | /* | ||
118 | * dbmethod - control method commands | ||
119 | */ | ||
120 | void | ||
121 | acpi_db_set_method_breakpoint(char *location, | ||
122 | struct acpi_walk_state *walk_state, | ||
123 | union acpi_parse_object *op); | ||
124 | |||
125 | void acpi_db_set_method_call_breakpoint(union acpi_parse_object *op); | ||
102 | 126 | ||
103 | void acpi_db_set_method_data(char *type_arg, char *index_arg, char *value_arg); | 127 | void acpi_db_set_method_data(char *type_arg, char *index_arg, char *value_arg); |
104 | 128 | ||
105 | acpi_status | 129 | acpi_status acpi_db_disassemble_method(char *name); |
106 | acpi_db_display_objects(char *obj_type_arg, char *display_count_arg); | ||
107 | 130 | ||
108 | void acpi_db_display_interfaces(char *action_arg, char *interface_name_arg); | 131 | void acpi_db_disassemble_aml(char *statements, union acpi_parse_object *op); |
109 | 132 | ||
110 | acpi_status acpi_db_find_name_in_namespace(char *name_arg); | 133 | void acpi_db_batch_execute(char *count_arg); |
111 | 134 | ||
135 | /* | ||
136 | * dbnames - namespace commands | ||
137 | */ | ||
112 | void acpi_db_set_scope(char *name); | 138 | void acpi_db_set_scope(char *name); |
113 | 139 | ||
114 | ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_db_sleep(char *object_arg)) | 140 | void acpi_db_dump_namespace(char *start_arg, char *depth_arg); |
115 | 141 | ||
116 | void acpi_db_find_references(char *object_arg); | 142 | void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg); |
117 | 143 | ||
118 | void acpi_db_display_locks(void); | 144 | acpi_status acpi_db_find_name_in_namespace(char *name_arg); |
119 | 145 | ||
120 | void acpi_db_display_resources(char *object_arg); | 146 | void acpi_db_check_predefined_names(void); |
121 | 147 | ||
122 | ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_display_gpes(void)) | 148 | acpi_status |
149 | acpi_db_display_objects(char *obj_type_arg, char *display_count_arg); | ||
123 | 150 | ||
124 | void acpi_db_check_integrity(void); | 151 | void acpi_db_check_integrity(void); |
125 | 152 | ||
126 | ACPI_HW_DEPENDENT_RETURN_VOID(void | 153 | void acpi_db_find_references(char *object_arg); |
127 | acpi_db_generate_gpe(char *gpe_arg, | ||
128 | char *block_arg)) | ||
129 | |||
130 | void acpi_db_check_predefined_names(void); | ||
131 | 154 | ||
132 | void acpi_db_batch_execute(void); | 155 | void acpi_db_get_bus_info(void); |
133 | 156 | ||
134 | /* | 157 | /* |
135 | * dbdisply - debug display commands | 158 | * dbdisply - debug display commands |
@@ -161,7 +184,8 @@ acpi_db_display_argument_object(union acpi_operand_object *obj_desc, | |||
161 | /* | 184 | /* |
162 | * dbexec - debugger control method execution | 185 | * dbexec - debugger control method execution |
163 | */ | 186 | */ |
164 | void acpi_db_execute(char *name, char **args, u32 flags); | 187 | void |
188 | acpi_db_execute(char *name, char **args, acpi_object_type * types, u32 flags); | ||
165 | 189 | ||
166 | void | 190 | void |
167 | acpi_db_create_execution_threads(char *num_threads_arg, | 191 | acpi_db_create_execution_threads(char *num_threads_arg, |
@@ -175,7 +199,8 @@ u32 acpi_db_get_cache_info(struct acpi_memory_list *cache); | |||
175 | * dbfileio - Debugger file I/O commands | 199 | * dbfileio - Debugger file I/O commands |
176 | */ | 200 | */ |
177 | acpi_object_type | 201 | acpi_object_type |
178 | acpi_db_match_argument(char *user_argument, struct argument_info *arguments); | 202 | acpi_db_match_argument(char *user_argument, |
203 | struct acpi_db_argument_info *arguments); | ||
179 | 204 | ||
180 | void acpi_db_close_debug_file(void); | 205 | void acpi_db_close_debug_file(void); |
181 | 206 | ||
@@ -208,6 +233,11 @@ acpi_db_command_dispatch(char *input_buffer, | |||
208 | 233 | ||
209 | void ACPI_SYSTEM_XFACE acpi_db_execute_thread(void *context); | 234 | void ACPI_SYSTEM_XFACE acpi_db_execute_thread(void *context); |
210 | 235 | ||
236 | acpi_status acpi_db_user_commands(char prompt, union acpi_parse_object *op); | ||
237 | |||
238 | char *acpi_db_get_next_token(char *string, | ||
239 | char **next, acpi_object_type * return_type); | ||
240 | |||
211 | /* | 241 | /* |
212 | * dbstats - Generation and display of ACPI table statistics | 242 | * dbstats - Generation and display of ACPI table statistics |
213 | */ | 243 | */ |
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h index 5935ba6707e2..ed33ebcdaebe 100644 --- a/drivers/acpi/acpica/acdispat.h +++ b/drivers/acpi/acpica/acdispat.h | |||
@@ -309,10 +309,13 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state *walk_state); | |||
309 | acpi_status | 309 | acpi_status |
310 | acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state *walk_state); | 310 | acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state *walk_state); |
311 | 311 | ||
312 | struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union acpi_parse_object | 312 | struct acpi_walk_state * acpi_ds_create_walk_state(acpi_owner_id owner_id, |
313 | *origin, union acpi_operand_object | 313 | union acpi_parse_object |
314 | *mth_desc, struct acpi_thread_state | 314 | *origin, |
315 | *thread); | 315 | union acpi_operand_object |
316 | *mth_desc, | ||
317 | struct acpi_thread_state | ||
318 | *thread); | ||
316 | 319 | ||
317 | acpi_status | 320 | acpi_status |
318 | acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state, | 321 | acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state, |
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index c0a43b38c6a3..e975c6720448 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h | |||
@@ -84,9 +84,11 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info); | |||
84 | 84 | ||
85 | acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); | 85 | acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); |
86 | 86 | ||
87 | acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); | 87 | acpi_status |
88 | acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); | ||
88 | 89 | ||
89 | acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); | 90 | acpi_status |
91 | acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); | ||
90 | 92 | ||
91 | struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, | 93 | struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, |
92 | u32 gpe_number); | 94 | u32 gpe_number); |
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index ce79100fb5eb..64472e4ec329 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h | |||
@@ -70,7 +70,7 @@ | |||
70 | 70 | ||
71 | /* | 71 | /* |
72 | * Enable "slack" in the AML interpreter? Default is FALSE, and the | 72 | * Enable "slack" in the AML interpreter? Default is FALSE, and the |
73 | * interpreter strictly follows the ACPI specification. Setting to TRUE | 73 | * interpreter strictly follows the ACPI specification. Setting to TRUE |
74 | * allows the interpreter to ignore certain errors and/or bad AML constructs. | 74 | * allows the interpreter to ignore certain errors and/or bad AML constructs. |
75 | * | 75 | * |
76 | * Currently, these features are enabled by this flag: | 76 | * Currently, these features are enabled by this flag: |
@@ -155,26 +155,6 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE); | |||
155 | 155 | ||
156 | /***************************************************************************** | 156 | /***************************************************************************** |
157 | * | 157 | * |
158 | * Debug support | ||
159 | * | ||
160 | ****************************************************************************/ | ||
161 | |||
162 | /* Procedure nesting level for debug output */ | ||
163 | |||
164 | extern u32 acpi_gbl_nesting_level; | ||
165 | |||
166 | ACPI_EXTERN u32 acpi_gpe_count; | ||
167 | ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]; | ||
168 | |||
169 | /* Support for dynamic control method tracing mechanism */ | ||
170 | |||
171 | ACPI_EXTERN u32 acpi_gbl_original_dbg_level; | ||
172 | ACPI_EXTERN u32 acpi_gbl_original_dbg_layer; | ||
173 | ACPI_EXTERN u32 acpi_gbl_trace_dbg_level; | ||
174 | ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer; | ||
175 | |||
176 | /***************************************************************************** | ||
177 | * | ||
178 | * ACPI Table globals | 158 | * ACPI Table globals |
179 | * | 159 | * |
180 | ****************************************************************************/ | 160 | ****************************************************************************/ |
@@ -259,15 +239,6 @@ ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE reg | |||
259 | * | 239 | * |
260 | ****************************************************************************/ | 240 | ****************************************************************************/ |
261 | 241 | ||
262 | #ifdef ACPI_DBG_TRACK_ALLOCATIONS | ||
263 | |||
264 | /* Lists for tracking memory allocations */ | ||
265 | |||
266 | ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list; | ||
267 | ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list; | ||
268 | ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats; | ||
269 | #endif | ||
270 | |||
271 | /* Object caches */ | 242 | /* Object caches */ |
272 | 243 | ||
273 | ACPI_EXTERN acpi_cache_t *acpi_gbl_namespace_cache; | 244 | ACPI_EXTERN acpi_cache_t *acpi_gbl_namespace_cache; |
@@ -326,6 +297,15 @@ extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS]; | |||
326 | 297 | ||
327 | #endif | 298 | #endif |
328 | 299 | ||
300 | #ifdef ACPI_DBG_TRACK_ALLOCATIONS | ||
301 | |||
302 | /* Lists for tracking memory allocations */ | ||
303 | |||
304 | ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list; | ||
305 | ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list; | ||
306 | ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats; | ||
307 | #endif | ||
308 | |||
329 | /***************************************************************************** | 309 | /***************************************************************************** |
330 | * | 310 | * |
331 | * Namespace globals | 311 | * Namespace globals |
@@ -396,13 +376,35 @@ ACPI_EXTERN struct acpi_gpe_block_info | |||
396 | #if (!ACPI_REDUCED_HARDWARE) | 376 | #if (!ACPI_REDUCED_HARDWARE) |
397 | 377 | ||
398 | ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized; | 378 | ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized; |
399 | ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler; | 379 | ACPI_EXTERN acpi_gbl_event_handler acpi_gbl_global_event_handler; |
400 | ACPI_EXTERN void *acpi_gbl_global_event_handler_context; | 380 | ACPI_EXTERN void *acpi_gbl_global_event_handler_context; |
401 | 381 | ||
402 | #endif /* !ACPI_REDUCED_HARDWARE */ | 382 | #endif /* !ACPI_REDUCED_HARDWARE */ |
403 | 383 | ||
404 | /***************************************************************************** | 384 | /***************************************************************************** |
405 | * | 385 | * |
386 | * Debug support | ||
387 | * | ||
388 | ****************************************************************************/ | ||
389 | |||
390 | /* Procedure nesting level for debug output */ | ||
391 | |||
392 | extern u32 acpi_gbl_nesting_level; | ||
393 | |||
394 | /* Event counters */ | ||
395 | |||
396 | ACPI_EXTERN u32 acpi_gpe_count; | ||
397 | ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]; | ||
398 | |||
399 | /* Support for dynamic control method tracing mechanism */ | ||
400 | |||
401 | ACPI_EXTERN u32 acpi_gbl_original_dbg_level; | ||
402 | ACPI_EXTERN u32 acpi_gbl_original_dbg_layer; | ||
403 | ACPI_EXTERN u32 acpi_gbl_trace_dbg_level; | ||
404 | ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer; | ||
405 | |||
406 | /***************************************************************************** | ||
407 | * | ||
406 | * Debugger globals | 408 | * Debugger globals |
407 | * | 409 | * |
408 | ****************************************************************************/ | 410 | ****************************************************************************/ |
@@ -426,10 +428,11 @@ ACPI_EXTERN u8 acpi_gbl_db_opt_stats; | |||
426 | ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods; | 428 | ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods; |
427 | 429 | ||
428 | ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]; | 430 | ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]; |
429 | ACPI_EXTERN char acpi_gbl_db_line_buf[80]; | 431 | ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]; |
430 | ACPI_EXTERN char acpi_gbl_db_parsed_buf[80]; | 432 | ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE]; |
431 | ACPI_EXTERN char acpi_gbl_db_scope_buf[40]; | 433 | ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE]; |
432 | ACPI_EXTERN char acpi_gbl_db_debug_filename[40]; | 434 | ACPI_EXTERN char acpi_gbl_db_scope_buf[80]; |
435 | ACPI_EXTERN char acpi_gbl_db_debug_filename[80]; | ||
433 | ACPI_EXTERN u8 acpi_gbl_db_output_to_file; | 436 | ACPI_EXTERN u8 acpi_gbl_db_output_to_file; |
434 | ACPI_EXTERN char *acpi_gbl_db_buffer; | 437 | ACPI_EXTERN char *acpi_gbl_db_buffer; |
435 | ACPI_EXTERN char *acpi_gbl_db_filename; | 438 | ACPI_EXTERN char *acpi_gbl_db_filename; |
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index c816ee675094..ff8bd0061e8b 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h | |||
@@ -262,10 +262,10 @@ struct acpi_create_field_info { | |||
262 | }; | 262 | }; |
263 | 263 | ||
264 | typedef | 264 | typedef |
265 | acpi_status(*ACPI_INTERNAL_METHOD) (struct acpi_walk_state * walk_state); | 265 | acpi_status(*acpi_internal_method) (struct acpi_walk_state * walk_state); |
266 | 266 | ||
267 | /* | 267 | /* |
268 | * Bitmapped ACPI types. Used internally only | 268 | * Bitmapped ACPI types. Used internally only |
269 | */ | 269 | */ |
270 | #define ACPI_BTYPE_ANY 0x00000000 | 270 | #define ACPI_BTYPE_ANY 0x00000000 |
271 | #define ACPI_BTYPE_INTEGER 0x00000001 | 271 | #define ACPI_BTYPE_INTEGER 0x00000001 |
@@ -486,8 +486,10 @@ struct acpi_gpe_device_info { | |||
486 | struct acpi_namespace_node *gpe_device; | 486 | struct acpi_namespace_node *gpe_device; |
487 | }; | 487 | }; |
488 | 488 | ||
489 | typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *gpe_xrupt_info, | 489 | typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info * |
490 | struct acpi_gpe_block_info *gpe_block, void *context); | 490 | gpe_xrupt_info, |
491 | struct acpi_gpe_block_info *gpe_block, | ||
492 | void *context); | ||
491 | 493 | ||
492 | /* Information about each particular fixed event */ | 494 | /* Information about each particular fixed event */ |
493 | 495 | ||
@@ -582,7 +584,7 @@ struct acpi_pscope_state { | |||
582 | }; | 584 | }; |
583 | 585 | ||
584 | /* | 586 | /* |
585 | * Thread state - one per thread across multiple walk states. Multiple walk | 587 | * Thread state - one per thread across multiple walk states. Multiple walk |
586 | * states are created when there are nested control methods executing. | 588 | * states are created when there are nested control methods executing. |
587 | */ | 589 | */ |
588 | struct acpi_thread_state { | 590 | struct acpi_thread_state { |
@@ -645,7 +647,7 @@ union acpi_generic_state { | |||
645 | * | 647 | * |
646 | ****************************************************************************/ | 648 | ****************************************************************************/ |
647 | 649 | ||
648 | typedef acpi_status(*ACPI_EXECUTE_OP) (struct acpi_walk_state * walk_state); | 650 | typedef acpi_status(*acpi_execute_op) (struct acpi_walk_state * walk_state); |
649 | 651 | ||
650 | /* Address Range info block */ | 652 | /* Address Range info block */ |
651 | 653 | ||
@@ -1031,6 +1033,7 @@ struct acpi_db_method_info { | |||
1031 | acpi_handle method; | 1033 | acpi_handle method; |
1032 | acpi_handle main_thread_gate; | 1034 | acpi_handle main_thread_gate; |
1033 | acpi_handle thread_complete_gate; | 1035 | acpi_handle thread_complete_gate; |
1036 | acpi_handle info_gate; | ||
1034 | acpi_thread_id *threads; | 1037 | acpi_thread_id *threads; |
1035 | u32 num_threads; | 1038 | u32 num_threads; |
1036 | u32 num_created; | 1039 | u32 num_created; |
@@ -1041,6 +1044,7 @@ struct acpi_db_method_info { | |||
1041 | u32 num_loops; | 1044 | u32 num_loops; |
1042 | char pathname[128]; | 1045 | char pathname[128]; |
1043 | char **args; | 1046 | char **args; |
1047 | acpi_object_type *types; | ||
1044 | 1048 | ||
1045 | /* | 1049 | /* |
1046 | * Arguments to be passed to method for the command | 1050 | * Arguments to be passed to method for the command |
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index a7f68c47f517..5efad99f2169 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h | |||
@@ -84,29 +84,29 @@ | |||
84 | 84 | ||
85 | /* These macros reverse the bytes during the move, converting little-endian to big endian */ | 85 | /* These macros reverse the bytes during the move, converting little-endian to big endian */ |
86 | 86 | ||
87 | /* Big Endian <== Little Endian */ | 87 | /* Big Endian <== Little Endian */ |
88 | /* Hi...Lo Lo...Hi */ | 88 | /* Hi...Lo Lo...Hi */ |
89 | /* 16-bit source, 16/32/64 destination */ | 89 | /* 16-bit source, 16/32/64 destination */ |
90 | 90 | ||
91 | #define ACPI_MOVE_16_TO_16(d, s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[1];\ | 91 | #define ACPI_MOVE_16_TO_16(d, s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[1];\ |
92 | (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[0];} | 92 | (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[0];} |
93 | 93 | ||
94 | #define ACPI_MOVE_16_TO_32(d, s) {(*(u32 *)(void *)(d))=0;\ | 94 | #define ACPI_MOVE_16_TO_32(d, s) {(*(u32 *)(void *)(d))=0;\ |
95 | ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\ | 95 | ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\ |
96 | ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];} | 96 | ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];} |
97 | 97 | ||
98 | #define ACPI_MOVE_16_TO_64(d, s) {(*(u64 *)(void *)(d))=0;\ | 98 | #define ACPI_MOVE_16_TO_64(d, s) {(*(u64 *)(void *)(d))=0;\ |
99 | ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\ | 99 | ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\ |
100 | ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];} | 100 | ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];} |
101 | 101 | ||
102 | /* 32-bit source, 16/32/64 destination */ | 102 | /* 32-bit source, 16/32/64 destination */ |
103 | 103 | ||
104 | #define ACPI_MOVE_32_TO_16(d, s) ACPI_MOVE_16_TO_16(d, s) /* Truncate to 16 */ | 104 | #define ACPI_MOVE_32_TO_16(d, s) ACPI_MOVE_16_TO_16(d, s) /* Truncate to 16 */ |
105 | 105 | ||
106 | #define ACPI_MOVE_32_TO_32(d, s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[3];\ | 106 | #define ACPI_MOVE_32_TO_32(d, s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[3];\ |
107 | (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[2];\ | 107 | (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[2];\ |
108 | (( u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\ | 108 | (( u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\ |
109 | (( u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];} | 109 | (( u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];} |
110 | 110 | ||
111 | #define ACPI_MOVE_32_TO_64(d, s) {(*(u64 *)(void *)(d))=0;\ | 111 | #define ACPI_MOVE_32_TO_64(d, s) {(*(u64 *)(void *)(d))=0;\ |
112 | ((u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\ | 112 | ((u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\ |
@@ -196,24 +196,12 @@ | |||
196 | #endif | 196 | #endif |
197 | #endif | 197 | #endif |
198 | 198 | ||
199 | /* Macros based on machine integer width */ | ||
200 | |||
201 | #if ACPI_MACHINE_WIDTH == 32 | ||
202 | #define ACPI_MOVE_SIZE_TO_16(d, s) ACPI_MOVE_32_TO_16(d, s) | ||
203 | |||
204 | #elif ACPI_MACHINE_WIDTH == 64 | ||
205 | #define ACPI_MOVE_SIZE_TO_16(d, s) ACPI_MOVE_64_TO_16(d, s) | ||
206 | |||
207 | #else | ||
208 | #error unknown ACPI_MACHINE_WIDTH | ||
209 | #endif | ||
210 | |||
211 | /* | 199 | /* |
212 | * Fast power-of-two math macros for non-optimized compilers | 200 | * Fast power-of-two math macros for non-optimized compilers |
213 | */ | 201 | */ |
214 | #define _ACPI_DIV(value, power_of2) ((u32) ((value) >> (power_of2))) | 202 | #define _ACPI_DIV(value, power_of2) ((u32) ((value) >> (power_of2))) |
215 | #define _ACPI_MUL(value, power_of2) ((u32) ((value) << (power_of2))) | 203 | #define _ACPI_MUL(value, power_of2) ((u32) ((value) << (power_of2))) |
216 | #define _ACPI_MOD(value, divisor) ((u32) ((value) & ((divisor) -1))) | 204 | #define _ACPI_MOD(value, divisor) ((u32) ((value) & ((divisor) -1))) |
217 | 205 | ||
218 | #define ACPI_DIV_2(a) _ACPI_DIV(a, 1) | 206 | #define ACPI_DIV_2(a) _ACPI_DIV(a, 1) |
219 | #define ACPI_MUL_2(a) _ACPI_MUL(a, 1) | 207 | #define ACPI_MUL_2(a) _ACPI_MUL(a, 1) |
@@ -238,12 +226,12 @@ | |||
238 | /* | 226 | /* |
239 | * Rounding macros (Power of two boundaries only) | 227 | * Rounding macros (Power of two boundaries only) |
240 | */ | 228 | */ |
241 | #define ACPI_ROUND_DOWN(value, boundary) (((acpi_size)(value)) & \ | 229 | #define ACPI_ROUND_DOWN(value, boundary) (((acpi_size)(value)) & \ |
242 | (~(((acpi_size) boundary)-1))) | 230 | (~(((acpi_size) boundary)-1))) |
243 | 231 | ||
244 | #define ACPI_ROUND_UP(value, boundary) ((((acpi_size)(value)) + \ | 232 | #define ACPI_ROUND_UP(value, boundary) ((((acpi_size)(value)) + \ |
245 | (((acpi_size) boundary)-1)) & \ | 233 | (((acpi_size) boundary)-1)) & \ |
246 | (~(((acpi_size) boundary)-1))) | 234 | (~(((acpi_size) boundary)-1))) |
247 | 235 | ||
248 | /* Note: sizeof(acpi_size) evaluates to either 4 or 8 (32- vs 64-bit mode) */ | 236 | /* Note: sizeof(acpi_size) evaluates to either 4 or 8 (32- vs 64-bit mode) */ |
249 | 237 | ||
@@ -264,7 +252,7 @@ | |||
264 | 252 | ||
265 | #define ACPI_ROUND_UP_TO(value, boundary) (((value) + ((boundary)-1)) / (boundary)) | 253 | #define ACPI_ROUND_UP_TO(value, boundary) (((value) + ((boundary)-1)) / (boundary)) |
266 | 254 | ||
267 | #define ACPI_IS_MISALIGNED(value) (((acpi_size) value) & (sizeof(acpi_size)-1)) | 255 | #define ACPI_IS_MISALIGNED(value) (((acpi_size) value) & (sizeof(acpi_size)-1)) |
268 | 256 | ||
269 | /* | 257 | /* |
270 | * Bitmask creation | 258 | * Bitmask creation |
@@ -355,7 +343,6 @@ | |||
355 | * Ascii error messages can be configured out | 343 | * Ascii error messages can be configured out |
356 | */ | 344 | */ |
357 | #ifndef ACPI_NO_ERROR_MESSAGES | 345 | #ifndef ACPI_NO_ERROR_MESSAGES |
358 | |||
359 | /* | 346 | /* |
360 | * Error reporting. Callers module and line number are inserted by AE_INFO, | 347 | * Error reporting. Callers module and line number are inserted by AE_INFO, |
361 | * the plist contains a set of parens to allow variable-length lists. | 348 | * the plist contains a set of parens to allow variable-length lists. |
@@ -375,18 +362,15 @@ | |||
375 | #define ACPI_WARN_PREDEFINED(plist) | 362 | #define ACPI_WARN_PREDEFINED(plist) |
376 | #define ACPI_INFO_PREDEFINED(plist) | 363 | #define ACPI_INFO_PREDEFINED(plist) |
377 | 364 | ||
378 | #endif /* ACPI_NO_ERROR_MESSAGES */ | 365 | #endif /* ACPI_NO_ERROR_MESSAGES */ |
379 | 366 | ||
380 | /* | 367 | /* |
381 | * Debug macros that are conditionally compiled | 368 | * Debug macros that are conditionally compiled |
382 | */ | 369 | */ |
383 | #ifdef ACPI_DEBUG_OUTPUT | 370 | #ifdef ACPI_DEBUG_OUTPUT |
384 | |||
385 | /* | 371 | /* |
386 | * Function entry tracing | 372 | * Function entry tracing |
387 | */ | 373 | */ |
388 | #ifdef CONFIG_ACPI_DEBUG_FUNC_TRACE | ||
389 | |||
390 | #define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \ | 374 | #define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \ |
391 | acpi_ut_trace(ACPI_DEBUG_PARAMETERS) | 375 | acpi_ut_trace(ACPI_DEBUG_PARAMETERS) |
392 | #define ACPI_FUNCTION_TRACE_PTR(a, b) ACPI_FUNCTION_NAME(a) \ | 376 | #define ACPI_FUNCTION_TRACE_PTR(a, b) ACPI_FUNCTION_NAME(a) \ |
@@ -464,45 +448,19 @@ | |||
464 | 448 | ||
465 | #endif /* ACPI_SIMPLE_RETURN_MACROS */ | 449 | #endif /* ACPI_SIMPLE_RETURN_MACROS */ |
466 | 450 | ||
467 | #else /* !CONFIG_ACPI_DEBUG_FUNC_TRACE */ | ||
468 | |||
469 | #define ACPI_FUNCTION_TRACE(a) | ||
470 | #define ACPI_FUNCTION_TRACE_PTR(a,b) | ||
471 | #define ACPI_FUNCTION_TRACE_U32(a,b) | ||
472 | #define ACPI_FUNCTION_TRACE_STR(a,b) | ||
473 | #define ACPI_FUNCTION_EXIT | ||
474 | #define ACPI_FUNCTION_STATUS_EXIT(s) | ||
475 | #define ACPI_FUNCTION_VALUE_EXIT(s) | ||
476 | #define ACPI_FUNCTION_TRACE(a) | ||
477 | #define ACPI_FUNCTION_ENTRY() | ||
478 | |||
479 | #define return_VOID return | ||
480 | #define return_ACPI_STATUS(s) return(s) | ||
481 | #define return_VALUE(s) return(s) | ||
482 | #define return_UINT8(s) return(s) | ||
483 | #define return_UINT32(s) return(s) | ||
484 | #define return_PTR(s) return(s) | ||
485 | |||
486 | #endif /* CONFIG_ACPI_DEBUG_FUNC_TRACE */ | ||
487 | |||
488 | /* Conditional execution */ | 451 | /* Conditional execution */ |
489 | 452 | ||
490 | #define ACPI_DEBUG_EXEC(a) a | 453 | #define ACPI_DEBUG_EXEC(a) a |
491 | #define ACPI_NORMAL_EXEC(a) | ||
492 | |||
493 | #define ACPI_DEBUG_DEFINE(a) a; | ||
494 | #define ACPI_DEBUG_ONLY_MEMBERS(a) a; | 454 | #define ACPI_DEBUG_ONLY_MEMBERS(a) a; |
495 | #define _VERBOSE_STRUCTURES | 455 | #define _VERBOSE_STRUCTURES |
496 | 456 | ||
497 | /* Stack and buffer dumping */ | 457 | /* Various object display routines for debug */ |
498 | 458 | ||
499 | #define ACPI_DUMP_STACK_ENTRY(a) acpi_ex_dump_operand((a), 0) | 459 | #define ACPI_DUMP_STACK_ENTRY(a) acpi_ex_dump_operand((a), 0) |
500 | #define ACPI_DUMP_OPERANDS(a, b, c) acpi_ex_dump_operands(a, b, c) | 460 | #define ACPI_DUMP_OPERANDS(a, b ,c) acpi_ex_dump_operands(a, b, c) |
501 | |||
502 | #define ACPI_DUMP_ENTRY(a, b) acpi_ns_dump_entry (a, b) | 461 | #define ACPI_DUMP_ENTRY(a, b) acpi_ns_dump_entry (a, b) |
503 | #define ACPI_DUMP_PATHNAME(a, b, c, d) acpi_ns_dump_pathname(a, b, c, d) | 462 | #define ACPI_DUMP_PATHNAME(a, b, c, d) acpi_ns_dump_pathname(a, b, c, d) |
504 | #define ACPI_DUMP_RESOURCE_LIST(a) acpi_rs_dump_resource_list(a) | 463 | #define ACPI_DUMP_BUFFER(a, b) acpi_ut_debug_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT) |
505 | #define ACPI_DUMP_BUFFER(a, b) acpi_ut_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT) | ||
506 | 464 | ||
507 | #else | 465 | #else |
508 | /* | 466 | /* |
@@ -510,25 +468,23 @@ | |||
510 | * leaving no executable debug code! | 468 | * leaving no executable debug code! |
511 | */ | 469 | */ |
512 | #define ACPI_DEBUG_EXEC(a) | 470 | #define ACPI_DEBUG_EXEC(a) |
513 | #define ACPI_NORMAL_EXEC(a) a; | 471 | #define ACPI_DEBUG_ONLY_MEMBERS(a) |
514 | 472 | #define ACPI_FUNCTION_TRACE(a) | |
515 | #define ACPI_DEBUG_DEFINE(a) do { } while(0) | 473 | #define ACPI_FUNCTION_TRACE_PTR(a, b) |
516 | #define ACPI_DEBUG_ONLY_MEMBERS(a) do { } while(0) | 474 | #define ACPI_FUNCTION_TRACE_U32(a, b) |
517 | #define ACPI_FUNCTION_TRACE(a) do { } while(0) | 475 | #define ACPI_FUNCTION_TRACE_STR(a, b) |
518 | #define ACPI_FUNCTION_TRACE_PTR(a, b) do { } while(0) | 476 | #define ACPI_FUNCTION_EXIT |
519 | #define ACPI_FUNCTION_TRACE_U32(a, b) do { } while(0) | 477 | #define ACPI_FUNCTION_STATUS_EXIT(s) |
520 | #define ACPI_FUNCTION_TRACE_STR(a, b) do { } while(0) | 478 | #define ACPI_FUNCTION_VALUE_EXIT(s) |
521 | #define ACPI_FUNCTION_EXIT do { } while(0) | 479 | #define ACPI_FUNCTION_ENTRY() |
522 | #define ACPI_FUNCTION_STATUS_EXIT(s) do { } while(0) | 480 | #define ACPI_DUMP_STACK_ENTRY(a) |
523 | #define ACPI_FUNCTION_VALUE_EXIT(s) do { } while(0) | 481 | #define ACPI_DUMP_OPERANDS(a, b, c) |
524 | #define ACPI_FUNCTION_ENTRY() do { } while(0) | 482 | #define ACPI_DUMP_ENTRY(a, b) |
525 | #define ACPI_DUMP_STACK_ENTRY(a) do { } while(0) | 483 | #define ACPI_DUMP_TABLES(a, b) |
526 | #define ACPI_DUMP_OPERANDS(a, b, c) do { } while(0) | 484 | #define ACPI_DUMP_PATHNAME(a, b, c, d) |
527 | #define ACPI_DUMP_ENTRY(a, b) do { } while(0) | 485 | #define ACPI_DUMP_BUFFER(a, b) |
528 | #define ACPI_DUMP_TABLES(a, b) do { } while(0) | 486 | #define ACPI_DEBUG_PRINT(pl) |
529 | #define ACPI_DUMP_PATHNAME(a, b, c, d) do { } while(0) | 487 | #define ACPI_DEBUG_PRINT_RAW(pl) |
530 | #define ACPI_DUMP_RESOURCE_LIST(a) do { } while(0) | ||
531 | #define ACPI_DUMP_BUFFER(a, b) do { } while(0) | ||
532 | 488 | ||
533 | #define return_VOID return | 489 | #define return_VOID return |
534 | #define return_ACPI_STATUS(s) return(s) | 490 | #define return_ACPI_STATUS(s) return(s) |
@@ -556,18 +512,6 @@ | |||
556 | #define ACPI_DEBUGGER_EXEC(a) | 512 | #define ACPI_DEBUGGER_EXEC(a) |
557 | #endif | 513 | #endif |
558 | 514 | ||
559 | #ifdef ACPI_DEBUG_OUTPUT | ||
560 | /* | ||
561 | * 1) Set name to blanks | ||
562 | * 2) Copy the object name | ||
563 | */ | ||
564 | #define ACPI_ADD_OBJECT_NAME(a,b) ACPI_MEMSET (a->common.name, ' ', sizeof (a->common.name));\ | ||
565 | ACPI_STRNCPY (a->common.name, acpi_gbl_ns_type_names[b], sizeof (a->common.name)) | ||
566 | #else | ||
567 | |||
568 | #define ACPI_ADD_OBJECT_NAME(a,b) | ||
569 | #endif | ||
570 | |||
571 | /* | 515 | /* |
572 | * Memory allocation tracking (DEBUG ONLY) | 516 | * Memory allocation tracking (DEBUG ONLY) |
573 | */ | 517 | */ |
@@ -578,13 +522,13 @@ | |||
578 | /* Memory allocation */ | 522 | /* Memory allocation */ |
579 | 523 | ||
580 | #ifndef ACPI_ALLOCATE | 524 | #ifndef ACPI_ALLOCATE |
581 | #define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size)(a), ACPI_MEM_PARAMETERS) | 525 | #define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size) (a), ACPI_MEM_PARAMETERS) |
582 | #endif | 526 | #endif |
583 | #ifndef ACPI_ALLOCATE_ZEROED | 527 | #ifndef ACPI_ALLOCATE_ZEROED |
584 | #define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size)(a), ACPI_MEM_PARAMETERS) | 528 | #define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size) (a), ACPI_MEM_PARAMETERS) |
585 | #endif | 529 | #endif |
586 | #ifndef ACPI_FREE | 530 | #ifndef ACPI_FREE |
587 | #define ACPI_FREE(a) acpio_os_free(a) | 531 | #define ACPI_FREE(a) acpi_os_free(a) |
588 | #endif | 532 | #endif |
589 | #define ACPI_MEM_TRACKING(a) | 533 | #define ACPI_MEM_TRACKING(a) |
590 | 534 | ||
@@ -592,16 +536,25 @@ | |||
592 | 536 | ||
593 | /* Memory allocation */ | 537 | /* Memory allocation */ |
594 | 538 | ||
595 | #define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size)(a), ACPI_MEM_PARAMETERS) | 539 | #define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS) |
596 | #define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track((acpi_size)(a), ACPI_MEM_PARAMETERS) | 540 | #define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS) |
597 | #define ACPI_FREE(a) acpi_ut_free_and_track(a, ACPI_MEM_PARAMETERS) | 541 | #define ACPI_FREE(a) acpi_ut_free_and_track(a, ACPI_MEM_PARAMETERS) |
598 | #define ACPI_MEM_TRACKING(a) a | 542 | #define ACPI_MEM_TRACKING(a) a |
599 | 543 | ||
600 | #endif /* ACPI_DBG_TRACK_ALLOCATIONS */ | 544 | #endif /* ACPI_DBG_TRACK_ALLOCATIONS */ |
601 | 545 | ||
602 | /* Preemption point */ | 546 | /* |
603 | #ifndef ACPI_PREEMPTION_POINT | 547 | * Macros used for ACPICA utilities only |
604 | #define ACPI_PREEMPTION_POINT() /* no preemption */ | 548 | */ |
605 | #endif | 549 | |
550 | /* Generate a UUID */ | ||
551 | |||
552 | #define ACPI_INIT_UUID(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ | ||
553 | (a) & 0xFF, ((a) >> 8) & 0xFF, ((a) >> 16) & 0xFF, ((a) >> 24) & 0xFF, \ | ||
554 | (b) & 0xFF, ((b) >> 8) & 0xFF, \ | ||
555 | (c) & 0xFF, ((c) >> 8) & 0xFF, \ | ||
556 | (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) | ||
557 | |||
558 | #define ACPI_IS_OCTAL_DIGIT(d) (((char)(d) >= '0') && ((char)(d) <= '7')) | ||
606 | 559 | ||
607 | #endif /* ACMACROS_H */ | 560 | #endif /* ACMACROS_H */ |
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index 364a1303fb8f..24eb9eac9514 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Name: acobject.h - Definition of union acpi_operand_object (Internal object only) | 3 | * Name: acobject.h - Definition of union acpi_operand_object (Internal object only) |
@@ -179,7 +178,7 @@ struct acpi_object_method { | |||
179 | union acpi_operand_object *mutex; | 178 | union acpi_operand_object *mutex; |
180 | u8 *aml_start; | 179 | u8 *aml_start; |
181 | union { | 180 | union { |
182 | ACPI_INTERNAL_METHOD implementation; | 181 | acpi_internal_method implementation; |
183 | union acpi_operand_object *handler; | 182 | union acpi_operand_object *handler; |
184 | } dispatch; | 183 | } dispatch; |
185 | 184 | ||
@@ -198,7 +197,7 @@ struct acpi_object_method { | |||
198 | 197 | ||
199 | /****************************************************************************** | 198 | /****************************************************************************** |
200 | * | 199 | * |
201 | * Objects that can be notified. All share a common notify_info area. | 200 | * Objects that can be notified. All share a common notify_info area. |
202 | * | 201 | * |
203 | *****************************************************************************/ | 202 | *****************************************************************************/ |
204 | 203 | ||
@@ -235,7 +234,7 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO}; | |||
235 | 234 | ||
236 | /****************************************************************************** | 235 | /****************************************************************************** |
237 | * | 236 | * |
238 | * Fields. All share a common header/info field. | 237 | * Fields. All share a common header/info field. |
239 | * | 238 | * |
240 | *****************************************************************************/ | 239 | *****************************************************************************/ |
241 | 240 | ||
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h index 9440d053fbb3..d786a5128b78 100644 --- a/drivers/acpi/acpica/acopcode.h +++ b/drivers/acpi/acpica/acopcode.h | |||
@@ -54,7 +54,7 @@ | |||
54 | #define _UNK 0x6B | 54 | #define _UNK 0x6B |
55 | 55 | ||
56 | /* | 56 | /* |
57 | * Reserved ASCII characters. Do not use any of these for | 57 | * Reserved ASCII characters. Do not use any of these for |
58 | * internal opcodes, since they are used to differentiate | 58 | * internal opcodes, since they are used to differentiate |
59 | * name strings from AML opcodes | 59 | * name strings from AML opcodes |
60 | */ | 60 | */ |
@@ -63,7 +63,7 @@ | |||
63 | #define _PFX 0x6D | 63 | #define _PFX 0x6D |
64 | 64 | ||
65 | /* | 65 | /* |
66 | * All AML opcodes and the parse-time arguments for each. Used by the AML | 66 | * All AML opcodes and the parse-time arguments for each. Used by the AML |
67 | * parser Each list is compressed into a 32-bit number and stored in the | 67 | * parser Each list is compressed into a 32-bit number and stored in the |
68 | * master opcode table (in psopcode.c). | 68 | * master opcode table (in psopcode.c). |
69 | */ | 69 | */ |
@@ -193,7 +193,7 @@ | |||
193 | #define ARGP_ZERO_OP ARG_NONE | 193 | #define ARGP_ZERO_OP ARG_NONE |
194 | 194 | ||
195 | /* | 195 | /* |
196 | * All AML opcodes and the runtime arguments for each. Used by the AML | 196 | * All AML opcodes and the runtime arguments for each. Used by the AML |
197 | * interpreter Each list is compressed into a 32-bit number and stored | 197 | * interpreter Each list is compressed into a 32-bit number and stored |
198 | * in the master opcode table (in psopcode.c). | 198 | * in the master opcode table (in psopcode.c). |
199 | * | 199 | * |
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h index b725d780d34d..eefcf47a61a0 100644 --- a/drivers/acpi/acpica/acparser.h +++ b/drivers/acpi/acpica/acparser.h | |||
@@ -150,8 +150,7 @@ u8 acpi_ps_has_completed_scope(struct acpi_parse_state *parser_state); | |||
150 | 150 | ||
151 | void | 151 | void |
152 | acpi_ps_pop_scope(struct acpi_parse_state *parser_state, | 152 | acpi_ps_pop_scope(struct acpi_parse_state *parser_state, |
153 | union acpi_parse_object **op, | 153 | union acpi_parse_object **op, u32 *arg_list, u32 *arg_count); |
154 | u32 * arg_list, u32 * arg_count); | ||
155 | 154 | ||
156 | acpi_status | 155 | acpi_status |
157 | acpi_ps_push_scope(struct acpi_parse_state *parser_state, | 156 | acpi_ps_push_scope(struct acpi_parse_state *parser_state, |
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h index 3080c017f5ba..9dfa1c83bd4e 100644 --- a/drivers/acpi/acpica/acpredef.h +++ b/drivers/acpi/acpica/acpredef.h | |||
@@ -150,8 +150,7 @@ enum acpi_return_package_types { | |||
150 | * is saved here (rather than in a separate table) in order to minimize the | 150 | * is saved here (rather than in a separate table) in order to minimize the |
151 | * overall size of the stored data. | 151 | * overall size of the stored data. |
152 | */ | 152 | */ |
153 | static const union acpi_predefined_info predefined_names[] = | 153 | static const union acpi_predefined_info predefined_names[] = { |
154 | { | ||
155 | {{"_AC0", 0, ACPI_RTYPE_INTEGER}}, | 154 | {{"_AC0", 0, ACPI_RTYPE_INTEGER}}, |
156 | {{"_AC1", 0, ACPI_RTYPE_INTEGER}}, | 155 | {{"_AC1", 0, ACPI_RTYPE_INTEGER}}, |
157 | {{"_AC2", 0, ACPI_RTYPE_INTEGER}}, | 156 | {{"_AC2", 0, ACPI_RTYPE_INTEGER}}, |
@@ -538,7 +537,8 @@ static const union acpi_predefined_info predefined_names[] = | |||
538 | 537 | ||
539 | /* Acpi 1.0 defined _WAK with no return value. Later, it was changed to return a package */ | 538 | /* Acpi 1.0 defined _WAK with no return value. Later, it was changed to return a package */ |
540 | 539 | ||
541 | {{"_WAK", 1, ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE}}, | 540 | {{"_WAK", 1, |
541 | ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE}}, | ||
542 | {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2,0}, 0,0}}, /* Fixed-length (2 Int), but is optional */ | 542 | {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2,0}, 0,0}}, /* Fixed-length (2 Int), but is optional */ |
543 | 543 | ||
544 | /* _WDG/_WED are MS extensions defined by "Windows Instrumentation" */ | 544 | /* _WDG/_WED are MS extensions defined by "Windows Instrumentation" */ |
@@ -551,11 +551,12 @@ static const union acpi_predefined_info predefined_names[] = | |||
551 | }; | 551 | }; |
552 | 552 | ||
553 | #if 0 | 553 | #if 0 |
554 | |||
554 | /* This is an internally implemented control method, no need to check */ | 555 | /* This is an internally implemented control method, no need to check */ |
555 | {{"_OSI", 1, ACPI_RTYPE_INTEGER}}, | 556 | { { |
557 | "_OSI", 1, ACPI_RTYPE_INTEGER}}, | ||
556 | 558 | ||
557 | /* TBD: */ | 559 | /* TBD: */ |
558 | |||
559 | _PRT - currently ignore reversed entries. attempt to fix here? | 560 | _PRT - currently ignore reversed entries. attempt to fix here? |
560 | think about possibly fixing package elements like _BIF, etc. | 561 | think about possibly fixing package elements like _BIF, etc. |
561 | #endif | 562 | #endif |
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h index f196e2c9a71f..937e66c65d1e 100644 --- a/drivers/acpi/acpica/acstruct.h +++ b/drivers/acpi/acpica/acstruct.h | |||
@@ -53,7 +53,7 @@ | |||
53 | ****************************************************************************/ | 53 | ****************************************************************************/ |
54 | 54 | ||
55 | /* | 55 | /* |
56 | * Walk state - current state of a parse tree walk. Used for both a leisurely | 56 | * Walk state - current state of a parse tree walk. Used for both a leisurely |
57 | * stroll through the tree (for whatever reason), and for control method | 57 | * stroll through the tree (for whatever reason), and for control method |
58 | * execution. | 58 | * execution. |
59 | */ | 59 | */ |
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index 5035327ebccc..b0f5f92b674a 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h | |||
@@ -69,6 +69,22 @@ extern const char *acpi_gbl_siz_decode[]; | |||
69 | extern const char *acpi_gbl_trs_decode[]; | 69 | extern const char *acpi_gbl_trs_decode[]; |
70 | extern const char *acpi_gbl_ttp_decode[]; | 70 | extern const char *acpi_gbl_ttp_decode[]; |
71 | extern const char *acpi_gbl_typ_decode[]; | 71 | extern const char *acpi_gbl_typ_decode[]; |
72 | extern const char *acpi_gbl_ppc_decode[]; | ||
73 | extern const char *acpi_gbl_ior_decode[]; | ||
74 | extern const char *acpi_gbl_dts_decode[]; | ||
75 | extern const char *acpi_gbl_ct_decode[]; | ||
76 | extern const char *acpi_gbl_sbt_decode[]; | ||
77 | extern const char *acpi_gbl_am_decode[]; | ||
78 | extern const char *acpi_gbl_sm_decode[]; | ||
79 | extern const char *acpi_gbl_wm_decode[]; | ||
80 | extern const char *acpi_gbl_cph_decode[]; | ||
81 | extern const char *acpi_gbl_cpo_decode[]; | ||
82 | extern const char *acpi_gbl_dp_decode[]; | ||
83 | extern const char *acpi_gbl_ed_decode[]; | ||
84 | extern const char *acpi_gbl_bpb_decode[]; | ||
85 | extern const char *acpi_gbl_sb_decode[]; | ||
86 | extern const char *acpi_gbl_fc_decode[]; | ||
87 | extern const char *acpi_gbl_pt_decode[]; | ||
72 | #endif | 88 | #endif |
73 | 89 | ||
74 | /* Types for Resource descriptor entries */ | 90 | /* Types for Resource descriptor entries */ |
@@ -79,14 +95,14 @@ extern const char *acpi_gbl_typ_decode[]; | |||
79 | #define ACPI_SMALL_VARIABLE_LENGTH 3 | 95 | #define ACPI_SMALL_VARIABLE_LENGTH 3 |
80 | 96 | ||
81 | typedef | 97 | typedef |
82 | acpi_status(*acpi_walk_aml_callback) (u8 * aml, | 98 | acpi_status(*acpi_walk_aml_callback) (u8 *aml, |
83 | u32 length, | 99 | u32 length, |
84 | u32 offset, | 100 | u32 offset, |
85 | u8 resource_index, void **context); | 101 | u8 resource_index, void **context); |
86 | 102 | ||
87 | typedef | 103 | typedef |
88 | acpi_status(*acpi_pkg_callback) (u8 object_type, | 104 | acpi_status(*acpi_pkg_callback) (u8 object_type, |
89 | union acpi_operand_object * source_object, | 105 | union acpi_operand_object *source_object, |
90 | union acpi_generic_state * state, | 106 | union acpi_generic_state * state, |
91 | void *context); | 107 | void *context); |
92 | 108 | ||
@@ -202,7 +218,9 @@ extern const u8 _acpi_ctype[]; | |||
202 | #define ACPI_IS_PRINT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP | _ACPI_DI | _ACPI_SP | _ACPI_PU)) | 218 | #define ACPI_IS_PRINT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP | _ACPI_DI | _ACPI_SP | _ACPI_PU)) |
203 | #define ACPI_IS_ALPHA(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP)) | 219 | #define ACPI_IS_ALPHA(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP)) |
204 | 220 | ||
205 | #endif /* ACPI_USE_SYSTEM_CLIBRARY */ | 221 | #endif /* !ACPI_USE_SYSTEM_CLIBRARY */ |
222 | |||
223 | #define ACPI_IS_ASCII(c) ((c) < 0x80) | ||
206 | 224 | ||
207 | /* | 225 | /* |
208 | * utcopy - Object construction and conversion interfaces | 226 | * utcopy - Object construction and conversion interfaces |
@@ -210,11 +228,11 @@ extern const u8 _acpi_ctype[]; | |||
210 | acpi_status | 228 | acpi_status |
211 | acpi_ut_build_simple_object(union acpi_operand_object *obj, | 229 | acpi_ut_build_simple_object(union acpi_operand_object *obj, |
212 | union acpi_object *user_obj, | 230 | union acpi_object *user_obj, |
213 | u8 * data_space, u32 * buffer_space_used); | 231 | u8 *data_space, u32 *buffer_space_used); |
214 | 232 | ||
215 | acpi_status | 233 | acpi_status |
216 | acpi_ut_build_package_object(union acpi_operand_object *obj, | 234 | acpi_ut_build_package_object(union acpi_operand_object *obj, |
217 | u8 * buffer, u32 * space_used); | 235 | u8 *buffer, u32 *space_used); |
218 | 236 | ||
219 | acpi_status | 237 | acpi_status |
220 | acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *obj, | 238 | acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *obj, |
@@ -287,9 +305,10 @@ acpi_ut_ptr_exit(u32 line_number, | |||
287 | const char *function_name, | 305 | const char *function_name, |
288 | const char *module_name, u32 component_id, u8 *ptr); | 306 | const char *module_name, u32 component_id, u8 *ptr); |
289 | 307 | ||
290 | void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id); | 308 | void |
309 | acpi_ut_debug_dump_buffer(u8 *buffer, u32 count, u32 display, u32 component_id); | ||
291 | 310 | ||
292 | void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display); | 311 | void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 offset); |
293 | 312 | ||
294 | void acpi_ut_report_error(char *module_name, u32 line_number); | 313 | void acpi_ut_report_error(char *module_name, u32 line_number); |
295 | 314 | ||
@@ -337,15 +356,19 @@ acpi_ut_execute_power_methods(struct acpi_namespace_node *device_node, | |||
337 | */ | 356 | */ |
338 | acpi_status | 357 | acpi_status |
339 | acpi_ut_execute_HID(struct acpi_namespace_node *device_node, | 358 | acpi_ut_execute_HID(struct acpi_namespace_node *device_node, |
340 | struct acpica_device_id **return_id); | 359 | struct acpi_pnp_device_id ** return_id); |
341 | 360 | ||
342 | acpi_status | 361 | acpi_status |
343 | acpi_ut_execute_UID(struct acpi_namespace_node *device_node, | 362 | acpi_ut_execute_UID(struct acpi_namespace_node *device_node, |
344 | struct acpica_device_id **return_id); | 363 | struct acpi_pnp_device_id ** return_id); |
364 | |||
365 | acpi_status | ||
366 | acpi_ut_execute_SUB(struct acpi_namespace_node *device_node, | ||
367 | struct acpi_pnp_device_id **return_id); | ||
345 | 368 | ||
346 | acpi_status | 369 | acpi_status |
347 | acpi_ut_execute_CID(struct acpi_namespace_node *device_node, | 370 | acpi_ut_execute_CID(struct acpi_namespace_node *device_node, |
348 | struct acpica_device_id_list **return_cid_list); | 371 | struct acpi_pnp_device_id_list ** return_cid_list); |
349 | 372 | ||
350 | /* | 373 | /* |
351 | * utlock - reader/writer locks | 374 | * utlock - reader/writer locks |
@@ -479,15 +502,19 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object, | |||
479 | 502 | ||
480 | void acpi_ut_strupr(char *src_string); | 503 | void acpi_ut_strupr(char *src_string); |
481 | 504 | ||
505 | void acpi_ut_strlwr(char *src_string); | ||
506 | |||
507 | int acpi_ut_stricmp(char *string1, char *string2); | ||
508 | |||
482 | void acpi_ut_print_string(char *string, u8 max_length); | 509 | void acpi_ut_print_string(char *string, u8 max_length); |
483 | 510 | ||
484 | u8 acpi_ut_valid_acpi_name(u32 name); | 511 | u8 acpi_ut_valid_acpi_name(u32 name); |
485 | 512 | ||
486 | acpi_name acpi_ut_repair_name(char *name); | 513 | void acpi_ut_repair_name(char *name); |
487 | 514 | ||
488 | u8 acpi_ut_valid_acpi_char(char character, u32 position); | 515 | u8 acpi_ut_valid_acpi_char(char character, u32 position); |
489 | 516 | ||
490 | acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer); | 517 | acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer); |
491 | 518 | ||
492 | /* Values for Base above (16=Hex, 10=Decimal) */ | 519 | /* Values for Base above (16=Hex, 10=Decimal) */ |
493 | 520 | ||
@@ -508,12 +535,12 @@ acpi_ut_display_init_pathname(u8 type, | |||
508 | * utresrc | 535 | * utresrc |
509 | */ | 536 | */ |
510 | acpi_status | 537 | acpi_status |
511 | acpi_ut_walk_aml_resources(u8 * aml, | 538 | acpi_ut_walk_aml_resources(u8 *aml, |
512 | acpi_size aml_length, | 539 | acpi_size aml_length, |
513 | acpi_walk_aml_callback user_function, | 540 | acpi_walk_aml_callback user_function, |
514 | void **context); | 541 | void **context); |
515 | 542 | ||
516 | acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index); | 543 | acpi_status acpi_ut_validate_resource(void *aml, u8 *return_index); |
517 | 544 | ||
518 | u32 acpi_ut_get_descriptor_length(void *aml); | 545 | u32 acpi_ut_get_descriptor_length(void *aml); |
519 | 546 | ||
@@ -524,8 +551,7 @@ u8 acpi_ut_get_resource_header_length(void *aml); | |||
524 | u8 acpi_ut_get_resource_type(void *aml); | 551 | u8 acpi_ut_get_resource_type(void *aml); |
525 | 552 | ||
526 | acpi_status | 553 | acpi_status |
527 | acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, | 554 | acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag); |
528 | u8 ** end_tag); | ||
529 | 555 | ||
530 | /* | 556 | /* |
531 | * utmutex - mutex support | 557 | * utmutex - mutex support |
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h index af4947956ec2..968449685e06 100644 --- a/drivers/acpi/acpica/amlresrc.h +++ b/drivers/acpi/acpica/amlresrc.h | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: amlresrc.h - AML resource descriptors | 3 | * Module Name: amlresrc.h - AML resource descriptors |
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c index 465f02134b89..57895db3231a 100644 --- a/drivers/acpi/acpica/dscontrol.c +++ b/drivers/acpi/acpica/dscontrol.c | |||
@@ -280,7 +280,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state, | |||
280 | 280 | ||
281 | /* | 281 | /* |
282 | * Get the return value and save as the last result | 282 | * Get the return value and save as the last result |
283 | * value. This is the only place where walk_state->return_desc | 283 | * value. This is the only place where walk_state->return_desc |
284 | * is set to anything other than zero! | 284 | * is set to anything other than zero! |
285 | */ | 285 | */ |
286 | walk_state->return_desc = walk_state->operands[0]; | 286 | walk_state->return_desc = walk_state->operands[0]; |
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index 3da6fd8530c5..b5b904ee815f 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c | |||
@@ -277,7 +277,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op, | |||
277 | * | 277 | * |
278 | * RETURN: Status | 278 | * RETURN: Status |
279 | * | 279 | * |
280 | * DESCRIPTION: Process all named fields in a field declaration. Names are | 280 | * DESCRIPTION: Process all named fields in a field declaration. Names are |
281 | * entered into the namespace. | 281 | * entered into the namespace. |
282 | * | 282 | * |
283 | ******************************************************************************/ | 283 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index aa9a5d4e4052..52eb4e01622a 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c | |||
@@ -170,7 +170,7 @@ acpi_ds_create_method_mutex(union acpi_operand_object *method_desc) | |||
170 | * | 170 | * |
171 | * RETURN: Status | 171 | * RETURN: Status |
172 | * | 172 | * |
173 | * DESCRIPTION: Prepare a method for execution. Parses the method if necessary, | 173 | * DESCRIPTION: Prepare a method for execution. Parses the method if necessary, |
174 | * increments the thread count, and waits at the method semaphore | 174 | * increments the thread count, and waits at the method semaphore |
175 | * for clearance to execute. | 175 | * for clearance to execute. |
176 | * | 176 | * |
@@ -444,7 +444,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, | |||
444 | * RETURN: Status | 444 | * RETURN: Status |
445 | * | 445 | * |
446 | * DESCRIPTION: Restart a method that was preempted by another (nested) method | 446 | * DESCRIPTION: Restart a method that was preempted by another (nested) method |
447 | * invocation. Handle the return value (if any) from the callee. | 447 | * invocation. Handle the return value (if any) from the callee. |
448 | * | 448 | * |
449 | ******************************************************************************/ | 449 | ******************************************************************************/ |
450 | 450 | ||
@@ -530,7 +530,7 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state, | |||
530 | * | 530 | * |
531 | * RETURN: None | 531 | * RETURN: None |
532 | * | 532 | * |
533 | * DESCRIPTION: Terminate a control method. Delete everything that the method | 533 | * DESCRIPTION: Terminate a control method. Delete everything that the method |
534 | * created, delete all locals and arguments, and delete the parse | 534 | * created, delete all locals and arguments, and delete the parse |
535 | * tree if requested. | 535 | * tree if requested. |
536 | * | 536 | * |
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c index 8d55cebaa656..9a83b7e0f3ba 100644 --- a/drivers/acpi/acpica/dsmthdat.c +++ b/drivers/acpi/acpica/dsmthdat.c | |||
@@ -76,7 +76,7 @@ acpi_ds_method_data_get_type(u16 opcode, | |||
76 | * RETURN: Status | 76 | * RETURN: Status |
77 | * | 77 | * |
78 | * DESCRIPTION: Initialize the data structures that hold the method's arguments | 78 | * DESCRIPTION: Initialize the data structures that hold the method's arguments |
79 | * and locals. The data struct is an array of namespace nodes for | 79 | * and locals. The data struct is an array of namespace nodes for |
80 | * each - this allows ref_of and de_ref_of to work properly for these | 80 | * each - this allows ref_of and de_ref_of to work properly for these |
81 | * special data types. | 81 | * special data types. |
82 | * | 82 | * |
@@ -129,7 +129,7 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state) | |||
129 | * | 129 | * |
130 | * RETURN: None | 130 | * RETURN: None |
131 | * | 131 | * |
132 | * DESCRIPTION: Delete method locals and arguments. Arguments are only | 132 | * DESCRIPTION: Delete method locals and arguments. Arguments are only |
133 | * deleted if this method was called from another method. | 133 | * deleted if this method was called from another method. |
134 | * | 134 | * |
135 | ******************************************************************************/ | 135 | ******************************************************************************/ |
@@ -183,7 +183,7 @@ void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state) | |||
183 | * | 183 | * |
184 | * RETURN: Status | 184 | * RETURN: Status |
185 | * | 185 | * |
186 | * DESCRIPTION: Initialize arguments for a method. The parameter list is a list | 186 | * DESCRIPTION: Initialize arguments for a method. The parameter list is a list |
187 | * of ACPI operand objects, either null terminated or whose length | 187 | * of ACPI operand objects, either null terminated or whose length |
188 | * is defined by max_param_count. | 188 | * is defined by max_param_count. |
189 | * | 189 | * |
@@ -401,7 +401,7 @@ acpi_ds_method_data_get_value(u8 type, | |||
401 | * This means that either 1) The expected argument was | 401 | * This means that either 1) The expected argument was |
402 | * not passed to the method, or 2) A local variable | 402 | * not passed to the method, or 2) A local variable |
403 | * was referenced by the method (via the ASL) | 403 | * was referenced by the method (via the ASL) |
404 | * before it was initialized. Either case is an error. | 404 | * before it was initialized. Either case is an error. |
405 | */ | 405 | */ |
406 | 406 | ||
407 | /* If slack enabled, init the local_x/arg_x to an Integer of value zero */ | 407 | /* If slack enabled, init the local_x/arg_x to an Integer of value zero */ |
@@ -465,7 +465,7 @@ acpi_ds_method_data_get_value(u8 type, | |||
465 | * | 465 | * |
466 | * RETURN: None | 466 | * RETURN: None |
467 | * | 467 | * |
468 | * DESCRIPTION: Delete the entry at Opcode:Index. Inserts | 468 | * DESCRIPTION: Delete the entry at Opcode:Index. Inserts |
469 | * a null into the stack slot after the object is deleted. | 469 | * a null into the stack slot after the object is deleted. |
470 | * | 470 | * |
471 | ******************************************************************************/ | 471 | ******************************************************************************/ |
@@ -523,7 +523,7 @@ acpi_ds_method_data_delete_value(u8 type, | |||
523 | * | 523 | * |
524 | * RETURN: Status | 524 | * RETURN: Status |
525 | * | 525 | * |
526 | * DESCRIPTION: Store a value in an Arg or Local. The obj_desc is installed | 526 | * DESCRIPTION: Store a value in an Arg or Local. The obj_desc is installed |
527 | * as the new value for the Arg or Local and the reference count | 527 | * as the new value for the Arg or Local and the reference count |
528 | * for obj_desc is incremented. | 528 | * for obj_desc is incremented. |
529 | * | 529 | * |
@@ -566,7 +566,7 @@ acpi_ds_store_object_to_local(u8 type, | |||
566 | 566 | ||
567 | /* | 567 | /* |
568 | * If the reference count on the object is more than one, we must | 568 | * If the reference count on the object is more than one, we must |
569 | * take a copy of the object before we store. A reference count | 569 | * take a copy of the object before we store. A reference count |
570 | * of exactly 1 means that the object was just created during the | 570 | * of exactly 1 means that the object was just created during the |
571 | * evaluation of an expression, and we can safely use it since it | 571 | * evaluation of an expression, and we can safely use it since it |
572 | * is not used anywhere else. | 572 | * is not used anywhere else. |
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c index 68592dd34960..c9f15d3a3686 100644 --- a/drivers/acpi/acpica/dsobject.c +++ b/drivers/acpi/acpica/dsobject.c | |||
@@ -293,7 +293,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state, | |||
293 | 293 | ||
294 | /* | 294 | /* |
295 | * Second arg is the buffer data (optional) byte_list can be either | 295 | * Second arg is the buffer data (optional) byte_list can be either |
296 | * individual bytes or a string initializer. In either case, a | 296 | * individual bytes or a string initializer. In either case, a |
297 | * byte_list appears in the AML. | 297 | * byte_list appears in the AML. |
298 | */ | 298 | */ |
299 | arg = op->common.value.arg; /* skip first arg */ | 299 | arg = op->common.value.arg; /* skip first arg */ |
@@ -568,7 +568,7 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state, | |||
568 | 568 | ||
569 | /* | 569 | /* |
570 | * Because of the execution pass through the non-control-method | 570 | * Because of the execution pass through the non-control-method |
571 | * parts of the table, we can arrive here twice. Only init | 571 | * parts of the table, we can arrive here twice. Only init |
572 | * the named object node the first time through | 572 | * the named object node the first time through |
573 | */ | 573 | */ |
574 | if (acpi_ns_get_attached_object(node)) { | 574 | if (acpi_ns_get_attached_object(node)) { |
@@ -618,7 +618,7 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state, | |||
618 | * RETURN: Status | 618 | * RETURN: Status |
619 | * | 619 | * |
620 | * DESCRIPTION: Initialize a namespace object from a parser Op and its | 620 | * DESCRIPTION: Initialize a namespace object from a parser Op and its |
621 | * associated arguments. The namespace object is a more compact | 621 | * associated arguments. The namespace object is a more compact |
622 | * representation of the Op and its arguments. | 622 | * representation of the Op and its arguments. |
623 | * | 623 | * |
624 | ******************************************************************************/ | 624 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index aa34d8984d34..0df024e5fb63 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c | |||
@@ -649,7 +649,8 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state, | |||
649 | ((op->common.parent->common.aml_opcode != AML_PACKAGE_OP) && | 649 | ((op->common.parent->common.aml_opcode != AML_PACKAGE_OP) && |
650 | (op->common.parent->common.aml_opcode != | 650 | (op->common.parent->common.aml_opcode != |
651 | AML_VAR_PACKAGE_OP) | 651 | AML_VAR_PACKAGE_OP) |
652 | && (op->common.parent->common.aml_opcode != AML_NAME_OP))) { | 652 | && (op->common.parent->common.aml_opcode != |
653 | AML_NAME_OP))) { | ||
653 | walk_state->result_obj = obj_desc; | 654 | walk_state->result_obj = obj_desc; |
654 | } | 655 | } |
655 | } | 656 | } |
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c index 73a5447475f5..afeb99f49482 100644 --- a/drivers/acpi/acpica/dsutils.c +++ b/drivers/acpi/acpica/dsutils.c | |||
@@ -61,7 +61,7 @@ ACPI_MODULE_NAME("dsutils") | |||
61 | * | 61 | * |
62 | * RETURN: None. | 62 | * RETURN: None. |
63 | * | 63 | * |
64 | * DESCRIPTION: Clear and remove a reference on an implicit return value. Used | 64 | * DESCRIPTION: Clear and remove a reference on an implicit return value. Used |
65 | * to delete "stale" return values (if enabled, the return value | 65 | * to delete "stale" return values (if enabled, the return value |
66 | * from every operator is saved at least momentarily, in case the | 66 | * from every operator is saved at least momentarily, in case the |
67 | * parent method exits.) | 67 | * parent method exits.) |
@@ -107,7 +107,7 @@ void acpi_ds_clear_implicit_return(struct acpi_walk_state *walk_state) | |||
107 | * | 107 | * |
108 | * DESCRIPTION: Implements the optional "implicit return". We save the result | 108 | * DESCRIPTION: Implements the optional "implicit return". We save the result |
109 | * of every ASL operator and control method invocation in case the | 109 | * of every ASL operator and control method invocation in case the |
110 | * parent method exit. Before storing a new return value, we | 110 | * parent method exit. Before storing a new return value, we |
111 | * delete the previous return value. | 111 | * delete the previous return value. |
112 | * | 112 | * |
113 | ******************************************************************************/ | 113 | ******************************************************************************/ |
@@ -198,7 +198,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op, | |||
198 | * | 198 | * |
199 | * If there is no parent, or the parent is a scope_op, we are executing | 199 | * If there is no parent, or the parent is a scope_op, we are executing |
200 | * at the method level. An executing method typically has no parent, | 200 | * at the method level. An executing method typically has no parent, |
201 | * since each method is parsed separately. A method invoked externally | 201 | * since each method is parsed separately. A method invoked externally |
202 | * via execute_control_method has a scope_op as the parent. | 202 | * via execute_control_method has a scope_op as the parent. |
203 | */ | 203 | */ |
204 | if ((!op->common.parent) || | 204 | if ((!op->common.parent) || |
@@ -223,7 +223,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op, | |||
223 | } | 223 | } |
224 | 224 | ||
225 | /* | 225 | /* |
226 | * Decide what to do with the result based on the parent. If | 226 | * Decide what to do with the result based on the parent. If |
227 | * the parent opcode will not use the result, delete the object. | 227 | * the parent opcode will not use the result, delete the object. |
228 | * Otherwise leave it as is, it will be deleted when it is used | 228 | * Otherwise leave it as is, it will be deleted when it is used |
229 | * as an operand later. | 229 | * as an operand later. |
@@ -266,7 +266,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op, | |||
266 | 266 | ||
267 | /* | 267 | /* |
268 | * These opcodes allow term_arg(s) as operands and therefore | 268 | * These opcodes allow term_arg(s) as operands and therefore |
269 | * the operands can be method calls. The result is used. | 269 | * the operands can be method calls. The result is used. |
270 | */ | 270 | */ |
271 | goto result_used; | 271 | goto result_used; |
272 | 272 | ||
@@ -284,7 +284,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op, | |||
284 | AML_BANK_FIELD_OP)) { | 284 | AML_BANK_FIELD_OP)) { |
285 | /* | 285 | /* |
286 | * These opcodes allow term_arg(s) as operands and therefore | 286 | * These opcodes allow term_arg(s) as operands and therefore |
287 | * the operands can be method calls. The result is used. | 287 | * the operands can be method calls. The result is used. |
288 | */ | 288 | */ |
289 | goto result_used; | 289 | goto result_used; |
290 | } | 290 | } |
@@ -329,9 +329,9 @@ acpi_ds_is_result_used(union acpi_parse_object * op, | |||
329 | * | 329 | * |
330 | * RETURN: Status | 330 | * RETURN: Status |
331 | * | 331 | * |
332 | * DESCRIPTION: Used after interpretation of an opcode. If there is an internal | 332 | * DESCRIPTION: Used after interpretation of an opcode. If there is an internal |
333 | * result descriptor, check if the parent opcode will actually use | 333 | * result descriptor, check if the parent opcode will actually use |
334 | * this result. If not, delete the result now so that it will | 334 | * this result. If not, delete the result now so that it will |
335 | * not become orphaned. | 335 | * not become orphaned. |
336 | * | 336 | * |
337 | ******************************************************************************/ | 337 | ******************************************************************************/ |
@@ -376,7 +376,7 @@ acpi_ds_delete_result_if_not_used(union acpi_parse_object *op, | |||
376 | * | 376 | * |
377 | * RETURN: Status | 377 | * RETURN: Status |
378 | * | 378 | * |
379 | * DESCRIPTION: Resolve all operands to their values. Used to prepare | 379 | * DESCRIPTION: Resolve all operands to their values. Used to prepare |
380 | * arguments to a control method invocation (a call from one | 380 | * arguments to a control method invocation (a call from one |
381 | * method to another.) | 381 | * method to another.) |
382 | * | 382 | * |
@@ -391,7 +391,7 @@ acpi_status acpi_ds_resolve_operands(struct acpi_walk_state *walk_state) | |||
391 | 391 | ||
392 | /* | 392 | /* |
393 | * Attempt to resolve each of the valid operands | 393 | * Attempt to resolve each of the valid operands |
394 | * Method arguments are passed by reference, not by value. This means | 394 | * Method arguments are passed by reference, not by value. This means |
395 | * that the actual objects are passed, not copies of the objects. | 395 | * that the actual objects are passed, not copies of the objects. |
396 | */ | 396 | */ |
397 | for (i = 0; i < walk_state->num_operands; i++) { | 397 | for (i = 0; i < walk_state->num_operands; i++) { |
@@ -451,7 +451,7 @@ void acpi_ds_clear_operands(struct acpi_walk_state *walk_state) | |||
451 | * RETURN: Status | 451 | * RETURN: Status |
452 | * | 452 | * |
453 | * DESCRIPTION: Translate a parse tree object that is an argument to an AML | 453 | * DESCRIPTION: Translate a parse tree object that is an argument to an AML |
454 | * opcode to the equivalent interpreter object. This may include | 454 | * opcode to the equivalent interpreter object. This may include |
455 | * looking up a name or entering a new name into the internal | 455 | * looking up a name or entering a new name into the internal |
456 | * namespace. | 456 | * namespace. |
457 | * | 457 | * |
@@ -496,9 +496,9 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, | |||
496 | /* | 496 | /* |
497 | * Special handling for buffer_field declarations. This is a deferred | 497 | * Special handling for buffer_field declarations. This is a deferred |
498 | * opcode that unfortunately defines the field name as the last | 498 | * opcode that unfortunately defines the field name as the last |
499 | * parameter instead of the first. We get here when we are performing | 499 | * parameter instead of the first. We get here when we are performing |
500 | * the deferred execution, so the actual name of the field is already | 500 | * the deferred execution, so the actual name of the field is already |
501 | * in the namespace. We don't want to attempt to look it up again | 501 | * in the namespace. We don't want to attempt to look it up again |
502 | * because we may be executing in a different scope than where the | 502 | * because we may be executing in a different scope than where the |
503 | * actual opcode exists. | 503 | * actual opcode exists. |
504 | */ | 504 | */ |
@@ -560,7 +560,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, | |||
560 | * indicate this to the interpreter, set the | 560 | * indicate this to the interpreter, set the |
561 | * object to the root | 561 | * object to the root |
562 | */ | 562 | */ |
563 | obj_desc = ACPI_CAST_PTR(union | 563 | obj_desc = |
564 | ACPI_CAST_PTR(union | ||
564 | acpi_operand_object, | 565 | acpi_operand_object, |
565 | acpi_gbl_root_node); | 566 | acpi_gbl_root_node); |
566 | status = AE_OK; | 567 | status = AE_OK; |
@@ -604,8 +605,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, | |||
604 | /* | 605 | /* |
605 | * If the name is null, this means that this is an | 606 | * If the name is null, this means that this is an |
606 | * optional result parameter that was not specified | 607 | * optional result parameter that was not specified |
607 | * in the original ASL. Create a Zero Constant for a | 608 | * in the original ASL. Create a Zero Constant for a |
608 | * placeholder. (Store to a constant is a Noop.) | 609 | * placeholder. (Store to a constant is a Noop.) |
609 | */ | 610 | */ |
610 | opcode = AML_ZERO_OP; /* Has no arguments! */ | 611 | opcode = AML_ZERO_OP; /* Has no arguments! */ |
611 | 612 | ||
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c index 642f3c053e87..58593931be96 100644 --- a/drivers/acpi/acpica/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c | |||
@@ -57,7 +57,7 @@ ACPI_MODULE_NAME("dswexec") | |||
57 | /* | 57 | /* |
58 | * Dispatch table for opcode classes | 58 | * Dispatch table for opcode classes |
59 | */ | 59 | */ |
60 | static ACPI_EXECUTE_OP acpi_gbl_op_type_dispatch[] = { | 60 | static acpi_execute_op acpi_gbl_op_type_dispatch[] = { |
61 | acpi_ex_opcode_0A_0T_1R, | 61 | acpi_ex_opcode_0A_0T_1R, |
62 | acpi_ex_opcode_1A_0T_0R, | 62 | acpi_ex_opcode_1A_0T_0R, |
63 | acpi_ex_opcode_1A_0T_1R, | 63 | acpi_ex_opcode_1A_0T_1R, |
@@ -204,7 +204,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state, | |||
204 | * RETURN: Status | 204 | * RETURN: Status |
205 | * | 205 | * |
206 | * DESCRIPTION: Descending callback used during the execution of control | 206 | * DESCRIPTION: Descending callback used during the execution of control |
207 | * methods. This is where most operators and operands are | 207 | * methods. This is where most operators and operands are |
208 | * dispatched to the interpreter. | 208 | * dispatched to the interpreter. |
209 | * | 209 | * |
210 | ****************************************************************************/ | 210 | ****************************************************************************/ |
@@ -297,7 +297,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state, | |||
297 | if (walk_state->walk_type & ACPI_WALK_METHOD) { | 297 | if (walk_state->walk_type & ACPI_WALK_METHOD) { |
298 | /* | 298 | /* |
299 | * Found a named object declaration during method execution; | 299 | * Found a named object declaration during method execution; |
300 | * we must enter this object into the namespace. The created | 300 | * we must enter this object into the namespace. The created |
301 | * object is temporary and will be deleted upon completion of | 301 | * object is temporary and will be deleted upon completion of |
302 | * the execution of this method. | 302 | * the execution of this method. |
303 | * | 303 | * |
@@ -348,7 +348,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state, | |||
348 | * RETURN: Status | 348 | * RETURN: Status |
349 | * | 349 | * |
350 | * DESCRIPTION: Ascending callback used during the execution of control | 350 | * DESCRIPTION: Ascending callback used during the execution of control |
351 | * methods. The only thing we really need to do here is to | 351 | * methods. The only thing we really need to do here is to |
352 | * notice the beginning of IF, ELSE, and WHILE blocks. | 352 | * notice the beginning of IF, ELSE, and WHILE blocks. |
353 | * | 353 | * |
354 | ****************************************************************************/ | 354 | ****************************************************************************/ |
@@ -432,7 +432,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) | |||
432 | if (ACPI_SUCCESS(status)) { | 432 | if (ACPI_SUCCESS(status)) { |
433 | /* | 433 | /* |
434 | * Dispatch the request to the appropriate interpreter handler | 434 | * Dispatch the request to the appropriate interpreter handler |
435 | * routine. There is one routine per opcode "type" based upon the | 435 | * routine. There is one routine per opcode "type" based upon the |
436 | * number of opcode arguments and return type. | 436 | * number of opcode arguments and return type. |
437 | */ | 437 | */ |
438 | status = | 438 | status = |
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c index 89c0114210c0..379835748357 100644 --- a/drivers/acpi/acpica/dswload2.c +++ b/drivers/acpi/acpica/dswload2.c | |||
@@ -254,7 +254,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state, | |||
254 | acpi_ut_get_type_name(node->type), | 254 | acpi_ut_get_type_name(node->type), |
255 | acpi_ut_get_node_name(node))); | 255 | acpi_ut_get_node_name(node))); |
256 | 256 | ||
257 | return (AE_AML_OPERAND_TYPE); | 257 | return_ACPI_STATUS(AE_AML_OPERAND_TYPE); |
258 | } | 258 | } |
259 | break; | 259 | break; |
260 | 260 | ||
@@ -602,7 +602,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) | |||
602 | region_space, | 602 | region_space, |
603 | walk_state); | 603 | walk_state); |
604 | if (ACPI_FAILURE(status)) { | 604 | if (ACPI_FAILURE(status)) { |
605 | return (status); | 605 | return_ACPI_STATUS(status); |
606 | } | 606 | } |
607 | 607 | ||
608 | acpi_ex_exit_interpreter(); | 608 | acpi_ex_exit_interpreter(); |
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c index d0e6555061e4..3e65a15a735f 100644 --- a/drivers/acpi/acpica/dswstate.c +++ b/drivers/acpi/acpica/dswstate.c | |||
@@ -51,8 +51,9 @@ | |||
51 | ACPI_MODULE_NAME("dswstate") | 51 | ACPI_MODULE_NAME("dswstate") |
52 | 52 | ||
53 | /* Local prototypes */ | 53 | /* Local prototypes */ |
54 | static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *ws); | 54 | static acpi_status |
55 | static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *ws); | 55 | acpi_ds_result_stack_push(struct acpi_walk_state *walk_state); |
56 | static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *walk_state); | ||
56 | 57 | ||
57 | /******************************************************************************* | 58 | /******************************************************************************* |
58 | * | 59 | * |
@@ -347,7 +348,7 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state) | |||
347 | * | 348 | * |
348 | * RETURN: Status | 349 | * RETURN: Status |
349 | * | 350 | * |
350 | * DESCRIPTION: Pop this walk's object stack. Objects on the stack are NOT | 351 | * DESCRIPTION: Pop this walk's object stack. Objects on the stack are NOT |
351 | * deleted by this routine. | 352 | * deleted by this routine. |
352 | * | 353 | * |
353 | ******************************************************************************/ | 354 | ******************************************************************************/ |
@@ -491,7 +492,7 @@ acpi_ds_push_walk_state(struct acpi_walk_state *walk_state, | |||
491 | * RETURN: A walk_state object popped from the thread's stack | 492 | * RETURN: A walk_state object popped from the thread's stack |
492 | * | 493 | * |
493 | * DESCRIPTION: Remove and return the walkstate object that is at the head of | 494 | * DESCRIPTION: Remove and return the walkstate object that is at the head of |
494 | * the walk stack for the given walk list. NULL indicates that | 495 | * the walk stack for the given walk list. NULL indicates that |
495 | * the list is empty. | 496 | * the list is empty. |
496 | * | 497 | * |
497 | ******************************************************************************/ | 498 | ******************************************************************************/ |
@@ -531,14 +532,17 @@ struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread) | |||
531 | * | 532 | * |
532 | * RETURN: Pointer to the new walk state. | 533 | * RETURN: Pointer to the new walk state. |
533 | * | 534 | * |
534 | * DESCRIPTION: Allocate and initialize a new walk state. The current walk | 535 | * DESCRIPTION: Allocate and initialize a new walk state. The current walk |
535 | * state is set to this new state. | 536 | * state is set to this new state. |
536 | * | 537 | * |
537 | ******************************************************************************/ | 538 | ******************************************************************************/ |
538 | 539 | ||
539 | struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union acpi_parse_object | 540 | struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, |
540 | *origin, union acpi_operand_object | 541 | union acpi_parse_object |
541 | *method_desc, struct acpi_thread_state | 542 | *origin, |
543 | union acpi_operand_object | ||
544 | *method_desc, | ||
545 | struct acpi_thread_state | ||
542 | *thread) | 546 | *thread) |
543 | { | 547 | { |
544 | struct acpi_walk_state *walk_state; | 548 | struct acpi_walk_state *walk_state; |
@@ -653,7 +657,7 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state, | |||
653 | /* | 657 | /* |
654 | * Setup the current scope. | 658 | * Setup the current scope. |
655 | * Find a Named Op that has a namespace node associated with it. | 659 | * Find a Named Op that has a namespace node associated with it. |
656 | * search upwards from this Op. Current scope is the first | 660 | * search upwards from this Op. Current scope is the first |
657 | * Op with a namespace node. | 661 | * Op with a namespace node. |
658 | */ | 662 | */ |
659 | extra_op = parser_state->start_op; | 663 | extra_op = parser_state->start_op; |
@@ -704,13 +708,13 @@ void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state) | |||
704 | ACPI_FUNCTION_TRACE_PTR(ds_delete_walk_state, walk_state); | 708 | ACPI_FUNCTION_TRACE_PTR(ds_delete_walk_state, walk_state); |
705 | 709 | ||
706 | if (!walk_state) { | 710 | if (!walk_state) { |
707 | return; | 711 | return_VOID; |
708 | } | 712 | } |
709 | 713 | ||
710 | if (walk_state->descriptor_type != ACPI_DESC_TYPE_WALK) { | 714 | if (walk_state->descriptor_type != ACPI_DESC_TYPE_WALK) { |
711 | ACPI_ERROR((AE_INFO, "%p is not a valid walk state", | 715 | ACPI_ERROR((AE_INFO, "%p is not a valid walk state", |
712 | walk_state)); | 716 | walk_state)); |
713 | return; | 717 | return_VOID; |
714 | } | 718 | } |
715 | 719 | ||
716 | /* There should not be any open scopes */ | 720 | /* There should not be any open scopes */ |
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index ef0193d74b5d..36d120574423 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c | |||
@@ -89,7 +89,8 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info) | |||
89 | /* Set the mask bit only if there are references to this GPE */ | 89 | /* Set the mask bit only if there are references to this GPE */ |
90 | 90 | ||
91 | if (gpe_event_info->runtime_count) { | 91 | if (gpe_event_info->runtime_count) { |
92 | ACPI_SET_BIT(gpe_register_info->enable_for_run, (u8)register_bit); | 92 | ACPI_SET_BIT(gpe_register_info->enable_for_run, |
93 | (u8)register_bit); | ||
93 | } | 94 | } |
94 | 95 | ||
95 | return_ACPI_STATUS(AE_OK); | 96 | return_ACPI_STATUS(AE_OK); |
@@ -106,8 +107,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info) | |||
106 | * DESCRIPTION: Clear a GPE of stale events and enable it. | 107 | * DESCRIPTION: Clear a GPE of stale events and enable it. |
107 | * | 108 | * |
108 | ******************************************************************************/ | 109 | ******************************************************************************/ |
109 | acpi_status | 110 | acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) |
110 | acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | ||
111 | { | 111 | { |
112 | acpi_status status; | 112 | acpi_status status; |
113 | 113 | ||
@@ -131,8 +131,8 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
131 | } | 131 | } |
132 | 132 | ||
133 | /* Enable the requested GPE */ | 133 | /* Enable the requested GPE */ |
134 | status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); | ||
135 | 134 | ||
135 | status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); | ||
136 | return_ACPI_STATUS(status); | 136 | return_ACPI_STATUS(status); |
137 | } | 137 | } |
138 | 138 | ||
@@ -150,7 +150,8 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
150 | * | 150 | * |
151 | ******************************************************************************/ | 151 | ******************************************************************************/ |
152 | 152 | ||
153 | acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) | 153 | acpi_status |
154 | acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) | ||
154 | { | 155 | { |
155 | acpi_status status = AE_OK; | 156 | acpi_status status = AE_OK; |
156 | 157 | ||
@@ -191,7 +192,8 @@ acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info | |||
191 | * | 192 | * |
192 | ******************************************************************************/ | 193 | ******************************************************************************/ |
193 | 194 | ||
194 | acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) | 195 | acpi_status |
196 | acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) | ||
195 | { | 197 | { |
196 | acpi_status status = AE_OK; | 198 | acpi_status status = AE_OK; |
197 | 199 | ||
@@ -208,7 +210,8 @@ acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_i | |||
208 | 210 | ||
209 | status = acpi_ev_update_gpe_enable_mask(gpe_event_info); | 211 | status = acpi_ev_update_gpe_enable_mask(gpe_event_info); |
210 | if (ACPI_SUCCESS(status)) { | 212 | if (ACPI_SUCCESS(status)) { |
211 | status = acpi_hw_low_set_gpe(gpe_event_info, | 213 | status = |
214 | acpi_hw_low_set_gpe(gpe_event_info, | ||
212 | ACPI_GPE_DISABLE); | 215 | ACPI_GPE_DISABLE); |
213 | } | 216 | } |
214 | 217 | ||
@@ -306,7 +309,8 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, | |||
306 | 309 | ||
307 | /* A Non-NULL gpe_device means this is a GPE Block Device */ | 310 | /* A Non-NULL gpe_device means this is a GPE Block Device */ |
308 | 311 | ||
309 | obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *) | 312 | obj_desc = |
313 | acpi_ns_get_attached_object((struct acpi_namespace_node *) | ||
310 | gpe_device); | 314 | gpe_device); |
311 | if (!obj_desc || !obj_desc->device.gpe_block) { | 315 | if (!obj_desc || !obj_desc->device.gpe_block) { |
312 | return (NULL); | 316 | return (NULL); |
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index 8cf4c104c7b7..1571a61a7833 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c | |||
@@ -486,7 +486,8 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
486 | if (ACPI_FAILURE(status)) { | 486 | if (ACPI_FAILURE(status)) { |
487 | ACPI_EXCEPTION((AE_INFO, status, | 487 | ACPI_EXCEPTION((AE_INFO, status, |
488 | "Could not enable GPE 0x%02X", | 488 | "Could not enable GPE 0x%02X", |
489 | gpe_index + gpe_block->block_base_number)); | 489 | gpe_index + |
490 | gpe_block->block_base_number)); | ||
490 | continue; | 491 | continue; |
491 | } | 492 | } |
492 | 493 | ||
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c index cb50dd91bc18..228a0c3b1d49 100644 --- a/drivers/acpi/acpica/evgpeutil.c +++ b/drivers/acpi/acpica/evgpeutil.c | |||
@@ -374,7 +374,8 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
374 | gpe_event_info->dispatch.handler = NULL; | 374 | gpe_event_info->dispatch.handler = NULL; |
375 | gpe_event_info->flags &= | 375 | gpe_event_info->flags &= |
376 | ~ACPI_GPE_DISPATCH_MASK; | 376 | ~ACPI_GPE_DISPATCH_MASK; |
377 | } else if ((gpe_event_info-> | 377 | } else |
378 | if ((gpe_event_info-> | ||
378 | flags & ACPI_GPE_DISPATCH_MASK) == | 379 | flags & ACPI_GPE_DISPATCH_MASK) == |
379 | ACPI_GPE_DISPATCH_NOTIFY) { | 380 | ACPI_GPE_DISPATCH_NOTIFY) { |
380 | 381 | ||
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c index 4c1c8261166f..1474241bfc7e 100644 --- a/drivers/acpi/acpica/evrgnini.c +++ b/drivers/acpi/acpica/evrgnini.c | |||
@@ -227,8 +227,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, | |||
227 | 227 | ||
228 | /* Install a handler for this PCI root bridge */ | 228 | /* Install a handler for this PCI root bridge */ |
229 | 229 | ||
230 | status = | 230 | status = acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL); |
231 | acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL); | ||
232 | if (ACPI_FAILURE(status)) { | 231 | if (ACPI_FAILURE(status)) { |
233 | if (status == AE_SAME_HANDLER) { | 232 | if (status == AE_SAME_HANDLER) { |
234 | /* | 233 | /* |
@@ -350,8 +349,8 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, | |||
350 | static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node) | 349 | static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node) |
351 | { | 350 | { |
352 | acpi_status status; | 351 | acpi_status status; |
353 | struct acpica_device_id *hid; | 352 | struct acpi_pnp_device_id *hid; |
354 | struct acpica_device_id_list *cid; | 353 | struct acpi_pnp_device_id_list *cid; |
355 | u32 i; | 354 | u32 i; |
356 | u8 match; | 355 | u8 match; |
357 | 356 | ||
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index 7587eb6c9584..ae668f32cf16 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c | |||
@@ -398,7 +398,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler) | |||
398 | * | 398 | * |
399 | ******************************************************************************/ | 399 | ******************************************************************************/ |
400 | acpi_status | 400 | acpi_status |
401 | acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context) | 401 | acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context) |
402 | { | 402 | { |
403 | acpi_status status; | 403 | acpi_status status; |
404 | 404 | ||
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 87c5f2332260..3f30e753b652 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c | |||
@@ -221,7 +221,8 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device, | |||
221 | if (wake_device == ACPI_ROOT_OBJECT) { | 221 | if (wake_device == ACPI_ROOT_OBJECT) { |
222 | device_node = acpi_gbl_root_node; | 222 | device_node = acpi_gbl_root_node; |
223 | } else { | 223 | } else { |
224 | device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); | 224 | device_node = |
225 | ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); | ||
225 | } | 226 | } |
226 | 227 | ||
227 | /* Validate WakeDevice is of type Device */ | 228 | /* Validate WakeDevice is of type Device */ |
@@ -324,7 +325,8 @@ ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake) | |||
324 | * | 325 | * |
325 | ******************************************************************************/ | 326 | ******************************************************************************/ |
326 | 327 | ||
327 | acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action) | 328 | acpi_status |
329 | acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action) | ||
328 | { | 330 | { |
329 | acpi_status status = AE_OK; | 331 | acpi_status status = AE_OK; |
330 | struct acpi_gpe_event_info *gpe_event_info; | 332 | struct acpi_gpe_event_info *gpe_event_info; |
@@ -567,7 +569,7 @@ acpi_install_gpe_block(acpi_handle gpe_device, | |||
567 | 569 | ||
568 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | 570 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); |
569 | if (ACPI_FAILURE(status)) { | 571 | if (ACPI_FAILURE(status)) { |
570 | return (status); | 572 | return_ACPI_STATUS(status); |
571 | } | 573 | } |
572 | 574 | ||
573 | node = acpi_ns_validate_handle(gpe_device); | 575 | node = acpi_ns_validate_handle(gpe_device); |
@@ -650,7 +652,7 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device) | |||
650 | 652 | ||
651 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | 653 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); |
652 | if (ACPI_FAILURE(status)) { | 654 | if (ACPI_FAILURE(status)) { |
653 | return (status); | 655 | return_ACPI_STATUS(status); |
654 | } | 656 | } |
655 | 657 | ||
656 | node = acpi_ns_validate_handle(gpe_device); | 658 | node = acpi_ns_validate_handle(gpe_device); |
@@ -694,8 +696,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block) | |||
694 | * the FADT-defined gpe blocks. Otherwise, the GPE block device. | 696 | * the FADT-defined gpe blocks. Otherwise, the GPE block device. |
695 | * | 697 | * |
696 | ******************************************************************************/ | 698 | ******************************************************************************/ |
697 | acpi_status | 699 | acpi_status acpi_get_gpe_device(u32 index, acpi_handle * gpe_device) |
698 | acpi_get_gpe_device(u32 index, acpi_handle *gpe_device) | ||
699 | { | 700 | { |
700 | struct acpi_gpe_device_info info; | 701 | struct acpi_gpe_device_info info; |
701 | acpi_status status; | 702 | acpi_status status; |
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c index bfb062e4c4b4..4492a4e03022 100644 --- a/drivers/acpi/acpica/exconvrt.c +++ b/drivers/acpi/acpica/exconvrt.c | |||
@@ -516,8 +516,8 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, | |||
516 | string_length--; | 516 | string_length--; |
517 | } | 517 | } |
518 | 518 | ||
519 | return_desc = acpi_ut_create_string_object((acpi_size) | 519 | return_desc = |
520 | string_length); | 520 | acpi_ut_create_string_object((acpi_size) string_length); |
521 | if (!return_desc) { | 521 | if (!return_desc) { |
522 | return_ACPI_STATUS(AE_NO_MEMORY); | 522 | return_ACPI_STATUS(AE_NO_MEMORY); |
523 | } | 523 | } |
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c index 691d4763102c..66554bc6f9a8 100644 --- a/drivers/acpi/acpica/excreate.c +++ b/drivers/acpi/acpica/excreate.c | |||
@@ -78,7 +78,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state) | |||
78 | (target_node->type == ACPI_TYPE_LOCAL_METHOD_ALIAS)) { | 78 | (target_node->type == ACPI_TYPE_LOCAL_METHOD_ALIAS)) { |
79 | /* | 79 | /* |
80 | * Dereference an existing alias so that we don't create a chain | 80 | * Dereference an existing alias so that we don't create a chain |
81 | * of aliases. With this code, we guarantee that an alias is | 81 | * of aliases. With this code, we guarantee that an alias is |
82 | * always exactly one level of indirection away from the | 82 | * always exactly one level of indirection away from the |
83 | * actual aliased name. | 83 | * actual aliased name. |
84 | */ | 84 | */ |
@@ -90,7 +90,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state) | |||
90 | /* | 90 | /* |
91 | * For objects that can never change (i.e., the NS node will | 91 | * For objects that can never change (i.e., the NS node will |
92 | * permanently point to the same object), we can simply attach | 92 | * permanently point to the same object), we can simply attach |
93 | * the object to the new NS node. For other objects (such as | 93 | * the object to the new NS node. For other objects (such as |
94 | * Integers, buffers, etc.), we have to point the Alias node | 94 | * Integers, buffers, etc.), we have to point the Alias node |
95 | * to the original Node. | 95 | * to the original Node. |
96 | */ | 96 | */ |
@@ -139,7 +139,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state) | |||
139 | 139 | ||
140 | /* | 140 | /* |
141 | * The new alias assumes the type of the target, and it points | 141 | * The new alias assumes the type of the target, and it points |
142 | * to the same object. The reference count of the object has an | 142 | * to the same object. The reference count of the object has an |
143 | * additional reference to prevent deletion out from under either the | 143 | * additional reference to prevent deletion out from under either the |
144 | * target node or the alias Node | 144 | * target node or the alias Node |
145 | */ | 145 | */ |
@@ -243,8 +243,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state) | |||
243 | 243 | ||
244 | /* Init object and attach to NS node */ | 244 | /* Init object and attach to NS node */ |
245 | 245 | ||
246 | obj_desc->mutex.sync_level = | 246 | obj_desc->mutex.sync_level = (u8)walk_state->operands[1]->integer.value; |
247 | (u8) walk_state->operands[1]->integer.value; | ||
248 | obj_desc->mutex.node = | 247 | obj_desc->mutex.node = |
249 | (struct acpi_namespace_node *)walk_state->operands[0]; | 248 | (struct acpi_namespace_node *)walk_state->operands[0]; |
250 | 249 | ||
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c index bc5b9a6a1316..d7c9f51608a7 100644 --- a/drivers/acpi/acpica/exdebug.c +++ b/drivers/acpi/acpica/exdebug.c | |||
@@ -145,10 +145,10 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc, | |||
145 | case ACPI_TYPE_BUFFER: | 145 | case ACPI_TYPE_BUFFER: |
146 | 146 | ||
147 | acpi_os_printf("[0x%.2X]\n", (u32)source_desc->buffer.length); | 147 | acpi_os_printf("[0x%.2X]\n", (u32)source_desc->buffer.length); |
148 | acpi_ut_dump_buffer2(source_desc->buffer.pointer, | 148 | acpi_ut_dump_buffer(source_desc->buffer.pointer, |
149 | (source_desc->buffer.length < 256) ? | 149 | (source_desc->buffer.length < 256) ? |
150 | source_desc->buffer.length : 256, | 150 | source_desc->buffer.length : 256, |
151 | DB_BYTE_DISPLAY); | 151 | DB_BYTE_DISPLAY, 0); |
152 | break; | 152 | break; |
153 | 153 | ||
154 | case ACPI_TYPE_STRING: | 154 | case ACPI_TYPE_STRING: |
@@ -190,7 +190,7 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc, | |||
190 | 190 | ||
191 | acpi_os_printf("Table Index 0x%X\n", | 191 | acpi_os_printf("Table Index 0x%X\n", |
192 | source_desc->reference.value); | 192 | source_desc->reference.value); |
193 | return; | 193 | return_VOID; |
194 | 194 | ||
195 | default: | 195 | default: |
196 | break; | 196 | break; |
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c index 213c081776fc..858b43a7dcf6 100644 --- a/drivers/acpi/acpica/exdump.c +++ b/drivers/acpi/acpica/exdump.c | |||
@@ -464,7 +464,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth) | |||
464 | 464 | ||
465 | ACPI_FUNCTION_NAME(ex_dump_operand) | 465 | ACPI_FUNCTION_NAME(ex_dump_operand) |
466 | 466 | ||
467 | if (!((ACPI_LV_EXEC & acpi_dbg_level) | 467 | if (! |
468 | ((ACPI_LV_EXEC & acpi_dbg_level) | ||
468 | && (_COMPONENT & acpi_dbg_layer))) { | 469 | && (_COMPONENT & acpi_dbg_layer))) { |
469 | return; | 470 | return; |
470 | } | 471 | } |
@@ -777,7 +778,7 @@ acpi_ex_dump_operands(union acpi_operand_object **operands, | |||
777 | * PARAMETERS: title - Descriptive text | 778 | * PARAMETERS: title - Descriptive text |
778 | * value - Value to be displayed | 779 | * value - Value to be displayed |
779 | * | 780 | * |
780 | * DESCRIPTION: Object dump output formatting functions. These functions | 781 | * DESCRIPTION: Object dump output formatting functions. These functions |
781 | * reduce the number of format strings required and keeps them | 782 | * reduce the number of format strings required and keeps them |
782 | * all in one place for easy modification. | 783 | * all in one place for easy modification. |
783 | * | 784 | * |
@@ -810,7 +811,8 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags) | |||
810 | ACPI_FUNCTION_ENTRY(); | 811 | ACPI_FUNCTION_ENTRY(); |
811 | 812 | ||
812 | if (!flags) { | 813 | if (!flags) { |
813 | if (!((ACPI_LV_OBJECTS & acpi_dbg_level) | 814 | if (! |
815 | ((ACPI_LV_OBJECTS & acpi_dbg_level) | ||
814 | && (_COMPONENT & acpi_dbg_layer))) { | 816 | && (_COMPONENT & acpi_dbg_layer))) { |
815 | return; | 817 | return; |
816 | } | 818 | } |
@@ -940,10 +942,11 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc, | |||
940 | acpi_os_printf("[Buffer] Length %.2X = ", | 942 | acpi_os_printf("[Buffer] Length %.2X = ", |
941 | obj_desc->buffer.length); | 943 | obj_desc->buffer.length); |
942 | if (obj_desc->buffer.length) { | 944 | if (obj_desc->buffer.length) { |
943 | acpi_ut_dump_buffer(ACPI_CAST_PTR | 945 | acpi_ut_debug_dump_buffer(ACPI_CAST_PTR |
944 | (u8, obj_desc->buffer.pointer), | 946 | (u8, |
945 | obj_desc->buffer.length, | 947 | obj_desc->buffer.pointer), |
946 | DB_DWORD_DISPLAY, _COMPONENT); | 948 | obj_desc->buffer.length, |
949 | DB_DWORD_DISPLAY, _COMPONENT); | ||
947 | } else { | 950 | } else { |
948 | acpi_os_printf("\n"); | 951 | acpi_os_printf("\n"); |
949 | } | 952 | } |
@@ -996,7 +999,8 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags) | |||
996 | } | 999 | } |
997 | 1000 | ||
998 | if (!flags) { | 1001 | if (!flags) { |
999 | if (!((ACPI_LV_OBJECTS & acpi_dbg_level) | 1002 | if (! |
1003 | ((ACPI_LV_OBJECTS & acpi_dbg_level) | ||
1000 | && (_COMPONENT & acpi_dbg_layer))) { | 1004 | && (_COMPONENT & acpi_dbg_layer))) { |
1001 | return_VOID; | 1005 | return_VOID; |
1002 | } | 1006 | } |
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c index dc092f5b35d6..ebc55fbf3ff7 100644 --- a/drivers/acpi/acpica/exfield.c +++ b/drivers/acpi/acpica/exfield.c | |||
@@ -59,7 +59,7 @@ ACPI_MODULE_NAME("exfield") | |||
59 | * | 59 | * |
60 | * RETURN: Status | 60 | * RETURN: Status |
61 | * | 61 | * |
62 | * DESCRIPTION: Read from a named field. Returns either an Integer or a | 62 | * DESCRIPTION: Read from a named field. Returns either an Integer or a |
63 | * Buffer, depending on the size of the field. | 63 | * Buffer, depending on the size of the field. |
64 | * | 64 | * |
65 | ******************************************************************************/ | 65 | ******************************************************************************/ |
@@ -149,7 +149,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state, | |||
149 | * Allocate a buffer for the contents of the field. | 149 | * Allocate a buffer for the contents of the field. |
150 | * | 150 | * |
151 | * If the field is larger than the current integer width, create | 151 | * If the field is larger than the current integer width, create |
152 | * a BUFFER to hold it. Otherwise, use an INTEGER. This allows | 152 | * a BUFFER to hold it. Otherwise, use an INTEGER. This allows |
153 | * the use of arithmetic operators on the returned value if the | 153 | * the use of arithmetic operators on the returned value if the |
154 | * field size is equal or smaller than an Integer. | 154 | * field size is equal or smaller than an Integer. |
155 | * | 155 | * |
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c index a7784152ed30..aa2ccfb7cb61 100644 --- a/drivers/acpi/acpica/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c | |||
@@ -54,8 +54,7 @@ ACPI_MODULE_NAME("exfldio") | |||
54 | /* Local prototypes */ | 54 | /* Local prototypes */ |
55 | static acpi_status | 55 | static acpi_status |
56 | acpi_ex_field_datum_io(union acpi_operand_object *obj_desc, | 56 | acpi_ex_field_datum_io(union acpi_operand_object *obj_desc, |
57 | u32 field_datum_byte_offset, | 57 | u32 field_datum_byte_offset, u64 *value, u32 read_write); |
58 | u64 *value, u32 read_write); | ||
59 | 58 | ||
60 | static u8 | 59 | static u8 |
61 | acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value); | 60 | acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value); |
@@ -155,7 +154,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, | |||
155 | #endif | 154 | #endif |
156 | 155 | ||
157 | /* | 156 | /* |
158 | * Validate the request. The entire request from the byte offset for a | 157 | * Validate the request. The entire request from the byte offset for a |
159 | * length of one field datum (access width) must fit within the region. | 158 | * length of one field datum (access width) must fit within the region. |
160 | * (Region length is specified in bytes) | 159 | * (Region length is specified in bytes) |
161 | */ | 160 | */ |
@@ -183,7 +182,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, | |||
183 | obj_desc->common_field.access_byte_width) { | 182 | obj_desc->common_field.access_byte_width) { |
184 | /* | 183 | /* |
185 | * This is the case where the access_type (acc_word, etc.) is wider | 184 | * This is the case where the access_type (acc_word, etc.) is wider |
186 | * than the region itself. For example, a region of length one | 185 | * than the region itself. For example, a region of length one |
187 | * byte, and a field with Dword access specified. | 186 | * byte, and a field with Dword access specified. |
188 | */ | 187 | */ |
189 | ACPI_ERROR((AE_INFO, | 188 | ACPI_ERROR((AE_INFO, |
@@ -321,7 +320,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc, | |||
321 | * | 320 | * |
322 | * DESCRIPTION: Check if a value is out of range of the field being written. | 321 | * DESCRIPTION: Check if a value is out of range of the field being written. |
323 | * Used to check if the values written to Index and Bank registers | 322 | * Used to check if the values written to Index and Bank registers |
324 | * are out of range. Normally, the value is simply truncated | 323 | * are out of range. Normally, the value is simply truncated |
325 | * to fit the field, but this case is most likely a serious | 324 | * to fit the field, but this case is most likely a serious |
326 | * coding error in the ASL. | 325 | * coding error in the ASL. |
327 | * | 326 | * |
@@ -370,7 +369,7 @@ acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value) | |||
370 | * | 369 | * |
371 | * RETURN: Status | 370 | * RETURN: Status |
372 | * | 371 | * |
373 | * DESCRIPTION: Read or Write a single datum of a field. The field_type is | 372 | * DESCRIPTION: Read or Write a single datum of a field. The field_type is |
374 | * demultiplexed here to handle the different types of fields | 373 | * demultiplexed here to handle the different types of fields |
375 | * (buffer_field, region_field, index_field, bank_field) | 374 | * (buffer_field, region_field, index_field, bank_field) |
376 | * | 375 | * |
@@ -860,7 +859,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc, | |||
860 | ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length); | 859 | ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length); |
861 | /* | 860 | /* |
862 | * We must have a buffer that is at least as long as the field | 861 | * We must have a buffer that is at least as long as the field |
863 | * we are writing to. This is because individual fields are | 862 | * we are writing to. This is because individual fields are |
864 | * indivisible and partial writes are not supported -- as per | 863 | * indivisible and partial writes are not supported -- as per |
865 | * the ACPI specification. | 864 | * the ACPI specification. |
866 | */ | 865 | */ |
@@ -875,7 +874,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc, | |||
875 | 874 | ||
876 | /* | 875 | /* |
877 | * Copy the original data to the new buffer, starting | 876 | * Copy the original data to the new buffer, starting |
878 | * at Byte zero. All unused (upper) bytes of the | 877 | * at Byte zero. All unused (upper) bytes of the |
879 | * buffer will be 0. | 878 | * buffer will be 0. |
880 | */ | 879 | */ |
881 | ACPI_MEMCPY((char *)new_buffer, (char *)buffer, buffer_length); | 880 | ACPI_MEMCPY((char *)new_buffer, (char *)buffer, buffer_length); |
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c index 271c0c57ea10..84058705ed12 100644 --- a/drivers/acpi/acpica/exmisc.c +++ b/drivers/acpi/acpica/exmisc.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes | 3 | * Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes |
@@ -254,7 +253,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, | |||
254 | ACPI_FUNCTION_TRACE(ex_do_concatenate); | 253 | ACPI_FUNCTION_TRACE(ex_do_concatenate); |
255 | 254 | ||
256 | /* | 255 | /* |
257 | * Convert the second operand if necessary. The first operand | 256 | * Convert the second operand if necessary. The first operand |
258 | * determines the type of the second operand, (See the Data Types | 257 | * determines the type of the second operand, (See the Data Types |
259 | * section of the ACPI specification.) Both object types are | 258 | * section of the ACPI specification.) Both object types are |
260 | * guaranteed to be either Integer/String/Buffer by the operand | 259 | * guaranteed to be either Integer/String/Buffer by the operand |
@@ -573,7 +572,7 @@ acpi_ex_do_logical_op(u16 opcode, | |||
573 | ACPI_FUNCTION_TRACE(ex_do_logical_op); | 572 | ACPI_FUNCTION_TRACE(ex_do_logical_op); |
574 | 573 | ||
575 | /* | 574 | /* |
576 | * Convert the second operand if necessary. The first operand | 575 | * Convert the second operand if necessary. The first operand |
577 | * determines the type of the second operand, (See the Data Types | 576 | * determines the type of the second operand, (See the Data Types |
578 | * section of the ACPI 3.0+ specification.) Both object types are | 577 | * section of the ACPI 3.0+ specification.) Both object types are |
579 | * guaranteed to be either Integer/String/Buffer by the operand | 578 | * guaranteed to be either Integer/String/Buffer by the operand |
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c index bcceda5be9e3..d1f449d93dcf 100644 --- a/drivers/acpi/acpica/exmutex.c +++ b/drivers/acpi/acpica/exmutex.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exmutex - ASL Mutex Acquire/Release functions | 3 | * Module Name: exmutex - ASL Mutex Acquire/Release functions |
@@ -305,7 +304,7 @@ acpi_status acpi_ex_release_mutex_object(union acpi_operand_object *obj_desc) | |||
305 | ACPI_FUNCTION_TRACE(ex_release_mutex_object); | 304 | ACPI_FUNCTION_TRACE(ex_release_mutex_object); |
306 | 305 | ||
307 | if (obj_desc->mutex.acquisition_depth == 0) { | 306 | if (obj_desc->mutex.acquisition_depth == 0) { |
308 | return (AE_NOT_ACQUIRED); | 307 | return_ACPI_STATUS(AE_NOT_ACQUIRED); |
309 | } | 308 | } |
310 | 309 | ||
311 | /* Match multiple Acquires with multiple Releases */ | 310 | /* Match multiple Acquires with multiple Releases */ |
@@ -462,7 +461,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread) | |||
462 | union acpi_operand_object *next = thread->acquired_mutex_list; | 461 | union acpi_operand_object *next = thread->acquired_mutex_list; |
463 | union acpi_operand_object *obj_desc; | 462 | union acpi_operand_object *obj_desc; |
464 | 463 | ||
465 | ACPI_FUNCTION_ENTRY(); | 464 | ACPI_FUNCTION_NAME(ex_release_all_mutexes); |
466 | 465 | ||
467 | /* Traverse the list of owned mutexes, releasing each one */ | 466 | /* Traverse the list of owned mutexes, releasing each one */ |
468 | 467 | ||
@@ -474,6 +473,10 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread) | |||
474 | obj_desc->mutex.next = NULL; | 473 | obj_desc->mutex.next = NULL; |
475 | obj_desc->mutex.acquisition_depth = 0; | 474 | obj_desc->mutex.acquisition_depth = 0; |
476 | 475 | ||
476 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, | ||
477 | "Force-releasing held mutex: %p\n", | ||
478 | obj_desc)); | ||
479 | |||
477 | /* Release the mutex, special case for Global Lock */ | 480 | /* Release the mutex, special case for Global Lock */ |
478 | 481 | ||
479 | if (obj_desc == acpi_gbl_global_lock_mutex) { | 482 | if (obj_desc == acpi_gbl_global_lock_mutex) { |
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c index fcc75fa27d32..2ff578a16adc 100644 --- a/drivers/acpi/acpica/exnames.c +++ b/drivers/acpi/acpica/exnames.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exnames - interpreter/scanner name load/execute | 3 | * Module Name: exnames - interpreter/scanner name load/execute |
@@ -53,8 +52,7 @@ ACPI_MODULE_NAME("exnames") | |||
53 | /* Local prototypes */ | 52 | /* Local prototypes */ |
54 | static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs); | 53 | static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs); |
55 | 54 | ||
56 | static acpi_status | 55 | static acpi_status acpi_ex_name_segment(u8 **in_aml_address, char *name_string); |
57 | acpi_ex_name_segment(u8 ** in_aml_address, char *name_string); | ||
58 | 56 | ||
59 | /******************************************************************************* | 57 | /******************************************************************************* |
60 | * | 58 | * |
@@ -64,7 +62,7 @@ acpi_ex_name_segment(u8 ** in_aml_address, char *name_string); | |||
64 | * (-1)==root, 0==none | 62 | * (-1)==root, 0==none |
65 | * num_name_segs - count of 4-character name segments | 63 | * num_name_segs - count of 4-character name segments |
66 | * | 64 | * |
67 | * RETURN: A pointer to the allocated string segment. This segment must | 65 | * RETURN: A pointer to the allocated string segment. This segment must |
68 | * be deleted by the caller. | 66 | * be deleted by the caller. |
69 | * | 67 | * |
70 | * DESCRIPTION: Allocate a buffer for a name string. Ensure allocated name | 68 | * DESCRIPTION: Allocate a buffer for a name string. Ensure allocated name |
@@ -178,7 +176,8 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string) | |||
178 | 176 | ||
179 | ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Bytes from stream:\n")); | 177 | ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Bytes from stream:\n")); |
180 | 178 | ||
181 | for (index = 0; (index < ACPI_NAME_SIZE) | 179 | for (index = 0; |
180 | (index < ACPI_NAME_SIZE) | ||
182 | && (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) { | 181 | && (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) { |
183 | char_buf[index] = *aml_address++; | 182 | char_buf[index] = *aml_address++; |
184 | ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index])); | 183 | ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index])); |
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c index 9ba8c73cea16..bbf01e9bf057 100644 --- a/drivers/acpi/acpica/exoparg1.c +++ b/drivers/acpi/acpica/exoparg1.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exoparg1 - AML execution - opcodes with 1 argument | 3 | * Module Name: exoparg1 - AML execution - opcodes with 1 argument |
@@ -606,7 +605,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) | |||
606 | } | 605 | } |
607 | 606 | ||
608 | /* | 607 | /* |
609 | * Set result to ONES (TRUE) if Value == 0. Note: | 608 | * Set result to ONES (TRUE) if Value == 0. Note: |
610 | * return_desc->Integer.Value is initially == 0 (FALSE) from above. | 609 | * return_desc->Integer.Value is initially == 0 (FALSE) from above. |
611 | */ | 610 | */ |
612 | if (!operand[0]->integer.value) { | 611 | if (!operand[0]->integer.value) { |
@@ -618,7 +617,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) | |||
618 | case AML_INCREMENT_OP: /* Increment (Operand) */ | 617 | case AML_INCREMENT_OP: /* Increment (Operand) */ |
619 | 618 | ||
620 | /* | 619 | /* |
621 | * Create a new integer. Can't just get the base integer and | 620 | * Create a new integer. Can't just get the base integer and |
622 | * increment it because it may be an Arg or Field. | 621 | * increment it because it may be an Arg or Field. |
623 | */ | 622 | */ |
624 | return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER); | 623 | return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER); |
@@ -686,7 +685,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) | |||
686 | 685 | ||
687 | /* | 686 | /* |
688 | * Note: The operand is not resolved at this point because we want to | 687 | * Note: The operand is not resolved at this point because we want to |
689 | * get the associated object, not its value. For example, we don't | 688 | * get the associated object, not its value. For example, we don't |
690 | * want to resolve a field_unit to its value, we want the actual | 689 | * want to resolve a field_unit to its value, we want the actual |
691 | * field_unit object. | 690 | * field_unit object. |
692 | */ | 691 | */ |
@@ -727,7 +726,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) | |||
727 | 726 | ||
728 | /* | 727 | /* |
729 | * The type of the base object must be integer, buffer, string, or | 728 | * The type of the base object must be integer, buffer, string, or |
730 | * package. All others are not supported. | 729 | * package. All others are not supported. |
731 | * | 730 | * |
732 | * NOTE: Integer is not specifically supported by the ACPI spec, | 731 | * NOTE: Integer is not specifically supported by the ACPI spec, |
733 | * but is supported implicitly via implicit operand conversion. | 732 | * but is supported implicitly via implicit operand conversion. |
@@ -965,7 +964,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) | |||
965 | case ACPI_TYPE_PACKAGE: | 964 | case ACPI_TYPE_PACKAGE: |
966 | 965 | ||
967 | /* | 966 | /* |
968 | * Return the referenced element of the package. We must | 967 | * Return the referenced element of the package. We must |
969 | * add another reference to the referenced object, however. | 968 | * add another reference to the referenced object, however. |
970 | */ | 969 | */ |
971 | return_desc = | 970 | return_desc = |
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c index 879e8a277b94..ee5634a074c4 100644 --- a/drivers/acpi/acpica/exoparg2.c +++ b/drivers/acpi/acpica/exoparg2.c | |||
@@ -123,7 +123,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state) | |||
123 | /* | 123 | /* |
124 | * Dispatch the notify to the appropriate handler | 124 | * Dispatch the notify to the appropriate handler |
125 | * NOTE: the request is queued for execution after this method | 125 | * NOTE: the request is queued for execution after this method |
126 | * completes. The notify handlers are NOT invoked synchronously | 126 | * completes. The notify handlers are NOT invoked synchronously |
127 | * from this thread -- because handlers may in turn run other | 127 | * from this thread -- because handlers may in turn run other |
128 | * control methods. | 128 | * control methods. |
129 | */ | 129 | */ |
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c index 71fcc65c9ffa..2c89b4651f08 100644 --- a/drivers/acpi/acpica/exoparg3.c +++ b/drivers/acpi/acpica/exoparg3.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exoparg3 - AML execution - opcodes with 3 arguments | 3 | * Module Name: exoparg3 - AML execution - opcodes with 3 arguments |
@@ -158,7 +157,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state) | |||
158 | case AML_MID_OP: /* Mid (Source[0], Index[1], Length[2], Result[3]) */ | 157 | case AML_MID_OP: /* Mid (Source[0], Index[1], Length[2], Result[3]) */ |
159 | 158 | ||
160 | /* | 159 | /* |
161 | * Create the return object. The Source operand is guaranteed to be | 160 | * Create the return object. The Source operand is guaranteed to be |
162 | * either a String or a Buffer, so just use its type. | 161 | * either a String or a Buffer, so just use its type. |
163 | */ | 162 | */ |
164 | return_desc = acpi_ut_create_internal_object((operand[0])-> | 163 | return_desc = acpi_ut_create_internal_object((operand[0])-> |
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c index 0786b8659061..3e08695c3b30 100644 --- a/drivers/acpi/acpica/exoparg6.c +++ b/drivers/acpi/acpica/exoparg6.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exoparg6 - AML execution - opcodes with 6 arguments | 3 | * Module Name: exoparg6 - AML execution - opcodes with 6 arguments |
@@ -198,7 +197,7 @@ acpi_ex_do_match(u32 match_op, | |||
198 | return (FALSE); | 197 | return (FALSE); |
199 | } | 198 | } |
200 | 199 | ||
201 | return logical_result; | 200 | return (logical_result); |
202 | } | 201 | } |
203 | 202 | ||
204 | /******************************************************************************* | 203 | /******************************************************************************* |
@@ -269,7 +268,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state) | |||
269 | * and the next should be examined. | 268 | * and the next should be examined. |
270 | * | 269 | * |
271 | * Upon finding a match, the loop will terminate via "break" at | 270 | * Upon finding a match, the loop will terminate via "break" at |
272 | * the bottom. If it terminates "normally", match_value will be | 271 | * the bottom. If it terminates "normally", match_value will be |
273 | * ACPI_UINT64_MAX (Ones) (its initial value) indicating that no | 272 | * ACPI_UINT64_MAX (Ones) (its initial value) indicating that no |
274 | * match was found. | 273 | * match was found. |
275 | */ | 274 | */ |
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index 81eca60d2748..ba9db4de7c89 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exprep - ACPI AML (p-code) execution - field prep utilities | 3 | * Module Name: exprep - ACPI AML (p-code) execution - field prep utilities |
@@ -78,8 +77,8 @@ acpi_ex_generate_access(u32 field_bit_offset, | |||
78 | * any_acc keyword. | 77 | * any_acc keyword. |
79 | * | 78 | * |
80 | * NOTE: Need to have the region_length in order to check for boundary | 79 | * NOTE: Need to have the region_length in order to check for boundary |
81 | * conditions (end-of-region). However, the region_length is a deferred | 80 | * conditions (end-of-region). However, the region_length is a deferred |
82 | * operation. Therefore, to complete this implementation, the generation | 81 | * operation. Therefore, to complete this implementation, the generation |
83 | * of this access width must be deferred until the region length has | 82 | * of this access width must be deferred until the region length has |
84 | * been evaluated. | 83 | * been evaluated. |
85 | * | 84 | * |
@@ -308,7 +307,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc, | |||
308 | * RETURN: Status | 307 | * RETURN: Status |
309 | * | 308 | * |
310 | * DESCRIPTION: Initialize the areas of the field object that are common | 309 | * DESCRIPTION: Initialize the areas of the field object that are common |
311 | * to the various types of fields. Note: This is very "sensitive" | 310 | * to the various types of fields. Note: This is very "sensitive" |
312 | * code because we are solving the general case for field | 311 | * code because we are solving the general case for field |
313 | * alignment. | 312 | * alignment. |
314 | * | 313 | * |
@@ -336,13 +335,13 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc, | |||
336 | obj_desc->common_field.bit_length = field_bit_length; | 335 | obj_desc->common_field.bit_length = field_bit_length; |
337 | 336 | ||
338 | /* | 337 | /* |
339 | * Decode the access type so we can compute offsets. The access type gives | 338 | * Decode the access type so we can compute offsets. The access type gives |
340 | * two pieces of information - the width of each field access and the | 339 | * two pieces of information - the width of each field access and the |
341 | * necessary byte_alignment (address granularity) of the access. | 340 | * necessary byte_alignment (address granularity) of the access. |
342 | * | 341 | * |
343 | * For any_acc, the access_bit_width is the largest width that is both | 342 | * For any_acc, the access_bit_width is the largest width that is both |
344 | * necessary and possible in an attempt to access the whole field in one | 343 | * necessary and possible in an attempt to access the whole field in one |
345 | * I/O operation. However, for any_acc, the byte_alignment is always one | 344 | * I/O operation. However, for any_acc, the byte_alignment is always one |
346 | * byte. | 345 | * byte. |
347 | * | 346 | * |
348 | * For all Buffer Fields, the byte_alignment is always one byte. | 347 | * For all Buffer Fields, the byte_alignment is always one byte. |
@@ -363,7 +362,7 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc, | |||
363 | 362 | ||
364 | /* | 363 | /* |
365 | * base_byte_offset is the address of the start of the field within the | 364 | * base_byte_offset is the address of the start of the field within the |
366 | * region. It is the byte address of the first *datum* (field-width data | 365 | * region. It is the byte address of the first *datum* (field-width data |
367 | * unit) of the field. (i.e., the first datum that contains at least the | 366 | * unit) of the field. (i.e., the first datum that contains at least the |
368 | * first *bit* of the field.) | 367 | * first *bit* of the field.) |
369 | * | 368 | * |
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c index 1f1ce0c3d2f8..1db2c0bfde0b 100644 --- a/drivers/acpi/acpica/exregion.c +++ b/drivers/acpi/acpica/exregion.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exregion - ACPI default op_region (address space) handlers | 3 | * Module Name: exregion - ACPI default op_region (address space) handlers |
@@ -202,7 +201,7 @@ acpi_ex_system_memory_space_handler(u32 function, | |||
202 | * Perform the memory read or write | 201 | * Perform the memory read or write |
203 | * | 202 | * |
204 | * Note: For machines that do not support non-aligned transfers, the target | 203 | * Note: For machines that do not support non-aligned transfers, the target |
205 | * address was checked for alignment above. We do not attempt to break the | 204 | * address was checked for alignment above. We do not attempt to break the |
206 | * transfer up into smaller (byte-size) chunks because the AML specifically | 205 | * transfer up into smaller (byte-size) chunks because the AML specifically |
207 | * asked for a transfer width that the hardware may require. | 206 | * asked for a transfer width that the hardware may require. |
208 | */ | 207 | */ |
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c index fa50e77e64a8..6239956786eb 100644 --- a/drivers/acpi/acpica/exresnte.c +++ b/drivers/acpi/acpica/exresnte.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exresnte - AML Interpreter object resolution | 3 | * Module Name: exresnte - AML Interpreter object resolution |
@@ -58,8 +57,8 @@ ACPI_MODULE_NAME("exresnte") | |||
58 | * PARAMETERS: object_ptr - Pointer to a location that contains | 57 | * PARAMETERS: object_ptr - Pointer to a location that contains |
59 | * a pointer to a NS node, and will receive a | 58 | * a pointer to a NS node, and will receive a |
60 | * pointer to the resolved object. | 59 | * pointer to the resolved object. |
61 | * walk_state - Current state. Valid only if executing AML | 60 | * walk_state - Current state. Valid only if executing AML |
62 | * code. NULL if simply resolving an object | 61 | * code. NULL if simply resolving an object |
63 | * | 62 | * |
64 | * RETURN: Status | 63 | * RETURN: Status |
65 | * | 64 | * |
@@ -67,7 +66,7 @@ ACPI_MODULE_NAME("exresnte") | |||
67 | * | 66 | * |
68 | * Note: for some of the data types, the pointer attached to the Node | 67 | * Note: for some of the data types, the pointer attached to the Node |
69 | * can be either a pointer to an actual internal object or a pointer into the | 68 | * can be either a pointer to an actual internal object or a pointer into the |
70 | * AML stream itself. These types are currently: | 69 | * AML stream itself. These types are currently: |
71 | * | 70 | * |
72 | * ACPI_TYPE_INTEGER | 71 | * ACPI_TYPE_INTEGER |
73 | * ACPI_TYPE_STRING | 72 | * ACPI_TYPE_STRING |
@@ -89,7 +88,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr, | |||
89 | ACPI_FUNCTION_TRACE(ex_resolve_node_to_value); | 88 | ACPI_FUNCTION_TRACE(ex_resolve_node_to_value); |
90 | 89 | ||
91 | /* | 90 | /* |
92 | * The stack pointer points to a struct acpi_namespace_node (Node). Get the | 91 | * The stack pointer points to a struct acpi_namespace_node (Node). Get the |
93 | * object that is attached to the Node. | 92 | * object that is attached to the Node. |
94 | */ | 93 | */ |
95 | node = *object_ptr; | 94 | node = *object_ptr; |
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c index bbf40ac27585..cc176b245e22 100644 --- a/drivers/acpi/acpica/exresolv.c +++ b/drivers/acpi/acpica/exresolv.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exresolv - AML Interpreter object resolution | 3 | * Module Name: exresolv - AML Interpreter object resolution |
@@ -327,7 +326,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr, | |||
327 | * | 326 | * |
328 | * RETURN: Status | 327 | * RETURN: Status |
329 | * | 328 | * |
330 | * DESCRIPTION: Return the base object and type. Traverse a reference list if | 329 | * DESCRIPTION: Return the base object and type. Traverse a reference list if |
331 | * necessary to get to the base object. | 330 | * necessary to get to the base object. |
332 | * | 331 | * |
333 | ******************************************************************************/ | 332 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c index f232fbabdea8..b9ebff2f6a09 100644 --- a/drivers/acpi/acpica/exresop.c +++ b/drivers/acpi/acpica/exresop.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exresop - AML Interpreter operand/object resolution | 3 | * Module Name: exresop - AML Interpreter operand/object resolution |
@@ -87,7 +86,7 @@ acpi_ex_check_object_type(acpi_object_type type_needed, | |||
87 | if (type_needed == ACPI_TYPE_LOCAL_REFERENCE) { | 86 | if (type_needed == ACPI_TYPE_LOCAL_REFERENCE) { |
88 | /* | 87 | /* |
89 | * Allow the AML "Constant" opcodes (Zero, One, etc.) to be reference | 88 | * Allow the AML "Constant" opcodes (Zero, One, etc.) to be reference |
90 | * objects and thus allow them to be targets. (As per the ACPI | 89 | * objects and thus allow them to be targets. (As per the ACPI |
91 | * specification, a store to a constant is a noop.) | 90 | * specification, a store to a constant is a noop.) |
92 | */ | 91 | */ |
93 | if ((this_type == ACPI_TYPE_INTEGER) && | 92 | if ((this_type == ACPI_TYPE_INTEGER) && |
@@ -337,7 +336,8 @@ acpi_ex_resolve_operands(u16 opcode, | |||
337 | if ((opcode == AML_STORE_OP) && | 336 | if ((opcode == AML_STORE_OP) && |
338 | ((*stack_ptr)->common.type == | 337 | ((*stack_ptr)->common.type == |
339 | ACPI_TYPE_LOCAL_REFERENCE) | 338 | ACPI_TYPE_LOCAL_REFERENCE) |
340 | && ((*stack_ptr)->reference.class == ACPI_REFCLASS_INDEX)) { | 339 | && ((*stack_ptr)->reference.class == |
340 | ACPI_REFCLASS_INDEX)) { | ||
341 | goto next_operand; | 341 | goto next_operand; |
342 | } | 342 | } |
343 | break; | 343 | break; |
@@ -638,7 +638,7 @@ acpi_ex_resolve_operands(u16 opcode, | |||
638 | if (acpi_gbl_enable_interpreter_slack) { | 638 | if (acpi_gbl_enable_interpreter_slack) { |
639 | /* | 639 | /* |
640 | * Enable original behavior of Store(), allowing any and all | 640 | * Enable original behavior of Store(), allowing any and all |
641 | * objects as the source operand. The ACPI spec does not | 641 | * objects as the source operand. The ACPI spec does not |
642 | * allow this, however. | 642 | * allow this, however. |
643 | */ | 643 | */ |
644 | break; | 644 | break; |
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c index 5fffe7ab5ece..90431f12f831 100644 --- a/drivers/acpi/acpica/exstore.c +++ b/drivers/acpi/acpica/exstore.c | |||
@@ -374,7 +374,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc, | |||
374 | * with the input value. | 374 | * with the input value. |
375 | * | 375 | * |
376 | * When storing into an object the data is converted to the | 376 | * When storing into an object the data is converted to the |
377 | * target object type then stored in the object. This means | 377 | * target object type then stored in the object. This means |
378 | * that the target object type (for an initialized target) will | 378 | * that the target object type (for an initialized target) will |
379 | * not be changed by a store operation. | 379 | * not be changed by a store operation. |
380 | * | 380 | * |
@@ -491,7 +491,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc, | |||
491 | acpi_ut_get_object_type_name(source_desc), | 491 | acpi_ut_get_object_type_name(source_desc), |
492 | source_desc, node)); | 492 | source_desc, node)); |
493 | 493 | ||
494 | /* No conversions for all other types. Just attach the source object */ | 494 | /* No conversions for all other types. Just attach the source object */ |
495 | 495 | ||
496 | status = acpi_ns_attach_object(node, source_desc, | 496 | status = acpi_ns_attach_object(node, source_desc, |
497 | source_desc->common.type); | 497 | source_desc->common.type); |
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c index b35bed52e061..87153bbc4b43 100644 --- a/drivers/acpi/acpica/exstoren.c +++ b/drivers/acpi/acpica/exstoren.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exstoren - AML Interpreter object store support, | 3 | * Module Name: exstoren - AML Interpreter object store support, |
@@ -61,7 +60,7 @@ ACPI_MODULE_NAME("exstoren") | |||
61 | * | 60 | * |
62 | * RETURN: Status, resolved object in source_desc_ptr. | 61 | * RETURN: Status, resolved object in source_desc_ptr. |
63 | * | 62 | * |
64 | * DESCRIPTION: Resolve an object. If the object is a reference, dereference | 63 | * DESCRIPTION: Resolve an object. If the object is a reference, dereference |
65 | * it and return the actual object in the source_desc_ptr. | 64 | * it and return the actual object in the source_desc_ptr. |
66 | * | 65 | * |
67 | ******************************************************************************/ | 66 | ******************************************************************************/ |
@@ -93,7 +92,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr, | |||
93 | 92 | ||
94 | /* | 93 | /* |
95 | * Stores into a Field/Region or into a Integer/Buffer/String | 94 | * Stores into a Field/Region or into a Integer/Buffer/String |
96 | * are all essentially the same. This case handles the | 95 | * are all essentially the same. This case handles the |
97 | * "interchangeable" types Integer, String, and Buffer. | 96 | * "interchangeable" types Integer, String, and Buffer. |
98 | */ | 97 | */ |
99 | if (source_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE) { | 98 | if (source_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE) { |
@@ -167,7 +166,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr, | |||
167 | * | 166 | * |
168 | * RETURN: Status | 167 | * RETURN: Status |
169 | * | 168 | * |
170 | * DESCRIPTION: "Store" an object to another object. This may include | 169 | * DESCRIPTION: "Store" an object to another object. This may include |
171 | * converting the source type to the target type (implicit | 170 | * converting the source type to the target type (implicit |
172 | * conversion), and a copy of the value of the source to | 171 | * conversion), and a copy of the value of the source to |
173 | * the target. | 172 | * the target. |
@@ -178,14 +177,14 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr, | |||
178 | * with the input value. | 177 | * with the input value. |
179 | * | 178 | * |
180 | * When storing into an object the data is converted to the | 179 | * When storing into an object the data is converted to the |
181 | * target object type then stored in the object. This means | 180 | * target object type then stored in the object. This means |
182 | * that the target object type (for an initialized target) will | 181 | * that the target object type (for an initialized target) will |
183 | * not be changed by a store operation. | 182 | * not be changed by a store operation. |
184 | * | 183 | * |
185 | * This module allows destination types of Number, String, | 184 | * This module allows destination types of Number, String, |
186 | * Buffer, and Package. | 185 | * Buffer, and Package. |
187 | * | 186 | * |
188 | * Assumes parameters are already validated. NOTE: source_desc | 187 | * Assumes parameters are already validated. NOTE: source_desc |
189 | * resolution (from a reference object) must be performed by | 188 | * resolution (from a reference object) must be performed by |
190 | * the caller if necessary. | 189 | * the caller if necessary. |
191 | * | 190 | * |
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c index 53c248473547..b5f339cb1305 100644 --- a/drivers/acpi/acpica/exstorob.c +++ b/drivers/acpi/acpica/exstorob.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exstorob - AML Interpreter object store support, store to object | 3 | * Module Name: exstorob - AML Interpreter object store support, store to object |
@@ -108,7 +107,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc, | |||
108 | #ifdef ACPI_OBSOLETE_BEHAVIOR | 107 | #ifdef ACPI_OBSOLETE_BEHAVIOR |
109 | /* | 108 | /* |
110 | * NOTE: ACPI versions up to 3.0 specified that the buffer must be | 109 | * NOTE: ACPI versions up to 3.0 specified that the buffer must be |
111 | * truncated if the string is smaller than the buffer. However, "other" | 110 | * truncated if the string is smaller than the buffer. However, "other" |
112 | * implementations of ACPI never did this and thus became the defacto | 111 | * implementations of ACPI never did this and thus became the defacto |
113 | * standard. ACPI 3.0A changes this behavior such that the buffer | 112 | * standard. ACPI 3.0A changes this behavior such that the buffer |
114 | * is no longer truncated. | 113 | * is no longer truncated. |
@@ -117,7 +116,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc, | |||
117 | /* | 116 | /* |
118 | * OBSOLETE BEHAVIOR: | 117 | * OBSOLETE BEHAVIOR: |
119 | * If the original source was a string, we must truncate the buffer, | 118 | * If the original source was a string, we must truncate the buffer, |
120 | * according to the ACPI spec. Integer-to-Buffer and Buffer-to-Buffer | 119 | * according to the ACPI spec. Integer-to-Buffer and Buffer-to-Buffer |
121 | * copy must not truncate the original buffer. | 120 | * copy must not truncate the original buffer. |
122 | */ | 121 | */ |
123 | if (original_src_type == ACPI_TYPE_STRING) { | 122 | if (original_src_type == ACPI_TYPE_STRING) { |
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c index b760641e2fc6..c8a0ad5c1f55 100644 --- a/drivers/acpi/acpica/exsystem.c +++ b/drivers/acpi/acpica/exsystem.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exsystem - Interface to OS services | 3 | * Module Name: exsystem - Interface to OS services |
@@ -59,7 +58,7 @@ ACPI_MODULE_NAME("exsystem") | |||
59 | * RETURN: Status | 58 | * RETURN: Status |
60 | * | 59 | * |
61 | * DESCRIPTION: Implements a semaphore wait with a check to see if the | 60 | * DESCRIPTION: Implements a semaphore wait with a check to see if the |
62 | * semaphore is available immediately. If it is not, the | 61 | * semaphore is available immediately. If it is not, the |
63 | * interpreter is released before waiting. | 62 | * interpreter is released before waiting. |
64 | * | 63 | * |
65 | ******************************************************************************/ | 64 | ******************************************************************************/ |
@@ -104,7 +103,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout) | |||
104 | * RETURN: Status | 103 | * RETURN: Status |
105 | * | 104 | * |
106 | * DESCRIPTION: Implements a mutex wait with a check to see if the | 105 | * DESCRIPTION: Implements a mutex wait with a check to see if the |
107 | * mutex is available immediately. If it is not, the | 106 | * mutex is available immediately. If it is not, the |
108 | * interpreter is released before waiting. | 107 | * interpreter is released before waiting. |
109 | * | 108 | * |
110 | ******************************************************************************/ | 109 | ******************************************************************************/ |
@@ -152,7 +151,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout) | |||
152 | * DESCRIPTION: Suspend running thread for specified amount of time. | 151 | * DESCRIPTION: Suspend running thread for specified amount of time. |
153 | * Note: ACPI specification requires that Stall() does not | 152 | * Note: ACPI specification requires that Stall() does not |
154 | * relinquish the processor, and delays longer than 100 usec | 153 | * relinquish the processor, and delays longer than 100 usec |
155 | * should use Sleep() instead. We allow stalls up to 255 usec | 154 | * should use Sleep() instead. We allow stalls up to 255 usec |
156 | * for compatibility with other interpreters and existing BIOSs. | 155 | * for compatibility with other interpreters and existing BIOSs. |
157 | * | 156 | * |
158 | ******************************************************************************/ | 157 | ******************************************************************************/ |
@@ -254,7 +253,7 @@ acpi_status acpi_ex_system_signal_event(union acpi_operand_object * obj_desc) | |||
254 | * RETURN: Status | 253 | * RETURN: Status |
255 | * | 254 | * |
256 | * DESCRIPTION: Provides an access point to perform synchronization operations | 255 | * DESCRIPTION: Provides an access point to perform synchronization operations |
257 | * within the AML. This operation is a request to wait for an | 256 | * within the AML. This operation is a request to wait for an |
258 | * event. | 257 | * event. |
259 | * | 258 | * |
260 | ******************************************************************************/ | 259 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c index d1ab7917eed7..264d22d8018c 100644 --- a/drivers/acpi/acpica/exutils.c +++ b/drivers/acpi/acpica/exutils.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exutils - interpreter/scanner utilities | 3 | * Module Name: exutils - interpreter/scanner utilities |
@@ -45,12 +44,12 @@ | |||
45 | /* | 44 | /* |
46 | * DEFINE_AML_GLOBALS is tested in amlcode.h | 45 | * DEFINE_AML_GLOBALS is tested in amlcode.h |
47 | * to determine whether certain global names should be "defined" or only | 46 | * to determine whether certain global names should be "defined" or only |
48 | * "declared" in the current compilation. This enhances maintainability | 47 | * "declared" in the current compilation. This enhances maintainability |
49 | * by enabling a single header file to embody all knowledge of the names | 48 | * by enabling a single header file to embody all knowledge of the names |
50 | * in question. | 49 | * in question. |
51 | * | 50 | * |
52 | * Exactly one module of any executable should #define DEFINE_GLOBALS | 51 | * Exactly one module of any executable should #define DEFINE_GLOBALS |
53 | * before #including the header files which use this convention. The | 52 | * before #including the header files which use this convention. The |
54 | * names in question will be defined and initialized in that module, | 53 | * names in question will be defined and initialized in that module, |
55 | * and declared as extern in all other modules which #include those | 54 | * and declared as extern in all other modules which #include those |
56 | * header files. | 55 | * header files. |
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c index a1e71d0ef57b..90a9aea1cee9 100644 --- a/drivers/acpi/acpica/hwacpi.c +++ b/drivers/acpi/acpica/hwacpi.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface | 3 | * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface |
@@ -136,7 +135,7 @@ acpi_status acpi_hw_set_mode(u32 mode) | |||
136 | * | 135 | * |
137 | * RETURN: SYS_MODE_ACPI or SYS_MODE_LEGACY | 136 | * RETURN: SYS_MODE_ACPI or SYS_MODE_LEGACY |
138 | * | 137 | * |
139 | * DESCRIPTION: Return current operating state of system. Determined by | 138 | * DESCRIPTION: Return current operating state of system. Determined by |
140 | * querying the SCI_EN bit. | 139 | * querying the SCI_EN bit. |
141 | * | 140 | * |
142 | ******************************************************************************/ | 141 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index db4076580e2b..64560045052d 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: hwgpe - Low level GPE enable/disable/clear functions | 3 | * Module Name: hwgpe - Low level GPE enable/disable/clear functions |
@@ -339,7 +338,8 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
339 | 338 | ||
340 | acpi_status | 339 | acpi_status |
341 | acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | 340 | acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
342 | struct acpi_gpe_block_info *gpe_block, void *context) | 341 | struct acpi_gpe_block_info * gpe_block, |
342 | void *context) | ||
343 | { | 343 | { |
344 | u32 i; | 344 | u32 i; |
345 | acpi_status status; | 345 | acpi_status status; |
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c index 1455ddcdc32c..65bc3453a29c 100644 --- a/drivers/acpi/acpica/hwpci.c +++ b/drivers/acpi/acpica/hwpci.c | |||
@@ -259,7 +259,7 @@ acpi_hw_process_pci_list(struct acpi_pci_id *pci_id, | |||
259 | status = acpi_hw_get_pci_device_info(pci_id, info->device, | 259 | status = acpi_hw_get_pci_device_info(pci_id, info->device, |
260 | &bus_number, &is_bridge); | 260 | &bus_number, &is_bridge); |
261 | if (ACPI_FAILURE(status)) { | 261 | if (ACPI_FAILURE(status)) { |
262 | return_ACPI_STATUS(status); | 262 | return (status); |
263 | } | 263 | } |
264 | 264 | ||
265 | info = info->next; | 265 | info = info->next; |
@@ -271,7 +271,7 @@ acpi_hw_process_pci_list(struct acpi_pci_id *pci_id, | |||
271 | pci_id->segment, pci_id->bus, pci_id->device, | 271 | pci_id->segment, pci_id->bus, pci_id->device, |
272 | pci_id->function, status, bus_number, is_bridge)); | 272 | pci_id->function, status, bus_number, is_bridge)); |
273 | 273 | ||
274 | return_ACPI_STATUS(AE_OK); | 274 | return (AE_OK); |
275 | } | 275 | } |
276 | 276 | ||
277 | /******************************************************************************* | 277 | /******************************************************************************* |
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index 4af6d20ef077..f4e57503576b 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /******************************************************************************* | 1 | /******************************************************************************* |
3 | * | 2 | * |
4 | * Module Name: hwregs - Read/write access functions for the various ACPI | 3 | * Module Name: hwregs - Read/write access functions for the various ACPI |
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c index b6411f16832f..bfdce22f3798 100644 --- a/drivers/acpi/acpica/hwtimer.c +++ b/drivers/acpi/acpica/hwtimer.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Name: hwtimer.c - ACPI Power Management Timer Interface | 3 | * Name: hwtimer.c - ACPI Power Management Timer Interface |
@@ -101,8 +100,7 @@ acpi_status acpi_get_timer(u32 * ticks) | |||
101 | return_ACPI_STATUS(AE_BAD_PARAMETER); | 100 | return_ACPI_STATUS(AE_BAD_PARAMETER); |
102 | } | 101 | } |
103 | 102 | ||
104 | status = | 103 | status = acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block); |
105 | acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block); | ||
106 | 104 | ||
107 | return_ACPI_STATUS(status); | 105 | return_ACPI_STATUS(status); |
108 | } | 106 | } |
@@ -129,7 +127,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_timer) | |||
129 | * a versatile and accurate timer. | 127 | * a versatile and accurate timer. |
130 | * | 128 | * |
131 | * Note that this function accommodates only a single timer | 129 | * Note that this function accommodates only a single timer |
132 | * rollover. Thus for 24-bit timers, this function should only | 130 | * rollover. Thus for 24-bit timers, this function should only |
133 | * be used for calculating durations less than ~4.6 seconds | 131 | * be used for calculating durations less than ~4.6 seconds |
134 | * (~20 minutes for 32-bit timers) -- calculations below: | 132 | * (~20 minutes for 32-bit timers) -- calculations below: |
135 | * | 133 | * |
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c index c99d546b217f..b6aae58299dc 100644 --- a/drivers/acpi/acpica/hwvalid.c +++ b/drivers/acpi/acpica/hwvalid.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: hwvalid - I/O request validation | 3 | * Module Name: hwvalid - I/O request validation |
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index 7bfd649d1996..05a154c3c9ac 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: hwxface - Public ACPICA hardware interfaces | 3 | * Module Name: hwxface - Public ACPICA hardware interfaces |
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index 0ff1ecea5c3a..ae443fe2ebf6 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c | |||
@@ -49,8 +49,7 @@ | |||
49 | ACPI_MODULE_NAME("hwxfsleep") | 49 | ACPI_MODULE_NAME("hwxfsleep") |
50 | 50 | ||
51 | /* Local prototypes */ | 51 | /* Local prototypes */ |
52 | static acpi_status | 52 | static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id); |
53 | acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id); | ||
54 | 53 | ||
55 | /* | 54 | /* |
56 | * Dispatch table used to efficiently branch to the various sleep | 55 | * Dispatch table used to efficiently branch to the various sleep |
@@ -234,8 +233,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios) | |||
234 | * function. | 233 | * function. |
235 | * | 234 | * |
236 | ******************************************************************************/ | 235 | ******************************************************************************/ |
237 | static acpi_status | 236 | static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id) |
238 | acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id) | ||
239 | { | 237 | { |
240 | acpi_status status; | 238 | acpi_status status; |
241 | struct acpi_sleep_functions *sleep_functions = | 239 | struct acpi_sleep_functions *sleep_functions = |
@@ -369,8 +367,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state) | |||
369 | return_ACPI_STATUS(AE_AML_OPERAND_VALUE); | 367 | return_ACPI_STATUS(AE_AML_OPERAND_VALUE); |
370 | } | 368 | } |
371 | 369 | ||
372 | status = | 370 | status = acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID); |
373 | acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID); | ||
374 | return_ACPI_STATUS(status); | 371 | return_ACPI_STATUS(status); |
375 | } | 372 | } |
376 | 373 | ||
@@ -396,8 +393,7 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state) | |||
396 | ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep); | 393 | ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep); |
397 | 394 | ||
398 | status = | 395 | status = |
399 | acpi_hw_sleep_dispatch(sleep_state, | 396 | acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_PREP_FUNCTION_ID); |
400 | ACPI_WAKE_PREP_FUNCTION_ID); | ||
401 | return_ACPI_STATUS(status); | 397 | return_ACPI_STATUS(status); |
402 | } | 398 | } |
403 | 399 | ||
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c index 23db53ce2293..d70eaf39dfdf 100644 --- a/drivers/acpi/acpica/nsaccess.c +++ b/drivers/acpi/acpica/nsaccess.c | |||
@@ -110,11 +110,11 @@ acpi_status acpi_ns_root_initialize(void) | |||
110 | status = acpi_ns_lookup(NULL, init_val->name, init_val->type, | 110 | status = acpi_ns_lookup(NULL, init_val->name, init_val->type, |
111 | ACPI_IMODE_LOAD_PASS2, | 111 | ACPI_IMODE_LOAD_PASS2, |
112 | ACPI_NS_NO_UPSEARCH, NULL, &new_node); | 112 | ACPI_NS_NO_UPSEARCH, NULL, &new_node); |
113 | 113 | if (ACPI_FAILURE(status)) { | |
114 | if (ACPI_FAILURE(status) || (!new_node)) { /* Must be on same line for code converter */ | ||
115 | ACPI_EXCEPTION((AE_INFO, status, | 114 | ACPI_EXCEPTION((AE_INFO, status, |
116 | "Could not create predefined name %s", | 115 | "Could not create predefined name %s", |
117 | init_val->name)); | 116 | init_val->name)); |
117 | continue; | ||
118 | } | 118 | } |
119 | 119 | ||
120 | /* | 120 | /* |
@@ -179,8 +179,7 @@ acpi_status acpi_ns_root_initialize(void) | |||
179 | 179 | ||
180 | /* Build an object around the static string */ | 180 | /* Build an object around the static string */ |
181 | 181 | ||
182 | obj_desc->string.length = | 182 | obj_desc->string.length = (u32)ACPI_STRLEN(val); |
183 | (u32) ACPI_STRLEN(val); | ||
184 | obj_desc->string.pointer = val; | 183 | obj_desc->string.pointer = val; |
185 | obj_desc->common.flags |= AOPOBJ_STATIC_POINTER; | 184 | obj_desc->common.flags |= AOPOBJ_STATIC_POINTER; |
186 | break; | 185 | break; |
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c index ac389e5bb594..15143c44f5e5 100644 --- a/drivers/acpi/acpica/nsalloc.c +++ b/drivers/acpi/acpica/nsalloc.c | |||
@@ -332,7 +332,7 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node) | |||
332 | * | 332 | * |
333 | * RETURN: None. | 333 | * RETURN: None. |
334 | * | 334 | * |
335 | * DESCRIPTION: Delete a subtree of the namespace. This includes all objects | 335 | * DESCRIPTION: Delete a subtree of the namespace. This includes all objects |
336 | * stored within the subtree. | 336 | * stored within the subtree. |
337 | * | 337 | * |
338 | ******************************************************************************/ | 338 | ******************************************************************************/ |
@@ -418,7 +418,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node) | |||
418 | * RETURN: Status | 418 | * RETURN: Status |
419 | * | 419 | * |
420 | * DESCRIPTION: Delete entries within the namespace that are owned by a | 420 | * DESCRIPTION: Delete entries within the namespace that are owned by a |
421 | * specific ID. Used to delete entire ACPI tables. All | 421 | * specific ID. Used to delete entire ACPI tables. All |
422 | * reference counts are updated. | 422 | * reference counts are updated. |
423 | * | 423 | * |
424 | * MUTEX: Locks namespace during deletion walk. | 424 | * MUTEX: Locks namespace during deletion walk. |
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c index 2526aaf945ee..924b3c71473a 100644 --- a/drivers/acpi/acpica/nsdump.c +++ b/drivers/acpi/acpica/nsdump.c | |||
@@ -209,14 +209,6 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, | |||
209 | "Invalid ACPI Object Type 0x%08X", type)); | 209 | "Invalid ACPI Object Type 0x%08X", type)); |
210 | } | 210 | } |
211 | 211 | ||
212 | if (!acpi_ut_valid_acpi_name(this_node->name.integer)) { | ||
213 | this_node->name.integer = | ||
214 | acpi_ut_repair_name(this_node->name.ascii); | ||
215 | |||
216 | ACPI_WARNING((AE_INFO, "Invalid ACPI Name %08X", | ||
217 | this_node->name.integer)); | ||
218 | } | ||
219 | |||
220 | acpi_os_printf("%4.4s", acpi_ut_get_node_name(this_node)); | 212 | acpi_os_printf("%4.4s", acpi_ut_get_node_name(this_node)); |
221 | } | 213 | } |
222 | 214 | ||
@@ -700,7 +692,7 @@ void acpi_ns_dump_entry(acpi_handle handle, u32 debug_level) | |||
700 | * | 692 | * |
701 | * PARAMETERS: search_base - Root of subtree to be dumped, or | 693 | * PARAMETERS: search_base - Root of subtree to be dumped, or |
702 | * NS_ALL to dump the entire namespace | 694 | * NS_ALL to dump the entire namespace |
703 | * max_depth - Maximum depth of dump. Use INT_MAX | 695 | * max_depth - Maximum depth of dump. Use INT_MAX |
704 | * for an effectively unlimited depth. | 696 | * for an effectively unlimited depth. |
705 | * | 697 | * |
706 | * RETURN: None | 698 | * RETURN: None |
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c index 95ffe8dfa1f1..4328e2adfeb9 100644 --- a/drivers/acpi/acpica/nsinit.c +++ b/drivers/acpi/acpica/nsinit.c | |||
@@ -96,8 +96,8 @@ acpi_status acpi_ns_initialize_objects(void) | |||
96 | /* Walk entire namespace from the supplied root */ | 96 | /* Walk entire namespace from the supplied root */ |
97 | 97 | ||
98 | status = acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, | 98 | status = acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, |
99 | ACPI_UINT32_MAX, acpi_ns_init_one_object, NULL, | 99 | ACPI_UINT32_MAX, acpi_ns_init_one_object, |
100 | &info, NULL); | 100 | NULL, &info, NULL); |
101 | if (ACPI_FAILURE(status)) { | 101 | if (ACPI_FAILURE(status)) { |
102 | ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace")); | 102 | ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace")); |
103 | } | 103 | } |
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c index 76935ff29289..911f99127b99 100644 --- a/drivers/acpi/acpica/nsload.c +++ b/drivers/acpi/acpica/nsload.c | |||
@@ -80,8 +80,8 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node) | |||
80 | 80 | ||
81 | /* | 81 | /* |
82 | * Parse the table and load the namespace with all named | 82 | * Parse the table and load the namespace with all named |
83 | * objects found within. Control methods are NOT parsed | 83 | * objects found within. Control methods are NOT parsed |
84 | * at this time. In fact, the control methods cannot be | 84 | * at this time. In fact, the control methods cannot be |
85 | * parsed until the entire namespace is loaded, because | 85 | * parsed until the entire namespace is loaded, because |
86 | * if a control method makes a forward reference (call) | 86 | * if a control method makes a forward reference (call) |
87 | * to another control method, we can't continue parsing | 87 | * to another control method, we can't continue parsing |
@@ -122,7 +122,7 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node) | |||
122 | } | 122 | } |
123 | 123 | ||
124 | /* | 124 | /* |
125 | * Now we can parse the control methods. We always parse | 125 | * Now we can parse the control methods. We always parse |
126 | * them here for a sanity check, and if configured for | 126 | * them here for a sanity check, and if configured for |
127 | * just-in-time parsing, we delete the control method | 127 | * just-in-time parsing, we delete the control method |
128 | * parse trees. | 128 | * parse trees. |
@@ -166,7 +166,7 @@ acpi_status acpi_ns_load_namespace(void) | |||
166 | } | 166 | } |
167 | 167 | ||
168 | /* | 168 | /* |
169 | * Load the namespace. The DSDT is required, | 169 | * Load the namespace. The DSDT is required, |
170 | * but the SSDT and PSDT tables are optional. | 170 | * but the SSDT and PSDT tables are optional. |
171 | */ | 171 | */ |
172 | status = acpi_ns_load_table_by_type(ACPI_TABLE_ID_DSDT); | 172 | status = acpi_ns_load_table_by_type(ACPI_TABLE_ID_DSDT); |
@@ -283,7 +283,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle) | |||
283 | * RETURN: Status | 283 | * RETURN: Status |
284 | * | 284 | * |
285 | * DESCRIPTION: Shrinks the namespace, typically in response to an undocking | 285 | * DESCRIPTION: Shrinks the namespace, typically in response to an undocking |
286 | * event. Deletes an entire subtree starting from (and | 286 | * event. Deletes an entire subtree starting from (and |
287 | * including) the given handle. | 287 | * including) the given handle. |
288 | * | 288 | * |
289 | ******************************************************************************/ | 289 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c index 96e0eb609bb4..55a175eadcc3 100644 --- a/drivers/acpi/acpica/nsnames.c +++ b/drivers/acpi/acpica/nsnames.c | |||
@@ -195,7 +195,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node) | |||
195 | ACPI_ERROR((AE_INFO, | 195 | ACPI_ERROR((AE_INFO, |
196 | "Invalid Namespace Node (%p) while traversing namespace", | 196 | "Invalid Namespace Node (%p) while traversing namespace", |
197 | next_node)); | 197 | next_node)); |
198 | return 0; | 198 | return (0); |
199 | } | 199 | } |
200 | size += ACPI_PATH_SEGMENT_LENGTH; | 200 | size += ACPI_PATH_SEGMENT_LENGTH; |
201 | next_node = next_node->parent; | 201 | next_node = next_node->parent; |
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c index d6c9a3cc6716..e69f7fa2579d 100644 --- a/drivers/acpi/acpica/nsobject.c +++ b/drivers/acpi/acpica/nsobject.c | |||
@@ -61,7 +61,7 @@ ACPI_MODULE_NAME("nsobject") | |||
61 | * RETURN: Status | 61 | * RETURN: Status |
62 | * | 62 | * |
63 | * DESCRIPTION: Record the given object as the value associated with the | 63 | * DESCRIPTION: Record the given object as the value associated with the |
64 | * name whose acpi_handle is passed. If Object is NULL | 64 | * name whose acpi_handle is passed. If Object is NULL |
65 | * and Type is ACPI_TYPE_ANY, set the name as having no value. | 65 | * and Type is ACPI_TYPE_ANY, set the name as having no value. |
66 | * Note: Future may require that the Node->Flags field be passed | 66 | * Note: Future may require that the Node->Flags field be passed |
67 | * as a parameter. | 67 | * as a parameter. |
@@ -133,7 +133,7 @@ acpi_ns_attach_object(struct acpi_namespace_node *node, | |||
133 | ((struct acpi_namespace_node *)object)->object) { | 133 | ((struct acpi_namespace_node *)object)->object) { |
134 | /* | 134 | /* |
135 | * Value passed is a name handle and that name has a | 135 | * Value passed is a name handle and that name has a |
136 | * non-null value. Use that name's value and type. | 136 | * non-null value. Use that name's value and type. |
137 | */ | 137 | */ |
138 | obj_desc = ((struct acpi_namespace_node *)object)->object; | 138 | obj_desc = ((struct acpi_namespace_node *)object)->object; |
139 | object_type = ((struct acpi_namespace_node *)object)->type; | 139 | object_type = ((struct acpi_namespace_node *)object)->type; |
@@ -321,7 +321,7 @@ union acpi_operand_object *acpi_ns_get_secondary_object(union | |||
321 | * | 321 | * |
322 | * RETURN: Status | 322 | * RETURN: Status |
323 | * | 323 | * |
324 | * DESCRIPTION: Low-level attach data. Create and attach a Data object. | 324 | * DESCRIPTION: Low-level attach data. Create and attach a Data object. |
325 | * | 325 | * |
326 | ******************************************************************************/ | 326 | ******************************************************************************/ |
327 | 327 | ||
@@ -377,7 +377,7 @@ acpi_ns_attach_data(struct acpi_namespace_node *node, | |||
377 | * | 377 | * |
378 | * RETURN: Status | 378 | * RETURN: Status |
379 | * | 379 | * |
380 | * DESCRIPTION: Low-level detach data. Delete the data node, but the caller | 380 | * DESCRIPTION: Low-level detach data. Delete the data node, but the caller |
381 | * is responsible for the actual data. | 381 | * is responsible for the actual data. |
382 | * | 382 | * |
383 | ******************************************************************************/ | 383 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c index ec7ba2d3463c..233f756d5cfa 100644 --- a/drivers/acpi/acpica/nsparse.c +++ b/drivers/acpi/acpica/nsparse.c | |||
@@ -168,11 +168,11 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node) | |||
168 | /* | 168 | /* |
169 | * AML Parse, pass 1 | 169 | * AML Parse, pass 1 |
170 | * | 170 | * |
171 | * In this pass, we load most of the namespace. Control methods | 171 | * In this pass, we load most of the namespace. Control methods |
172 | * are not parsed until later. A parse tree is not created. Instead, | 172 | * are not parsed until later. A parse tree is not created. Instead, |
173 | * each Parser Op subtree is deleted when it is finished. This saves | 173 | * each Parser Op subtree is deleted when it is finished. This saves |
174 | * a great deal of memory, and allows a small cache of parse objects | 174 | * a great deal of memory, and allows a small cache of parse objects |
175 | * to service the entire parse. The second pass of the parse then | 175 | * to service the entire parse. The second pass of the parse then |
176 | * performs another complete parse of the AML. | 176 | * performs another complete parse of the AML. |
177 | */ | 177 | */ |
178 | ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n")); | 178 | ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n")); |
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c index 456cc859f869..1d2d8ffc1bc5 100644 --- a/drivers/acpi/acpica/nssearch.c +++ b/drivers/acpi/acpica/nssearch.c | |||
@@ -314,22 +314,7 @@ acpi_ns_search_and_enter(u32 target_name, | |||
314 | * this problem, and we want to be able to enable ACPI support for them, | 314 | * this problem, and we want to be able to enable ACPI support for them, |
315 | * even though there are a few bad names. | 315 | * even though there are a few bad names. |
316 | */ | 316 | */ |
317 | if (!acpi_ut_valid_acpi_name(target_name)) { | 317 | acpi_ut_repair_name(ACPI_CAST_PTR(char, &target_name)); |
318 | target_name = | ||
319 | acpi_ut_repair_name(ACPI_CAST_PTR(char, &target_name)); | ||
320 | |||
321 | /* Report warning only if in strict mode or debug mode */ | ||
322 | |||
323 | if (!acpi_gbl_enable_interpreter_slack) { | ||
324 | ACPI_WARNING((AE_INFO, | ||
325 | "Found bad character(s) in name, repaired: [%4.4s]\n", | ||
326 | ACPI_CAST_PTR(char, &target_name))); | ||
327 | } else { | ||
328 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
329 | "Found bad character(s) in name, repaired: [%4.4s]\n", | ||
330 | ACPI_CAST_PTR(char, &target_name))); | ||
331 | } | ||
332 | } | ||
333 | 318 | ||
334 | /* Try to find the name in the namespace level specified by the caller */ | 319 | /* Try to find the name in the namespace level specified by the caller */ |
335 | 320 | ||
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c index ef753a41e087..b5b4cb72a8a8 100644 --- a/drivers/acpi/acpica/nsutils.c +++ b/drivers/acpi/acpica/nsutils.c | |||
@@ -530,7 +530,7 @@ acpi_ns_externalize_name(u32 internal_name_length, | |||
530 | ((num_segments > 0) ? (num_segments - 1) : 0) + 1; | 530 | ((num_segments > 0) ? (num_segments - 1) : 0) + 1; |
531 | 531 | ||
532 | /* | 532 | /* |
533 | * Check to see if we're still in bounds. If not, there's a problem | 533 | * Check to see if we're still in bounds. If not, there's a problem |
534 | * with internal_name (invalid format). | 534 | * with internal_name (invalid format). |
535 | */ | 535 | */ |
536 | if (required_length > internal_name_length) { | 536 | if (required_length > internal_name_length) { |
@@ -557,10 +557,14 @@ acpi_ns_externalize_name(u32 internal_name_length, | |||
557 | (*converted_name)[j++] = '.'; | 557 | (*converted_name)[j++] = '.'; |
558 | } | 558 | } |
559 | 559 | ||
560 | (*converted_name)[j++] = internal_name[names_index++]; | 560 | /* Copy and validate the 4-char name segment */ |
561 | (*converted_name)[j++] = internal_name[names_index++]; | 561 | |
562 | (*converted_name)[j++] = internal_name[names_index++]; | 562 | ACPI_MOVE_NAME(&(*converted_name)[j], |
563 | (*converted_name)[j++] = internal_name[names_index++]; | 563 | &internal_name[names_index]); |
564 | acpi_ut_repair_name(&(*converted_name)[j]); | ||
565 | |||
566 | j += ACPI_NAME_SIZE; | ||
567 | names_index += ACPI_NAME_SIZE; | ||
564 | } | 568 | } |
565 | } | 569 | } |
566 | 570 | ||
@@ -681,7 +685,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type) | |||
681 | * \ (backslash) and ^ (carat) prefixes, and the | 685 | * \ (backslash) and ^ (carat) prefixes, and the |
682 | * . (period) to separate segments are supported. | 686 | * . (period) to separate segments are supported. |
683 | * prefix_node - Root of subtree to be searched, or NS_ALL for the | 687 | * prefix_node - Root of subtree to be searched, or NS_ALL for the |
684 | * root of the name space. If Name is fully | 688 | * root of the name space. If Name is fully |
685 | * qualified (first s8 is '\'), the passed value | 689 | * qualified (first s8 is '\'), the passed value |
686 | * of Scope will not be accessed. | 690 | * of Scope will not be accessed. |
687 | * flags - Used to indicate whether to perform upsearch or | 691 | * flags - Used to indicate whether to perform upsearch or |
@@ -689,7 +693,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type) | |||
689 | * return_node - Where the Node is returned | 693 | * return_node - Where the Node is returned |
690 | * | 694 | * |
691 | * DESCRIPTION: Look up a name relative to a given scope and return the | 695 | * DESCRIPTION: Look up a name relative to a given scope and return the |
692 | * corresponding Node. NOTE: Scope can be null. | 696 | * corresponding Node. NOTE: Scope can be null. |
693 | * | 697 | * |
694 | * MUTEX: Locks namespace | 698 | * MUTEX: Locks namespace |
695 | * | 699 | * |
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c index 730bccc5e7f7..0483877f26b8 100644 --- a/drivers/acpi/acpica/nswalk.c +++ b/drivers/acpi/acpica/nswalk.c | |||
@@ -60,8 +60,8 @@ ACPI_MODULE_NAME("nswalk") | |||
60 | * RETURN: struct acpi_namespace_node - Pointer to the NEXT child or NULL if | 60 | * RETURN: struct acpi_namespace_node - Pointer to the NEXT child or NULL if |
61 | * none is found. | 61 | * none is found. |
62 | * | 62 | * |
63 | * DESCRIPTION: Return the next peer node within the namespace. If Handle | 63 | * DESCRIPTION: Return the next peer node within the namespace. If Handle |
64 | * is valid, Scope is ignored. Otherwise, the first node | 64 | * is valid, Scope is ignored. Otherwise, the first node |
65 | * within Scope is returned. | 65 | * within Scope is returned. |
66 | * | 66 | * |
67 | ******************************************************************************/ | 67 | ******************************************************************************/ |
@@ -97,8 +97,8 @@ struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node | |||
97 | * RETURN: struct acpi_namespace_node - Pointer to the NEXT child or NULL if | 97 | * RETURN: struct acpi_namespace_node - Pointer to the NEXT child or NULL if |
98 | * none is found. | 98 | * none is found. |
99 | * | 99 | * |
100 | * DESCRIPTION: Return the next peer node within the namespace. If Handle | 100 | * DESCRIPTION: Return the next peer node within the namespace. If Handle |
101 | * is valid, Scope is ignored. Otherwise, the first node | 101 | * is valid, Scope is ignored. Otherwise, the first node |
102 | * within Scope is returned. | 102 | * within Scope is returned. |
103 | * | 103 | * |
104 | ******************************************************************************/ | 104 | ******************************************************************************/ |
@@ -305,7 +305,7 @@ acpi_ns_walk_namespace(acpi_object_type type, | |||
305 | 305 | ||
306 | /* | 306 | /* |
307 | * Depth first search: Attempt to go down another level in the | 307 | * Depth first search: Attempt to go down another level in the |
308 | * namespace if we are allowed to. Don't go any further if we have | 308 | * namespace if we are allowed to. Don't go any further if we have |
309 | * reached the caller specified maximum depth or if the user | 309 | * reached the caller specified maximum depth or if the user |
310 | * function has specified that the maximum depth has been reached. | 310 | * function has specified that the maximum depth has been reached. |
311 | */ | 311 | */ |
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c index 9692e6702333..d6a9f77972b6 100644 --- a/drivers/acpi/acpica/nsxfeval.c +++ b/drivers/acpi/acpica/nsxfeval.c | |||
@@ -61,16 +61,16 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info); | |||
61 | * PARAMETERS: handle - Object handle (optional) | 61 | * PARAMETERS: handle - Object handle (optional) |
62 | * pathname - Object pathname (optional) | 62 | * pathname - Object pathname (optional) |
63 | * external_params - List of parameters to pass to method, | 63 | * external_params - List of parameters to pass to method, |
64 | * terminated by NULL. May be NULL | 64 | * terminated by NULL. May be NULL |
65 | * if no parameters are being passed. | 65 | * if no parameters are being passed. |
66 | * return_buffer - Where to put method's return value (if | 66 | * return_buffer - Where to put method's return value (if |
67 | * any). If NULL, no value is returned. | 67 | * any). If NULL, no value is returned. |
68 | * return_type - Expected type of return object | 68 | * return_type - Expected type of return object |
69 | * | 69 | * |
70 | * RETURN: Status | 70 | * RETURN: Status |
71 | * | 71 | * |
72 | * DESCRIPTION: Find and evaluate the given object, passing the given | 72 | * DESCRIPTION: Find and evaluate the given object, passing the given |
73 | * parameters if necessary. One of "Handle" or "Pathname" must | 73 | * parameters if necessary. One of "Handle" or "Pathname" must |
74 | * be valid (non-null) | 74 | * be valid (non-null) |
75 | * | 75 | * |
76 | ******************************************************************************/ | 76 | ******************************************************************************/ |
@@ -155,15 +155,15 @@ ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed) | |||
155 | * PARAMETERS: handle - Object handle (optional) | 155 | * PARAMETERS: handle - Object handle (optional) |
156 | * pathname - Object pathname (optional) | 156 | * pathname - Object pathname (optional) |
157 | * external_params - List of parameters to pass to method, | 157 | * external_params - List of parameters to pass to method, |
158 | * terminated by NULL. May be NULL | 158 | * terminated by NULL. May be NULL |
159 | * if no parameters are being passed. | 159 | * if no parameters are being passed. |
160 | * return_buffer - Where to put method's return value (if | 160 | * return_buffer - Where to put method's return value (if |
161 | * any). If NULL, no value is returned. | 161 | * any). If NULL, no value is returned. |
162 | * | 162 | * |
163 | * RETURN: Status | 163 | * RETURN: Status |
164 | * | 164 | * |
165 | * DESCRIPTION: Find and evaluate the given object, passing the given | 165 | * DESCRIPTION: Find and evaluate the given object, passing the given |
166 | * parameters if necessary. One of "Handle" or "Pathname" must | 166 | * parameters if necessary. One of "Handle" or "Pathname" must |
167 | * be valid (non-null) | 167 | * be valid (non-null) |
168 | * | 168 | * |
169 | ******************************************************************************/ | 169 | ******************************************************************************/ |
@@ -542,15 +542,15 @@ acpi_ns_get_device_callback(acpi_handle obj_handle, | |||
542 | acpi_status status; | 542 | acpi_status status; |
543 | struct acpi_namespace_node *node; | 543 | struct acpi_namespace_node *node; |
544 | u32 flags; | 544 | u32 flags; |
545 | struct acpica_device_id *hid; | 545 | struct acpi_pnp_device_id *hid; |
546 | struct acpica_device_id_list *cid; | 546 | struct acpi_pnp_device_id_list *cid; |
547 | u32 i; | 547 | u32 i; |
548 | u8 found; | 548 | u8 found; |
549 | int no_match; | 549 | int no_match; |
550 | 550 | ||
551 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | 551 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); |
552 | if (ACPI_FAILURE(status)) { | 552 | if (ACPI_FAILURE(status)) { |
553 | return (status); | 553 | return_ACPI_STATUS(status); |
554 | } | 554 | } |
555 | 555 | ||
556 | node = acpi_ns_validate_handle(obj_handle); | 556 | node = acpi_ns_validate_handle(obj_handle); |
@@ -656,7 +656,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle, | |||
656 | * DESCRIPTION: Performs a modified depth-first walk of the namespace tree, | 656 | * DESCRIPTION: Performs a modified depth-first walk of the namespace tree, |
657 | * starting (and ending) at the object specified by start_handle. | 657 | * starting (and ending) at the object specified by start_handle. |
658 | * The user_function is called whenever an object of type | 658 | * The user_function is called whenever an object of type |
659 | * Device is found. If the user function returns | 659 | * Device is found. If the user function returns |
660 | * a non-zero value, the search is terminated immediately and this | 660 | * a non-zero value, the search is terminated immediately and this |
661 | * value is returned to the caller. | 661 | * value is returned to the caller. |
662 | * | 662 | * |
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c index 08e9610b34ca..811c6f13f476 100644 --- a/drivers/acpi/acpica/nsxfname.c +++ b/drivers/acpi/acpica/nsxfname.c | |||
@@ -53,8 +53,8 @@ | |||
53 | ACPI_MODULE_NAME("nsxfname") | 53 | ACPI_MODULE_NAME("nsxfname") |
54 | 54 | ||
55 | /* Local prototypes */ | 55 | /* Local prototypes */ |
56 | static char *acpi_ns_copy_device_id(struct acpica_device_id *dest, | 56 | static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest, |
57 | struct acpica_device_id *source, | 57 | struct acpi_pnp_device_id *source, |
58 | char *string_area); | 58 | char *string_area); |
59 | 59 | ||
60 | /****************************************************************************** | 60 | /****************************************************************************** |
@@ -69,8 +69,8 @@ static char *acpi_ns_copy_device_id(struct acpica_device_id *dest, | |||
69 | * RETURN: Status | 69 | * RETURN: Status |
70 | * | 70 | * |
71 | * DESCRIPTION: This routine will search for a caller specified name in the | 71 | * DESCRIPTION: This routine will search for a caller specified name in the |
72 | * name space. The caller can restrict the search region by | 72 | * name space. The caller can restrict the search region by |
73 | * specifying a non NULL parent. The parent value is itself a | 73 | * specifying a non NULL parent. The parent value is itself a |
74 | * namespace handle. | 74 | * namespace handle. |
75 | * | 75 | * |
76 | ******************************************************************************/ | 76 | ******************************************************************************/ |
@@ -149,7 +149,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_handle) | |||
149 | * RETURN: Pointer to a string containing the fully qualified Name. | 149 | * RETURN: Pointer to a string containing the fully qualified Name. |
150 | * | 150 | * |
151 | * DESCRIPTION: This routine returns the fully qualified name associated with | 151 | * DESCRIPTION: This routine returns the fully qualified name associated with |
152 | * the Handle parameter. This and the acpi_pathname_to_handle are | 152 | * the Handle parameter. This and the acpi_pathname_to_handle are |
153 | * complementary functions. | 153 | * complementary functions. |
154 | * | 154 | * |
155 | ******************************************************************************/ | 155 | ******************************************************************************/ |
@@ -202,8 +202,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer) | |||
202 | 202 | ||
203 | /* Just copy the ACPI name from the Node and zero terminate it */ | 203 | /* Just copy the ACPI name from the Node and zero terminate it */ |
204 | 204 | ||
205 | ACPI_STRNCPY(buffer->pointer, acpi_ut_get_node_name(node), | 205 | ACPI_MOVE_NAME(buffer->pointer, acpi_ut_get_node_name(node)); |
206 | ACPI_NAME_SIZE); | ||
207 | ((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0; | 206 | ((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0; |
208 | status = AE_OK; | 207 | status = AE_OK; |
209 | 208 | ||
@@ -219,20 +218,21 @@ ACPI_EXPORT_SYMBOL(acpi_get_name) | |||
219 | * | 218 | * |
220 | * FUNCTION: acpi_ns_copy_device_id | 219 | * FUNCTION: acpi_ns_copy_device_id |
221 | * | 220 | * |
222 | * PARAMETERS: dest - Pointer to the destination DEVICE_ID | 221 | * PARAMETERS: dest - Pointer to the destination PNP_DEVICE_ID |
223 | * source - Pointer to the source DEVICE_ID | 222 | * source - Pointer to the source PNP_DEVICE_ID |
224 | * string_area - Pointer to where to copy the dest string | 223 | * string_area - Pointer to where to copy the dest string |
225 | * | 224 | * |
226 | * RETURN: Pointer to the next string area | 225 | * RETURN: Pointer to the next string area |
227 | * | 226 | * |
228 | * DESCRIPTION: Copy a single DEVICE_ID, including the string data. | 227 | * DESCRIPTION: Copy a single PNP_DEVICE_ID, including the string data. |
229 | * | 228 | * |
230 | ******************************************************************************/ | 229 | ******************************************************************************/ |
231 | static char *acpi_ns_copy_device_id(struct acpica_device_id *dest, | 230 | static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest, |
232 | struct acpica_device_id *source, | 231 | struct acpi_pnp_device_id *source, |
233 | char *string_area) | 232 | char *string_area) |
234 | { | 233 | { |
235 | /* Create the destination DEVICE_ID */ | 234 | |
235 | /* Create the destination PNP_DEVICE_ID */ | ||
236 | 236 | ||
237 | dest->string = string_area; | 237 | dest->string = string_area; |
238 | dest->length = source->length; | 238 | dest->length = source->length; |
@@ -256,8 +256,8 @@ static char *acpi_ns_copy_device_id(struct acpica_device_id *dest, | |||
256 | * namespace node and possibly by running several standard | 256 | * namespace node and possibly by running several standard |
257 | * control methods (Such as in the case of a device.) | 257 | * control methods (Such as in the case of a device.) |
258 | * | 258 | * |
259 | * For Device and Processor objects, run the Device _HID, _UID, _CID, _STA, | 259 | * For Device and Processor objects, run the Device _HID, _UID, _CID, _SUB, |
260 | * _ADR, _sx_w, and _sx_d methods. | 260 | * _STA, _ADR, _sx_w, and _sx_d methods. |
261 | * | 261 | * |
262 | * Note: Allocates the return buffer, must be freed by the caller. | 262 | * Note: Allocates the return buffer, must be freed by the caller. |
263 | * | 263 | * |
@@ -269,9 +269,10 @@ acpi_get_object_info(acpi_handle handle, | |||
269 | { | 269 | { |
270 | struct acpi_namespace_node *node; | 270 | struct acpi_namespace_node *node; |
271 | struct acpi_device_info *info; | 271 | struct acpi_device_info *info; |
272 | struct acpica_device_id_list *cid_list = NULL; | 272 | struct acpi_pnp_device_id_list *cid_list = NULL; |
273 | struct acpica_device_id *hid = NULL; | 273 | struct acpi_pnp_device_id *hid = NULL; |
274 | struct acpica_device_id *uid = NULL; | 274 | struct acpi_pnp_device_id *uid = NULL; |
275 | struct acpi_pnp_device_id *sub = NULL; | ||
275 | char *next_id_string; | 276 | char *next_id_string; |
276 | acpi_object_type type; | 277 | acpi_object_type type; |
277 | acpi_name name; | 278 | acpi_name name; |
@@ -316,7 +317,7 @@ acpi_get_object_info(acpi_handle handle, | |||
316 | if ((type == ACPI_TYPE_DEVICE) || (type == ACPI_TYPE_PROCESSOR)) { | 317 | if ((type == ACPI_TYPE_DEVICE) || (type == ACPI_TYPE_PROCESSOR)) { |
317 | /* | 318 | /* |
318 | * Get extra info for ACPI Device/Processor objects only: | 319 | * Get extra info for ACPI Device/Processor objects only: |
319 | * Run the Device _HID, _UID, and _CID methods. | 320 | * Run the Device _HID, _UID, _SUB, and _CID methods. |
320 | * | 321 | * |
321 | * Note: none of these methods are required, so they may or may | 322 | * Note: none of these methods are required, so they may or may |
322 | * not be present for this device. The Info->Valid bitfield is used | 323 | * not be present for this device. The Info->Valid bitfield is used |
@@ -339,6 +340,14 @@ acpi_get_object_info(acpi_handle handle, | |||
339 | valid |= ACPI_VALID_UID; | 340 | valid |= ACPI_VALID_UID; |
340 | } | 341 | } |
341 | 342 | ||
343 | /* Execute the Device._SUB method */ | ||
344 | |||
345 | status = acpi_ut_execute_SUB(node, &sub); | ||
346 | if (ACPI_SUCCESS(status)) { | ||
347 | info_size += sub->length; | ||
348 | valid |= ACPI_VALID_SUB; | ||
349 | } | ||
350 | |||
342 | /* Execute the Device._CID method */ | 351 | /* Execute the Device._CID method */ |
343 | 352 | ||
344 | status = acpi_ut_execute_CID(node, &cid_list); | 353 | status = acpi_ut_execute_CID(node, &cid_list); |
@@ -348,7 +357,7 @@ acpi_get_object_info(acpi_handle handle, | |||
348 | 357 | ||
349 | info_size += | 358 | info_size += |
350 | (cid_list->list_size - | 359 | (cid_list->list_size - |
351 | sizeof(struct acpica_device_id_list)); | 360 | sizeof(struct acpi_pnp_device_id_list)); |
352 | valid |= ACPI_VALID_CID; | 361 | valid |= ACPI_VALID_CID; |
353 | } | 362 | } |
354 | } | 363 | } |
@@ -418,16 +427,17 @@ acpi_get_object_info(acpi_handle handle, | |||
418 | next_id_string = ACPI_CAST_PTR(char, info->compatible_id_list.ids); | 427 | next_id_string = ACPI_CAST_PTR(char, info->compatible_id_list.ids); |
419 | if (cid_list) { | 428 | if (cid_list) { |
420 | 429 | ||
421 | /* Point past the CID DEVICE_ID array */ | 430 | /* Point past the CID PNP_DEVICE_ID array */ |
422 | 431 | ||
423 | next_id_string += | 432 | next_id_string += |
424 | ((acpi_size) cid_list->count * | 433 | ((acpi_size) cid_list->count * |
425 | sizeof(struct acpica_device_id)); | 434 | sizeof(struct acpi_pnp_device_id)); |
426 | } | 435 | } |
427 | 436 | ||
428 | /* | 437 | /* |
429 | * Copy the HID, UID, and CIDs to the return buffer. The variable-length | 438 | * Copy the HID, UID, SUB, and CIDs to the return buffer. |
430 | * strings are copied to the reserved area at the end of the buffer. | 439 | * The variable-length strings are copied to the reserved area |
440 | * at the end of the buffer. | ||
431 | * | 441 | * |
432 | * For HID and CID, check if the ID is a PCI Root Bridge. | 442 | * For HID and CID, check if the ID is a PCI Root Bridge. |
433 | */ | 443 | */ |
@@ -445,6 +455,11 @@ acpi_get_object_info(acpi_handle handle, | |||
445 | uid, next_id_string); | 455 | uid, next_id_string); |
446 | } | 456 | } |
447 | 457 | ||
458 | if (sub) { | ||
459 | next_id_string = acpi_ns_copy_device_id(&info->subsystem_id, | ||
460 | sub, next_id_string); | ||
461 | } | ||
462 | |||
448 | if (cid_list) { | 463 | if (cid_list) { |
449 | info->compatible_id_list.count = cid_list->count; | 464 | info->compatible_id_list.count = cid_list->count; |
450 | info->compatible_id_list.list_size = cid_list->list_size; | 465 | info->compatible_id_list.list_size = cid_list->list_size; |
@@ -481,6 +496,9 @@ acpi_get_object_info(acpi_handle handle, | |||
481 | if (uid) { | 496 | if (uid) { |
482 | ACPI_FREE(uid); | 497 | ACPI_FREE(uid); |
483 | } | 498 | } |
499 | if (sub) { | ||
500 | ACPI_FREE(sub); | ||
501 | } | ||
484 | if (cid_list) { | 502 | if (cid_list) { |
485 | ACPI_FREE(cid_list); | 503 | ACPI_FREE(cid_list); |
486 | } | 504 | } |
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c index 6766fc4f088f..9d029dac6b64 100644 --- a/drivers/acpi/acpica/nsxfobj.c +++ b/drivers/acpi/acpica/nsxfobj.c | |||
@@ -220,8 +220,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_parent) | |||
220 | * | 220 | * |
221 | * RETURN: Status | 221 | * RETURN: Status |
222 | * | 222 | * |
223 | * DESCRIPTION: Return the next peer object within the namespace. If Handle is | 223 | * DESCRIPTION: Return the next peer object within the namespace. If Handle is |
224 | * valid, Scope is ignored. Otherwise, the first object within | 224 | * valid, Scope is ignored. Otherwise, the first object within |
225 | * Scope is returned. | 225 | * Scope is returned. |
226 | * | 226 | * |
227 | ******************************************************************************/ | 227 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index 844464c4f901..cb79e2d4d743 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c | |||
@@ -120,7 +120,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state) | |||
120 | * RETURN: Pointer to end-of-package +1 | 120 | * RETURN: Pointer to end-of-package +1 |
121 | * | 121 | * |
122 | * DESCRIPTION: Get next package length and return a pointer past the end of | 122 | * DESCRIPTION: Get next package length and return a pointer past the end of |
123 | * the package. Consumes the package length field | 123 | * the package. Consumes the package length field |
124 | * | 124 | * |
125 | ******************************************************************************/ | 125 | ******************************************************************************/ |
126 | 126 | ||
@@ -147,8 +147,8 @@ u8 *acpi_ps_get_next_package_end(struct acpi_parse_state *parser_state) | |||
147 | * RETURN: Pointer to the start of the name string (pointer points into | 147 | * RETURN: Pointer to the start of the name string (pointer points into |
148 | * the AML. | 148 | * the AML. |
149 | * | 149 | * |
150 | * DESCRIPTION: Get next raw namestring within the AML stream. Handles all name | 150 | * DESCRIPTION: Get next raw namestring within the AML stream. Handles all name |
151 | * prefix characters. Set parser state to point past the string. | 151 | * prefix characters. Set parser state to point past the string. |
152 | * (Name is consumed from the AML.) | 152 | * (Name is consumed from the AML.) |
153 | * | 153 | * |
154 | ******************************************************************************/ | 154 | ******************************************************************************/ |
@@ -220,7 +220,7 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state) | |||
220 | * | 220 | * |
221 | * DESCRIPTION: Get next name (if method call, return # of required args). | 221 | * DESCRIPTION: Get next name (if method call, return # of required args). |
222 | * Names are looked up in the internal namespace to determine | 222 | * Names are looked up in the internal namespace to determine |
223 | * if the name represents a control method. If a method | 223 | * if the name represents a control method. If a method |
224 | * is found, the number of arguments to the method is returned. | 224 | * is found, the number of arguments to the method is returned. |
225 | * This information is critical for parsing to continue correctly. | 225 | * This information is critical for parsing to continue correctly. |
226 | * | 226 | * |
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index 799162c1b6df..5607805aab26 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c | |||
@@ -133,18 +133,46 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state) | |||
133 | 133 | ||
134 | case AML_CLASS_UNKNOWN: | 134 | case AML_CLASS_UNKNOWN: |
135 | 135 | ||
136 | /* The opcode is unrecognized. Just skip unknown opcodes */ | 136 | /* The opcode is unrecognized. Complain and skip unknown opcodes */ |
137 | 137 | ||
138 | ACPI_ERROR((AE_INFO, | 138 | if (walk_state->pass_number == 2) { |
139 | "Found unknown opcode 0x%X at AML address %p offset 0x%X, ignoring", | 139 | ACPI_ERROR((AE_INFO, |
140 | walk_state->opcode, walk_state->parser_state.aml, | 140 | "Unknown opcode 0x%.2X at table offset 0x%.4X, ignoring", |
141 | walk_state->aml_offset)); | 141 | walk_state->opcode, |
142 | (u32)(walk_state->aml_offset + | ||
143 | sizeof(struct acpi_table_header)))); | ||
142 | 144 | ||
143 | ACPI_DUMP_BUFFER(walk_state->parser_state.aml, 128); | 145 | ACPI_DUMP_BUFFER(walk_state->parser_state.aml - 16, 48); |
144 | 146 | ||
145 | /* Assume one-byte bad opcode */ | 147 | #ifdef ACPI_ASL_COMPILER |
148 | /* | ||
149 | * This is executed for the disassembler only. Output goes | ||
150 | * to the disassembled ASL output file. | ||
151 | */ | ||
152 | acpi_os_printf | ||
153 | ("/*\nError: Unknown opcode 0x%.2X at table offset 0x%.4X, context:\n", | ||
154 | walk_state->opcode, | ||
155 | (u32)(walk_state->aml_offset + | ||
156 | sizeof(struct acpi_table_header))); | ||
157 | |||
158 | /* Dump the context surrounding the invalid opcode */ | ||
159 | |||
160 | acpi_ut_dump_buffer(((u8 *)walk_state->parser_state. | ||
161 | aml - 16), 48, DB_BYTE_DISPLAY, | ||
162 | walk_state->aml_offset + | ||
163 | sizeof(struct acpi_table_header) - | ||
164 | 16); | ||
165 | acpi_os_printf(" */\n"); | ||
166 | #endif | ||
167 | } | ||
168 | |||
169 | /* Increment past one-byte or two-byte opcode */ | ||
146 | 170 | ||
147 | walk_state->parser_state.aml++; | 171 | walk_state->parser_state.aml++; |
172 | if (walk_state->opcode > 0xFF) { /* Can only happen if first byte is 0x5B */ | ||
173 | walk_state->parser_state.aml++; | ||
174 | } | ||
175 | |||
148 | return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE); | 176 | return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE); |
149 | 177 | ||
150 | default: | 178 | default: |
@@ -519,11 +547,18 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state, | |||
519 | if ((op_info->class == | 547 | if ((op_info->class == |
520 | AML_CLASS_EXECUTE) && (!arg)) { | 548 | AML_CLASS_EXECUTE) && (!arg)) { |
521 | ACPI_WARNING((AE_INFO, | 549 | ACPI_WARNING((AE_INFO, |
522 | "Detected an unsupported executable opcode " | 550 | "Unsupported module-level executable opcode " |
523 | "at module-level: [0x%.4X] at table offset 0x%.4X", | 551 | "0x%.2X at table offset 0x%.4X", |
524 | op->common.aml_opcode, | 552 | op->common. |
525 | (u32)((aml_op_start - walk_state->parser_state.aml_start) | 553 | aml_opcode, |
526 | + sizeof(struct acpi_table_header)))); | 554 | (u32) |
555 | (ACPI_PTR_DIFF | ||
556 | (aml_op_start, | ||
557 | walk_state-> | ||
558 | parser_state. | ||
559 | aml_start) + | ||
560 | sizeof(struct | ||
561 | acpi_table_header)))); | ||
527 | } | 562 | } |
528 | } | 563 | } |
529 | break; | 564 | break; |
@@ -843,8 +878,6 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state, | |||
843 | *op = NULL; | 878 | *op = NULL; |
844 | } | 879 | } |
845 | 880 | ||
846 | ACPI_PREEMPTION_POINT(); | ||
847 | |||
848 | return_ACPI_STATUS(AE_OK); | 881 | return_ACPI_STATUS(AE_OK); |
849 | } | 882 | } |
850 | 883 | ||
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c index ed1d457bd5ca..1793d934aa30 100644 --- a/drivers/acpi/acpica/psopcode.c +++ b/drivers/acpi/acpica/psopcode.c | |||
@@ -59,7 +59,7 @@ static const u8 acpi_gbl_argument_count[] = | |||
59 | * | 59 | * |
60 | * DESCRIPTION: Opcode table. Each entry contains <opcode, type, name, operands> | 60 | * DESCRIPTION: Opcode table. Each entry contains <opcode, type, name, operands> |
61 | * The name is a simple ascii string, the operand specifier is an | 61 | * The name is a simple ascii string, the operand specifier is an |
62 | * ascii string with one letter per operand. The letter specifies | 62 | * ascii string with one letter per operand. The letter specifies |
63 | * the operand type. | 63 | * the operand type. |
64 | * | 64 | * |
65 | ******************************************************************************/ | 65 | ******************************************************************************/ |
@@ -183,7 +183,7 @@ static const u8 acpi_gbl_argument_count[] = | |||
183 | ******************************************************************************/ | 183 | ******************************************************************************/ |
184 | 184 | ||
185 | /* | 185 | /* |
186 | * Master Opcode information table. A summary of everything we know about each | 186 | * Master Opcode information table. A summary of everything we know about each |
187 | * opcode, all in one place. | 187 | * opcode, all in one place. |
188 | */ | 188 | */ |
189 | const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { | 189 | const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { |
@@ -392,10 +392,12 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { | |||
392 | AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE), | 392 | AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE), |
393 | /* 38 */ ACPI_OP("LAnd", ARGP_LAND_OP, ARGI_LAND_OP, ACPI_TYPE_ANY, | 393 | /* 38 */ ACPI_OP("LAnd", ARGP_LAND_OP, ARGI_LAND_OP, ACPI_TYPE_ANY, |
394 | AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, | 394 | AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, |
395 | AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT), | 395 | AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | |
396 | AML_CONSTANT), | ||
396 | /* 39 */ ACPI_OP("LOr", ARGP_LOR_OP, ARGI_LOR_OP, ACPI_TYPE_ANY, | 397 | /* 39 */ ACPI_OP("LOr", ARGP_LOR_OP, ARGI_LOR_OP, ACPI_TYPE_ANY, |
397 | AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, | 398 | AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, |
398 | AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT), | 399 | AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | |
400 | AML_CONSTANT), | ||
399 | /* 3A */ ACPI_OP("LNot", ARGP_LNOT_OP, ARGI_LNOT_OP, ACPI_TYPE_ANY, | 401 | /* 3A */ ACPI_OP("LNot", ARGP_LNOT_OP, ARGI_LNOT_OP, ACPI_TYPE_ANY, |
400 | AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, | 402 | AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, |
401 | AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), | 403 | AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), |
@@ -495,7 +497,8 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { | |||
495 | AML_NSNODE | AML_NAMED | AML_DEFER), | 497 | AML_NSNODE | AML_NAMED | AML_DEFER), |
496 | /* 59 */ ACPI_OP("Field", ARGP_FIELD_OP, ARGI_FIELD_OP, ACPI_TYPE_ANY, | 498 | /* 59 */ ACPI_OP("Field", ARGP_FIELD_OP, ARGI_FIELD_OP, ACPI_TYPE_ANY, |
497 | AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, | 499 | AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, |
498 | AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD), | 500 | AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | |
501 | AML_FIELD), | ||
499 | /* 5A */ ACPI_OP("Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP, | 502 | /* 5A */ ACPI_OP("Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP, |
500 | ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT, | 503 | ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT, |
501 | AML_TYPE_NAMED_NO_OBJ, | 504 | AML_TYPE_NAMED_NO_OBJ, |
@@ -519,12 +522,13 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { | |||
519 | /* 5E */ ACPI_OP("IndexField", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP, | 522 | /* 5E */ ACPI_OP("IndexField", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP, |
520 | ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, | 523 | ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, |
521 | AML_TYPE_NAMED_FIELD, | 524 | AML_TYPE_NAMED_FIELD, |
522 | AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD), | 525 | AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | |
526 | AML_FIELD), | ||
523 | /* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP, | 527 | /* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP, |
524 | ACPI_TYPE_LOCAL_BANK_FIELD, AML_CLASS_NAMED_OBJECT, | 528 | ACPI_TYPE_LOCAL_BANK_FIELD, |
525 | AML_TYPE_NAMED_FIELD, | 529 | AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, |
526 | AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD | | 530 | AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | |
527 | AML_DEFER), | 531 | AML_FIELD | AML_DEFER), |
528 | 532 | ||
529 | /* Internal opcodes that map to invalid AML opcodes */ | 533 | /* Internal opcodes that map to invalid AML opcodes */ |
530 | 534 | ||
@@ -632,7 +636,8 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { | |||
632 | /* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP, | 636 | /* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP, |
633 | ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, | 637 | ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, |
634 | AML_TYPE_NAMED_NO_OBJ, | 638 | AML_TYPE_NAMED_NO_OBJ, |
635 | AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE), | 639 | AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | |
640 | AML_NSNODE), | ||
636 | 641 | ||
637 | /* ACPI 3.0 opcodes */ | 642 | /* ACPI 3.0 opcodes */ |
638 | 643 | ||
@@ -695,7 +700,7 @@ static const u8 acpi_gbl_short_op_index[256] = { | |||
695 | 700 | ||
696 | /* | 701 | /* |
697 | * This table is indexed by the second opcode of the extended opcode | 702 | * This table is indexed by the second opcode of the extended opcode |
698 | * pair. It returns an index into the opcode table (acpi_gbl_aml_op_info) | 703 | * pair. It returns an index into the opcode table (acpi_gbl_aml_op_info) |
699 | */ | 704 | */ |
700 | static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = { | 705 | static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = { |
701 | /* 0 1 2 3 4 5 6 7 */ | 706 | /* 0 1 2 3 4 5 6 7 */ |
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c index 01985703bb98..2494caf47755 100644 --- a/drivers/acpi/acpica/psparse.c +++ b/drivers/acpi/acpica/psparse.c | |||
@@ -43,9 +43,9 @@ | |||
43 | 43 | ||
44 | /* | 44 | /* |
45 | * Parse the AML and build an operation tree as most interpreters, | 45 | * Parse the AML and build an operation tree as most interpreters, |
46 | * like Perl, do. Parsing is done by hand rather than with a YACC | 46 | * like Perl, do. Parsing is done by hand rather than with a YACC |
47 | * generated parser to tightly constrain stack and dynamic memory | 47 | * generated parser to tightly constrain stack and dynamic memory |
48 | * usage. At the same time, parsing is kept flexible and the code | 48 | * usage. At the same time, parsing is kept flexible and the code |
49 | * fairly compact by parsing based on a list of AML opcode | 49 | * fairly compact by parsing based on a list of AML opcode |
50 | * templates in aml_op_info[] | 50 | * templates in aml_op_info[] |
51 | */ | 51 | */ |
@@ -379,7 +379,7 @@ acpi_ps_next_parse_state(struct acpi_walk_state *walk_state, | |||
379 | case AE_CTRL_FALSE: | 379 | case AE_CTRL_FALSE: |
380 | /* | 380 | /* |
381 | * Either an IF/WHILE Predicate was false or we encountered a BREAK | 381 | * Either an IF/WHILE Predicate was false or we encountered a BREAK |
382 | * opcode. In both cases, we do not execute the rest of the | 382 | * opcode. In both cases, we do not execute the rest of the |
383 | * package; We simply close out the parent (finishing the walk of | 383 | * package; We simply close out the parent (finishing the walk of |
384 | * this branch of the tree) and continue execution at the parent | 384 | * this branch of the tree) and continue execution at the parent |
385 | * level. | 385 | * level. |
@@ -459,8 +459,9 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state) | |||
459 | 459 | ||
460 | /* Executing a control method - additional cleanup */ | 460 | /* Executing a control method - additional cleanup */ |
461 | 461 | ||
462 | acpi_ds_terminate_control_method( | 462 | acpi_ds_terminate_control_method(walk_state-> |
463 | walk_state->method_desc, walk_state); | 463 | method_desc, |
464 | walk_state); | ||
464 | } | 465 | } |
465 | 466 | ||
466 | acpi_ds_delete_walk_state(walk_state); | 467 | acpi_ds_delete_walk_state(walk_state); |
@@ -487,7 +488,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state) | |||
487 | acpi_gbl_current_walk_list = thread; | 488 | acpi_gbl_current_walk_list = thread; |
488 | 489 | ||
489 | /* | 490 | /* |
490 | * Execute the walk loop as long as there is a valid Walk State. This | 491 | * Execute the walk loop as long as there is a valid Walk State. This |
491 | * handles nested control method invocations without recursion. | 492 | * handles nested control method invocations without recursion. |
492 | */ | 493 | */ |
493 | ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "State=%p\n", walk_state)); | 494 | ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "State=%p\n", walk_state)); |
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c index 8736ad5f04d3..4137dcb352d1 100644 --- a/drivers/acpi/acpica/psutils.c +++ b/drivers/acpi/acpica/psutils.c | |||
@@ -108,7 +108,7 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode) | |||
108 | * RETURN: Pointer to the new Op, null on failure | 108 | * RETURN: Pointer to the new Op, null on failure |
109 | * | 109 | * |
110 | * DESCRIPTION: Allocate an acpi_op, choose op type (and thus size) based on | 110 | * DESCRIPTION: Allocate an acpi_op, choose op type (and thus size) based on |
111 | * opcode. A cache of opcodes is available for the pure | 111 | * opcode. A cache of opcodes is available for the pure |
112 | * GENERIC_OP, since this is by far the most commonly used. | 112 | * GENERIC_OP, since this is by far the most commonly used. |
113 | * | 113 | * |
114 | ******************************************************************************/ | 114 | ******************************************************************************/ |
@@ -164,7 +164,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode) | |||
164 | * | 164 | * |
165 | * RETURN: None. | 165 | * RETURN: None. |
166 | * | 166 | * |
167 | * DESCRIPTION: Free an Op object. Either put it on the GENERIC_OP cache list | 167 | * DESCRIPTION: Free an Op object. Either put it on the GENERIC_OP cache list |
168 | * or actually free it. | 168 | * or actually free it. |
169 | * | 169 | * |
170 | ******************************************************************************/ | 170 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c index de12469d1c9c..147feb6aa2a0 100644 --- a/drivers/acpi/acpica/rscalc.c +++ b/drivers/acpi/acpica/rscalc.c | |||
@@ -457,6 +457,15 @@ acpi_rs_get_list_length(u8 * aml_buffer, | |||
457 | * Get the number of vendor data bytes | 457 | * Get the number of vendor data bytes |
458 | */ | 458 | */ |
459 | extra_struct_bytes = resource_length; | 459 | extra_struct_bytes = resource_length; |
460 | |||
461 | /* | ||
462 | * There is already one byte included in the minimum | ||
463 | * descriptor size. If there are extra struct bytes, | ||
464 | * subtract one from the count. | ||
465 | */ | ||
466 | if (extra_struct_bytes) { | ||
467 | extra_struct_bytes--; | ||
468 | } | ||
460 | break; | 469 | break; |
461 | 470 | ||
462 | case ACPI_RESOURCE_NAME_END_TAG: | 471 | case ACPI_RESOURCE_NAME_END_TAG: |
@@ -601,7 +610,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object, | |||
601 | /* | 610 | /* |
602 | * Calculate the size of the return buffer. | 611 | * Calculate the size of the return buffer. |
603 | * The base size is the number of elements * the sizes of the | 612 | * The base size is the number of elements * the sizes of the |
604 | * structures. Additional space for the strings is added below. | 613 | * structures. Additional space for the strings is added below. |
605 | * The minus one is to subtract the size of the u8 Source[1] | 614 | * The minus one is to subtract the size of the u8 Source[1] |
606 | * member because it is added below. | 615 | * member because it is added below. |
607 | * | 616 | * |
@@ -664,8 +673,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object, | |||
664 | (*sub_object_list)->string. | 673 | (*sub_object_list)->string. |
665 | length + 1); | 674 | length + 1); |
666 | } else { | 675 | } else { |
667 | temp_size_needed += | 676 | temp_size_needed += acpi_ns_get_pathname_length((*sub_object_list)->reference.node); |
668 | acpi_ns_get_pathname_length((*sub_object_list)->reference.node); | ||
669 | } | 677 | } |
670 | } else { | 678 | } else { |
671 | /* | 679 | /* |
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c index 46b5324b22d6..8b64db9a3fd2 100644 --- a/drivers/acpi/acpica/rslist.c +++ b/drivers/acpi/acpica/rslist.c | |||
@@ -109,7 +109,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml, | |||
109 | ACPI_ERROR((AE_INFO, | 109 | ACPI_ERROR((AE_INFO, |
110 | "Invalid/unsupported resource descriptor: Type 0x%2.2X", | 110 | "Invalid/unsupported resource descriptor: Type 0x%2.2X", |
111 | resource_index)); | 111 | resource_index)); |
112 | return (AE_AML_INVALID_RESOURCE_TYPE); | 112 | return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE); |
113 | } | 113 | } |
114 | 114 | ||
115 | /* Convert the AML byte stream resource to a local resource struct */ | 115 | /* Convert the AML byte stream resource to a local resource struct */ |
@@ -200,7 +200,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource, | |||
200 | ACPI_ERROR((AE_INFO, | 200 | ACPI_ERROR((AE_INFO, |
201 | "Invalid/unsupported resource descriptor: Type 0x%2.2X", | 201 | "Invalid/unsupported resource descriptor: Type 0x%2.2X", |
202 | resource->type)); | 202 | resource->type)); |
203 | return (AE_AML_INVALID_RESOURCE_TYPE); | 203 | return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE); |
204 | } | 204 | } |
205 | 205 | ||
206 | status = acpi_rs_convert_resource_to_aml(resource, | 206 | status = acpi_rs_convert_resource_to_aml(resource, |
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c index 57deae166577..77d1db29a725 100644 --- a/drivers/acpi/acpica/tbfind.c +++ b/drivers/acpi/acpica/tbfind.c | |||
@@ -77,7 +77,7 @@ acpi_tb_find_table(char *signature, | |||
77 | /* Normalize the input strings */ | 77 | /* Normalize the input strings */ |
78 | 78 | ||
79 | ACPI_MEMSET(&header, 0, sizeof(struct acpi_table_header)); | 79 | ACPI_MEMSET(&header, 0, sizeof(struct acpi_table_header)); |
80 | ACPI_STRNCPY(header.signature, signature, ACPI_NAME_SIZE); | 80 | ACPI_MOVE_NAME(header.signature, signature); |
81 | ACPI_STRNCPY(header.oem_id, oem_id, ACPI_OEM_ID_SIZE); | 81 | ACPI_STRNCPY(header.oem_id, oem_id, ACPI_OEM_ID_SIZE); |
82 | ACPI_STRNCPY(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE); | 82 | ACPI_STRNCPY(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE); |
83 | 83 | ||
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index 70f9d787c82c..f540ae462925 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c | |||
@@ -526,6 +526,8 @@ void acpi_tb_terminate(void) | |||
526 | 526 | ||
527 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n")); | 527 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n")); |
528 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | 528 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); |
529 | |||
530 | return_VOID; | ||
529 | } | 531 | } |
530 | 532 | ||
531 | /******************************************************************************* | 533 | /******************************************************************************* |
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index b6cea30da638..285e24b97382 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c | |||
@@ -354,7 +354,7 @@ u8 acpi_tb_checksum(u8 *buffer, u32 length) | |||
354 | sum = (u8) (sum + *(buffer++)); | 354 | sum = (u8) (sum + *(buffer++)); |
355 | } | 355 | } |
356 | 356 | ||
357 | return sum; | 357 | return (sum); |
358 | } | 358 | } |
359 | 359 | ||
360 | /******************************************************************************* | 360 | /******************************************************************************* |
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index 21101262e47a..f5632780421d 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c | |||
@@ -236,7 +236,7 @@ acpi_get_table_header(char *signature, | |||
236 | sizeof(struct | 236 | sizeof(struct |
237 | acpi_table_header)); | 237 | acpi_table_header)); |
238 | if (!header) { | 238 | if (!header) { |
239 | return AE_NO_MEMORY; | 239 | return (AE_NO_MEMORY); |
240 | } | 240 | } |
241 | ACPI_MEMCPY(out_table_header, header, | 241 | ACPI_MEMCPY(out_table_header, header, |
242 | sizeof(struct acpi_table_header)); | 242 | sizeof(struct acpi_table_header)); |
@@ -244,7 +244,7 @@ acpi_get_table_header(char *signature, | |||
244 | sizeof(struct | 244 | sizeof(struct |
245 | acpi_table_header)); | 245 | acpi_table_header)); |
246 | } else { | 246 | } else { |
247 | return AE_NOT_FOUND; | 247 | return (AE_NOT_FOUND); |
248 | } | 248 | } |
249 | } else { | 249 | } else { |
250 | ACPI_MEMCPY(out_table_header, | 250 | ACPI_MEMCPY(out_table_header, |
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c index f87cc63e69a1..a5e1e4e47098 100644 --- a/drivers/acpi/acpica/tbxfload.c +++ b/drivers/acpi/acpica/tbxfload.c | |||
@@ -211,7 +211,7 @@ static acpi_status acpi_tb_load_namespace(void) | |||
211 | * DESCRIPTION: Dynamically load an ACPI table from the caller's buffer. Must | 211 | * DESCRIPTION: Dynamically load an ACPI table from the caller's buffer. Must |
212 | * be a valid ACPI table with a valid ACPI table header. | 212 | * be a valid ACPI table with a valid ACPI table header. |
213 | * Note1: Mainly intended to support hotplug addition of SSDTs. | 213 | * Note1: Mainly intended to support hotplug addition of SSDTs. |
214 | * Note2: Does not copy the incoming table. User is reponsible | 214 | * Note2: Does not copy the incoming table. User is responsible |
215 | * to ensure that the table is not deleted or unmapped. | 215 | * to ensure that the table is not deleted or unmapped. |
216 | * | 216 | * |
217 | ******************************************************************************/ | 217 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c index 74e720800037..28f330230f99 100644 --- a/drivers/acpi/acpica/tbxfroot.c +++ b/drivers/acpi/acpica/tbxfroot.c | |||
@@ -67,7 +67,6 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp); | |||
67 | 67 | ||
68 | static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) | 68 | static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) |
69 | { | 69 | { |
70 | ACPI_FUNCTION_ENTRY(); | ||
71 | 70 | ||
72 | /* | 71 | /* |
73 | * The signature and checksum must both be correct | 72 | * The signature and checksum must both be correct |
@@ -108,7 +107,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) | |||
108 | * RETURN: Status, RSDP physical address | 107 | * RETURN: Status, RSDP physical address |
109 | * | 108 | * |
110 | * DESCRIPTION: Search lower 1Mbyte of memory for the root system descriptor | 109 | * DESCRIPTION: Search lower 1Mbyte of memory for the root system descriptor |
111 | * pointer structure. If it is found, set *RSDP to point to it. | 110 | * pointer structure. If it is found, set *RSDP to point to it. |
112 | * | 111 | * |
113 | * NOTE1: The RSDP must be either in the first 1K of the Extended | 112 | * NOTE1: The RSDP must be either in the first 1K of the Extended |
114 | * BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.) | 113 | * BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.) |
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c new file mode 100644 index 000000000000..e1d40ed26390 --- /dev/null +++ b/drivers/acpi/acpica/utcache.c | |||
@@ -0,0 +1,323 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: utcache - local cache allocation routines | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2012, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #include <acpi/acpi.h> | ||
45 | #include "accommon.h" | ||
46 | |||
47 | #define _COMPONENT ACPI_UTILITIES | ||
48 | ACPI_MODULE_NAME("utcache") | ||
49 | |||
50 | #ifdef ACPI_USE_LOCAL_CACHE | ||
51 | /******************************************************************************* | ||
52 | * | ||
53 | * FUNCTION: acpi_os_create_cache | ||
54 | * | ||
55 | * PARAMETERS: cache_name - Ascii name for the cache | ||
56 | * object_size - Size of each cached object | ||
57 | * max_depth - Maximum depth of the cache (in objects) | ||
58 | * return_cache - Where the new cache object is returned | ||
59 | * | ||
60 | * RETURN: Status | ||
61 | * | ||
62 | * DESCRIPTION: Create a cache object | ||
63 | * | ||
64 | ******************************************************************************/ | ||
65 | acpi_status | ||
66 | acpi_os_create_cache(char *cache_name, | ||
67 | u16 object_size, | ||
68 | u16 max_depth, struct acpi_memory_list ** return_cache) | ||
69 | { | ||
70 | struct acpi_memory_list *cache; | ||
71 | |||
72 | ACPI_FUNCTION_ENTRY(); | ||
73 | |||
74 | if (!cache_name || !return_cache || (object_size < 16)) { | ||
75 | return (AE_BAD_PARAMETER); | ||
76 | } | ||
77 | |||
78 | /* Create the cache object */ | ||
79 | |||
80 | cache = acpi_os_allocate(sizeof(struct acpi_memory_list)); | ||
81 | if (!cache) { | ||
82 | return (AE_NO_MEMORY); | ||
83 | } | ||
84 | |||
85 | /* Populate the cache object and return it */ | ||
86 | |||
87 | ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list)); | ||
88 | cache->link_offset = 8; | ||
89 | cache->list_name = cache_name; | ||
90 | cache->object_size = object_size; | ||
91 | cache->max_depth = max_depth; | ||
92 | |||
93 | *return_cache = cache; | ||
94 | return (AE_OK); | ||
95 | } | ||
96 | |||
97 | /******************************************************************************* | ||
98 | * | ||
99 | * FUNCTION: acpi_os_purge_cache | ||
100 | * | ||
101 | * PARAMETERS: cache - Handle to cache object | ||
102 | * | ||
103 | * RETURN: Status | ||
104 | * | ||
105 | * DESCRIPTION: Free all objects within the requested cache. | ||
106 | * | ||
107 | ******************************************************************************/ | ||
108 | |||
109 | acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache) | ||
110 | { | ||
111 | char *next; | ||
112 | acpi_status status; | ||
113 | |||
114 | ACPI_FUNCTION_ENTRY(); | ||
115 | |||
116 | if (!cache) { | ||
117 | return (AE_BAD_PARAMETER); | ||
118 | } | ||
119 | |||
120 | status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES); | ||
121 | if (ACPI_FAILURE(status)) { | ||
122 | return (status); | ||
123 | } | ||
124 | |||
125 | /* Walk the list of objects in this cache */ | ||
126 | |||
127 | while (cache->list_head) { | ||
128 | |||
129 | /* Delete and unlink one cached state object */ | ||
130 | |||
131 | next = *(ACPI_CAST_INDIRECT_PTR(char, | ||
132 | &(((char *)cache-> | ||
133 | list_head)[cache-> | ||
134 | link_offset]))); | ||
135 | ACPI_FREE(cache->list_head); | ||
136 | |||
137 | cache->list_head = next; | ||
138 | cache->current_depth--; | ||
139 | } | ||
140 | |||
141 | (void)acpi_ut_release_mutex(ACPI_MTX_CACHES); | ||
142 | return (AE_OK); | ||
143 | } | ||
144 | |||
145 | /******************************************************************************* | ||
146 | * | ||
147 | * FUNCTION: acpi_os_delete_cache | ||
148 | * | ||
149 | * PARAMETERS: cache - Handle to cache object | ||
150 | * | ||
151 | * RETURN: Status | ||
152 | * | ||
153 | * DESCRIPTION: Free all objects within the requested cache and delete the | ||
154 | * cache object. | ||
155 | * | ||
156 | ******************************************************************************/ | ||
157 | |||
158 | acpi_status acpi_os_delete_cache(struct acpi_memory_list * cache) | ||
159 | { | ||
160 | acpi_status status; | ||
161 | |||
162 | ACPI_FUNCTION_ENTRY(); | ||
163 | |||
164 | /* Purge all objects in the cache */ | ||
165 | |||
166 | status = acpi_os_purge_cache(cache); | ||
167 | if (ACPI_FAILURE(status)) { | ||
168 | return (status); | ||
169 | } | ||
170 | |||
171 | /* Now we can delete the cache object */ | ||
172 | |||
173 | acpi_os_free(cache); | ||
174 | return (AE_OK); | ||
175 | } | ||
176 | |||
177 | /******************************************************************************* | ||
178 | * | ||
179 | * FUNCTION: acpi_os_release_object | ||
180 | * | ||
181 | * PARAMETERS: cache - Handle to cache object | ||
182 | * object - The object to be released | ||
183 | * | ||
184 | * RETURN: None | ||
185 | * | ||
186 | * DESCRIPTION: Release an object to the specified cache. If cache is full, | ||
187 | * the object is deleted. | ||
188 | * | ||
189 | ******************************************************************************/ | ||
190 | |||
191 | acpi_status | ||
192 | acpi_os_release_object(struct acpi_memory_list * cache, void *object) | ||
193 | { | ||
194 | acpi_status status; | ||
195 | |||
196 | ACPI_FUNCTION_ENTRY(); | ||
197 | |||
198 | if (!cache || !object) { | ||
199 | return (AE_BAD_PARAMETER); | ||
200 | } | ||
201 | |||
202 | /* If cache is full, just free this object */ | ||
203 | |||
204 | if (cache->current_depth >= cache->max_depth) { | ||
205 | ACPI_FREE(object); | ||
206 | ACPI_MEM_TRACKING(cache->total_freed++); | ||
207 | } | ||
208 | |||
209 | /* Otherwise put this object back into the cache */ | ||
210 | |||
211 | else { | ||
212 | status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES); | ||
213 | if (ACPI_FAILURE(status)) { | ||
214 | return (status); | ||
215 | } | ||
216 | |||
217 | /* Mark the object as cached */ | ||
218 | |||
219 | ACPI_MEMSET(object, 0xCA, cache->object_size); | ||
220 | ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_CACHED); | ||
221 | |||
222 | /* Put the object at the head of the cache list */ | ||
223 | |||
224 | *(ACPI_CAST_INDIRECT_PTR(char, | ||
225 | &(((char *)object)[cache-> | ||
226 | link_offset]))) = | ||
227 | cache->list_head; | ||
228 | cache->list_head = object; | ||
229 | cache->current_depth++; | ||
230 | |||
231 | (void)acpi_ut_release_mutex(ACPI_MTX_CACHES); | ||
232 | } | ||
233 | |||
234 | return (AE_OK); | ||
235 | } | ||
236 | |||
237 | /******************************************************************************* | ||
238 | * | ||
239 | * FUNCTION: acpi_os_acquire_object | ||
240 | * | ||
241 | * PARAMETERS: cache - Handle to cache object | ||
242 | * | ||
243 | * RETURN: the acquired object. NULL on error | ||
244 | * | ||
245 | * DESCRIPTION: Get an object from the specified cache. If cache is empty, | ||
246 | * the object is allocated. | ||
247 | * | ||
248 | ******************************************************************************/ | ||
249 | |||
250 | void *acpi_os_acquire_object(struct acpi_memory_list *cache) | ||
251 | { | ||
252 | acpi_status status; | ||
253 | void *object; | ||
254 | |||
255 | ACPI_FUNCTION_NAME(os_acquire_object); | ||
256 | |||
257 | if (!cache) { | ||
258 | return (NULL); | ||
259 | } | ||
260 | |||
261 | status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES); | ||
262 | if (ACPI_FAILURE(status)) { | ||
263 | return (NULL); | ||
264 | } | ||
265 | |||
266 | ACPI_MEM_TRACKING(cache->requests++); | ||
267 | |||
268 | /* Check the cache first */ | ||
269 | |||
270 | if (cache->list_head) { | ||
271 | |||
272 | /* There is an object available, use it */ | ||
273 | |||
274 | object = cache->list_head; | ||
275 | cache->list_head = *(ACPI_CAST_INDIRECT_PTR(char, | ||
276 | &(((char *) | ||
277 | object)[cache-> | ||
278 | link_offset]))); | ||
279 | |||
280 | cache->current_depth--; | ||
281 | |||
282 | ACPI_MEM_TRACKING(cache->hits++); | ||
283 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, | ||
284 | "Object %p from %s cache\n", object, | ||
285 | cache->list_name)); | ||
286 | |||
287 | status = acpi_ut_release_mutex(ACPI_MTX_CACHES); | ||
288 | if (ACPI_FAILURE(status)) { | ||
289 | return (NULL); | ||
290 | } | ||
291 | |||
292 | /* Clear (zero) the previously used Object */ | ||
293 | |||
294 | ACPI_MEMSET(object, 0, cache->object_size); | ||
295 | } else { | ||
296 | /* The cache is empty, create a new object */ | ||
297 | |||
298 | ACPI_MEM_TRACKING(cache->total_allocated++); | ||
299 | |||
300 | #ifdef ACPI_DBG_TRACK_ALLOCATIONS | ||
301 | if ((cache->total_allocated - cache->total_freed) > | ||
302 | cache->max_occupied) { | ||
303 | cache->max_occupied = | ||
304 | cache->total_allocated - cache->total_freed; | ||
305 | } | ||
306 | #endif | ||
307 | |||
308 | /* Avoid deadlock with ACPI_ALLOCATE_ZEROED */ | ||
309 | |||
310 | status = acpi_ut_release_mutex(ACPI_MTX_CACHES); | ||
311 | if (ACPI_FAILURE(status)) { | ||
312 | return (NULL); | ||
313 | } | ||
314 | |||
315 | object = ACPI_ALLOCATE_ZEROED(cache->object_size); | ||
316 | if (!object) { | ||
317 | return (NULL); | ||
318 | } | ||
319 | } | ||
320 | |||
321 | return (object); | ||
322 | } | ||
323 | #endif /* ACPI_USE_LOCAL_CACHE */ | ||
diff --git a/drivers/acpi/acpica/utclib.c b/drivers/acpi/acpica/utclib.c new file mode 100644 index 000000000000..19ea4755aa73 --- /dev/null +++ b/drivers/acpi/acpica/utclib.c | |||
@@ -0,0 +1,749 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: cmclib - Local implementation of C library functions | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2012, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #include <acpi/acpi.h> | ||
45 | #include "accommon.h" | ||
46 | |||
47 | /* | ||
48 | * These implementations of standard C Library routines can optionally be | ||
49 | * used if a C library is not available. In general, they are less efficient | ||
50 | * than an inline or assembly implementation | ||
51 | */ | ||
52 | |||
53 | #define _COMPONENT ACPI_UTILITIES | ||
54 | ACPI_MODULE_NAME("cmclib") | ||
55 | |||
56 | #ifndef ACPI_USE_SYSTEM_CLIBRARY | ||
57 | #define NEGATIVE 1 | ||
58 | #define POSITIVE 0 | ||
59 | /******************************************************************************* | ||
60 | * | ||
61 | * FUNCTION: acpi_ut_memcmp (memcmp) | ||
62 | * | ||
63 | * PARAMETERS: buffer1 - First Buffer | ||
64 | * buffer2 - Second Buffer | ||
65 | * count - Maximum # of bytes to compare | ||
66 | * | ||
67 | * RETURN: Index where Buffers mismatched, or 0 if Buffers matched | ||
68 | * | ||
69 | * DESCRIPTION: Compare two Buffers, with a maximum length | ||
70 | * | ||
71 | ******************************************************************************/ | ||
72 | int acpi_ut_memcmp(const char *buffer1, const char *buffer2, acpi_size count) | ||
73 | { | ||
74 | |||
75 | return ((count == ACPI_SIZE_MAX) ? 0 : ((unsigned char)*buffer1 - | ||
76 | (unsigned char)*buffer2)); | ||
77 | } | ||
78 | |||
79 | /******************************************************************************* | ||
80 | * | ||
81 | * FUNCTION: acpi_ut_memcpy (memcpy) | ||
82 | * | ||
83 | * PARAMETERS: dest - Target of the copy | ||
84 | * src - Source buffer to copy | ||
85 | * count - Number of bytes to copy | ||
86 | * | ||
87 | * RETURN: Dest | ||
88 | * | ||
89 | * DESCRIPTION: Copy arbitrary bytes of memory | ||
90 | * | ||
91 | ******************************************************************************/ | ||
92 | |||
void *acpi_ut_memcpy(void *dest, const void *src, acpi_size count)
{
	char *to = dest;
	const char *from = src;

	/* Copy count bytes, one byte at a time */

	for (; count > 0; count--) {
		*to++ = *from++;
	}

	return (dest);
}
107 | |||
108 | /******************************************************************************* | ||
109 | * | ||
110 | * FUNCTION: acpi_ut_memset (memset) | ||
111 | * | ||
112 | * PARAMETERS: dest - Buffer to set | ||
113 | * value - Value to set each byte of memory | ||
114 | * count - Number of bytes to set | ||
115 | * | ||
116 | * RETURN: Dest | ||
117 | * | ||
118 | * DESCRIPTION: Initialize a buffer to a known value. | ||
119 | * | ||
120 | ******************************************************************************/ | ||
121 | |||
void *acpi_ut_memset(void *dest, u8 value, acpi_size count)
{
	char *byte_ptr = dest;

	/* Store value into each of the count bytes */

	for (; count > 0; count--) {
		*byte_ptr++ = (char)value;
	}

	return (dest);
}
134 | |||
135 | /******************************************************************************* | ||
136 | * | ||
137 | * FUNCTION: acpi_ut_strlen (strlen) | ||
138 | * | ||
139 | * PARAMETERS: string - Null terminated string | ||
140 | * | ||
141 | * RETURN: Length | ||
142 | * | ||
143 | * DESCRIPTION: Returns the length of the input string | ||
144 | * | ||
145 | ******************************************************************************/ | ||
146 | |||
acpi_size acpi_ut_strlen(const char *string)
{
	/*
	 * Use acpi_size for the counter: the previous u32 counter silently
	 * truncated the result on 64-bit builds for strings longer than
	 * 4 GiB, since the function returns acpi_size.
	 */
	acpi_size length = 0;

	/* Count the string until a null terminator is encountered */

	while (*string) {
		length++;
		string++;
	}

	return (length);
}
160 | |||
161 | /******************************************************************************* | ||
162 | * | ||
163 | * FUNCTION: acpi_ut_strcpy (strcpy) | ||
164 | * | ||
165 | * PARAMETERS: dst_string - Target of the copy | ||
166 | * src_string - The source string to copy | ||
167 | * | ||
168 | * RETURN: dst_string | ||
169 | * | ||
170 | * DESCRIPTION: Copy a null terminated string | ||
171 | * | ||
172 | ******************************************************************************/ | ||
173 | |||
char *acpi_ut_strcpy(char *dst_string, const char *src_string)
{
	char *dst = dst_string;

	/* Copy each byte; the loop also copies the terminating null */

	while ((*dst = *src_string) != 0) {
		dst++;
		src_string++;
	}

	return (dst_string);
}
192 | |||
193 | /******************************************************************************* | ||
194 | * | ||
195 | * FUNCTION: acpi_ut_strncpy (strncpy) | ||
196 | * | ||
197 | * PARAMETERS: dst_string - Target of the copy | ||
198 | * src_string - The source string to copy | ||
199 | * count - Maximum # of bytes to copy | ||
200 | * | ||
201 | * RETURN: dst_string | ||
202 | * | ||
203 | * DESCRIPTION: Copy a null terminated string, with a maximum length | ||
204 | * | ||
205 | ******************************************************************************/ | ||
206 | |||
char *acpi_ut_strncpy(char *dst_string, const char *src_string, acpi_size count)
{
	char *dst = dst_string;

	/* Copy up to count bytes, stopping after a null is copied */

	while (count != 0) {
		count--;
		if ((*dst++ = *src_string++) == 0) {
			break;
		}
	}

	/* Zero-fill any remaining bytes (standard strncpy semantics) */

	while (count != 0) {
		count--;
		*dst++ = 0;
	}

	return (dst_string);
}
228 | |||
229 | /******************************************************************************* | ||
230 | * | ||
231 | * FUNCTION: acpi_ut_strcmp (strcmp) | ||
232 | * | ||
233 | * PARAMETERS: string1 - First string | ||
234 | * string2 - Second string | ||
235 | * | ||
236 | * RETURN: Index where strings mismatched, or 0 if strings matched | ||
237 | * | ||
238 | * DESCRIPTION: Compare two null terminated strings | ||
239 | * | ||
240 | ******************************************************************************/ | ||
241 | |||
int acpi_ut_strcmp(const char *string1, const char *string2)
{
	/* Walk both strings while the characters match */

	while (*string1 == *string2) {
		if (*string1 == 0) {

			/* Reached the end of both strings - equal */

			return (0);
		}
		string1++;
		string2++;
	}

	return ((unsigned char)*string1 - (unsigned char)*string2);
}
253 | |||
254 | #ifdef ACPI_FUTURE_IMPLEMENTATION | ||
255 | /* Not used at this time */ | ||
256 | /******************************************************************************* | ||
257 | * | ||
258 | * FUNCTION: acpi_ut_strchr (strchr) | ||
259 | * | ||
260 | * PARAMETERS: string - Search string | ||
261 | * ch - character to search for | ||
262 | * | ||
263 | * RETURN: Ptr to char or NULL if not found | ||
264 | * | ||
265 | * DESCRIPTION: Search a string for a character | ||
266 | * | ||
267 | ******************************************************************************/ | ||
268 | |||
char *acpi_ut_strchr(const char *string, int ch)
{
	/*
	 * Scan for ch; note that unlike standard strchr, this version does
	 * not match the terminating null itself (preserved behavior).
	 */
	while (*string) {
		if (*string == (char)ch) {
			return ((char *)string);
		}
		string++;
	}

	return (NULL);
}
280 | #endif | ||
281 | |||
282 | /******************************************************************************* | ||
283 | * | ||
284 | * FUNCTION: acpi_ut_strncmp (strncmp) | ||
285 | * | ||
286 | * PARAMETERS: string1 - First string | ||
287 | * string2 - Second string | ||
288 | * count - Maximum # of bytes to compare | ||
289 | * | ||
290 | * RETURN: Index where strings mismatched, or 0 if strings matched | ||
291 | * | ||
292 | * DESCRIPTION: Compare two null terminated strings, with a maximum length | ||
293 | * | ||
294 | ******************************************************************************/ | ||
295 | |||
int acpi_ut_strncmp(const char *string1, const char *string2, acpi_size count)
{
	/* Compare at most count characters */

	while (count != 0) {
		count--;
		if (*string1 != *string2) {

			/* Mismatch - return the signed byte difference */

			return ((unsigned char)*string1 -
				(unsigned char)*string2);
		}
		if (*string1 == 0) {

			/* Both strings ended together - equal */

			return (0);
		}
		string1++;
		string2++;
	}

	/* First count characters all matched */

	return (0);
}
308 | |||
309 | /******************************************************************************* | ||
310 | * | ||
311 | * FUNCTION: acpi_ut_strcat (Strcat) | ||
312 | * | ||
313 | * PARAMETERS: dst_string - Target of the copy | ||
314 | * src_string - The source string to copy | ||
315 | * | ||
316 | * RETURN: dst_string | ||
317 | * | ||
318 | * DESCRIPTION: Append a null terminated string to a null terminated string | ||
319 | * | ||
320 | ******************************************************************************/ | ||
321 | |||
char *acpi_ut_strcat(char *dst_string, const char *src_string)
{
	char *end;

	/* Locate the terminating null of the destination */

	end = dst_string;
	while (*end) {
		end++;
	}

	/* Copy the source, including its terminating null */

	while ((*end++ = *src_string++) != 0) {
	}

	return (dst_string);
}
338 | |||
339 | /******************************************************************************* | ||
340 | * | ||
341 | * FUNCTION: acpi_ut_strncat (strncat) | ||
342 | * | ||
343 | * PARAMETERS: dst_string - Target of the copy | ||
344 | * src_string - The source string to copy | ||
345 | * count - Maximum # of bytes to copy | ||
346 | * | ||
347 | * RETURN: dst_string | ||
348 | * | ||
349 | * DESCRIPTION: Append a null terminated string to a null terminated string, | ||
350 | * with a maximum count. | ||
351 | * | ||
352 | ******************************************************************************/ | ||
353 | |||
char *acpi_ut_strncat(char *dst_string, const char *src_string, acpi_size count)
{
	char *end;

	if (!count) {
		return (dst_string);
	}

	/* Locate the terminating null of the destination */

	end = dst_string;
	while (*end) {
		end++;
	}

	/* Append up to count characters from the source */

	while (count) {
		if ((*end++ = *src_string++) == 0) {

			/* Source terminator was copied - done */

			return (dst_string);
		}
		count--;
	}

	/* Appended exactly count characters - add the terminator */

	*end = 0;
	return (dst_string);
}
379 | |||
380 | /******************************************************************************* | ||
381 | * | ||
382 | * FUNCTION: acpi_ut_strstr (strstr) | ||
383 | * | ||
384 | * PARAMETERS: string1 - Target string | ||
385 | * string2 - Substring to search for | ||
386 | * | ||
387 | * RETURN: Where substring match starts, Null if no match found | ||
388 | * | ||
389 | * DESCRIPTION: Checks if String2 occurs in String1. This is not really a | ||
390 | * full implementation of strstr, only sufficient for command | ||
391 | * matching | ||
392 | * | ||
393 | ******************************************************************************/ | ||
394 | |||
395 | char *acpi_ut_strstr(char *string1, char *string2) | ||
396 | { | ||
397 | char *string; | ||
398 | |||
399 | if (acpi_ut_strlen(string2) > acpi_ut_strlen(string1)) { | ||
400 | return (NULL); | ||
401 | } | ||
402 | |||
403 | /* Walk entire string, comparing the letters */ | ||
404 | |||
405 | for (string = string1; *string2;) { | ||
406 | if (*string2 != *string) { | ||
407 | return (NULL); | ||
408 | } | ||
409 | |||
410 | string2++; | ||
411 | string++; | ||
412 | } | ||
413 | |||
414 | return (string1); | ||
415 | } | ||
416 | |||
417 | /******************************************************************************* | ||
418 | * | ||
419 | * FUNCTION: acpi_ut_strtoul (strtoul) | ||
420 | * | ||
421 | * PARAMETERS: string - Null terminated string | ||
422 | * terminater - Where a pointer to the terminating byte is | ||
423 | * returned | ||
424 | * base - Radix of the string | ||
425 | * | ||
426 | * RETURN: Converted value | ||
427 | * | ||
428 | * DESCRIPTION: Convert a string into a 32-bit unsigned value. | ||
429 | * Note: use acpi_ut_strtoul64 for 64-bit integers. | ||
430 | * | ||
431 | ******************************************************************************/ | ||
432 | |||
/* Convert a numeric string to a 32-bit unsigned value (strtoul-alike). */
u32 acpi_ut_strtoul(const char *string, char **terminator, u32 base)
{
	u32 converted = 0;	/* Set to 1 once at least one digit is accepted */
	u32 index;		/* Numeric value of the current character */
	u32 sign;		/* NEGATIVE or POSITIVE */
	const char *string_start;	/* Original start, for the terminator on failure */
	u32 return_value = 0;
	acpi_status status = AE_OK;	/* Set to AE_ERROR on overflow */

	/*
	 * Save the value of the pointer to the buffer's first
	 * character, save the current errno value, and then
	 * skip over any white space in the buffer:
	 */
	string_start = string;
	/* NOTE(review): the explicit '\t' test looks redundant if
	 * ACPI_IS_SPACE already flags TAB (see _acpi_ctype) - confirm */
	while (ACPI_IS_SPACE(*string) || *string == '\t') {
		++string;
	}

	/*
	 * The buffer may contain an optional plus or minus sign.
	 * If it does, then skip over it but remember what is was:
	 */
	if (*string == '-') {
		sign = NEGATIVE;
		++string;
	} else if (*string == '+') {
		++string;
		sign = POSITIVE;
	} else {
		sign = POSITIVE;
	}

	/*
	 * If the input parameter Base is zero, then we need to
	 * determine if it is octal, decimal, or hexadecimal:
	 */
	if (base == 0) {
		if (*string == '0') {
			/* "0x"/"0X" prefix => hex; bare leading "0" => octal */
			if (acpi_ut_to_lower(*(++string)) == 'x') {
				base = 16;
				++string;
			} else {
				base = 8;
			}
		} else {
			base = 10;
		}
	} else if (base < 2 || base > 36) {
		/*
		 * The specified Base parameter is not in the domain of
		 * this function:
		 */
		goto done;
	}

	/*
	 * For octal and hexadecimal bases, skip over the leading
	 * 0 or 0x, if they are present.
	 */
	if (base == 8 && *string == '0') {
		string++;
	}

	if (base == 16 &&
	    *string == '0' && acpi_ut_to_lower(*(++string)) == 'x') {
		string++;
	}

	/*
	 * Main loop: convert the string to an unsigned long:
	 */
	while (*string) {
		if (ACPI_IS_DIGIT(*string)) {
			index = (u32)((u8)*string - '0');
		} else {
			/* Letter digit ('A'..'Z' => 10..35); anything else ends conversion */
			index = (u32)acpi_ut_to_upper(*string);
			if (ACPI_IS_UPPER(index)) {
				index = index - 'A' + 10;
			} else {
				goto done;
			}
		}

		if (index >= base) {
			goto done;
		}

		/*
		 * Check to see if value is out of range:
		 * (would return_value * base + index exceed ACPI_UINT32_MAX?)
		 */

		if (return_value > ((ACPI_UINT32_MAX - (u32)index) / (u32)base)) {
			status = AE_ERROR;
			return_value = 0;	/* reset */
		} else {
			return_value *= base;
			return_value += index;
			converted = 1;
		}

		++string;
	}

      done:
	/*
	 * If appropriate, update the caller's pointer to the next
	 * unconverted character in the buffer.
	 */
	if (terminator) {
		/* Nothing converted => point back at the original start,
		 * matching strtoul's failure contract.
		 * NOTE(review): 'string' cannot be NULL here; that clause
		 * of the test appears to be dead - confirm */
		if (converted == 0 && return_value == 0 && string != NULL) {
			*terminator = (char *)string_start;
		} else {
			*terminator = (char *)string;
		}
	}

	/* Overflow saturates to the maximum value, strtoul-style */
	if (status == AE_ERROR) {
		return_value = ACPI_UINT32_MAX;
	}

	/*
	 * If a minus sign was present, then "the conversion is negated":
	 * (two's-complement negation in 32-bit unsigned arithmetic)
	 */
	if (sign == NEGATIVE) {
		return_value = (ACPI_UINT32_MAX - return_value) + 1;
	}

	return (return_value);
}
563 | |||
564 | /******************************************************************************* | ||
565 | * | ||
566 | * FUNCTION: acpi_ut_to_upper (TOUPPER) | ||
567 | * | ||
568 | * PARAMETERS: c - Character to convert | ||
569 | * | ||
570 | * RETURN: Converted character as an int | ||
571 | * | ||
572 | * DESCRIPTION: Convert character to uppercase | ||
573 | * | ||
574 | ******************************************************************************/ | ||
575 | |||
int acpi_ut_to_upper(int c)
{
	/* ASCII: lowercase letters are exactly 0x20 above their uppercase forms */

	if (ACPI_IS_LOWER(c)) {
		return (c - 0x20);
	}

	return (c);
}
581 | |||
582 | /******************************************************************************* | ||
583 | * | ||
584 | * FUNCTION: acpi_ut_to_lower (TOLOWER) | ||
585 | * | ||
586 | * PARAMETERS: c - Character to convert | ||
587 | * | ||
588 | * RETURN: Converted character as an int | ||
589 | * | ||
590 | * DESCRIPTION: Convert character to lowercase | ||
591 | * | ||
592 | ******************************************************************************/ | ||
593 | |||
int acpi_ut_to_lower(int c)
{
	/* ASCII: uppercase letters are exactly 0x20 below their lowercase forms */

	if (ACPI_IS_UPPER(c)) {
		return (c + 0x20);
	}

	return (c);
}
599 | |||
600 | /******************************************************************************* | ||
601 | * | ||
602 | * FUNCTION: is* functions | ||
603 | * | ||
604 | * DESCRIPTION: is* functions use the ctype table below | ||
605 | * | ||
606 | ******************************************************************************/ | ||
607 | |||
/*
 * Character-classification table backing the ACPI_IS_* macros.
 * Each entry is an OR of flag bits: _ACPI_CN control, _ACPI_SP space,
 * _ACPI_PU punctuation, _ACPI_DI decimal digit, _ACPI_XD hex digit,
 * _ACPI_UP uppercase, _ACPI_LO lowercase. (_ACPI_XS appears only at
 * 0x20 - presumably "blank"; confirm against the flag definitions.)
 * Bytes 0x80-0xFF carry no flags. 257 entries: 0x00-0xFF plus one
 * trailing slot - NOTE(review): likely to tolerate an EOF-style index;
 * confirm against how the ACPI_IS_* macros index this table.
 */
const u8 _acpi_ctype[257] = {
	_ACPI_CN,		/* 0x00     0 NUL */
	_ACPI_CN,		/* 0x01     1 SOH */
	_ACPI_CN,		/* 0x02     2 STX */
	_ACPI_CN,		/* 0x03     3 ETX */
	_ACPI_CN,		/* 0x04     4 EOT */
	_ACPI_CN,		/* 0x05     5 ENQ */
	_ACPI_CN,		/* 0x06     6 ACK */
	_ACPI_CN,		/* 0x07     7 BEL */
	_ACPI_CN,		/* 0x08     8 BS  */
	_ACPI_CN | _ACPI_SP,	/* 0x09     9 TAB */
	_ACPI_CN | _ACPI_SP,	/* 0x0A    10 LF  */
	_ACPI_CN | _ACPI_SP,	/* 0x0B    11 VT  */
	_ACPI_CN | _ACPI_SP,	/* 0x0C    12 FF  */
	_ACPI_CN | _ACPI_SP,	/* 0x0D    13 CR  */
	_ACPI_CN,		/* 0x0E    14 SO  */
	_ACPI_CN,		/* 0x0F    15 SI  */
	_ACPI_CN,		/* 0x10    16 DLE */
	_ACPI_CN,		/* 0x11    17 DC1 */
	_ACPI_CN,		/* 0x12    18 DC2 */
	_ACPI_CN,		/* 0x13    19 DC3 */
	_ACPI_CN,		/* 0x14    20 DC4 */
	_ACPI_CN,		/* 0x15    21 NAK */
	_ACPI_CN,		/* 0x16    22 SYN */
	_ACPI_CN,		/* 0x17    23 ETB */
	_ACPI_CN,		/* 0x18    24 CAN */
	_ACPI_CN,		/* 0x19    25 EM  */
	_ACPI_CN,		/* 0x1A    26 SUB */
	_ACPI_CN,		/* 0x1B    27 ESC */
	_ACPI_CN,		/* 0x1C    28 FS  */
	_ACPI_CN,		/* 0x1D    29 GS  */
	_ACPI_CN,		/* 0x1E    30 RS  */
	_ACPI_CN,		/* 0x1F    31 US  */
	_ACPI_XS | _ACPI_SP,	/* 0x20    32 ' ' */
	_ACPI_PU,		/* 0x21    33 '!' */
	_ACPI_PU,		/* 0x22    34 '"' */
	_ACPI_PU,		/* 0x23    35 '#' */
	_ACPI_PU,		/* 0x24    36 '$' */
	_ACPI_PU,		/* 0x25    37 '%' */
	_ACPI_PU,		/* 0x26    38 '&' */
	_ACPI_PU,		/* 0x27    39 ''' */
	_ACPI_PU,		/* 0x28    40 '(' */
	_ACPI_PU,		/* 0x29    41 ')' */
	_ACPI_PU,		/* 0x2A    42 '*' */
	_ACPI_PU,		/* 0x2B    43 '+' */
	_ACPI_PU,		/* 0x2C    44 ',' */
	_ACPI_PU,		/* 0x2D    45 '-' */
	_ACPI_PU,		/* 0x2E    46 '.' */
	_ACPI_PU,		/* 0x2F    47 '/' */
	_ACPI_XD | _ACPI_DI,	/* 0x30    48 '0' */
	_ACPI_XD | _ACPI_DI,	/* 0x31    49 '1' */
	_ACPI_XD | _ACPI_DI,	/* 0x32    50 '2' */
	_ACPI_XD | _ACPI_DI,	/* 0x33    51 '3' */
	_ACPI_XD | _ACPI_DI,	/* 0x34    52 '4' */
	_ACPI_XD | _ACPI_DI,	/* 0x35    53 '5' */
	_ACPI_XD | _ACPI_DI,	/* 0x36    54 '6' */
	_ACPI_XD | _ACPI_DI,	/* 0x37    55 '7' */
	_ACPI_XD | _ACPI_DI,	/* 0x38    56 '8' */
	_ACPI_XD | _ACPI_DI,	/* 0x39    57 '9' */
	_ACPI_PU,		/* 0x3A    58 ':' */
	_ACPI_PU,		/* 0x3B    59 ';' */
	_ACPI_PU,		/* 0x3C    60 '<' */
	_ACPI_PU,		/* 0x3D    61 '=' */
	_ACPI_PU,		/* 0x3E    62 '>' */
	_ACPI_PU,		/* 0x3F    63 '?' */
	_ACPI_PU,		/* 0x40    64 '@' */
	_ACPI_XD | _ACPI_UP,	/* 0x41    65 'A' */
	_ACPI_XD | _ACPI_UP,	/* 0x42    66 'B' */
	_ACPI_XD | _ACPI_UP,	/* 0x43    67 'C' */
	_ACPI_XD | _ACPI_UP,	/* 0x44    68 'D' */
	_ACPI_XD | _ACPI_UP,	/* 0x45    69 'E' */
	_ACPI_XD | _ACPI_UP,	/* 0x46    70 'F' */
	_ACPI_UP,		/* 0x47    71 'G' */
	_ACPI_UP,		/* 0x48    72 'H' */
	_ACPI_UP,		/* 0x49    73 'I' */
	_ACPI_UP,		/* 0x4A    74 'J' */
	_ACPI_UP,		/* 0x4B    75 'K' */
	_ACPI_UP,		/* 0x4C    76 'L' */
	_ACPI_UP,		/* 0x4D    77 'M' */
	_ACPI_UP,		/* 0x4E    78 'N' */
	_ACPI_UP,		/* 0x4F    79 'O' */
	_ACPI_UP,		/* 0x50    80 'P' */
	_ACPI_UP,		/* 0x51    81 'Q' */
	_ACPI_UP,		/* 0x52    82 'R' */
	_ACPI_UP,		/* 0x53    83 'S' */
	_ACPI_UP,		/* 0x54    84 'T' */
	_ACPI_UP,		/* 0x55    85 'U' */
	_ACPI_UP,		/* 0x56    86 'V' */
	_ACPI_UP,		/* 0x57    87 'W' */
	_ACPI_UP,		/* 0x58    88 'X' */
	_ACPI_UP,		/* 0x59    89 'Y' */
	_ACPI_UP,		/* 0x5A    90 'Z' */
	_ACPI_PU,		/* 0x5B    91 '[' */
	_ACPI_PU,		/* 0x5C    92 '\' */
	_ACPI_PU,		/* 0x5D    93 ']' */
	_ACPI_PU,		/* 0x5E    94 '^' */
	_ACPI_PU,		/* 0x5F    95 '_' */
	_ACPI_PU,		/* 0x60    96 '`' */
	_ACPI_XD | _ACPI_LO,	/* 0x61    97 'a' */
	_ACPI_XD | _ACPI_LO,	/* 0x62    98 'b' */
	_ACPI_XD | _ACPI_LO,	/* 0x63    99 'c' */
	_ACPI_XD | _ACPI_LO,	/* 0x64   100 'd' */
	_ACPI_XD | _ACPI_LO,	/* 0x65   101 'e' */
	_ACPI_XD | _ACPI_LO,	/* 0x66   102 'f' */
	_ACPI_LO,		/* 0x67   103 'g' */
	_ACPI_LO,		/* 0x68   104 'h' */
	_ACPI_LO,		/* 0x69   105 'i' */
	_ACPI_LO,		/* 0x6A   106 'j' */
	_ACPI_LO,		/* 0x6B   107 'k' */
	_ACPI_LO,		/* 0x6C   108 'l' */
	_ACPI_LO,		/* 0x6D   109 'm' */
	_ACPI_LO,		/* 0x6E   110 'n' */
	_ACPI_LO,		/* 0x6F   111 'o' */
	_ACPI_LO,		/* 0x70   112 'p' */
	_ACPI_LO,		/* 0x71   113 'q' */
	_ACPI_LO,		/* 0x72   114 'r' */
	_ACPI_LO,		/* 0x73   115 's' */
	_ACPI_LO,		/* 0x74   116 't' */
	_ACPI_LO,		/* 0x75   117 'u' */
	_ACPI_LO,		/* 0x76   118 'v' */
	_ACPI_LO,		/* 0x77   119 'w' */
	_ACPI_LO,		/* 0x78   120 'x' */
	_ACPI_LO,		/* 0x79   121 'y' */
	_ACPI_LO,		/* 0x7A   122 'z' */
	_ACPI_PU,		/* 0x7B   123 '{' */
	_ACPI_PU,		/* 0x7C   124 '|' */
	_ACPI_PU,		/* 0x7D   125 '}' */
	_ACPI_PU,		/* 0x7E   126 '~' */
	_ACPI_CN,		/* 0x7F   127 DEL */

	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x80 to 0x8F    */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x90 to 0x9F    */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xA0 to 0xAF    */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xB0 to 0xBF    */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xC0 to 0xCF    */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xD0 to 0xDF    */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xE0 to 0xEF    */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xF0 to 0xFF    */
	0			/* 0x100 */
};
748 | |||
749 | #endif /* ACPI_USE_SYSTEM_CLIBRARY */ | ||
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c index e810894149ae..5d95166245ae 100644 --- a/drivers/acpi/acpica/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c | |||
@@ -47,8 +47,9 @@ | |||
47 | 47 | ||
48 | #define _COMPONENT ACPI_UTILITIES | 48 | #define _COMPONENT ACPI_UTILITIES |
49 | ACPI_MODULE_NAME("utdebug") | 49 | ACPI_MODULE_NAME("utdebug") |
50 | |||
50 | #ifdef ACPI_DEBUG_OUTPUT | 51 | #ifdef ACPI_DEBUG_OUTPUT |
51 | static acpi_thread_id acpi_gbl_prev_thread_id; | 52 | static acpi_thread_id acpi_gbl_prev_thread_id = (acpi_thread_id) 0xFFFFFFFF; |
52 | static char *acpi_gbl_fn_entry_str = "----Entry"; | 53 | static char *acpi_gbl_fn_entry_str = "----Entry"; |
53 | static char *acpi_gbl_fn_exit_str = "----Exit-"; | 54 | static char *acpi_gbl_fn_exit_str = "----Exit-"; |
54 | 55 | ||
@@ -109,7 +110,7 @@ void acpi_ut_track_stack_ptr(void) | |||
109 | * RETURN: Updated pointer to the function name | 110 | * RETURN: Updated pointer to the function name |
110 | * | 111 | * |
111 | * DESCRIPTION: Remove the "Acpi" prefix from the function name, if present. | 112 | * DESCRIPTION: Remove the "Acpi" prefix from the function name, if present. |
112 | * This allows compiler macros such as __func__ to be used | 113 | * This allows compiler macros such as __FUNCTION__ to be used |
113 | * with no change to the debug output. | 114 | * with no change to the debug output. |
114 | * | 115 | * |
115 | ******************************************************************************/ | 116 | ******************************************************************************/ |
@@ -222,7 +223,7 @@ ACPI_EXPORT_SYMBOL(acpi_debug_print) | |||
222 | * | 223 | * |
223 | * RETURN: None | 224 | * RETURN: None |
224 | * | 225 | * |
225 | * DESCRIPTION: Print message with no headers. Has same interface as | 226 | * DESCRIPTION: Print message with no headers. Has same interface as |
226 | * debug_print so that the same macros can be used. | 227 | * debug_print so that the same macros can be used. |
227 | * | 228 | * |
228 | ******************************************************************************/ | 229 | ******************************************************************************/ |
@@ -258,7 +259,7 @@ ACPI_EXPORT_SYMBOL(acpi_debug_print_raw) | |||
258 | * | 259 | * |
259 | * RETURN: None | 260 | * RETURN: None |
260 | * | 261 | * |
261 | * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is | 262 | * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is |
262 | * set in debug_level | 263 | * set in debug_level |
263 | * | 264 | * |
264 | ******************************************************************************/ | 265 | ******************************************************************************/ |
@@ -290,7 +291,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_trace) | |||
290 | * | 291 | * |
291 | * RETURN: None | 292 | * RETURN: None |
292 | * | 293 | * |
293 | * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is | 294 | * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is |
294 | * set in debug_level | 295 | * set in debug_level |
295 | * | 296 | * |
296 | ******************************************************************************/ | 297 | ******************************************************************************/ |
@@ -299,6 +300,7 @@ acpi_ut_trace_ptr(u32 line_number, | |||
299 | const char *function_name, | 300 | const char *function_name, |
300 | const char *module_name, u32 component_id, void *pointer) | 301 | const char *module_name, u32 component_id, void *pointer) |
301 | { | 302 | { |
303 | |||
302 | acpi_gbl_nesting_level++; | 304 | acpi_gbl_nesting_level++; |
303 | acpi_ut_track_stack_ptr(); | 305 | acpi_ut_track_stack_ptr(); |
304 | 306 | ||
@@ -319,7 +321,7 @@ acpi_ut_trace_ptr(u32 line_number, | |||
319 | * | 321 | * |
320 | * RETURN: None | 322 | * RETURN: None |
321 | * | 323 | * |
322 | * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is | 324 | * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is |
323 | * set in debug_level | 325 | * set in debug_level |
324 | * | 326 | * |
325 | ******************************************************************************/ | 327 | ******************************************************************************/ |
@@ -350,7 +352,7 @@ acpi_ut_trace_str(u32 line_number, | |||
350 | * | 352 | * |
351 | * RETURN: None | 353 | * RETURN: None |
352 | * | 354 | * |
353 | * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is | 355 | * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is |
354 | * set in debug_level | 356 | * set in debug_level |
355 | * | 357 | * |
356 | ******************************************************************************/ | 358 | ******************************************************************************/ |
@@ -380,7 +382,7 @@ acpi_ut_trace_u32(u32 line_number, | |||
380 | * | 382 | * |
381 | * RETURN: None | 383 | * RETURN: None |
382 | * | 384 | * |
383 | * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is | 385 | * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is |
384 | * set in debug_level | 386 | * set in debug_level |
385 | * | 387 | * |
386 | ******************************************************************************/ | 388 | ******************************************************************************/ |
@@ -412,7 +414,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_exit) | |||
412 | * | 414 | * |
413 | * RETURN: None | 415 | * RETURN: None |
414 | * | 416 | * |
415 | * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is | 417 | * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is |
416 | * set in debug_level. Prints exit status also. | 418 | * set in debug_level. Prints exit status also. |
417 | * | 419 | * |
418 | ******************************************************************************/ | 420 | ******************************************************************************/ |
@@ -453,7 +455,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_status_exit) | |||
453 | * | 455 | * |
454 | * RETURN: None | 456 | * RETURN: None |
455 | * | 457 | * |
456 | * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is | 458 | * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is |
457 | * set in debug_level. Prints exit value also. | 459 | * set in debug_level. Prints exit value also. |
458 | * | 460 | * |
459 | ******************************************************************************/ | 461 | ******************************************************************************/ |
@@ -485,7 +487,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_value_exit) | |||
485 | * | 487 | * |
486 | * RETURN: None | 488 | * RETURN: None |
487 | * | 489 | * |
488 | * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is | 490 | * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is |
489 | * set in debug_level. Prints exit value also. | 491 | * set in debug_level. Prints exit value also. |
490 | * | 492 | * |
491 | ******************************************************************************/ | 493 | ******************************************************************************/ |
@@ -511,7 +513,7 @@ acpi_ut_ptr_exit(u32 line_number, | |||
511 | * PARAMETERS: buffer - Buffer to dump | 513 | * PARAMETERS: buffer - Buffer to dump |
512 | * count - Amount to dump, in bytes | 514 | * count - Amount to dump, in bytes |
513 | * display - BYTE, WORD, DWORD, or QWORD display | 515 | * display - BYTE, WORD, DWORD, or QWORD display |
514 | * component_ID - Caller's component ID | 516 | * offset - Beginning buffer offset (display only) |
515 | * | 517 | * |
516 | * RETURN: None | 518 | * RETURN: None |
517 | * | 519 | * |
@@ -519,7 +521,7 @@ acpi_ut_ptr_exit(u32 line_number, | |||
519 | * | 521 | * |
520 | ******************************************************************************/ | 522 | ******************************************************************************/ |
521 | 523 | ||
522 | void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) | 524 | void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset) |
523 | { | 525 | { |
524 | u32 i = 0; | 526 | u32 i = 0; |
525 | u32 j; | 527 | u32 j; |
@@ -541,7 +543,7 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) | |||
541 | 543 | ||
542 | /* Print current offset */ | 544 | /* Print current offset */ |
543 | 545 | ||
544 | acpi_os_printf("%6.4X: ", i); | 546 | acpi_os_printf("%6.4X: ", (base_offset + i)); |
545 | 547 | ||
546 | /* Print 16 hex chars */ | 548 | /* Print 16 hex chars */ |
547 | 549 | ||
@@ -623,7 +625,7 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) | |||
623 | 625 | ||
624 | /******************************************************************************* | 626 | /******************************************************************************* |
625 | * | 627 | * |
626 | * FUNCTION: acpi_ut_dump_buffer | 628 | * FUNCTION: acpi_ut_debug_dump_buffer |
627 | * | 629 | * |
628 | * PARAMETERS: buffer - Buffer to dump | 630 | * PARAMETERS: buffer - Buffer to dump |
629 | * count - Amount to dump, in bytes | 631 | * count - Amount to dump, in bytes |
@@ -636,7 +638,8 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) | |||
636 | * | 638 | * |
637 | ******************************************************************************/ | 639 | ******************************************************************************/ |
638 | 640 | ||
639 | void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id) | 641 | void |
642 | acpi_ut_debug_dump_buffer(u8 *buffer, u32 count, u32 display, u32 component_id) | ||
640 | { | 643 | { |
641 | 644 | ||
642 | /* Only dump the buffer if tracing is enabled */ | 645 | /* Only dump the buffer if tracing is enabled */ |
@@ -646,5 +649,5 @@ void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id) | |||
646 | return; | 649 | return; |
647 | } | 650 | } |
648 | 651 | ||
649 | acpi_ut_dump_buffer2(buffer, count, display); | 652 | acpi_ut_dump_buffer(buffer, count, display, 0); |
650 | } | 653 | } |
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c index 5d84e1954575..774c3aefbf5d 100644 --- a/drivers/acpi/acpica/utids.c +++ b/drivers/acpi/acpica/utids.c | |||
@@ -67,10 +67,10 @@ ACPI_MODULE_NAME("utids") | |||
67 | ******************************************************************************/ | 67 | ******************************************************************************/ |
68 | acpi_status | 68 | acpi_status |
69 | acpi_ut_execute_HID(struct acpi_namespace_node *device_node, | 69 | acpi_ut_execute_HID(struct acpi_namespace_node *device_node, |
70 | struct acpica_device_id **return_id) | 70 | struct acpi_pnp_device_id **return_id) |
71 | { | 71 | { |
72 | union acpi_operand_object *obj_desc; | 72 | union acpi_operand_object *obj_desc; |
73 | struct acpica_device_id *hid; | 73 | struct acpi_pnp_device_id *hid; |
74 | u32 length; | 74 | u32 length; |
75 | acpi_status status; | 75 | acpi_status status; |
76 | 76 | ||
@@ -94,16 +94,17 @@ acpi_ut_execute_HID(struct acpi_namespace_node *device_node, | |||
94 | /* Allocate a buffer for the HID */ | 94 | /* Allocate a buffer for the HID */ |
95 | 95 | ||
96 | hid = | 96 | hid = |
97 | ACPI_ALLOCATE_ZEROED(sizeof(struct acpica_device_id) + | 97 | ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) + |
98 | (acpi_size) length); | 98 | (acpi_size) length); |
99 | if (!hid) { | 99 | if (!hid) { |
100 | status = AE_NO_MEMORY; | 100 | status = AE_NO_MEMORY; |
101 | goto cleanup; | 101 | goto cleanup; |
102 | } | 102 | } |
103 | 103 | ||
104 | /* Area for the string starts after DEVICE_ID struct */ | 104 | /* Area for the string starts after PNP_DEVICE_ID struct */ |
105 | 105 | ||
106 | hid->string = ACPI_ADD_PTR(char, hid, sizeof(struct acpica_device_id)); | 106 | hid->string = |
107 | ACPI_ADD_PTR(char, hid, sizeof(struct acpi_pnp_device_id)); | ||
107 | 108 | ||
108 | /* Convert EISAID to a string or simply copy existing string */ | 109 | /* Convert EISAID to a string or simply copy existing string */ |
109 | 110 | ||
@@ -126,6 +127,73 @@ cleanup: | |||
126 | 127 | ||
127 | /******************************************************************************* | 128 | /******************************************************************************* |
128 | * | 129 | * |
130 | * FUNCTION: acpi_ut_execute_SUB | ||
131 | * | ||
132 | * PARAMETERS: device_node - Node for the device | ||
133 | * return_id - Where the _SUB is returned | ||
134 | * | ||
135 | * RETURN: Status | ||
136 | * | ||
137 | * DESCRIPTION: Executes the _SUB control method that returns the subsystem | ||
138 | * ID of the device. The _SUB value is always a string containing | ||
139 | * either a valid PNP or ACPI ID. | ||
140 | * | ||
141 | * NOTE: Internal function, no parameter validation | ||
142 | * | ||
143 | ******************************************************************************/ | ||
144 | |||
145 | acpi_status | ||
146 | acpi_ut_execute_SUB(struct acpi_namespace_node *device_node, | ||
147 | struct acpi_pnp_device_id **return_id) | ||
148 | { | ||
149 | union acpi_operand_object *obj_desc; | ||
150 | struct acpi_pnp_device_id *sub; | ||
151 | u32 length; | ||
152 | acpi_status status; | ||
153 | |||
154 | ACPI_FUNCTION_TRACE(ut_execute_SUB); | ||
155 | |||
156 | status = acpi_ut_evaluate_object(device_node, METHOD_NAME__SUB, | ||
157 | ACPI_BTYPE_STRING, &obj_desc); | ||
158 | if (ACPI_FAILURE(status)) { | ||
159 | return_ACPI_STATUS(status); | ||
160 | } | ||
161 | |||
162 | /* Get the size of the String to be returned, includes null terminator */ | ||
163 | |||
164 | length = obj_desc->string.length + 1; | ||
165 | |||
166 | /* Allocate a buffer for the SUB */ | ||
167 | |||
168 | sub = | ||
169 | ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) + | ||
170 | (acpi_size) length); | ||
171 | if (!sub) { | ||
172 | status = AE_NO_MEMORY; | ||
173 | goto cleanup; | ||
174 | } | ||
175 | |||
176 | /* Area for the string starts after PNP_DEVICE_ID struct */ | ||
177 | |||
178 | sub->string = | ||
179 | ACPI_ADD_PTR(char, sub, sizeof(struct acpi_pnp_device_id)); | ||
180 | |||
181 | /* Simply copy existing string */ | ||
182 | |||
183 | ACPI_STRCPY(sub->string, obj_desc->string.pointer); | ||
184 | sub->length = length; | ||
185 | *return_id = sub; | ||
186 | |||
187 | cleanup: | ||
188 | |||
189 | /* On exit, we must delete the return object */ | ||
190 | |||
191 | acpi_ut_remove_reference(obj_desc); | ||
192 | return_ACPI_STATUS(status); | ||
193 | } | ||
194 | |||
195 | /******************************************************************************* | ||
196 | * | ||
129 | * FUNCTION: acpi_ut_execute_UID | 197 | * FUNCTION: acpi_ut_execute_UID |
130 | * | 198 | * |
131 | * PARAMETERS: device_node - Node for the device | 199 | * PARAMETERS: device_node - Node for the device |
@@ -144,10 +212,10 @@ cleanup: | |||
144 | 212 | ||
145 | acpi_status | 213 | acpi_status |
146 | acpi_ut_execute_UID(struct acpi_namespace_node *device_node, | 214 | acpi_ut_execute_UID(struct acpi_namespace_node *device_node, |
147 | struct acpica_device_id **return_id) | 215 | struct acpi_pnp_device_id **return_id) |
148 | { | 216 | { |
149 | union acpi_operand_object *obj_desc; | 217 | union acpi_operand_object *obj_desc; |
150 | struct acpica_device_id *uid; | 218 | struct acpi_pnp_device_id *uid; |
151 | u32 length; | 219 | u32 length; |
152 | acpi_status status; | 220 | acpi_status status; |
153 | 221 | ||
@@ -171,16 +239,17 @@ acpi_ut_execute_UID(struct acpi_namespace_node *device_node, | |||
171 | /* Allocate a buffer for the UID */ | 239 | /* Allocate a buffer for the UID */ |
172 | 240 | ||
173 | uid = | 241 | uid = |
174 | ACPI_ALLOCATE_ZEROED(sizeof(struct acpica_device_id) + | 242 | ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) + |
175 | (acpi_size) length); | 243 | (acpi_size) length); |
176 | if (!uid) { | 244 | if (!uid) { |
177 | status = AE_NO_MEMORY; | 245 | status = AE_NO_MEMORY; |
178 | goto cleanup; | 246 | goto cleanup; |
179 | } | 247 | } |
180 | 248 | ||
181 | /* Area for the string starts after DEVICE_ID struct */ | 249 | /* Area for the string starts after PNP_DEVICE_ID struct */ |
182 | 250 | ||
183 | uid->string = ACPI_ADD_PTR(char, uid, sizeof(struct acpica_device_id)); | 251 | uid->string = |
252 | ACPI_ADD_PTR(char, uid, sizeof(struct acpi_pnp_device_id)); | ||
184 | 253 | ||
185 | /* Convert an Integer to string, or just copy an existing string */ | 254 | /* Convert an Integer to string, or just copy an existing string */ |
186 | 255 | ||
@@ -226,11 +295,11 @@ cleanup: | |||
226 | 295 | ||
227 | acpi_status | 296 | acpi_status |
228 | acpi_ut_execute_CID(struct acpi_namespace_node *device_node, | 297 | acpi_ut_execute_CID(struct acpi_namespace_node *device_node, |
229 | struct acpica_device_id_list **return_cid_list) | 298 | struct acpi_pnp_device_id_list **return_cid_list) |
230 | { | 299 | { |
231 | union acpi_operand_object **cid_objects; | 300 | union acpi_operand_object **cid_objects; |
232 | union acpi_operand_object *obj_desc; | 301 | union acpi_operand_object *obj_desc; |
233 | struct acpica_device_id_list *cid_list; | 302 | struct acpi_pnp_device_id_list *cid_list; |
234 | char *next_id_string; | 303 | char *next_id_string; |
235 | u32 string_area_size; | 304 | u32 string_area_size; |
236 | u32 length; | 305 | u32 length; |
@@ -288,11 +357,12 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node, | |||
288 | /* | 357 | /* |
289 | * Now that we know the length of the CIDs, allocate return buffer: | 358 | * Now that we know the length of the CIDs, allocate return buffer: |
290 | * 1) Size of the base structure + | 359 | * 1) Size of the base structure + |
291 | * 2) Size of the CID DEVICE_ID array + | 360 | * 2) Size of the CID PNP_DEVICE_ID array + |
292 | * 3) Size of the actual CID strings | 361 | * 3) Size of the actual CID strings |
293 | */ | 362 | */ |
294 | cid_list_size = sizeof(struct acpica_device_id_list) + | 363 | cid_list_size = sizeof(struct acpi_pnp_device_id_list) + |
295 | ((count - 1) * sizeof(struct acpica_device_id)) + string_area_size; | 364 | ((count - 1) * sizeof(struct acpi_pnp_device_id)) + |
365 | string_area_size; | ||
296 | 366 | ||
297 | cid_list = ACPI_ALLOCATE_ZEROED(cid_list_size); | 367 | cid_list = ACPI_ALLOCATE_ZEROED(cid_list_size); |
298 | if (!cid_list) { | 368 | if (!cid_list) { |
@@ -300,10 +370,10 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node, | |||
300 | goto cleanup; | 370 | goto cleanup; |
301 | } | 371 | } |
302 | 372 | ||
303 | /* Area for CID strings starts after the CID DEVICE_ID array */ | 373 | /* Area for CID strings starts after the CID PNP_DEVICE_ID array */ |
304 | 374 | ||
305 | next_id_string = ACPI_CAST_PTR(char, cid_list->ids) + | 375 | next_id_string = ACPI_CAST_PTR(char, cid_list->ids) + |
306 | ((acpi_size) count * sizeof(struct acpica_device_id)); | 376 | ((acpi_size) count * sizeof(struct acpi_pnp_device_id)); |
307 | 377 | ||
308 | /* Copy/convert the CIDs to the return buffer */ | 378 | /* Copy/convert the CIDs to the return buffer */ |
309 | 379 | ||
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c index d88a8aaab2a6..49563674833a 100644 --- a/drivers/acpi/acpica/utmath.c +++ b/drivers/acpi/acpica/utmath.c | |||
@@ -81,7 +81,7 @@ typedef union uint64_overlay { | |||
81 | * RETURN: Status (Checks for divide-by-zero) | 81 | * RETURN: Status (Checks for divide-by-zero) |
82 | * | 82 | * |
83 | * DESCRIPTION: Perform a short (maximum 64 bits divided by 32 bits) | 83 | * DESCRIPTION: Perform a short (maximum 64 bits divided by 32 bits) |
84 | * divide and modulo. The result is a 64-bit quotient and a | 84 | * divide and modulo. The result is a 64-bit quotient and a |
85 | * 32-bit remainder. | 85 | * 32-bit remainder. |
86 | * | 86 | * |
87 | ******************************************************************************/ | 87 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c index 33c6cf7ff467..9286a69eb9aa 100644 --- a/drivers/acpi/acpica/utmisc.c +++ b/drivers/acpi/acpica/utmisc.c | |||
@@ -41,8 +41,6 @@ | |||
41 | * POSSIBILITY OF SUCH DAMAGES. | 41 | * POSSIBILITY OF SUCH DAMAGES. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/module.h> | ||
45 | |||
46 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
47 | #include "accommon.h" | 45 | #include "accommon.h" |
48 | #include "acnamesp.h" | 46 | #include "acnamesp.h" |
@@ -201,8 +199,8 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id) | |||
201 | */ | 199 | */ |
202 | acpi_gbl_owner_id_mask[j] |= (1 << k); | 200 | acpi_gbl_owner_id_mask[j] |= (1 << k); |
203 | 201 | ||
204 | acpi_gbl_last_owner_id_index = (u8) j; | 202 | acpi_gbl_last_owner_id_index = (u8)j; |
205 | acpi_gbl_next_owner_id_offset = (u8) (k + 1); | 203 | acpi_gbl_next_owner_id_offset = (u8)(k + 1); |
206 | 204 | ||
207 | /* | 205 | /* |
208 | * Construct encoded ID from the index and bit position | 206 | * Construct encoded ID from the index and bit position |
@@ -252,7 +250,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id) | |||
252 | * control method or unloading a table. Either way, we would | 250 | * control method or unloading a table. Either way, we would |
253 | * ignore any error anyway. | 251 | * ignore any error anyway. |
254 | * | 252 | * |
255 | * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255 | 253 | * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255 |
256 | * | 254 | * |
257 | ******************************************************************************/ | 255 | ******************************************************************************/ |
258 | 256 | ||
@@ -339,6 +337,73 @@ void acpi_ut_strupr(char *src_string) | |||
339 | return; | 337 | return; |
340 | } | 338 | } |
341 | 339 | ||
340 | #ifdef ACPI_ASL_COMPILER | ||
341 | /******************************************************************************* | ||
342 | * | ||
343 | * FUNCTION: acpi_ut_strlwr (strlwr) | ||
344 | * | ||
345 | * PARAMETERS: src_string - The source string to convert | ||
346 | * | ||
347 | * RETURN: None | ||
348 | * | ||
349 | * DESCRIPTION: Convert string to lowercase | ||
350 | * | ||
351 | * NOTE: This is not a POSIX function, so it appears here, not in utclib.c | ||
352 | * | ||
353 | ******************************************************************************/ | ||
354 | |||
355 | void acpi_ut_strlwr(char *src_string) | ||
356 | { | ||
357 | char *string; | ||
358 | |||
359 | ACPI_FUNCTION_ENTRY(); | ||
360 | |||
361 | if (!src_string) { | ||
362 | return; | ||
363 | } | ||
364 | |||
365 | /* Walk entire string, lowercasing the letters */ | ||
366 | |||
367 | for (string = src_string; *string; string++) { | ||
368 | *string = (char)ACPI_TOLOWER(*string); | ||
369 | } | ||
370 | |||
371 | return; | ||
372 | } | ||
373 | |||
374 | /****************************************************************************** | ||
375 | * | ||
376 | * FUNCTION: acpi_ut_stricmp | ||
377 | * | ||
378 | * PARAMETERS: string1 - first string to compare | ||
379 | * string2 - second string to compare | ||
380 | * | ||
381 | * RETURN: int that signifies string relationship. Zero means strings | ||
382 | * are equal. | ||
383 | * | ||
384 | * DESCRIPTION: Implementation of the non-ANSI stricmp function (compare | ||
385 | * strings with no case sensitivity) | ||
386 | * | ||
387 | ******************************************************************************/ | ||
388 | |||
389 | int acpi_ut_stricmp(char *string1, char *string2) | ||
390 | { | ||
391 | int c1; | ||
392 | int c2; | ||
393 | |||
394 | do { | ||
395 | c1 = tolower((int)*string1); | ||
396 | c2 = tolower((int)*string2); | ||
397 | |||
398 | string1++; | ||
399 | string2++; | ||
400 | } | ||
401 | while ((c1 == c2) && (c1)); | ||
402 | |||
403 | return (c1 - c2); | ||
404 | } | ||
405 | #endif | ||
406 | |||
342 | /******************************************************************************* | 407 | /******************************************************************************* |
343 | * | 408 | * |
344 | * FUNCTION: acpi_ut_print_string | 409 | * FUNCTION: acpi_ut_print_string |
@@ -469,8 +534,8 @@ u32 acpi_ut_dword_byte_swap(u32 value) | |||
469 | * RETURN: None | 534 | * RETURN: None |
470 | * | 535 | * |
471 | * DESCRIPTION: Set the global integer bit width based upon the revision | 536 | * DESCRIPTION: Set the global integer bit width based upon the revision |
472 | * of the DSDT. For Revision 1 and 0, Integers are 32 bits. | 537 | * of the DSDT. For Revision 1 and 0, Integers are 32 bits. |
473 | * For Revision 2 and above, Integers are 64 bits. Yes, this | 538 | * For Revision 2 and above, Integers are 64 bits. Yes, this |
474 | * makes a difference. | 539 | * makes a difference. |
475 | * | 540 | * |
476 | ******************************************************************************/ | 541 | ******************************************************************************/ |
@@ -606,7 +671,7 @@ u8 acpi_ut_valid_acpi_char(char character, u32 position) | |||
606 | * | 671 | * |
607 | * RETURN: TRUE if the name is valid, FALSE otherwise | 672 | * RETURN: TRUE if the name is valid, FALSE otherwise |
608 | * | 673 | * |
609 | * DESCRIPTION: Check for a valid ACPI name. Each character must be one of: | 674 | * DESCRIPTION: Check for a valid ACPI name. Each character must be one of: |
610 | * 1) Upper case alpha | 675 | * 1) Upper case alpha |
611 | * 2) numeric | 676 | * 2) numeric |
612 | * 3) underscore | 677 | * 3) underscore |
@@ -638,29 +703,59 @@ u8 acpi_ut_valid_acpi_name(u32 name) | |||
638 | * RETURN: Repaired version of the name | 703 | * RETURN: Repaired version of the name |
639 | * | 704 | * |
640 | * DESCRIPTION: Repair an ACPI name: Change invalid characters to '*' and | 705 | * DESCRIPTION: Repair an ACPI name: Change invalid characters to '*' and |
641 | * return the new name. | 706 | * return the new name. NOTE: the Name parameter must reside in |
707 | * read/write memory, cannot be a const. | ||
708 | * | ||
709 | * An ACPI Name must consist of valid ACPI characters. We will repair the name | ||
710 | * if necessary because we don't want to abort because of this, but we want | ||
711 | * all namespace names to be printable. A warning message is appropriate. | ||
712 | * | ||
713 | * This issue came up because there are in fact machines that exhibit | ||
714 | * this problem, and we want to be able to enable ACPI support for them, | ||
715 | * even though there are a few bad names. | ||
642 | * | 716 | * |
643 | ******************************************************************************/ | 717 | ******************************************************************************/ |
644 | 718 | ||
645 | acpi_name acpi_ut_repair_name(char *name) | 719 | void acpi_ut_repair_name(char *name) |
646 | { | 720 | { |
647 | u32 i; | 721 | u32 i; |
648 | char new_name[ACPI_NAME_SIZE]; | 722 | u8 found_bad_char = FALSE; |
723 | u32 original_name; | ||
724 | |||
725 | ACPI_FUNCTION_NAME(ut_repair_name); | ||
726 | |||
727 | ACPI_MOVE_NAME(&original_name, name); | ||
728 | |||
729 | /* Check each character in the name */ | ||
649 | 730 | ||
650 | for (i = 0; i < ACPI_NAME_SIZE; i++) { | 731 | for (i = 0; i < ACPI_NAME_SIZE; i++) { |
651 | new_name[i] = name[i]; | 732 | if (acpi_ut_valid_acpi_char(name[i], i)) { |
733 | continue; | ||
734 | } | ||
652 | 735 | ||
653 | /* | 736 | /* |
654 | * Replace a bad character with something printable, yet technically | 737 | * Replace a bad character with something printable, yet technically |
655 | * still invalid. This prevents any collisions with existing "good" | 738 | * still invalid. This prevents any collisions with existing "good" |
656 | * names in the namespace. | 739 | * names in the namespace. |
657 | */ | 740 | */ |
658 | if (!acpi_ut_valid_acpi_char(name[i], i)) { | 741 | name[i] = '*'; |
659 | new_name[i] = '*'; | 742 | found_bad_char = TRUE; |
660 | } | ||
661 | } | 743 | } |
662 | 744 | ||
663 | return (*(u32 *) new_name); | 745 | if (found_bad_char) { |
746 | |||
747 | /* Report warning only if in strict mode or debug mode */ | ||
748 | |||
749 | if (!acpi_gbl_enable_interpreter_slack) { | ||
750 | ACPI_WARNING((AE_INFO, | ||
751 | "Found bad character(s) in name, repaired: [%4.4s]\n", | ||
752 | name)); | ||
753 | } else { | ||
754 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
755 | "Found bad character(s) in name, repaired: [%4.4s]\n", | ||
756 | name)); | ||
757 | } | ||
758 | } | ||
664 | } | 759 | } |
665 | 760 | ||
666 | /******************************************************************************* | 761 | /******************************************************************************* |
@@ -681,7 +776,7 @@ acpi_name acpi_ut_repair_name(char *name) | |||
681 | * | 776 | * |
682 | ******************************************************************************/ | 777 | ******************************************************************************/ |
683 | 778 | ||
684 | acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer) | 779 | acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer) |
685 | { | 780 | { |
686 | u32 this_digit = 0; | 781 | u32 this_digit = 0; |
687 | u64 return_value = 0; | 782 | u64 return_value = 0; |
@@ -754,14 +849,14 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer) | |||
754 | 849 | ||
755 | /* Convert ASCII 0-9 to Decimal value */ | 850 | /* Convert ASCII 0-9 to Decimal value */ |
756 | 851 | ||
757 | this_digit = ((u8) * string) - '0'; | 852 | this_digit = ((u8)*string) - '0'; |
758 | } else if (base == 10) { | 853 | } else if (base == 10) { |
759 | 854 | ||
760 | /* Digit is out of range; possible in to_integer case only */ | 855 | /* Digit is out of range; possible in to_integer case only */ |
761 | 856 | ||
762 | term = 1; | 857 | term = 1; |
763 | } else { | 858 | } else { |
764 | this_digit = (u8) ACPI_TOUPPER(*string); | 859 | this_digit = (u8)ACPI_TOUPPER(*string); |
765 | if (ACPI_IS_XDIGIT((char)this_digit)) { | 860 | if (ACPI_IS_XDIGIT((char)this_digit)) { |
766 | 861 | ||
767 | /* Convert ASCII Hex char to value */ | 862 | /* Convert ASCII Hex char to value */ |
@@ -788,8 +883,9 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer) | |||
788 | 883 | ||
789 | valid_digits++; | 884 | valid_digits++; |
790 | 885 | ||
791 | if (sign_of0x && ((valid_digits > 16) | 886 | if (sign_of0x |
792 | || ((valid_digits > 8) && mode32))) { | 887 | && ((valid_digits > 16) |
888 | || ((valid_digits > 8) && mode32))) { | ||
793 | /* | 889 | /* |
794 | * This is to_integer operation case. | 890 | * This is to_integer operation case. |
795 | * No any restrictions for string-to-integer conversion, | 891 | * No any restrictions for string-to-integer conversion, |
@@ -800,7 +896,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer) | |||
800 | 896 | ||
801 | /* Divide the digit into the correct position */ | 897 | /* Divide the digit into the correct position */ |
802 | 898 | ||
803 | (void)acpi_ut_short_divide((dividend - (u64) this_digit), | 899 | (void)acpi_ut_short_divide((dividend - (u64)this_digit), |
804 | base, "ient, NULL); | 900 | base, "ient, NULL); |
805 | 901 | ||
806 | if (return_value > quotient) { | 902 | if (return_value > quotient) { |
@@ -890,7 +986,7 @@ acpi_ut_create_update_state_and_push(union acpi_operand_object *object, | |||
890 | ******************************************************************************/ | 986 | ******************************************************************************/ |
891 | 987 | ||
892 | acpi_status | 988 | acpi_status |
893 | acpi_ut_walk_package_tree(union acpi_operand_object * source_object, | 989 | acpi_ut_walk_package_tree(union acpi_operand_object *source_object, |
894 | void *target_object, | 990 | void *target_object, |
895 | acpi_pkg_callback walk_callback, void *context) | 991 | acpi_pkg_callback walk_callback, void *context) |
896 | { | 992 | { |
@@ -917,10 +1013,10 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object, | |||
917 | 1013 | ||
918 | /* | 1014 | /* |
919 | * Check for: | 1015 | * Check for: |
920 | * 1) An uninitialized package element. It is completely | 1016 | * 1) An uninitialized package element. It is completely |
921 | * legal to declare a package and leave it uninitialized | 1017 | * legal to declare a package and leave it uninitialized |
922 | * 2) Not an internal object - can be a namespace node instead | 1018 | * 2) Not an internal object - can be a namespace node instead |
923 | * 3) Any type other than a package. Packages are handled in else | 1019 | * 3) Any type other than a package. Packages are handled in else |
924 | * case below. | 1020 | * case below. |
925 | */ | 1021 | */ |
926 | if ((!this_source_obj) || | 1022 | if ((!this_source_obj) || |
@@ -939,7 +1035,7 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object, | |||
939 | state->pkg.source_object->package.count) { | 1035 | state->pkg.source_object->package.count) { |
940 | /* | 1036 | /* |
941 | * We've handled all of the objects at this level, This means | 1037 | * We've handled all of the objects at this level, This means |
942 | * that we have just completed a package. That package may | 1038 | * that we have just completed a package. That package may |
943 | * have contained one or more packages itself. | 1039 | * have contained one or more packages itself. |
944 | * | 1040 | * |
945 | * Delete this state and pop the previous state (package). | 1041 | * Delete this state and pop the previous state (package). |
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c index 296baa676bc5..5ccf57c0d87e 100644 --- a/drivers/acpi/acpica/utmutex.c +++ b/drivers/acpi/acpica/utmutex.c | |||
@@ -193,6 +193,8 @@ static void acpi_ut_delete_mutex(acpi_mutex_handle mutex_id) | |||
193 | 193 | ||
194 | acpi_gbl_mutex_info[mutex_id].mutex = NULL; | 194 | acpi_gbl_mutex_info[mutex_id].mutex = NULL; |
195 | acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED; | 195 | acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED; |
196 | |||
197 | return_VOID; | ||
196 | } | 198 | } |
197 | 199 | ||
198 | /******************************************************************************* | 200 | /******************************************************************************* |
@@ -226,9 +228,9 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id) | |||
226 | /* | 228 | /* |
227 | * Mutex debug code, for internal debugging only. | 229 | * Mutex debug code, for internal debugging only. |
228 | * | 230 | * |
229 | * Deadlock prevention. Check if this thread owns any mutexes of value | 231 | * Deadlock prevention. Check if this thread owns any mutexes of value |
230 | * greater than or equal to this one. If so, the thread has violated | 232 | * greater than or equal to this one. If so, the thread has violated |
231 | * the mutex ordering rule. This indicates a coding error somewhere in | 233 | * the mutex ordering rule. This indicates a coding error somewhere in |
232 | * the ACPI subsystem code. | 234 | * the ACPI subsystem code. |
233 | */ | 235 | */ |
234 | for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) { | 236 | for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) { |
@@ -319,9 +321,9 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id) | |||
319 | /* | 321 | /* |
320 | * Mutex debug code, for internal debugging only. | 322 | * Mutex debug code, for internal debugging only. |
321 | * | 323 | * |
322 | * Deadlock prevention. Check if this thread owns any mutexes of value | 324 | * Deadlock prevention. Check if this thread owns any mutexes of value |
323 | * greater than this one. If so, the thread has violated the mutex | 325 | * greater than this one. If so, the thread has violated the mutex |
324 | * ordering rule. This indicates a coding error somewhere in | 326 | * ordering rule. This indicates a coding error somewhere in |
325 | * the ACPI subsystem code. | 327 | * the ACPI subsystem code. |
326 | */ | 328 | */ |
327 | for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) { | 329 | for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) { |
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c index 655f0799a391..5c52ca78f6fa 100644 --- a/drivers/acpi/acpica/utobject.c +++ b/drivers/acpi/acpica/utobject.c | |||
@@ -77,7 +77,7 @@ acpi_ut_get_element_length(u8 object_type, | |||
77 | * | 77 | * |
78 | * NOTE: We always allocate the worst-case object descriptor because | 78 | * NOTE: We always allocate the worst-case object descriptor because |
79 | * these objects are cached, and we want them to be | 79 | * these objects are cached, and we want them to be |
80 | * one-size-satisifies-any-request. This in itself may not be | 80 | * one-size-satisifies-any-request. This in itself may not be |
81 | * the most memory efficient, but the efficiency of the object | 81 | * the most memory efficient, but the efficiency of the object |
82 | * cache should more than make up for this! | 82 | * cache should more than make up for this! |
83 | * | 83 | * |
@@ -370,9 +370,9 @@ u8 acpi_ut_valid_internal_object(void *object) | |||
370 | * line_number - Caller's line number (for error output) | 370 | * line_number - Caller's line number (for error output) |
371 | * component_id - Caller's component ID (for error output) | 371 | * component_id - Caller's component ID (for error output) |
372 | * | 372 | * |
373 | * RETURN: Pointer to newly allocated object descriptor. Null on error | 373 | * RETURN: Pointer to newly allocated object descriptor. Null on error |
374 | * | 374 | * |
375 | * DESCRIPTION: Allocate a new object descriptor. Gracefully handle | 375 | * DESCRIPTION: Allocate a new object descriptor. Gracefully handle |
376 | * error conditions. | 376 | * error conditions. |
377 | * | 377 | * |
378 | ******************************************************************************/ | 378 | ******************************************************************************/ |
@@ -554,7 +554,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object, | |||
554 | 554 | ||
555 | /* | 555 | /* |
556 | * Account for the space required by the object rounded up to the next | 556 | * Account for the space required by the object rounded up to the next |
557 | * multiple of the machine word size. This keeps each object aligned | 557 | * multiple of the machine word size. This keeps each object aligned |
558 | * on a machine word boundary. (preventing alignment faults on some | 558 | * on a machine word boundary. (preventing alignment faults on some |
559 | * machines.) | 559 | * machines.) |
560 | */ | 560 | */ |
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c index a1c988260073..cee0473ba813 100644 --- a/drivers/acpi/acpica/utstate.c +++ b/drivers/acpi/acpica/utstate.c | |||
@@ -147,7 +147,7 @@ union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state | |||
147 | * | 147 | * |
148 | * RETURN: The new state object. NULL on failure. | 148 | * RETURN: The new state object. NULL on failure. |
149 | * | 149 | * |
150 | * DESCRIPTION: Create a generic state object. Attempt to obtain one from | 150 | * DESCRIPTION: Create a generic state object. Attempt to obtain one from |
151 | * the global state cache; If none available, create a new one. | 151 | * the global state cache; If none available, create a new one. |
152 | * | 152 | * |
153 | ******************************************************************************/ | 153 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c new file mode 100644 index 000000000000..a424a9e3fea4 --- /dev/null +++ b/drivers/acpi/acpica/uttrack.c | |||
@@ -0,0 +1,692 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: uttrack - Memory allocation tracking routines (debug only) | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2012, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | /* | ||
45 | * These procedures are used for tracking memory leaks in the subsystem, and | ||
46 | * they get compiled out when the ACPI_DBG_TRACK_ALLOCATIONS is not set. | ||
47 | * | ||
48 | * Each memory allocation is tracked via a doubly linked list. Each | ||
49 | * element contains the caller's component, module name, function name, and | ||
50 | * line number. acpi_ut_allocate and acpi_ut_allocate_zeroed call | ||
51 | * acpi_ut_track_allocation to add an element to the list; deletion | ||
52 | * occurs in the body of acpi_ut_free. | ||
53 | */ | ||
54 | |||
55 | #include <acpi/acpi.h> | ||
56 | #include "accommon.h" | ||
57 | |||
58 | #ifdef ACPI_DBG_TRACK_ALLOCATIONS | ||
59 | |||
60 | #define _COMPONENT ACPI_UTILITIES | ||
61 | ACPI_MODULE_NAME("uttrack") | ||
62 | |||
63 | /* Local prototypes */ | ||
64 | static struct acpi_debug_mem_block *acpi_ut_find_allocation(struct | ||
65 | acpi_debug_mem_block | ||
66 | *allocation); | ||
67 | |||
68 | static acpi_status | ||
69 | acpi_ut_track_allocation(struct acpi_debug_mem_block *address, | ||
70 | acpi_size size, | ||
71 | u8 alloc_type, | ||
72 | u32 component, const char *module, u32 line); | ||
73 | |||
74 | static acpi_status | ||
75 | acpi_ut_remove_allocation(struct acpi_debug_mem_block *address, | ||
76 | u32 component, const char *module, u32 line); | ||
77 | |||
78 | /******************************************************************************* | ||
79 | * | ||
80 | * FUNCTION: acpi_ut_create_list | ||
81 | * | ||
82 | * PARAMETERS: cache_name - Ascii name for the cache | ||
83 | * object_size - Size of each cached object | ||
84 | * return_cache - Where the new cache object is returned | ||
85 | * | ||
86 | * RETURN: Status | ||
87 | * | ||
88 | * DESCRIPTION: Create a local memory list for tracking purposed | ||
89 | * | ||
90 | ******************************************************************************/ | ||
91 | |||
92 | acpi_status | ||
93 | acpi_ut_create_list(char *list_name, | ||
94 | u16 object_size, struct acpi_memory_list **return_cache) | ||
95 | { | ||
96 | struct acpi_memory_list *cache; | ||
97 | |||
98 | cache = acpi_os_allocate(sizeof(struct acpi_memory_list)); | ||
99 | if (!cache) { | ||
100 | return (AE_NO_MEMORY); | ||
101 | } | ||
102 | |||
103 | ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list)); | ||
104 | |||
105 | cache->list_name = list_name; | ||
106 | cache->object_size = object_size; | ||
107 | |||
108 | *return_cache = cache; | ||
109 | return (AE_OK); | ||
110 | } | ||
111 | |||
112 | /******************************************************************************* | ||
113 | * | ||
114 | * FUNCTION: acpi_ut_allocate_and_track | ||
115 | * | ||
116 | * PARAMETERS: size - Size of the allocation | ||
117 | * component - Component type of caller | ||
118 | * module - Source file name of caller | ||
119 | * line - Line number of caller | ||
120 | * | ||
121 | * RETURN: Address of the allocated memory on success, NULL on failure. | ||
122 | * | ||
123 | * DESCRIPTION: The subsystem's equivalent of malloc. | ||
124 | * | ||
125 | ******************************************************************************/ | ||
126 | |||
127 | void *acpi_ut_allocate_and_track(acpi_size size, | ||
128 | u32 component, const char *module, u32 line) | ||
129 | { | ||
130 | struct acpi_debug_mem_block *allocation; | ||
131 | acpi_status status; | ||
132 | |||
133 | allocation = | ||
134 | acpi_ut_allocate(size + sizeof(struct acpi_debug_mem_header), | ||
135 | component, module, line); | ||
136 | if (!allocation) { | ||
137 | return (NULL); | ||
138 | } | ||
139 | |||
140 | status = acpi_ut_track_allocation(allocation, size, | ||
141 | ACPI_MEM_MALLOC, component, module, | ||
142 | line); | ||
143 | if (ACPI_FAILURE(status)) { | ||
144 | acpi_os_free(allocation); | ||
145 | return (NULL); | ||
146 | } | ||
147 | |||
148 | acpi_gbl_global_list->total_allocated++; | ||
149 | acpi_gbl_global_list->total_size += (u32)size; | ||
150 | acpi_gbl_global_list->current_total_size += (u32)size; | ||
151 | if (acpi_gbl_global_list->current_total_size > | ||
152 | acpi_gbl_global_list->max_occupied) { | ||
153 | acpi_gbl_global_list->max_occupied = | ||
154 | acpi_gbl_global_list->current_total_size; | ||
155 | } | ||
156 | |||
157 | return ((void *)&allocation->user_space); | ||
158 | } | ||
159 | |||
160 | /******************************************************************************* | ||
161 | * | ||
162 | * FUNCTION: acpi_ut_allocate_zeroed_and_track | ||
163 | * | ||
164 | * PARAMETERS: size - Size of the allocation | ||
165 | * component - Component type of caller | ||
166 | * module - Source file name of caller | ||
167 | * line - Line number of caller | ||
168 | * | ||
169 | * RETURN: Address of the allocated memory on success, NULL on failure. | ||
170 | * | ||
171 | * DESCRIPTION: Subsystem equivalent of calloc. | ||
172 | * | ||
173 | ******************************************************************************/ | ||
174 | |||
175 | void *acpi_ut_allocate_zeroed_and_track(acpi_size size, | ||
176 | u32 component, | ||
177 | const char *module, u32 line) | ||
178 | { | ||
179 | struct acpi_debug_mem_block *allocation; | ||
180 | acpi_status status; | ||
181 | |||
182 | allocation = | ||
183 | acpi_ut_allocate_zeroed(size + sizeof(struct acpi_debug_mem_header), | ||
184 | component, module, line); | ||
185 | if (!allocation) { | ||
186 | |||
187 | /* Report allocation error */ | ||
188 | |||
189 | ACPI_ERROR((module, line, | ||
190 | "Could not allocate size %u", (u32)size)); | ||
191 | return (NULL); | ||
192 | } | ||
193 | |||
194 | status = acpi_ut_track_allocation(allocation, size, | ||
195 | ACPI_MEM_CALLOC, component, module, | ||
196 | line); | ||
197 | if (ACPI_FAILURE(status)) { | ||
198 | acpi_os_free(allocation); | ||
199 | return (NULL); | ||
200 | } | ||
201 | |||
202 | acpi_gbl_global_list->total_allocated++; | ||
203 | acpi_gbl_global_list->total_size += (u32)size; | ||
204 | acpi_gbl_global_list->current_total_size += (u32)size; | ||
205 | if (acpi_gbl_global_list->current_total_size > | ||
206 | acpi_gbl_global_list->max_occupied) { | ||
207 | acpi_gbl_global_list->max_occupied = | ||
208 | acpi_gbl_global_list->current_total_size; | ||
209 | } | ||
210 | |||
211 | return ((void *)&allocation->user_space); | ||
212 | } | ||
213 | |||
214 | /******************************************************************************* | ||
215 | * | ||
216 | * FUNCTION: acpi_ut_free_and_track | ||
217 | * | ||
218 | * PARAMETERS: allocation - Address of the memory to deallocate | ||
219 | * component - Component type of caller | ||
220 | * module - Source file name of caller | ||
221 | * line - Line number of caller | ||
222 | * | ||
223 | * RETURN: None | ||
224 | * | ||
225 | * DESCRIPTION: Frees the memory at Allocation | ||
226 | * | ||
227 | ******************************************************************************/ | ||
228 | |||
229 | void | ||
230 | acpi_ut_free_and_track(void *allocation, | ||
231 | u32 component, const char *module, u32 line) | ||
232 | { | ||
233 | struct acpi_debug_mem_block *debug_block; | ||
234 | acpi_status status; | ||
235 | |||
236 | ACPI_FUNCTION_TRACE_PTR(ut_free, allocation); | ||
237 | |||
238 | if (NULL == allocation) { | ||
239 | ACPI_ERROR((module, line, "Attempt to delete a NULL address")); | ||
240 | |||
241 | return_VOID; | ||
242 | } | ||
243 | |||
244 | debug_block = ACPI_CAST_PTR(struct acpi_debug_mem_block, | ||
245 | (((char *)allocation) - | ||
246 | sizeof(struct acpi_debug_mem_header))); | ||
247 | |||
248 | acpi_gbl_global_list->total_freed++; | ||
249 | acpi_gbl_global_list->current_total_size -= debug_block->size; | ||
250 | |||
251 | status = acpi_ut_remove_allocation(debug_block, | ||
252 | component, module, line); | ||
253 | if (ACPI_FAILURE(status)) { | ||
254 | ACPI_EXCEPTION((AE_INFO, status, "Could not free memory")); | ||
255 | } | ||
256 | |||
257 | acpi_os_free(debug_block); | ||
258 | ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p freed\n", allocation)); | ||
259 | return_VOID; | ||
260 | } | ||
261 | |||
/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_find_allocation
 *
 * PARAMETERS:  allocation          - Address of allocated memory
 *
 * RETURN:      Three cases:
 *              1) List is empty, NULL is returned.
 *              2) Element was found. Returns Allocation parameter.
 *              3) Element was not found. Returns position where it should be
 *                  inserted into the list.
 *
 * DESCRIPTION: Searches for an element in the global allocation tracking list.
 *              If the element is not found, returns the location within the
 *              list where the element should be inserted.
 *
 *              Note: The list is ordered by larger-to-smaller addresses.
 *
 *              This global list is used to detect memory leaks in ACPICA as
 *              well as other issues such as an attempt to release the same
 *              internal object more than once. Although expensive as far
 *              as cpu time, this list is much more helpful for finding these
 *              types of issues than using memory leak detectors outside of
 *              the ACPICA code.
 *
 ******************************************************************************/

static struct acpi_debug_mem_block *acpi_ut_find_allocation(struct
							    acpi_debug_mem_block
							    *allocation)
{
	struct acpi_debug_mem_block *element;

	element = acpi_gbl_global_list->list_head;
	if (!element) {
		/* Case 1: list is empty */
		return (NULL);
	}

	/*
	 * Search for the address.
	 *
	 * Note: List is ordered by larger-to-smaller addresses, on the
	 * assumption that a new allocation usually has a larger address
	 * than previous allocations.
	 *
	 * NOTE(review): relational comparison of pointers into distinct
	 * allocations is unspecified by ISO C; this code assumes a flat
	 * address space - confirm for exotic platforms.
	 */
	while (element > allocation) {

		/* Check for end-of-list */

		if (!element->next) {
			/* Case 3: ran off the tail; insert after last element */
			return (element);
		}

		element = element->next;
	}

	if (element == allocation) {
		/* Case 2: exact match - allocation is already in the list */
		return (element);
	}

	/* Case 3: insert position is the previous (larger-address) element */
	return (element->previous);
}
324 | |||
/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_track_allocation
 *
 * PARAMETERS:  allocation          - Address of allocated memory
 *              size                - Size of the allocation
 *              alloc_type          - MEM_MALLOC or MEM_CALLOC
 *              component           - Component type of caller
 *              module              - Source file name of caller
 *              line                - Line number of caller
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Inserts an element into the global allocation tracking list.
 *              The list is kept sorted by descending address; the insert
 *              position is located by acpi_ut_find_allocation.
 *
 ******************************************************************************/

static acpi_status
acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation,
			 acpi_size size,
			 u8 alloc_type,
			 u32 component, const char *module, u32 line)
{
	struct acpi_memory_list *mem_list;
	struct acpi_debug_mem_block *element;
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ut_track_allocation, allocation);

	if (acpi_gbl_disable_mem_tracking) {
		/* Tracking globally disabled; report success to the caller */
		return_ACPI_STATUS(AE_OK);
	}

	mem_list = acpi_gbl_global_list;
	status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * Search the global list for this address to make sure it is not
	 * already present. This will catch several kinds of problems.
	 */
	element = acpi_ut_find_allocation(allocation);
	if (element == allocation) {
		/*
		 * NOTE(review): on a duplicate we bail out without inserting,
		 * but still return the mutex-release status (normally AE_OK),
		 * so the caller proceeds as if tracking succeeded.
		 */
		ACPI_ERROR((AE_INFO,
			    "UtTrackAllocation: Allocation (%p) already present in global list!",
			    allocation));
		goto unlock_and_exit;
	}

	/* Fill in the instance data */

	allocation->size = (u32)size;
	allocation->alloc_type = alloc_type;
	allocation->component = component;
	allocation->line = line;

	/* Module name is bounded and explicitly NUL-terminated */

	ACPI_STRNCPY(allocation->module, module, ACPI_MAX_MODULE_NAME);
	allocation->module[ACPI_MAX_MODULE_NAME - 1] = 0;

	if (!element) {

		/* Insert at list head */

		if (mem_list->list_head) {
			((struct acpi_debug_mem_block *)(mem_list->list_head))->
			    previous = allocation;
		}

		allocation->next = mem_list->list_head;
		allocation->previous = NULL;

		mem_list->list_head = allocation;
	} else {
		/* Insert after element (keeps descending-address order) */

		allocation->next = element->next;
		allocation->previous = element;

		if (element->next) {
			(element->next)->previous = allocation;
		}

		element->next = allocation;
	}

      unlock_and_exit:
	status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
	return_ACPI_STATUS(status);
}
416 | |||
/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_remove_allocation
 *
 * PARAMETERS:  allocation          - Address of allocated memory
 *              component           - Component type of caller
 *              module              - Source file name of caller
 *              line                - Line number of caller
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Deletes an element from the global allocation tracking list.
 *              The freed user area is poisoned to help catch use-after-free.
 *
 ******************************************************************************/

static acpi_status
acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation,
			  u32 component, const char *module, u32 line)
{
	struct acpi_memory_list *mem_list;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ut_remove_allocation);

	if (acpi_gbl_disable_mem_tracking) {
		/* Tracking globally disabled; nothing to remove */
		return_ACPI_STATUS(AE_OK);
	}

	mem_list = acpi_gbl_global_list;
	/*
	 * NOTE(review): this empty-list check happens before the memory
	 * mutex is acquired - racy if track/remove can run concurrently;
	 * confirm callers serialize or accept the window.
	 */
	if (NULL == mem_list->list_head) {

		/* No allocations! */

		ACPI_ERROR((module, line,
			    "Empty allocation list, nothing to free!"));

		return_ACPI_STATUS(AE_OK);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Unlink (no check that Allocation is actually on the list) */

	if (allocation->previous) {
		(allocation->previous)->next = allocation->next;
	} else {
		mem_list->list_head = allocation->next;
	}

	if (allocation->next) {
		(allocation->next)->previous = allocation->previous;
	}

	/* Mark the segment as deleted */

	/* 0xEA is the "freed memory" poison pattern for the user area */
	ACPI_MEMSET(&allocation->user_space, 0xEA, allocation->size);

	ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Freeing size 0%X\n",
			  allocation->size));

	status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
	return_ACPI_STATUS(status);
}
483 | |||
484 | /******************************************************************************* | ||
485 | * | ||
486 | * FUNCTION: acpi_ut_dump_allocation_info | ||
487 | * | ||
488 | * PARAMETERS: None | ||
489 | * | ||
490 | * RETURN: None | ||
491 | * | ||
492 | * DESCRIPTION: Print some info about the outstanding allocations. | ||
493 | * | ||
494 | ******************************************************************************/ | ||
495 | |||
496 | void acpi_ut_dump_allocation_info(void) | ||
497 | { | ||
498 | /* | ||
499 | struct acpi_memory_list *mem_list; | ||
500 | */ | ||
501 | |||
502 | ACPI_FUNCTION_TRACE(ut_dump_allocation_info); | ||
503 | |||
504 | /* | ||
505 | ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, | ||
506 | ("%30s: %4d (%3d Kb)\n", "Current allocations", | ||
507 | mem_list->current_count, | ||
508 | ROUND_UP_TO_1K (mem_list->current_size))); | ||
509 | |||
510 | ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, | ||
511 | ("%30s: %4d (%3d Kb)\n", "Max concurrent allocations", | ||
512 | mem_list->max_concurrent_count, | ||
513 | ROUND_UP_TO_1K (mem_list->max_concurrent_size))); | ||
514 | |||
515 | ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, | ||
516 | ("%30s: %4d (%3d Kb)\n", "Total (all) internal objects", | ||
517 | running_object_count, | ||
518 | ROUND_UP_TO_1K (running_object_size))); | ||
519 | |||
520 | ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, | ||
521 | ("%30s: %4d (%3d Kb)\n", "Total (all) allocations", | ||
522 | running_alloc_count, | ||
523 | ROUND_UP_TO_1K (running_alloc_size))); | ||
524 | |||
525 | ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, | ||
526 | ("%30s: %4d (%3d Kb)\n", "Current Nodes", | ||
527 | acpi_gbl_current_node_count, | ||
528 | ROUND_UP_TO_1K (acpi_gbl_current_node_size))); | ||
529 | |||
530 | ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, | ||
531 | ("%30s: %4d (%3d Kb)\n", "Max Nodes", | ||
532 | acpi_gbl_max_concurrent_node_count, | ||
533 | ROUND_UP_TO_1K ((acpi_gbl_max_concurrent_node_count * | ||
534 | sizeof (struct acpi_namespace_node))))); | ||
535 | */ | ||
536 | return_VOID; | ||
537 | } | ||
538 | |||
/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_dump_allocations
 *
 * PARAMETERS:  component           - Component(s) to dump info for.
 *              module              - Module to dump info for. NULL means all.
 *
 * RETURN:      None
 *
 * DESCRIPTION: Print a list of all outstanding allocations. For each element
 *              that matches the Component/Module filter, the descriptor type
 *              is validated against the element length before any
 *              type-specific fields are dereferenced.
 *
 ******************************************************************************/

void acpi_ut_dump_allocations(u32 component, const char *module)
{
	struct acpi_debug_mem_block *element;
	union acpi_descriptor *descriptor;
	u32 num_outstanding = 0;
	u8 descriptor_type;

	ACPI_FUNCTION_TRACE(ut_dump_allocations);

	if (acpi_gbl_disable_mem_tracking) {
		/* Tracking disabled: the list was never populated */
		return_VOID;
	}

	/*
	 * Walk the allocation list.
	 */
	if (ACPI_FAILURE(acpi_ut_acquire_mutex(ACPI_MTX_MEMORY))) {
		return_VOID;
	}

	element = acpi_gbl_global_list->list_head;
	while (element) {
		/* Filter: component bitmask AND (all modules or exact match) */
		if ((element->component & component) &&
		    ((module == NULL)
		     || (0 == ACPI_STRCMP(module, element->module)))) {
			descriptor =
			    ACPI_CAST_PTR(union acpi_descriptor,
					  &element->user_space);

			if (element->size <
			    sizeof(struct acpi_common_descriptor)) {
				/* Too small to hold even the common header */
				acpi_os_printf("%p Length 0x%04X %9.9s-%u "
					       "[Not a Descriptor - too small]\n",
					       descriptor, element->size,
					       element->module, element->line);
			} else {
				/* Ignore allocated objects that are in a cache */

				if (ACPI_GET_DESCRIPTOR_TYPE(descriptor) !=
				    ACPI_DESC_TYPE_CACHED) {
					acpi_os_printf
					    ("%p Length 0x%04X %9.9s-%u [%s] ",
					     descriptor, element->size,
					     element->module, element->line,
					     acpi_ut_get_descriptor_name
					     (descriptor));

					/* Validate the descriptor type using Type field and length */

					descriptor_type = 0;	/* Not a valid descriptor type */

					switch (ACPI_GET_DESCRIPTOR_TYPE
						(descriptor)) {
					case ACPI_DESC_TYPE_OPERAND:
						if (element->size ==
						    sizeof(union
							   acpi_operand_object))
						{
							descriptor_type =
							    ACPI_DESC_TYPE_OPERAND;
						}
						break;

					case ACPI_DESC_TYPE_PARSER:
						if (element->size ==
						    sizeof(union
							   acpi_parse_object)) {
							descriptor_type =
							    ACPI_DESC_TYPE_PARSER;
						}
						break;

					case ACPI_DESC_TYPE_NAMED:
						if (element->size ==
						    sizeof(struct
							   acpi_namespace_node))
						{
							descriptor_type =
							    ACPI_DESC_TYPE_NAMED;
						}
						break;

					default:
						/* Unknown type: no extra info printed below */
						break;
					}

					/* Display additional info for the major descriptor types */

					switch (descriptor_type) {
					case ACPI_DESC_TYPE_OPERAND:
						acpi_os_printf
						    ("%12.12s RefCount 0x%04X\n",
						     acpi_ut_get_type_name
						     (descriptor->object.common.
						      type),
						     descriptor->object.common.
						     reference_count);
						break;

					case ACPI_DESC_TYPE_PARSER:
						acpi_os_printf
						    ("AmlOpcode 0x%04hX\n",
						     descriptor->op.asl.
						     aml_opcode);
						break;

					case ACPI_DESC_TYPE_NAMED:
						acpi_os_printf("%4.4s\n",
							       acpi_ut_get_node_name
							       (&descriptor->
								node));
						break;

					default:
						acpi_os_printf("\n");
						break;
					}
				}
			}

			num_outstanding++;
		}

		element = element->next;
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_MEMORY);

	/* Print summary */

	if (!num_outstanding) {
		ACPI_INFO((AE_INFO, "No outstanding allocations"));
	} else {
		ACPI_ERROR((AE_INFO, "%u(0x%X) Outstanding allocations",
			    num_outstanding, num_outstanding));
	}

	return_VOID;
}
691 | |||
692 | #endif /* ACPI_DBG_TRACK_ALLOCATIONS */ | ||
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c index b09632b4f5b3..390db0ca5e2e 100644 --- a/drivers/acpi/acpica/utxface.c +++ b/drivers/acpi/acpica/utxface.c | |||
@@ -147,7 +147,7 @@ ACPI_EXPORT_SYMBOL(acpi_subsystem_status) | |||
147 | * RETURN: status - the status of the call | 147 | * RETURN: status - the status of the call |
148 | * | 148 | * |
149 | * DESCRIPTION: This function is called to get information about the current | 149 | * DESCRIPTION: This function is called to get information about the current |
150 | * state of the ACPI subsystem. It will return system information | 150 | * state of the ACPI subsystem. It will return system information |
151 | * in the out_buffer. | 151 | * in the out_buffer. |
152 | * | 152 | * |
153 | * If the function fails an appropriate status will be returned | 153 | * If the function fails an appropriate status will be returned |
@@ -238,7 +238,7 @@ acpi_install_initialization_handler(acpi_init_handler handler, u32 function) | |||
238 | } | 238 | } |
239 | 239 | ||
240 | acpi_gbl_init_handler = handler; | 240 | acpi_gbl_init_handler = handler; |
241 | return AE_OK; | 241 | return (AE_OK); |
242 | } | 242 | } |
243 | 243 | ||
244 | ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler) | 244 | ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler) |
@@ -263,6 +263,7 @@ acpi_status acpi_purge_cached_objects(void) | |||
263 | (void)acpi_os_purge_cache(acpi_gbl_operand_cache); | 263 | (void)acpi_os_purge_cache(acpi_gbl_operand_cache); |
264 | (void)acpi_os_purge_cache(acpi_gbl_ps_node_cache); | 264 | (void)acpi_os_purge_cache(acpi_gbl_ps_node_cache); |
265 | (void)acpi_os_purge_cache(acpi_gbl_ps_node_ext_cache); | 265 | (void)acpi_os_purge_cache(acpi_gbl_ps_node_ext_cache); |
266 | |||
266 | return_ACPI_STATUS(AE_OK); | 267 | return_ACPI_STATUS(AE_OK); |
267 | } | 268 | } |
268 | 269 | ||
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c index 6d63cc39b9ae..d4d3826140d8 100644 --- a/drivers/acpi/acpica/utxferror.c +++ b/drivers/acpi/acpica/utxferror.c | |||
@@ -408,7 +408,7 @@ acpi_ut_namespace_error(const char *module_name, | |||
408 | 408 | ||
409 | ACPI_MOVE_32_TO_32(&bad_name, | 409 | ACPI_MOVE_32_TO_32(&bad_name, |
410 | ACPI_CAST_PTR(u32, internal_name)); | 410 | ACPI_CAST_PTR(u32, internal_name)); |
411 | acpi_os_printf("[0x%4.4X] (NON-ASCII)", bad_name); | 411 | acpi_os_printf("[0x%.8X] (NON-ASCII)", bad_name); |
412 | } else { | 412 | } else { |
413 | /* Convert path to external format */ | 413 | /* Convert path to external format */ |
414 | 414 | ||
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 1599566ed1fe..da93c003e953 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c | |||
@@ -994,7 +994,7 @@ err: | |||
994 | return rc; | 994 | return rc; |
995 | } | 995 | } |
996 | 996 | ||
997 | static int __devexit ghes_remove(struct platform_device *ghes_dev) | 997 | static int ghes_remove(struct platform_device *ghes_dev) |
998 | { | 998 | { |
999 | struct ghes *ghes; | 999 | struct ghes *ghes; |
1000 | struct acpi_hest_generic *generic; | 1000 | struct acpi_hest_generic *generic; |
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 45e3e1759fb8..7efaeaa53b88 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/dmi.h> | 34 | #include <linux/dmi.h> |
35 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
36 | #include <linux/suspend.h> | 36 | #include <linux/suspend.h> |
37 | #include <asm/unaligned.h> | ||
37 | 38 | ||
38 | #ifdef CONFIG_ACPI_PROCFS_POWER | 39 | #ifdef CONFIG_ACPI_PROCFS_POWER |
39 | #include <linux/proc_fs.h> | 40 | #include <linux/proc_fs.h> |
@@ -95,6 +96,18 @@ enum { | |||
95 | ACPI_BATTERY_ALARM_PRESENT, | 96 | ACPI_BATTERY_ALARM_PRESENT, |
96 | ACPI_BATTERY_XINFO_PRESENT, | 97 | ACPI_BATTERY_XINFO_PRESENT, |
97 | ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, | 98 | ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, |
99 | /* On Lenovo Thinkpad models from 2010 and 2011, the power unit | ||
100 | switches between mWh and mAh depending on whether the system | ||
101 | is running on battery or not. When mAh is the unit, most | ||
102 | reported values are incorrect and need to be adjusted by | ||
103 | 10000/design_voltage. Verified on x201, t410, t410s, and x220. | ||
104 | Pre-2010 and 2012 models appear to always report in mWh and | ||
105 | are thus unaffected (tested with t42, t61, t500, x200, x300, | ||
106 | and x230). Also, in mid-2012 Lenovo issued a BIOS update for | ||
107 | the 2011 models that fixes the issue (tested on x220 with a | ||
108 | post-1.29 BIOS), but as of Nov. 2012, no such update is | ||
109 | available for the 2010 models. */ | ||
110 | ACPI_BATTERY_QUIRK_THINKPAD_MAH, | ||
98 | }; | 111 | }; |
99 | 112 | ||
100 | struct acpi_battery { | 113 | struct acpi_battery { |
@@ -438,6 +451,21 @@ static int acpi_battery_get_info(struct acpi_battery *battery) | |||
438 | kfree(buffer.pointer); | 451 | kfree(buffer.pointer); |
439 | if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)) | 452 | if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)) |
440 | battery->full_charge_capacity = battery->design_capacity; | 453 | battery->full_charge_capacity = battery->design_capacity; |
454 | if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) && | ||
455 | battery->power_unit && battery->design_voltage) { | ||
456 | battery->design_capacity = battery->design_capacity * | ||
457 | 10000 / battery->design_voltage; | ||
458 | battery->full_charge_capacity = battery->full_charge_capacity * | ||
459 | 10000 / battery->design_voltage; | ||
460 | battery->design_capacity_warning = | ||
461 | battery->design_capacity_warning * | ||
462 | 10000 / battery->design_voltage; | ||
463 | /* Curiously, design_capacity_low, unlike the rest of them, | ||
464 | is correct. */ | ||
465 | /* capacity_granularity_* equal 1 on the systems tested, so | ||
466 | it's impossible to tell if they would need an adjustment | ||
467 | or not if their values were higher. */ | ||
468 | } | ||
441 | return result; | 469 | return result; |
442 | } | 470 | } |
443 | 471 | ||
@@ -486,6 +514,11 @@ static int acpi_battery_get_state(struct acpi_battery *battery) | |||
486 | && battery->capacity_now >= 0 && battery->capacity_now <= 100) | 514 | && battery->capacity_now >= 0 && battery->capacity_now <= 100) |
487 | battery->capacity_now = (battery->capacity_now * | 515 | battery->capacity_now = (battery->capacity_now * |
488 | battery->full_charge_capacity) / 100; | 516 | battery->full_charge_capacity) / 100; |
517 | if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) && | ||
518 | battery->power_unit && battery->design_voltage) { | ||
519 | battery->capacity_now = battery->capacity_now * | ||
520 | 10000 / battery->design_voltage; | ||
521 | } | ||
489 | return result; | 522 | return result; |
490 | } | 523 | } |
491 | 524 | ||
@@ -595,6 +628,24 @@ static void sysfs_remove_battery(struct acpi_battery *battery) | |||
595 | mutex_unlock(&battery->sysfs_lock); | 628 | mutex_unlock(&battery->sysfs_lock); |
596 | } | 629 | } |
597 | 630 | ||
631 | static void find_battery(const struct dmi_header *dm, void *private) | ||
632 | { | ||
633 | struct acpi_battery *battery = (struct acpi_battery *)private; | ||
634 | /* Note: the hardcoded offsets below have been extracted from | ||
635 | the source code of dmidecode. */ | ||
636 | if (dm->type == DMI_ENTRY_PORTABLE_BATTERY && dm->length >= 8) { | ||
637 | const u8 *dmi_data = (const u8 *)(dm + 1); | ||
638 | int dmi_capacity = get_unaligned((const u16 *)(dmi_data + 6)); | ||
639 | if (dm->length >= 18) | ||
640 | dmi_capacity *= dmi_data[17]; | ||
641 | if (battery->design_capacity * battery->design_voltage / 1000 | ||
642 | != dmi_capacity && | ||
643 | battery->design_capacity * 10 == dmi_capacity) | ||
644 | set_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, | ||
645 | &battery->flags); | ||
646 | } | ||
647 | } | ||
648 | |||
598 | /* | 649 | /* |
599 | * According to the ACPI spec, some kinds of primary batteries can | 650 | * According to the ACPI spec, some kinds of primary batteries can |
600 | * report percentage battery remaining capacity directly to OS. | 651 | * report percentage battery remaining capacity directly to OS. |
@@ -620,6 +671,32 @@ static void acpi_battery_quirks(struct acpi_battery *battery) | |||
620 | battery->capacity_now = (battery->capacity_now * | 671 | battery->capacity_now = (battery->capacity_now * |
621 | battery->full_charge_capacity) / 100; | 672 | battery->full_charge_capacity) / 100; |
622 | } | 673 | } |
674 | |||
675 | if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags)) | ||
676 | return ; | ||
677 | |||
678 | if (battery->power_unit && dmi_name_in_vendors("LENOVO")) { | ||
679 | const char *s; | ||
680 | s = dmi_get_system_info(DMI_PRODUCT_VERSION); | ||
681 | if (s && !strnicmp(s, "ThinkPad", 8)) { | ||
682 | dmi_walk(find_battery, battery); | ||
683 | if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, | ||
684 | &battery->flags) && | ||
685 | battery->design_voltage) { | ||
686 | battery->design_capacity = | ||
687 | battery->design_capacity * | ||
688 | 10000 / battery->design_voltage; | ||
689 | battery->full_charge_capacity = | ||
690 | battery->full_charge_capacity * | ||
691 | 10000 / battery->design_voltage; | ||
692 | battery->design_capacity_warning = | ||
693 | battery->design_capacity_warning * | ||
694 | 10000 / battery->design_voltage; | ||
695 | battery->capacity_now = battery->capacity_now * | ||
696 | 10000 / battery->design_voltage; | ||
697 | } | ||
698 | } | ||
699 | } | ||
623 | } | 700 | } |
624 | 701 | ||
625 | static int acpi_battery_update(struct acpi_battery *battery) | 702 | static int acpi_battery_update(struct acpi_battery *battery) |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index d59175efc428..1f0d457ecbcf 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
@@ -257,7 +257,15 @@ static int __acpi_bus_get_power(struct acpi_device *device, int *state) | |||
257 | } | 257 | } |
258 | 258 | ||
259 | 259 | ||
260 | static int __acpi_bus_set_power(struct acpi_device *device, int state) | 260 | /** |
261 | * acpi_device_set_power - Set power state of an ACPI device. | ||
262 | * @device: Device to set the power state of. | ||
263 | * @state: New power state to set. | ||
264 | * | ||
265 | * Callers must ensure that the device is power manageable before using this | ||
266 | * function. | ||
267 | */ | ||
268 | int acpi_device_set_power(struct acpi_device *device, int state) | ||
261 | { | 269 | { |
262 | int result = 0; | 270 | int result = 0; |
263 | acpi_status status = AE_OK; | 271 | acpi_status status = AE_OK; |
@@ -298,6 +306,12 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state) | |||
298 | * a lower-powered state. | 306 | * a lower-powered state. |
299 | */ | 307 | */ |
300 | if (state < device->power.state) { | 308 | if (state < device->power.state) { |
309 | if (device->power.state >= ACPI_STATE_D3_HOT && | ||
310 | state != ACPI_STATE_D0) { | ||
311 | printk(KERN_WARNING PREFIX | ||
312 | "Cannot transition to non-D0 state from D3\n"); | ||
313 | return -ENODEV; | ||
314 | } | ||
301 | if (device->power.flags.power_resources) { | 315 | if (device->power.flags.power_resources) { |
302 | result = acpi_power_transition(device, state); | 316 | result = acpi_power_transition(device, state); |
303 | if (result) | 317 | if (result) |
@@ -341,6 +355,7 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state) | |||
341 | 355 | ||
342 | return result; | 356 | return result; |
343 | } | 357 | } |
358 | EXPORT_SYMBOL(acpi_device_set_power); | ||
344 | 359 | ||
345 | 360 | ||
346 | int acpi_bus_set_power(acpi_handle handle, int state) | 361 | int acpi_bus_set_power(acpi_handle handle, int state) |
@@ -359,7 +374,7 @@ int acpi_bus_set_power(acpi_handle handle, int state) | |||
359 | return -ENODEV; | 374 | return -ENODEV; |
360 | } | 375 | } |
361 | 376 | ||
362 | return __acpi_bus_set_power(device, state); | 377 | return acpi_device_set_power(device, state); |
363 | } | 378 | } |
364 | EXPORT_SYMBOL(acpi_bus_set_power); | 379 | EXPORT_SYMBOL(acpi_bus_set_power); |
365 | 380 | ||
@@ -402,7 +417,7 @@ int acpi_bus_update_power(acpi_handle handle, int *state_p) | |||
402 | if (result) | 417 | if (result) |
403 | return result; | 418 | return result; |
404 | 419 | ||
405 | result = __acpi_bus_set_power(device, state); | 420 | result = acpi_device_set_power(device, state); |
406 | if (!result && state_p) | 421 | if (!result && state_p) |
407 | *state_p = state; | 422 | *state_p = state; |
408 | 423 | ||
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c index 1f9f7d7d7bc5..811910b50b75 100644 --- a/drivers/acpi/container.c +++ b/drivers/acpi/container.c | |||
@@ -92,17 +92,24 @@ static int is_device_present(acpi_handle handle) | |||
92 | return ((sta & ACPI_STA_DEVICE_PRESENT) == ACPI_STA_DEVICE_PRESENT); | 92 | return ((sta & ACPI_STA_DEVICE_PRESENT) == ACPI_STA_DEVICE_PRESENT); |
93 | } | 93 | } |
94 | 94 | ||
95 | static bool is_container_device(const char *hid) | ||
96 | { | ||
97 | const struct acpi_device_id *container_id; | ||
98 | |||
99 | for (container_id = container_device_ids; | ||
100 | container_id->id[0]; container_id++) { | ||
101 | if (!strcmp((char *)container_id->id, hid)) | ||
102 | return true; | ||
103 | } | ||
104 | |||
105 | return false; | ||
106 | } | ||
107 | |||
95 | /*******************************************************************/ | 108 | /*******************************************************************/ |
96 | static int acpi_container_add(struct acpi_device *device) | 109 | static int acpi_container_add(struct acpi_device *device) |
97 | { | 110 | { |
98 | struct acpi_container *container; | 111 | struct acpi_container *container; |
99 | 112 | ||
100 | |||
101 | if (!device) { | ||
102 | printk(KERN_ERR PREFIX "device is NULL\n"); | ||
103 | return -EINVAL; | ||
104 | } | ||
105 | |||
106 | container = kzalloc(sizeof(struct acpi_container), GFP_KERNEL); | 113 | container = kzalloc(sizeof(struct acpi_container), GFP_KERNEL); |
107 | if (!container) | 114 | if (!container) |
108 | return -ENOMEM; | 115 | return -ENOMEM; |
@@ -164,7 +171,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context) | |||
164 | case ACPI_NOTIFY_BUS_CHECK: | 171 | case ACPI_NOTIFY_BUS_CHECK: |
165 | /* Fall through */ | 172 | /* Fall through */ |
166 | case ACPI_NOTIFY_DEVICE_CHECK: | 173 | case ACPI_NOTIFY_DEVICE_CHECK: |
167 | printk(KERN_WARNING "Container driver received %s event\n", | 174 | pr_debug("Container driver received %s event\n", |
168 | (type == ACPI_NOTIFY_BUS_CHECK) ? | 175 | (type == ACPI_NOTIFY_BUS_CHECK) ? |
169 | "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"); | 176 | "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"); |
170 | 177 | ||
@@ -185,7 +192,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context) | |||
185 | 192 | ||
186 | result = container_device_add(&device, handle); | 193 | result = container_device_add(&device, handle); |
187 | if (result) { | 194 | if (result) { |
188 | printk(KERN_WARNING "Failed to add container\n"); | 195 | acpi_handle_warn(handle, "Failed to add container\n"); |
189 | break; | 196 | break; |
190 | } | 197 | } |
191 | 198 | ||
@@ -232,10 +239,8 @@ container_walk_namespace_cb(acpi_handle handle, | |||
232 | goto end; | 239 | goto end; |
233 | } | 240 | } |
234 | 241 | ||
235 | if (strcmp(hid, "ACPI0004") && strcmp(hid, "PNP0A05") && | 242 | if (!is_container_device(hid)) |
236 | strcmp(hid, "PNP0A06")) { | ||
237 | goto end; | 243 | goto end; |
238 | } | ||
239 | 244 | ||
240 | switch (*action) { | 245 | switch (*action) { |
241 | case INSTALL_NOTIFY_HANDLER: | 246 | case INSTALL_NOTIFY_HANDLER: |
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c new file mode 100644 index 000000000000..f09dc987cf17 --- /dev/null +++ b/drivers/acpi/device_pm.c | |||
@@ -0,0 +1,668 @@ | |||
1 | /* | ||
2 | * drivers/acpi/device_pm.c - ACPI device power management routines. | ||
3 | * | ||
4 | * Copyright (C) 2012, Intel Corp. | ||
5 | * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> | ||
6 | * | ||
7 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as published | ||
11 | * by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
21 | * | ||
22 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
23 | */ | ||
24 | |||
25 | #include <linux/device.h> | ||
26 | #include <linux/export.h> | ||
27 | #include <linux/mutex.h> | ||
28 | #include <linux/pm_qos.h> | ||
29 | #include <linux/pm_runtime.h> | ||
30 | |||
31 | #include <acpi/acpi.h> | ||
32 | #include <acpi/acpi_bus.h> | ||
33 | |||
34 | static DEFINE_MUTEX(acpi_pm_notifier_lock); | ||
35 | |||
36 | /** | ||
37 | * acpi_add_pm_notifier - Register PM notifier for given ACPI device. | ||
38 | * @adev: ACPI device to add the notifier for. | ||
39 | * @context: Context information to pass to the notifier routine. | ||
40 | * | ||
41 | * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of | ||
42 | * PM wakeup events. For example, wakeup events may be generated for bridges | ||
43 | * if one of the devices below the bridge is signaling wakeup, even if the | ||
44 | * bridge itself doesn't have a wakeup GPE associated with it. | ||
45 | */ | ||
46 | acpi_status acpi_add_pm_notifier(struct acpi_device *adev, | ||
47 | acpi_notify_handler handler, void *context) | ||
48 | { | ||
49 | acpi_status status = AE_ALREADY_EXISTS; | ||
50 | |||
51 | mutex_lock(&acpi_pm_notifier_lock); | ||
52 | |||
53 | if (adev->wakeup.flags.notifier_present) | ||
54 | goto out; | ||
55 | |||
56 | status = acpi_install_notify_handler(adev->handle, | ||
57 | ACPI_SYSTEM_NOTIFY, | ||
58 | handler, context); | ||
59 | if (ACPI_FAILURE(status)) | ||
60 | goto out; | ||
61 | |||
62 | adev->wakeup.flags.notifier_present = true; | ||
63 | |||
64 | out: | ||
65 | mutex_unlock(&acpi_pm_notifier_lock); | ||
66 | return status; | ||
67 | } | ||
68 | |||
69 | /** | ||
70 | * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device. | ||
71 | * @adev: ACPI device to remove the notifier from. | ||
72 | */ | ||
73 | acpi_status acpi_remove_pm_notifier(struct acpi_device *adev, | ||
74 | acpi_notify_handler handler) | ||
75 | { | ||
76 | acpi_status status = AE_BAD_PARAMETER; | ||
77 | |||
78 | mutex_lock(&acpi_pm_notifier_lock); | ||
79 | |||
80 | if (!adev->wakeup.flags.notifier_present) | ||
81 | goto out; | ||
82 | |||
83 | status = acpi_remove_notify_handler(adev->handle, | ||
84 | ACPI_SYSTEM_NOTIFY, | ||
85 | handler); | ||
86 | if (ACPI_FAILURE(status)) | ||
87 | goto out; | ||
88 | |||
89 | adev->wakeup.flags.notifier_present = false; | ||
90 | |||
91 | out: | ||
92 | mutex_unlock(&acpi_pm_notifier_lock); | ||
93 | return status; | ||
94 | } | ||
95 | |||
96 | /** | ||
97 | * acpi_device_power_state - Get preferred power state of ACPI device. | ||
98 | * @dev: Device whose preferred target power state to return. | ||
99 | * @adev: ACPI device node corresponding to @dev. | ||
100 | * @target_state: System state to match the resultant device state. | ||
101 | * @d_max_in: Deepest low-power state to take into consideration. | ||
102 | * @d_min_p: Location to store the upper limit of the allowed states range. | ||
103 | * Return value: Preferred power state of the device on success, -ENODEV | ||
104 | * (if there's no 'struct acpi_device' for @dev) or -EINVAL on failure | ||
105 | * | ||
106 | * Find the lowest power (highest number) ACPI device power state that the | ||
107 | * device can be in while the system is in the state represented by | ||
108 | * @target_state. If @d_min_p is set, the highest power (lowest number) device | ||
109 | * power state that @dev can be in for the given system sleep state is stored | ||
110 | * at the location pointed to by it. | ||
111 | * | ||
112 | * Callers must ensure that @dev and @adev are valid pointers and that @adev | ||
113 | * actually corresponds to @dev before using this function. | ||
114 | */ | ||
115 | int acpi_device_power_state(struct device *dev, struct acpi_device *adev, | ||
116 | u32 target_state, int d_max_in, int *d_min_p) | ||
117 | { | ||
118 | char acpi_method[] = "_SxD"; | ||
119 | unsigned long long d_min, d_max; | ||
120 | bool wakeup = false; | ||
121 | |||
122 | if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3) | ||
123 | return -EINVAL; | ||
124 | |||
125 | if (d_max_in > ACPI_STATE_D3_HOT) { | ||
126 | enum pm_qos_flags_status stat; | ||
127 | |||
128 | stat = dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF); | ||
129 | if (stat == PM_QOS_FLAGS_ALL) | ||
130 | d_max_in = ACPI_STATE_D3_HOT; | ||
131 | } | ||
132 | |||
133 | acpi_method[2] = '0' + target_state; | ||
134 | /* | ||
135 | * If the sleep state is S0, the lowest limit from ACPI is D3, | ||
136 | * but if the device has _S0W, we will use the value from _S0W | ||
137 | * as the lowest limit from ACPI. Finally, we will constrain | ||
138 | * the lowest limit with the specified one. | ||
139 | */ | ||
140 | d_min = ACPI_STATE_D0; | ||
141 | d_max = ACPI_STATE_D3; | ||
142 | |||
143 | /* | ||
144 | * If present, _SxD methods return the minimum D-state (highest power | ||
145 | * state) we can use for the corresponding S-states. Otherwise, the | ||
146 | * minimum D-state is D0 (ACPI 3.x). | ||
147 | * | ||
148 | * NOTE: We rely on acpi_evaluate_integer() not clobbering the integer | ||
149 | * provided -- that's our fault recovery, we ignore retval. | ||
150 | */ | ||
151 | if (target_state > ACPI_STATE_S0) { | ||
152 | acpi_evaluate_integer(adev->handle, acpi_method, NULL, &d_min); | ||
153 | wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid | ||
154 | && adev->wakeup.sleep_state >= target_state; | ||
155 | } else if (dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) != | ||
156 | PM_QOS_FLAGS_NONE) { | ||
157 | wakeup = adev->wakeup.flags.valid; | ||
158 | } | ||
159 | |||
160 | /* | ||
161 | * If _PRW says we can wake up the system from the target sleep state, | ||
162 | * the D-state returned by _SxD is sufficient for that (we assume a | ||
163 | * wakeup-aware driver if wake is set). Still, if _SxW exists | ||
164 | * (ACPI 3.x), it should return the maximum (lowest power) D-state that | ||
165 | * can wake the system. _S0W may be valid, too. | ||
166 | */ | ||
167 | if (wakeup) { | ||
168 | acpi_status status; | ||
169 | |||
170 | acpi_method[3] = 'W'; | ||
171 | status = acpi_evaluate_integer(adev->handle, acpi_method, NULL, | ||
172 | &d_max); | ||
173 | if (ACPI_FAILURE(status)) { | ||
174 | if (target_state != ACPI_STATE_S0 || | ||
175 | status != AE_NOT_FOUND) | ||
176 | d_max = d_min; | ||
177 | } else if (d_max < d_min) { | ||
178 | /* Warn the user of the broken DSDT */ | ||
179 | printk(KERN_WARNING "ACPI: Wrong value from %s\n", | ||
180 | acpi_method); | ||
181 | /* Sanitize it */ | ||
182 | d_min = d_max; | ||
183 | } | ||
184 | } | ||
185 | |||
186 | if (d_max_in < d_min) | ||
187 | return -EINVAL; | ||
188 | if (d_min_p) | ||
189 | *d_min_p = d_min; | ||
190 | /* constrain d_max with specified lowest limit (max number) */ | ||
191 | if (d_max > d_max_in) { | ||
192 | for (d_max = d_max_in; d_max > d_min; d_max--) { | ||
193 | if (adev->power.states[d_max].flags.valid) | ||
194 | break; | ||
195 | } | ||
196 | } | ||
197 | return d_max; | ||
198 | } | ||
199 | EXPORT_SYMBOL_GPL(acpi_device_power_state); | ||
200 | |||
201 | /** | ||
202 | * acpi_pm_device_sleep_state - Get preferred power state of ACPI device. | ||
203 | * @dev: Device whose preferred target power state to return. | ||
204 | * @d_min_p: Location to store the upper limit of the allowed states range. | ||
205 | * @d_max_in: Deepest low-power state to take into consideration. | ||
206 | * Return value: Preferred power state of the device on success, -ENODEV | ||
207 | * (if there's no 'struct acpi_device' for @dev) or -EINVAL on failure | ||
208 | * | ||
209 | * The caller must ensure that @dev is valid before using this function. | ||
210 | */ | ||
211 | int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in) | ||
212 | { | ||
213 | acpi_handle handle = DEVICE_ACPI_HANDLE(dev); | ||
214 | struct acpi_device *adev; | ||
215 | |||
216 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { | ||
217 | dev_dbg(dev, "ACPI handle without context in %s!\n", __func__); | ||
218 | return -ENODEV; | ||
219 | } | ||
220 | |||
221 | return acpi_device_power_state(dev, adev, acpi_target_system_state(), | ||
222 | d_max_in, d_min_p); | ||
223 | } | ||
224 | EXPORT_SYMBOL(acpi_pm_device_sleep_state); | ||
225 | |||
226 | #ifdef CONFIG_PM_RUNTIME | ||
227 | /** | ||
228 | * acpi_wakeup_device - Wakeup notification handler for ACPI devices. | ||
229 | * @handle: ACPI handle of the device the notification is for. | ||
230 | * @event: Type of the signaled event. | ||
231 | * @context: Device corresponding to @handle. | ||
232 | */ | ||
233 | static void acpi_wakeup_device(acpi_handle handle, u32 event, void *context) | ||
234 | { | ||
235 | struct device *dev = context; | ||
236 | |||
237 | if (event == ACPI_NOTIFY_DEVICE_WAKE && dev) { | ||
238 | pm_wakeup_event(dev, 0); | ||
239 | pm_runtime_resume(dev); | ||
240 | } | ||
241 | } | ||
242 | |||
243 | /** | ||
244 | * __acpi_device_run_wake - Enable/disable runtime remote wakeup for device. | ||
245 | * @adev: ACPI device to enable/disable the remote wakeup for. | ||
246 | * @enable: Whether to enable or disable the wakeup functionality. | ||
247 | * | ||
248 | * Enable/disable the GPE associated with @adev so that it can generate | ||
249 | * wakeup signals for the device in response to external (remote) events and | ||
250 | * enable/disable device wakeup power. | ||
251 | * | ||
252 | * Callers must ensure that @adev is a valid ACPI device node before executing | ||
253 | * this function. | ||
254 | */ | ||
255 | int __acpi_device_run_wake(struct acpi_device *adev, bool enable) | ||
256 | { | ||
257 | struct acpi_device_wakeup *wakeup = &adev->wakeup; | ||
258 | |||
259 | if (enable) { | ||
260 | acpi_status res; | ||
261 | int error; | ||
262 | |||
263 | error = acpi_enable_wakeup_device_power(adev, ACPI_STATE_S0); | ||
264 | if (error) | ||
265 | return error; | ||
266 | |||
267 | res = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number); | ||
268 | if (ACPI_FAILURE(res)) { | ||
269 | acpi_disable_wakeup_device_power(adev); | ||
270 | return -EIO; | ||
271 | } | ||
272 | } else { | ||
273 | acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number); | ||
274 | acpi_disable_wakeup_device_power(adev); | ||
275 | } | ||
276 | return 0; | ||
277 | } | ||
278 | |||
279 | /** | ||
280 | * acpi_pm_device_run_wake - Enable/disable remote wakeup for given device. | ||
281 | * @dev: Device to enable/disable the platform to wake up. | ||
282 | * @enable: Whether to enable or disable the wakeup functionality. | ||
283 | */ | ||
284 | int acpi_pm_device_run_wake(struct device *phys_dev, bool enable) | ||
285 | { | ||
286 | struct acpi_device *adev; | ||
287 | acpi_handle handle; | ||
288 | |||
289 | if (!device_run_wake(phys_dev)) | ||
290 | return -EINVAL; | ||
291 | |||
292 | handle = DEVICE_ACPI_HANDLE(phys_dev); | ||
293 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { | ||
294 | dev_dbg(phys_dev, "ACPI handle without context in %s!\n", | ||
295 | __func__); | ||
296 | return -ENODEV; | ||
297 | } | ||
298 | |||
299 | return __acpi_device_run_wake(adev, enable); | ||
300 | } | ||
301 | EXPORT_SYMBOL(acpi_pm_device_run_wake); | ||
302 | #else | ||
303 | static inline void acpi_wakeup_device(acpi_handle handle, u32 event, | ||
304 | void *context) {} | ||
305 | #endif /* CONFIG_PM_RUNTIME */ | ||
306 | |||
307 | #ifdef CONFIG_PM_SLEEP | ||
308 | /** | ||
309 | * __acpi_device_sleep_wake - Enable or disable device to wake up the system. | ||
310 | * @dev: Device to enable/desible to wake up the system. | ||
311 | * @target_state: System state the device is supposed to wake up from. | ||
312 | * @enable: Whether to enable or disable @dev to wake up the system. | ||
313 | */ | ||
314 | int __acpi_device_sleep_wake(struct acpi_device *adev, u32 target_state, | ||
315 | bool enable) | ||
316 | { | ||
317 | return enable ? | ||
318 | acpi_enable_wakeup_device_power(adev, target_state) : | ||
319 | acpi_disable_wakeup_device_power(adev); | ||
320 | } | ||
321 | |||
322 | /** | ||
323 | * acpi_pm_device_sleep_wake - Enable or disable device to wake up the system. | ||
324 | * @dev: Device to enable/desible to wake up the system from sleep states. | ||
325 | * @enable: Whether to enable or disable @dev to wake up the system. | ||
326 | */ | ||
327 | int acpi_pm_device_sleep_wake(struct device *dev, bool enable) | ||
328 | { | ||
329 | acpi_handle handle; | ||
330 | struct acpi_device *adev; | ||
331 | int error; | ||
332 | |||
333 | if (!device_can_wakeup(dev)) | ||
334 | return -EINVAL; | ||
335 | |||
336 | handle = DEVICE_ACPI_HANDLE(dev); | ||
337 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { | ||
338 | dev_dbg(dev, "ACPI handle without context in %s!\n", __func__); | ||
339 | return -ENODEV; | ||
340 | } | ||
341 | |||
342 | error = __acpi_device_sleep_wake(adev, acpi_target_system_state(), | ||
343 | enable); | ||
344 | if (!error) | ||
345 | dev_info(dev, "System wakeup %s by ACPI\n", | ||
346 | enable ? "enabled" : "disabled"); | ||
347 | |||
348 | return error; | ||
349 | } | ||
350 | #endif /* CONFIG_PM_SLEEP */ | ||
351 | |||
352 | /** | ||
353 | * acpi_dev_pm_get_node - Get ACPI device node for the given physical device. | ||
354 | * @dev: Device to get the ACPI node for. | ||
355 | */ | ||
356 | static struct acpi_device *acpi_dev_pm_get_node(struct device *dev) | ||
357 | { | ||
358 | acpi_handle handle = DEVICE_ACPI_HANDLE(dev); | ||
359 | struct acpi_device *adev; | ||
360 | |||
361 | return handle && ACPI_SUCCESS(acpi_bus_get_device(handle, &adev)) ? | ||
362 | adev : NULL; | ||
363 | } | ||
364 | |||
365 | /** | ||
366 | * acpi_dev_pm_low_power - Put ACPI device into a low-power state. | ||
367 | * @dev: Device to put into a low-power state. | ||
368 | * @adev: ACPI device node corresponding to @dev. | ||
369 | * @system_state: System state to choose the device state for. | ||
370 | */ | ||
371 | static int acpi_dev_pm_low_power(struct device *dev, struct acpi_device *adev, | ||
372 | u32 system_state) | ||
373 | { | ||
374 | int power_state; | ||
375 | |||
376 | if (!acpi_device_power_manageable(adev)) | ||
377 | return 0; | ||
378 | |||
379 | power_state = acpi_device_power_state(dev, adev, system_state, | ||
380 | ACPI_STATE_D3, NULL); | ||
381 | if (power_state < ACPI_STATE_D0 || power_state > ACPI_STATE_D3) | ||
382 | return -EIO; | ||
383 | |||
384 | return acpi_device_set_power(adev, power_state); | ||
385 | } | ||
386 | |||
387 | /** | ||
388 | * acpi_dev_pm_full_power - Put ACPI device into the full-power state. | ||
389 | * @adev: ACPI device node to put into the full-power state. | ||
390 | */ | ||
391 | static int acpi_dev_pm_full_power(struct acpi_device *adev) | ||
392 | { | ||
393 | return acpi_device_power_manageable(adev) ? | ||
394 | acpi_device_set_power(adev, ACPI_STATE_D0) : 0; | ||
395 | } | ||
396 | |||
397 | #ifdef CONFIG_PM_RUNTIME | ||
398 | /** | ||
399 | * acpi_dev_runtime_suspend - Put device into a low-power state using ACPI. | ||
400 | * @dev: Device to put into a low-power state. | ||
401 | * | ||
402 | * Put the given device into a runtime low-power state using the standard ACPI | ||
403 | * mechanism. Set up remote wakeup if desired, choose the state to put the | ||
404 | * device into (this checks if remote wakeup is expected to work too), and set | ||
405 | * the power state of the device. | ||
406 | */ | ||
407 | int acpi_dev_runtime_suspend(struct device *dev) | ||
408 | { | ||
409 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); | ||
410 | bool remote_wakeup; | ||
411 | int error; | ||
412 | |||
413 | if (!adev) | ||
414 | return 0; | ||
415 | |||
416 | remote_wakeup = dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) > | ||
417 | PM_QOS_FLAGS_NONE; | ||
418 | error = __acpi_device_run_wake(adev, remote_wakeup); | ||
419 | if (remote_wakeup && error) | ||
420 | return -EAGAIN; | ||
421 | |||
422 | error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0); | ||
423 | if (error) | ||
424 | __acpi_device_run_wake(adev, false); | ||
425 | |||
426 | return error; | ||
427 | } | ||
428 | EXPORT_SYMBOL_GPL(acpi_dev_runtime_suspend); | ||
429 | |||
430 | /** | ||
431 | * acpi_dev_runtime_resume - Put device into the full-power state using ACPI. | ||
432 | * @dev: Device to put into the full-power state. | ||
433 | * | ||
434 | * Put the given device into the full-power state using the standard ACPI | ||
435 | * mechanism at run time. Set the power state of the device to ACPI D0 and | ||
436 | * disable remote wakeup. | ||
437 | */ | ||
438 | int acpi_dev_runtime_resume(struct device *dev) | ||
439 | { | ||
440 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); | ||
441 | int error; | ||
442 | |||
443 | if (!adev) | ||
444 | return 0; | ||
445 | |||
446 | error = acpi_dev_pm_full_power(adev); | ||
447 | __acpi_device_run_wake(adev, false); | ||
448 | return error; | ||
449 | } | ||
450 | EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume); | ||
451 | |||
452 | /** | ||
453 | * acpi_subsys_runtime_suspend - Suspend device using ACPI. | ||
454 | * @dev: Device to suspend. | ||
455 | * | ||
456 | * Carry out the generic runtime suspend procedure for @dev and use ACPI to put | ||
457 | * it into a runtime low-power state. | ||
458 | */ | ||
459 | int acpi_subsys_runtime_suspend(struct device *dev) | ||
460 | { | ||
461 | int ret = pm_generic_runtime_suspend(dev); | ||
462 | return ret ? ret : acpi_dev_runtime_suspend(dev); | ||
463 | } | ||
464 | EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend); | ||
465 | |||
466 | /** | ||
467 | * acpi_subsys_runtime_resume - Resume device using ACPI. | ||
468 | * @dev: Device to Resume. | ||
469 | * | ||
470 | * Use ACPI to put the given device into the full-power state and carry out the | ||
471 | * generic runtime resume procedure for it. | ||
472 | */ | ||
473 | int acpi_subsys_runtime_resume(struct device *dev) | ||
474 | { | ||
475 | int ret = acpi_dev_runtime_resume(dev); | ||
476 | return ret ? ret : pm_generic_runtime_resume(dev); | ||
477 | } | ||
478 | EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume); | ||
479 | #endif /* CONFIG_PM_RUNTIME */ | ||
480 | |||
481 | #ifdef CONFIG_PM_SLEEP | ||
482 | /** | ||
483 | * acpi_dev_suspend_late - Put device into a low-power state using ACPI. | ||
484 | * @dev: Device to put into a low-power state. | ||
485 | * | ||
486 | * Put the given device into a low-power state during system transition to a | ||
487 | * sleep state using the standard ACPI mechanism. Set up system wakeup if | ||
488 | * desired, choose the state to put the device into (this checks if system | ||
489 | * wakeup is expected to work too), and set the power state of the device. | ||
490 | */ | ||
491 | int acpi_dev_suspend_late(struct device *dev) | ||
492 | { | ||
493 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); | ||
494 | u32 target_state; | ||
495 | bool wakeup; | ||
496 | int error; | ||
497 | |||
498 | if (!adev) | ||
499 | return 0; | ||
500 | |||
501 | target_state = acpi_target_system_state(); | ||
502 | wakeup = device_may_wakeup(dev); | ||
503 | error = __acpi_device_sleep_wake(adev, target_state, wakeup); | ||
504 | if (wakeup && error) | ||
505 | return error; | ||
506 | |||
507 | error = acpi_dev_pm_low_power(dev, adev, target_state); | ||
508 | if (error) | ||
509 | __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false); | ||
510 | |||
511 | return error; | ||
512 | } | ||
513 | EXPORT_SYMBOL_GPL(acpi_dev_suspend_late); | ||
514 | |||
515 | /** | ||
516 | * acpi_dev_resume_early - Put device into the full-power state using ACPI. | ||
517 | * @dev: Device to put into the full-power state. | ||
518 | * | ||
519 | * Put the given device into the full-power state using the standard ACPI | ||
520 | * mechanism during system transition to the working state. Set the power | ||
521 | * state of the device to ACPI D0 and disable remote wakeup. | ||
522 | */ | ||
523 | int acpi_dev_resume_early(struct device *dev) | ||
524 | { | ||
525 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); | ||
526 | int error; | ||
527 | |||
528 | if (!adev) | ||
529 | return 0; | ||
530 | |||
531 | error = acpi_dev_pm_full_power(adev); | ||
532 | __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false); | ||
533 | return error; | ||
534 | } | ||
535 | EXPORT_SYMBOL_GPL(acpi_dev_resume_early); | ||
536 | |||
537 | /** | ||
538 | * acpi_subsys_prepare - Prepare device for system transition to a sleep state. | ||
539 | * @dev: Device to prepare. | ||
540 | */ | ||
541 | int acpi_subsys_prepare(struct device *dev) | ||
542 | { | ||
543 | /* | ||
544 | * Follow PCI and resume devices suspended at run time before running | ||
545 | * their system suspend callbacks. | ||
546 | */ | ||
547 | pm_runtime_resume(dev); | ||
548 | return pm_generic_prepare(dev); | ||
549 | } | ||
550 | EXPORT_SYMBOL_GPL(acpi_subsys_prepare); | ||
551 | |||
552 | /** | ||
553 | * acpi_subsys_suspend_late - Suspend device using ACPI. | ||
554 | * @dev: Device to suspend. | ||
555 | * | ||
556 | * Carry out the generic late suspend procedure for @dev and use ACPI to put | ||
557 | * it into a low-power state during system transition into a sleep state. | ||
558 | */ | ||
559 | int acpi_subsys_suspend_late(struct device *dev) | ||
560 | { | ||
561 | int ret = pm_generic_suspend_late(dev); | ||
562 | return ret ? ret : acpi_dev_suspend_late(dev); | ||
563 | } | ||
564 | EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late); | ||
565 | |||
566 | /** | ||
567 | * acpi_subsys_resume_early - Resume device using ACPI. | ||
568 | * @dev: Device to Resume. | ||
569 | * | ||
570 | * Use ACPI to put the given device into the full-power state and carry out the | ||
571 | * generic early resume procedure for it during system transition into the | ||
572 | * working state. | ||
573 | */ | ||
574 | int acpi_subsys_resume_early(struct device *dev) | ||
575 | { | ||
576 | int ret = acpi_dev_resume_early(dev); | ||
577 | return ret ? ret : pm_generic_resume_early(dev); | ||
578 | } | ||
579 | EXPORT_SYMBOL_GPL(acpi_subsys_resume_early); | ||
580 | #endif /* CONFIG_PM_SLEEP */ | ||
581 | |||
582 | static struct dev_pm_domain acpi_general_pm_domain = { | ||
583 | .ops = { | ||
584 | #ifdef CONFIG_PM_RUNTIME | ||
585 | .runtime_suspend = acpi_subsys_runtime_suspend, | ||
586 | .runtime_resume = acpi_subsys_runtime_resume, | ||
587 | .runtime_idle = pm_generic_runtime_idle, | ||
588 | #endif | ||
589 | #ifdef CONFIG_PM_SLEEP | ||
590 | .prepare = acpi_subsys_prepare, | ||
591 | .suspend_late = acpi_subsys_suspend_late, | ||
592 | .resume_early = acpi_subsys_resume_early, | ||
593 | .poweroff_late = acpi_subsys_suspend_late, | ||
594 | .restore_early = acpi_subsys_resume_early, | ||
595 | #endif | ||
596 | }, | ||
597 | }; | ||
598 | |||
599 | /** | ||
600 | * acpi_dev_pm_attach - Prepare device for ACPI power management. | ||
601 | * @dev: Device to prepare. | ||
602 | * @power_on: Whether or not to power on the device. | ||
603 | * | ||
604 | * If @dev has a valid ACPI handle that has a valid struct acpi_device object | ||
605 | * attached to it, install a wakeup notification handler for the device and | ||
606 | * add it to the general ACPI PM domain. If @power_on is set, the device will | ||
607 | * be put into the ACPI D0 state before the function returns. | ||
608 | * | ||
609 | * This assumes that the @dev's bus type uses generic power management callbacks | ||
610 | * (or doesn't use any power management callbacks at all). | ||
611 | * | ||
612 | * Callers must ensure proper synchronization of this function with power | ||
613 | * management callbacks. | ||
614 | */ | ||
615 | int acpi_dev_pm_attach(struct device *dev, bool power_on) | ||
616 | { | ||
617 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); | ||
618 | |||
619 | if (!adev) | ||
620 | return -ENODEV; | ||
621 | |||
622 | if (dev->pm_domain) | ||
623 | return -EEXIST; | ||
624 | |||
625 | acpi_add_pm_notifier(adev, acpi_wakeup_device, dev); | ||
626 | dev->pm_domain = &acpi_general_pm_domain; | ||
627 | if (power_on) { | ||
628 | acpi_dev_pm_full_power(adev); | ||
629 | __acpi_device_run_wake(adev, false); | ||
630 | } | ||
631 | return 0; | ||
632 | } | ||
633 | EXPORT_SYMBOL_GPL(acpi_dev_pm_attach); | ||
634 | |||
635 | /** | ||
636 | * acpi_dev_pm_detach - Remove ACPI power management from the device. | ||
637 | * @dev: Device to take care of. | ||
638 | * @power_off: Whether or not to try to remove power from the device. | ||
639 | * | ||
640 | * Remove the device from the general ACPI PM domain and remove its wakeup | ||
641 | * notifier. If @power_off is set, additionally remove power from the device if | ||
642 | * possible. | ||
643 | * | ||
644 | * Callers must ensure proper synchronization of this function with power | ||
645 | * management callbacks. | ||
646 | */ | ||
647 | void acpi_dev_pm_detach(struct device *dev, bool power_off) | ||
648 | { | ||
649 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); | ||
650 | |||
651 | if (adev && dev->pm_domain == &acpi_general_pm_domain) { | ||
652 | dev->pm_domain = NULL; | ||
653 | acpi_remove_pm_notifier(adev, acpi_wakeup_device); | ||
654 | if (power_off) { | ||
655 | /* | ||
656 | * If the device's PM QoS resume latency limit or flags | ||
657 | * have been exposed to user space, they have to be | ||
658 | * hidden at this point, so that they don't affect the | ||
659 | * choice of the low-power state to put the device into. | ||
660 | */ | ||
661 | dev_pm_qos_hide_latency_limit(dev); | ||
662 | dev_pm_qos_hide_flags(dev); | ||
663 | __acpi_device_run_wake(adev, false); | ||
664 | acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0); | ||
665 | } | ||
666 | } | ||
667 | } | ||
668 | EXPORT_SYMBOL_GPL(acpi_dev_pm_detach); | ||
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 88eb14304667..f32bd47b35e0 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/platform_device.h> | 31 | #include <linux/platform_device.h> |
32 | #include <linux/jiffies.h> | 32 | #include <linux/jiffies.h> |
33 | #include <linux/stddef.h> | 33 | #include <linux/stddef.h> |
34 | #include <linux/acpi.h> | ||
34 | #include <acpi/acpi_bus.h> | 35 | #include <acpi/acpi_bus.h> |
35 | #include <acpi/acpi_drivers.h> | 36 | #include <acpi/acpi_drivers.h> |
36 | 37 | ||
@@ -460,12 +461,8 @@ static void handle_dock(struct dock_station *ds, int dock) | |||
460 | struct acpi_object_list arg_list; | 461 | struct acpi_object_list arg_list; |
461 | union acpi_object arg; | 462 | union acpi_object arg; |
462 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 463 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
463 | struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
464 | 464 | ||
465 | acpi_get_name(ds->handle, ACPI_FULL_PATHNAME, &name_buffer); | 465 | acpi_handle_info(ds->handle, "%s\n", dock ? "docking" : "undocking"); |
466 | |||
467 | printk(KERN_INFO PREFIX "%s - %s\n", | ||
468 | (char *)name_buffer.pointer, dock ? "docking" : "undocking"); | ||
469 | 466 | ||
470 | /* _DCK method has one argument */ | 467 | /* _DCK method has one argument */ |
471 | arg_list.count = 1; | 468 | arg_list.count = 1; |
@@ -474,11 +471,10 @@ static void handle_dock(struct dock_station *ds, int dock) | |||
474 | arg.integer.value = dock; | 471 | arg.integer.value = dock; |
475 | status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer); | 472 | status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer); |
476 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) | 473 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) |
477 | ACPI_EXCEPTION((AE_INFO, status, "%s - failed to execute" | 474 | acpi_handle_err(ds->handle, "Failed to execute _DCK (0x%x)\n", |
478 | " _DCK\n", (char *)name_buffer.pointer)); | 475 | status); |
479 | 476 | ||
480 | kfree(buffer.pointer); | 477 | kfree(buffer.pointer); |
481 | kfree(name_buffer.pointer); | ||
482 | } | 478 | } |
483 | 479 | ||
484 | static inline void dock(struct dock_station *ds) | 480 | static inline void dock(struct dock_station *ds) |
@@ -525,9 +521,11 @@ static void dock_lock(struct dock_station *ds, int lock) | |||
525 | status = acpi_evaluate_object(ds->handle, "_LCK", &arg_list, NULL); | 521 | status = acpi_evaluate_object(ds->handle, "_LCK", &arg_list, NULL); |
526 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { | 522 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { |
527 | if (lock) | 523 | if (lock) |
528 | printk(KERN_WARNING PREFIX "Locking device failed\n"); | 524 | acpi_handle_warn(ds->handle, |
525 | "Locking device failed (0x%x)\n", status); | ||
529 | else | 526 | else |
530 | printk(KERN_WARNING PREFIX "Unlocking device failed\n"); | 527 | acpi_handle_warn(ds->handle, |
528 | "Unlocking device failed (0x%x)\n", status); | ||
531 | } | 529 | } |
532 | } | 530 | } |
533 | 531 | ||
@@ -667,7 +665,7 @@ static int handle_eject_request(struct dock_station *ds, u32 event) | |||
667 | dock_lock(ds, 0); | 665 | dock_lock(ds, 0); |
668 | eject_dock(ds); | 666 | eject_dock(ds); |
669 | if (dock_present(ds)) { | 667 | if (dock_present(ds)) { |
670 | printk(KERN_ERR PREFIX "Unable to undock!\n"); | 668 | acpi_handle_err(ds->handle, "Unable to undock!\n"); |
671 | return -EBUSY; | 669 | return -EBUSY; |
672 | } | 670 | } |
673 | complete_undock(ds); | 671 | complete_undock(ds); |
@@ -715,7 +713,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data) | |||
715 | begin_dock(ds); | 713 | begin_dock(ds); |
716 | dock(ds); | 714 | dock(ds); |
717 | if (!dock_present(ds)) { | 715 | if (!dock_present(ds)) { |
718 | printk(KERN_ERR PREFIX "Unable to dock!\n"); | 716 | acpi_handle_err(handle, "Unable to dock!\n"); |
719 | complete_dock(ds); | 717 | complete_dock(ds); |
720 | break; | 718 | break; |
721 | } | 719 | } |
@@ -743,7 +741,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data) | |||
743 | dock_event(ds, event, UNDOCK_EVENT); | 741 | dock_event(ds, event, UNDOCK_EVENT); |
744 | break; | 742 | break; |
745 | default: | 743 | default: |
746 | printk(KERN_ERR PREFIX "Unknown dock event %d\n", event); | 744 | acpi_handle_err(handle, "Unknown dock event %d\n", event); |
747 | } | 745 | } |
748 | } | 746 | } |
749 | 747 | ||
@@ -987,7 +985,7 @@ err_rmgroup: | |||
987 | sysfs_remove_group(&dd->dev.kobj, &dock_attribute_group); | 985 | sysfs_remove_group(&dd->dev.kobj, &dock_attribute_group); |
988 | err_unregister: | 986 | err_unregister: |
989 | platform_device_unregister(dd); | 987 | platform_device_unregister(dd); |
990 | printk(KERN_ERR "%s encountered error %d\n", __func__, ret); | 988 | acpi_handle_err(handle, "%s encountered error %d\n", __func__, ret); |
991 | return ret; | 989 | return ret; |
992 | } | 990 | } |
993 | 991 | ||
@@ -1016,51 +1014,39 @@ static int dock_remove(struct dock_station *ds) | |||
1016 | } | 1014 | } |
1017 | 1015 | ||
1018 | /** | 1016 | /** |
1019 | * find_dock - look for a dock station | 1017 | * find_dock_and_bay - look for dock stations and bays |
1020 | * @handle: acpi handle of a device | 1018 | * @handle: acpi handle of a device |
1021 | * @lvl: unused | 1019 | * @lvl: unused |
1022 | * @context: counter of dock stations found | 1020 | * @context: unused |
1023 | * @rv: unused | 1021 | * @rv: unused |
1024 | * | 1022 | * |
1025 | * This is called by acpi_walk_namespace to look for dock stations. | 1023 | * This is called by acpi_walk_namespace to look for dock stations and bays. |
1026 | */ | 1024 | */ |
1027 | static __init acpi_status | 1025 | static __init acpi_status |
1028 | find_dock(acpi_handle handle, u32 lvl, void *context, void **rv) | 1026 | find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv) |
1029 | { | 1027 | { |
1030 | if (is_dock(handle)) | 1028 | if (is_dock(handle) || is_ejectable_bay(handle)) |
1031 | dock_add(handle); | 1029 | dock_add(handle); |
1032 | 1030 | ||
1033 | return AE_OK; | 1031 | return AE_OK; |
1034 | } | 1032 | } |
1035 | 1033 | ||
1036 | static __init acpi_status | ||
1037 | find_bay(acpi_handle handle, u32 lvl, void *context, void **rv) | ||
1038 | { | ||
1039 | /* If bay is a dock, it's already handled */ | ||
1040 | if (is_ejectable_bay(handle) && !is_dock(handle)) | ||
1041 | dock_add(handle); | ||
1042 | return AE_OK; | ||
1043 | } | ||
1044 | |||
1045 | static int __init dock_init(void) | 1034 | static int __init dock_init(void) |
1046 | { | 1035 | { |
1047 | if (acpi_disabled) | 1036 | if (acpi_disabled) |
1048 | return 0; | 1037 | return 0; |
1049 | 1038 | ||
1050 | /* look for a dock station */ | 1039 | /* look for dock stations and bays */ |
1051 | acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, | 1040 | acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, |
1052 | ACPI_UINT32_MAX, find_dock, NULL, NULL, NULL); | 1041 | ACPI_UINT32_MAX, find_dock_and_bay, NULL, NULL, NULL); |
1053 | 1042 | ||
1054 | /* look for bay */ | ||
1055 | acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, | ||
1056 | ACPI_UINT32_MAX, find_bay, NULL, NULL, NULL); | ||
1057 | if (!dock_station_count) { | 1043 | if (!dock_station_count) { |
1058 | printk(KERN_INFO PREFIX "No dock devices found.\n"); | 1044 | pr_info(PREFIX "No dock devices found.\n"); |
1059 | return 0; | 1045 | return 0; |
1060 | } | 1046 | } |
1061 | 1047 | ||
1062 | register_acpi_bus_notifier(&dock_acpi_notifier); | 1048 | register_acpi_bus_notifier(&dock_acpi_notifier); |
1063 | printk(KERN_INFO PREFIX "%s: %d docks/bays found\n", | 1049 | pr_info(PREFIX "%s: %d docks/bays found\n", |
1064 | ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count); | 1050 | ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count); |
1065 | return 0; | 1051 | return 0; |
1066 | } | 1052 | } |
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index a51df9681319..354007d490d1 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
@@ -158,10 +158,10 @@ static int ec_transaction_done(struct acpi_ec *ec) | |||
158 | { | 158 | { |
159 | unsigned long flags; | 159 | unsigned long flags; |
160 | int ret = 0; | 160 | int ret = 0; |
161 | spin_lock_irqsave(&ec->curr_lock, flags); | 161 | spin_lock_irqsave(&ec->lock, flags); |
162 | if (!ec->curr || ec->curr->done) | 162 | if (!ec->curr || ec->curr->done) |
163 | ret = 1; | 163 | ret = 1; |
164 | spin_unlock_irqrestore(&ec->curr_lock, flags); | 164 | spin_unlock_irqrestore(&ec->lock, flags); |
165 | return ret; | 165 | return ret; |
166 | } | 166 | } |
167 | 167 | ||
@@ -175,32 +175,38 @@ static void start_transaction(struct acpi_ec *ec) | |||
175 | static void advance_transaction(struct acpi_ec *ec, u8 status) | 175 | static void advance_transaction(struct acpi_ec *ec, u8 status) |
176 | { | 176 | { |
177 | unsigned long flags; | 177 | unsigned long flags; |
178 | spin_lock_irqsave(&ec->curr_lock, flags); | 178 | struct transaction *t = ec->curr; |
179 | if (!ec->curr) | 179 | |
180 | spin_lock_irqsave(&ec->lock, flags); | ||
181 | if (!t) | ||
180 | goto unlock; | 182 | goto unlock; |
181 | if (ec->curr->wlen > ec->curr->wi) { | 183 | if (t->wlen > t->wi) { |
182 | if ((status & ACPI_EC_FLAG_IBF) == 0) | 184 | if ((status & ACPI_EC_FLAG_IBF) == 0) |
183 | acpi_ec_write_data(ec, | 185 | acpi_ec_write_data(ec, |
184 | ec->curr->wdata[ec->curr->wi++]); | 186 | t->wdata[t->wi++]); |
185 | else | 187 | else |
186 | goto err; | 188 | goto err; |
187 | } else if (ec->curr->rlen > ec->curr->ri) { | 189 | } else if (t->rlen > t->ri) { |
188 | if ((status & ACPI_EC_FLAG_OBF) == 1) { | 190 | if ((status & ACPI_EC_FLAG_OBF) == 1) { |
189 | ec->curr->rdata[ec->curr->ri++] = acpi_ec_read_data(ec); | 191 | t->rdata[t->ri++] = acpi_ec_read_data(ec); |
190 | if (ec->curr->rlen == ec->curr->ri) | 192 | if (t->rlen == t->ri) |
191 | ec->curr->done = true; | 193 | t->done = true; |
192 | } else | 194 | } else |
193 | goto err; | 195 | goto err; |
194 | } else if (ec->curr->wlen == ec->curr->wi && | 196 | } else if (t->wlen == t->wi && |
195 | (status & ACPI_EC_FLAG_IBF) == 0) | 197 | (status & ACPI_EC_FLAG_IBF) == 0) |
196 | ec->curr->done = true; | 198 | t->done = true; |
197 | goto unlock; | 199 | goto unlock; |
198 | err: | 200 | err: |
199 | /* false interrupt, state didn't change */ | 201 | /* |
200 | if (in_interrupt()) | 202 | * If SCI bit is set, then don't think it's a false IRQ |
201 | ++ec->curr->irq_count; | 203 | * otherwise will take a not handled IRQ as a false one. |
204 | */ | ||
205 | if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI)) | ||
206 | ++t->irq_count; | ||
207 | |||
202 | unlock: | 208 | unlock: |
203 | spin_unlock_irqrestore(&ec->curr_lock, flags); | 209 | spin_unlock_irqrestore(&ec->lock, flags); |
204 | } | 210 | } |
205 | 211 | ||
206 | static int acpi_ec_sync_query(struct acpi_ec *ec); | 212 | static int acpi_ec_sync_query(struct acpi_ec *ec); |
@@ -238,9 +244,9 @@ static int ec_poll(struct acpi_ec *ec) | |||
238 | if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) | 244 | if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) |
239 | break; | 245 | break; |
240 | pr_debug(PREFIX "controller reset, restart transaction\n"); | 246 | pr_debug(PREFIX "controller reset, restart transaction\n"); |
241 | spin_lock_irqsave(&ec->curr_lock, flags); | 247 | spin_lock_irqsave(&ec->lock, flags); |
242 | start_transaction(ec); | 248 | start_transaction(ec); |
243 | spin_unlock_irqrestore(&ec->curr_lock, flags); | 249 | spin_unlock_irqrestore(&ec->lock, flags); |
244 | } | 250 | } |
245 | return -ETIME; | 251 | return -ETIME; |
246 | } | 252 | } |
@@ -253,17 +259,17 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, | |||
253 | if (EC_FLAGS_MSI) | 259 | if (EC_FLAGS_MSI) |
254 | udelay(ACPI_EC_MSI_UDELAY); | 260 | udelay(ACPI_EC_MSI_UDELAY); |
255 | /* start transaction */ | 261 | /* start transaction */ |
256 | spin_lock_irqsave(&ec->curr_lock, tmp); | 262 | spin_lock_irqsave(&ec->lock, tmp); |
257 | /* following two actions should be kept atomic */ | 263 | /* following two actions should be kept atomic */ |
258 | ec->curr = t; | 264 | ec->curr = t; |
259 | start_transaction(ec); | 265 | start_transaction(ec); |
260 | if (ec->curr->command == ACPI_EC_COMMAND_QUERY) | 266 | if (ec->curr->command == ACPI_EC_COMMAND_QUERY) |
261 | clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); | 267 | clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); |
262 | spin_unlock_irqrestore(&ec->curr_lock, tmp); | 268 | spin_unlock_irqrestore(&ec->lock, tmp); |
263 | ret = ec_poll(ec); | 269 | ret = ec_poll(ec); |
264 | spin_lock_irqsave(&ec->curr_lock, tmp); | 270 | spin_lock_irqsave(&ec->lock, tmp); |
265 | ec->curr = NULL; | 271 | ec->curr = NULL; |
266 | spin_unlock_irqrestore(&ec->curr_lock, tmp); | 272 | spin_unlock_irqrestore(&ec->lock, tmp); |
267 | return ret; | 273 | return ret; |
268 | } | 274 | } |
269 | 275 | ||
@@ -292,7 +298,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) | |||
292 | return -EINVAL; | 298 | return -EINVAL; |
293 | if (t->rdata) | 299 | if (t->rdata) |
294 | memset(t->rdata, 0, t->rlen); | 300 | memset(t->rdata, 0, t->rlen); |
295 | mutex_lock(&ec->lock); | 301 | mutex_lock(&ec->mutex); |
296 | if (test_bit(EC_FLAGS_BLOCKED, &ec->flags)) { | 302 | if (test_bit(EC_FLAGS_BLOCKED, &ec->flags)) { |
297 | status = -EINVAL; | 303 | status = -EINVAL; |
298 | goto unlock; | 304 | goto unlock; |
@@ -310,7 +316,8 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) | |||
310 | status = -ETIME; | 316 | status = -ETIME; |
311 | goto end; | 317 | goto end; |
312 | } | 318 | } |
313 | pr_debug(PREFIX "transaction start\n"); | 319 | pr_debug(PREFIX "transaction start (cmd=0x%02x, addr=0x%02x)\n", |
320 | t->command, t->wdata ? t->wdata[0] : 0); | ||
314 | /* disable GPE during transaction if storm is detected */ | 321 | /* disable GPE during transaction if storm is detected */ |
315 | if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { | 322 | if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { |
316 | /* It has to be disabled, so that it doesn't trigger. */ | 323 | /* It has to be disabled, so that it doesn't trigger. */ |
@@ -326,8 +333,9 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) | |||
326 | /* It is safe to enable the GPE outside of the transaction. */ | 333 | /* It is safe to enable the GPE outside of the transaction. */ |
327 | acpi_enable_gpe(NULL, ec->gpe); | 334 | acpi_enable_gpe(NULL, ec->gpe); |
328 | } else if (t->irq_count > ec_storm_threshold) { | 335 | } else if (t->irq_count > ec_storm_threshold) { |
329 | pr_info(PREFIX "GPE storm detected, " | 336 | pr_info(PREFIX "GPE storm detected(%d GPEs), " |
330 | "transactions will use polling mode\n"); | 337 | "transactions will use polling mode\n", |
338 | t->irq_count); | ||
331 | set_bit(EC_FLAGS_GPE_STORM, &ec->flags); | 339 | set_bit(EC_FLAGS_GPE_STORM, &ec->flags); |
332 | } | 340 | } |
333 | pr_debug(PREFIX "transaction end\n"); | 341 | pr_debug(PREFIX "transaction end\n"); |
@@ -335,7 +343,7 @@ end: | |||
335 | if (ec->global_lock) | 343 | if (ec->global_lock) |
336 | acpi_release_global_lock(glk); | 344 | acpi_release_global_lock(glk); |
337 | unlock: | 345 | unlock: |
338 | mutex_unlock(&ec->lock); | 346 | mutex_unlock(&ec->mutex); |
339 | return status; | 347 | return status; |
340 | } | 348 | } |
341 | 349 | ||
@@ -403,7 +411,7 @@ int ec_burst_disable(void) | |||
403 | 411 | ||
404 | EXPORT_SYMBOL(ec_burst_disable); | 412 | EXPORT_SYMBOL(ec_burst_disable); |
405 | 413 | ||
406 | int ec_read(u8 addr, u8 * val) | 414 | int ec_read(u8 addr, u8 *val) |
407 | { | 415 | { |
408 | int err; | 416 | int err; |
409 | u8 temp_data; | 417 | u8 temp_data; |
@@ -468,10 +476,10 @@ void acpi_ec_block_transactions(void) | |||
468 | if (!ec) | 476 | if (!ec) |
469 | return; | 477 | return; |
470 | 478 | ||
471 | mutex_lock(&ec->lock); | 479 | mutex_lock(&ec->mutex); |
472 | /* Prevent transactions from being carried out */ | 480 | /* Prevent transactions from being carried out */ |
473 | set_bit(EC_FLAGS_BLOCKED, &ec->flags); | 481 | set_bit(EC_FLAGS_BLOCKED, &ec->flags); |
474 | mutex_unlock(&ec->lock); | 482 | mutex_unlock(&ec->mutex); |
475 | } | 483 | } |
476 | 484 | ||
477 | void acpi_ec_unblock_transactions(void) | 485 | void acpi_ec_unblock_transactions(void) |
@@ -481,10 +489,10 @@ void acpi_ec_unblock_transactions(void) | |||
481 | if (!ec) | 489 | if (!ec) |
482 | return; | 490 | return; |
483 | 491 | ||
484 | mutex_lock(&ec->lock); | 492 | mutex_lock(&ec->mutex); |
485 | /* Allow transactions to be carried out again */ | 493 | /* Allow transactions to be carried out again */ |
486 | clear_bit(EC_FLAGS_BLOCKED, &ec->flags); | 494 | clear_bit(EC_FLAGS_BLOCKED, &ec->flags); |
487 | mutex_unlock(&ec->lock); | 495 | mutex_unlock(&ec->mutex); |
488 | } | 496 | } |
489 | 497 | ||
490 | void acpi_ec_unblock_transactions_early(void) | 498 | void acpi_ec_unblock_transactions_early(void) |
@@ -536,9 +544,9 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, | |||
536 | handler->handle = handle; | 544 | handler->handle = handle; |
537 | handler->func = func; | 545 | handler->func = func; |
538 | handler->data = data; | 546 | handler->data = data; |
539 | mutex_lock(&ec->lock); | 547 | mutex_lock(&ec->mutex); |
540 | list_add(&handler->node, &ec->list); | 548 | list_add(&handler->node, &ec->list); |
541 | mutex_unlock(&ec->lock); | 549 | mutex_unlock(&ec->mutex); |
542 | return 0; | 550 | return 0; |
543 | } | 551 | } |
544 | 552 | ||
@@ -547,14 +555,14 @@ EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler); | |||
547 | void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) | 555 | void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) |
548 | { | 556 | { |
549 | struct acpi_ec_query_handler *handler, *tmp; | 557 | struct acpi_ec_query_handler *handler, *tmp; |
550 | mutex_lock(&ec->lock); | 558 | mutex_lock(&ec->mutex); |
551 | list_for_each_entry_safe(handler, tmp, &ec->list, node) { | 559 | list_for_each_entry_safe(handler, tmp, &ec->list, node) { |
552 | if (query_bit == handler->query_bit) { | 560 | if (query_bit == handler->query_bit) { |
553 | list_del(&handler->node); | 561 | list_del(&handler->node); |
554 | kfree(handler); | 562 | kfree(handler); |
555 | } | 563 | } |
556 | } | 564 | } |
557 | mutex_unlock(&ec->lock); | 565 | mutex_unlock(&ec->mutex); |
558 | } | 566 | } |
559 | 567 | ||
560 | EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); | 568 | EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); |
@@ -601,9 +609,9 @@ static void acpi_ec_gpe_query(void *ec_cxt) | |||
601 | struct acpi_ec *ec = ec_cxt; | 609 | struct acpi_ec *ec = ec_cxt; |
602 | if (!ec) | 610 | if (!ec) |
603 | return; | 611 | return; |
604 | mutex_lock(&ec->lock); | 612 | mutex_lock(&ec->mutex); |
605 | acpi_ec_sync_query(ec); | 613 | acpi_ec_sync_query(ec); |
606 | mutex_unlock(&ec->lock); | 614 | mutex_unlock(&ec->mutex); |
607 | } | 615 | } |
608 | 616 | ||
609 | static int ec_check_sci(struct acpi_ec *ec, u8 state) | 617 | static int ec_check_sci(struct acpi_ec *ec, u8 state) |
@@ -622,10 +630,11 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, | |||
622 | u32 gpe_number, void *data) | 630 | u32 gpe_number, void *data) |
623 | { | 631 | { |
624 | struct acpi_ec *ec = data; | 632 | struct acpi_ec *ec = data; |
633 | u8 status = acpi_ec_read_status(ec); | ||
625 | 634 | ||
626 | pr_debug(PREFIX "~~~> interrupt\n"); | 635 | pr_debug(PREFIX "~~~> interrupt, status:0x%02x\n", status); |
627 | 636 | ||
628 | advance_transaction(ec, acpi_ec_read_status(ec)); | 637 | advance_transaction(ec, status); |
629 | if (ec_transaction_done(ec) && | 638 | if (ec_transaction_done(ec) && |
630 | (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) { | 639 | (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) { |
631 | wake_up(&ec->wait); | 640 | wake_up(&ec->wait); |
@@ -691,10 +700,10 @@ static struct acpi_ec *make_acpi_ec(void) | |||
691 | if (!ec) | 700 | if (!ec) |
692 | return NULL; | 701 | return NULL; |
693 | ec->flags = 1 << EC_FLAGS_QUERY_PENDING; | 702 | ec->flags = 1 << EC_FLAGS_QUERY_PENDING; |
694 | mutex_init(&ec->lock); | 703 | mutex_init(&ec->mutex); |
695 | init_waitqueue_head(&ec->wait); | 704 | init_waitqueue_head(&ec->wait); |
696 | INIT_LIST_HEAD(&ec->list); | 705 | INIT_LIST_HEAD(&ec->list); |
697 | spin_lock_init(&ec->curr_lock); | 706 | spin_lock_init(&ec->lock); |
698 | return ec; | 707 | return ec; |
699 | } | 708 | } |
700 | 709 | ||
@@ -853,12 +862,12 @@ static int acpi_ec_remove(struct acpi_device *device, int type) | |||
853 | 862 | ||
854 | ec = acpi_driver_data(device); | 863 | ec = acpi_driver_data(device); |
855 | ec_remove_handlers(ec); | 864 | ec_remove_handlers(ec); |
856 | mutex_lock(&ec->lock); | 865 | mutex_lock(&ec->mutex); |
857 | list_for_each_entry_safe(handler, tmp, &ec->list, node) { | 866 | list_for_each_entry_safe(handler, tmp, &ec->list, node) { |
858 | list_del(&handler->node); | 867 | list_del(&handler->node); |
859 | kfree(handler); | 868 | kfree(handler); |
860 | } | 869 | } |
861 | mutex_unlock(&ec->lock); | 870 | mutex_unlock(&ec->mutex); |
862 | release_region(ec->data_addr, 1); | 871 | release_region(ec->data_addr, 1); |
863 | release_region(ec->command_addr, 1); | 872 | release_region(ec->command_addr, 1); |
864 | device->driver_data = NULL; | 873 | device->driver_data = NULL; |
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 08373086cd7e..01551840d236 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c | |||
@@ -130,46 +130,59 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle) | |||
130 | { | 130 | { |
131 | struct acpi_device *acpi_dev; | 131 | struct acpi_device *acpi_dev; |
132 | acpi_status status; | 132 | acpi_status status; |
133 | struct acpi_device_physical_node *physical_node; | 133 | struct acpi_device_physical_node *physical_node, *pn; |
134 | char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2]; | 134 | char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2]; |
135 | int retval = -EINVAL; | 135 | int retval = -EINVAL; |
136 | 136 | ||
137 | if (dev->archdata.acpi_handle) { | 137 | if (ACPI_HANDLE(dev)) { |
138 | dev_warn(dev, "Drivers changed 'acpi_handle'\n"); | 138 | if (handle) { |
139 | return -EINVAL; | 139 | dev_warn(dev, "ACPI handle is already set\n"); |
140 | return -EINVAL; | ||
141 | } else { | ||
142 | handle = ACPI_HANDLE(dev); | ||
143 | } | ||
140 | } | 144 | } |
145 | if (!handle) | ||
146 | return -EINVAL; | ||
141 | 147 | ||
142 | get_device(dev); | 148 | get_device(dev); |
143 | status = acpi_bus_get_device(handle, &acpi_dev); | 149 | status = acpi_bus_get_device(handle, &acpi_dev); |
144 | if (ACPI_FAILURE(status)) | 150 | if (ACPI_FAILURE(status)) |
145 | goto err; | 151 | goto err; |
146 | 152 | ||
147 | physical_node = kzalloc(sizeof(struct acpi_device_physical_node), | 153 | physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL); |
148 | GFP_KERNEL); | ||
149 | if (!physical_node) { | 154 | if (!physical_node) { |
150 | retval = -ENOMEM; | 155 | retval = -ENOMEM; |
151 | goto err; | 156 | goto err; |
152 | } | 157 | } |
153 | 158 | ||
154 | mutex_lock(&acpi_dev->physical_node_lock); | 159 | mutex_lock(&acpi_dev->physical_node_lock); |
160 | |||
161 | /* Sanity check. */ | ||
162 | list_for_each_entry(pn, &acpi_dev->physical_node_list, node) | ||
163 | if (pn->dev == dev) { | ||
164 | dev_warn(dev, "Already associated with ACPI node\n"); | ||
165 | goto err_free; | ||
166 | } | ||
167 | |||
155 | /* allocate physical node id according to physical_node_id_bitmap */ | 168 | /* allocate physical node id according to physical_node_id_bitmap */ |
156 | physical_node->node_id = | 169 | physical_node->node_id = |
157 | find_first_zero_bit(acpi_dev->physical_node_id_bitmap, | 170 | find_first_zero_bit(acpi_dev->physical_node_id_bitmap, |
158 | ACPI_MAX_PHYSICAL_NODE); | 171 | ACPI_MAX_PHYSICAL_NODE); |
159 | if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) { | 172 | if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) { |
160 | retval = -ENOSPC; | 173 | retval = -ENOSPC; |
161 | mutex_unlock(&acpi_dev->physical_node_lock); | 174 | goto err_free; |
162 | kfree(physical_node); | ||
163 | goto err; | ||
164 | } | 175 | } |
165 | 176 | ||
166 | set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap); | 177 | set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap); |
167 | physical_node->dev = dev; | 178 | physical_node->dev = dev; |
168 | list_add_tail(&physical_node->node, &acpi_dev->physical_node_list); | 179 | list_add_tail(&physical_node->node, &acpi_dev->physical_node_list); |
169 | acpi_dev->physical_node_count++; | 180 | acpi_dev->physical_node_count++; |
181 | |||
170 | mutex_unlock(&acpi_dev->physical_node_lock); | 182 | mutex_unlock(&acpi_dev->physical_node_lock); |
171 | 183 | ||
172 | dev->archdata.acpi_handle = handle; | 184 | if (!ACPI_HANDLE(dev)) |
185 | ACPI_HANDLE_SET(dev, acpi_dev->handle); | ||
173 | 186 | ||
174 | if (!physical_node->node_id) | 187 | if (!physical_node->node_id) |
175 | strcpy(physical_node_name, PHYSICAL_NODE_STRING); | 188 | strcpy(physical_node_name, PHYSICAL_NODE_STRING); |
@@ -187,8 +200,14 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle) | |||
187 | return 0; | 200 | return 0; |
188 | 201 | ||
189 | err: | 202 | err: |
203 | ACPI_HANDLE_SET(dev, NULL); | ||
190 | put_device(dev); | 204 | put_device(dev); |
191 | return retval; | 205 | return retval; |
206 | |||
207 | err_free: | ||
208 | mutex_unlock(&acpi_dev->physical_node_lock); | ||
209 | kfree(physical_node); | ||
210 | goto err; | ||
192 | } | 211 | } |
193 | 212 | ||
194 | static int acpi_unbind_one(struct device *dev) | 213 | static int acpi_unbind_one(struct device *dev) |
@@ -198,11 +217,10 @@ static int acpi_unbind_one(struct device *dev) | |||
198 | acpi_status status; | 217 | acpi_status status; |
199 | struct list_head *node, *next; | 218 | struct list_head *node, *next; |
200 | 219 | ||
201 | if (!dev->archdata.acpi_handle) | 220 | if (!ACPI_HANDLE(dev)) |
202 | return 0; | 221 | return 0; |
203 | 222 | ||
204 | status = acpi_bus_get_device(dev->archdata.acpi_handle, | 223 | status = acpi_bus_get_device(ACPI_HANDLE(dev), &acpi_dev); |
205 | &acpi_dev); | ||
206 | if (ACPI_FAILURE(status)) | 224 | if (ACPI_FAILURE(status)) |
207 | goto err; | 225 | goto err; |
208 | 226 | ||
@@ -228,7 +246,7 @@ static int acpi_unbind_one(struct device *dev) | |||
228 | 246 | ||
229 | sysfs_remove_link(&acpi_dev->dev.kobj, physical_node_name); | 247 | sysfs_remove_link(&acpi_dev->dev.kobj, physical_node_name); |
230 | sysfs_remove_link(&dev->kobj, "firmware_node"); | 248 | sysfs_remove_link(&dev->kobj, "firmware_node"); |
231 | dev->archdata.acpi_handle = NULL; | 249 | ACPI_HANDLE_SET(dev, NULL); |
232 | /* acpi_bind_one increase refcnt by one */ | 250 | /* acpi_bind_one increase refcnt by one */ |
233 | put_device(dev); | 251 | put_device(dev); |
234 | kfree(entry); | 252 | kfree(entry); |
@@ -248,6 +266,10 @@ static int acpi_platform_notify(struct device *dev) | |||
248 | acpi_handle handle; | 266 | acpi_handle handle; |
249 | int ret = -EINVAL; | 267 | int ret = -EINVAL; |
250 | 268 | ||
269 | ret = acpi_bind_one(dev, NULL); | ||
270 | if (!ret) | ||
271 | goto out; | ||
272 | |||
251 | if (!dev->bus || !dev->parent) { | 273 | if (!dev->bus || !dev->parent) { |
252 | /* bridge devices genernally haven't bus or parent */ | 274 | /* bridge devices genernally haven't bus or parent */ |
253 | ret = acpi_find_bridge_device(dev, &handle); | 275 | ret = acpi_find_bridge_device(dev, &handle); |
@@ -261,16 +283,16 @@ static int acpi_platform_notify(struct device *dev) | |||
261 | } | 283 | } |
262 | if ((ret = type->find_device(dev, &handle)) != 0) | 284 | if ((ret = type->find_device(dev, &handle)) != 0) |
263 | DBG("Can't get handler for %s\n", dev_name(dev)); | 285 | DBG("Can't get handler for %s\n", dev_name(dev)); |
264 | end: | 286 | end: |
265 | if (!ret) | 287 | if (!ret) |
266 | acpi_bind_one(dev, handle); | 288 | acpi_bind_one(dev, handle); |
267 | 289 | ||
290 | out: | ||
268 | #if ACPI_GLUE_DEBUG | 291 | #if ACPI_GLUE_DEBUG |
269 | if (!ret) { | 292 | if (!ret) { |
270 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 293 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
271 | 294 | ||
272 | acpi_get_name(dev->archdata.acpi_handle, | 295 | acpi_get_name(dev->acpi_handle, ACPI_FULL_PATHNAME, &buffer); |
273 | ACPI_FULL_PATHNAME, &buffer); | ||
274 | DBG("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer); | 296 | DBG("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer); |
275 | kfree(buffer.pointer); | 297 | kfree(buffer.pointer); |
276 | } else | 298 | } else |
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c index 20a0f2c3ca3b..b514e81e8cfa 100644 --- a/drivers/acpi/hed.c +++ b/drivers/acpi/hed.c | |||
@@ -70,7 +70,7 @@ static int __devinit acpi_hed_add(struct acpi_device *device) | |||
70 | return 0; | 70 | return 0; |
71 | } | 71 | } |
72 | 72 | ||
73 | static int __devexit acpi_hed_remove(struct acpi_device *device, int type) | 73 | static int acpi_hed_remove(struct acpi_device *device, int type) |
74 | { | 74 | { |
75 | hed_handle = NULL; | 75 | hed_handle = NULL; |
76 | return 0; | 76 | return 0; |
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index ca75b9ce0489..3c407cdc1ec1 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h | |||
@@ -58,11 +58,11 @@ struct acpi_ec { | |||
58 | unsigned long data_addr; | 58 | unsigned long data_addr; |
59 | unsigned long global_lock; | 59 | unsigned long global_lock; |
60 | unsigned long flags; | 60 | unsigned long flags; |
61 | struct mutex lock; | 61 | struct mutex mutex; |
62 | wait_queue_head_t wait; | 62 | wait_queue_head_t wait; |
63 | struct list_head list; | 63 | struct list_head list; |
64 | struct transaction *curr; | 64 | struct transaction *curr; |
65 | spinlock_t curr_lock; | 65 | spinlock_t lock; |
66 | }; | 66 | }; |
67 | 67 | ||
68 | extern struct acpi_ec *first_ec; | 68 | extern struct acpi_ec *first_ec; |
@@ -93,4 +93,11 @@ static inline int suspend_nvs_save(void) { return 0; } | |||
93 | static inline void suspend_nvs_restore(void) {} | 93 | static inline void suspend_nvs_restore(void) {} |
94 | #endif | 94 | #endif |
95 | 95 | ||
96 | /*-------------------------------------------------------------------------- | ||
97 | Platform bus support | ||
98 | -------------------------------------------------------------------------- */ | ||
99 | struct platform_device; | ||
100 | |||
101 | struct platform_device *acpi_create_platform_device(struct acpi_device *adev); | ||
102 | |||
96 | #endif /* _ACPI_INTERNAL_H_ */ | 103 | #endif /* _ACPI_INTERNAL_H_ */ |
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 9eaf708f5885..6dc4a2b1e956 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c | |||
@@ -932,7 +932,7 @@ static acpi_status __acpi_os_execute(acpi_execute_type type, | |||
932 | * having a static work_struct. | 932 | * having a static work_struct. |
933 | */ | 933 | */ |
934 | 934 | ||
935 | dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC); | 935 | dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC); |
936 | if (!dpc) | 936 | if (!dpc) |
937 | return AE_NO_MEMORY; | 937 | return AE_NO_MEMORY; |
938 | 938 | ||
@@ -944,17 +944,22 @@ static acpi_status __acpi_os_execute(acpi_execute_type type, | |||
944 | * because the hotplug code may call driver .remove() functions, | 944 | * because the hotplug code may call driver .remove() functions, |
945 | * which invoke flush_scheduled_work/acpi_os_wait_events_complete | 945 | * which invoke flush_scheduled_work/acpi_os_wait_events_complete |
946 | * to flush these workqueues. | 946 | * to flush these workqueues. |
947 | * | ||
948 | * To prevent lockdep from complaining unnecessarily, make sure that | ||
949 | * there is a different static lockdep key for each workqueue by using | ||
950 | * INIT_WORK() for each of them separately. | ||
947 | */ | 951 | */ |
948 | queue = hp ? kacpi_hotplug_wq : | 952 | if (hp) { |
949 | (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq); | 953 | queue = kacpi_hotplug_wq; |
950 | dpc->wait = hp ? 1 : 0; | 954 | dpc->wait = 1; |
951 | |||
952 | if (queue == kacpi_hotplug_wq) | ||
953 | INIT_WORK(&dpc->work, acpi_os_execute_deferred); | 955 | INIT_WORK(&dpc->work, acpi_os_execute_deferred); |
954 | else if (queue == kacpi_notify_wq) | 956 | } else if (type == OSL_NOTIFY_HANDLER) { |
957 | queue = kacpi_notify_wq; | ||
955 | INIT_WORK(&dpc->work, acpi_os_execute_deferred); | 958 | INIT_WORK(&dpc->work, acpi_os_execute_deferred); |
956 | else | 959 | } else { |
960 | queue = kacpid_wq; | ||
957 | INIT_WORK(&dpc->work, acpi_os_execute_deferred); | 961 | INIT_WORK(&dpc->work, acpi_os_execute_deferred); |
962 | } | ||
958 | 963 | ||
959 | /* | 964 | /* |
960 | * On some machines, a software-initiated SMI causes corruption unless | 965 | * On some machines, a software-initiated SMI causes corruption unless |
@@ -986,6 +991,7 @@ acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function, | |||
986 | { | 991 | { |
987 | return __acpi_os_execute(0, function, context, 1); | 992 | return __acpi_os_execute(0, function, context, 1); |
988 | } | 993 | } |
994 | EXPORT_SYMBOL(acpi_os_hotplug_execute); | ||
989 | 995 | ||
990 | void acpi_os_wait_events_complete(void) | 996 | void acpi_os_wait_events_complete(void) |
991 | { | 997 | { |
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 0eefa12e648c..23a032490130 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c | |||
@@ -459,19 +459,19 @@ int acpi_pci_irq_enable(struct pci_dev *dev) | |||
459 | */ | 459 | */ |
460 | if (gsi < 0) { | 460 | if (gsi < 0) { |
461 | u32 dev_gsi; | 461 | u32 dev_gsi; |
462 | dev_warn(&dev->dev, "PCI INT %c: no GSI", pin_name(pin)); | ||
463 | /* Interrupt Line values above 0xF are forbidden */ | 462 | /* Interrupt Line values above 0xF are forbidden */ |
464 | if (dev->irq > 0 && (dev->irq <= 0xF) && | 463 | if (dev->irq > 0 && (dev->irq <= 0xF) && |
465 | (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) { | 464 | (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) { |
466 | printk(" - using ISA IRQ %d\n", dev->irq); | 465 | dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n", |
466 | pin_name(pin), dev->irq); | ||
467 | acpi_register_gsi(&dev->dev, dev_gsi, | 467 | acpi_register_gsi(&dev->dev, dev_gsi, |
468 | ACPI_LEVEL_SENSITIVE, | 468 | ACPI_LEVEL_SENSITIVE, |
469 | ACPI_ACTIVE_LOW); | 469 | ACPI_ACTIVE_LOW); |
470 | return 0; | ||
471 | } else { | 470 | } else { |
472 | printk("\n"); | 471 | dev_warn(&dev->dev, "PCI INT %c: no GSI\n", |
473 | return 0; | 472 | pin_name(pin)); |
474 | } | 473 | } |
474 | return 0; | ||
475 | } | 475 | } |
476 | 476 | ||
477 | rc = acpi_register_gsi(&dev->dev, gsi, triggering, polarity); | 477 | rc = acpi_register_gsi(&dev->dev, gsi, triggering, polarity); |
@@ -495,11 +495,6 @@ int acpi_pci_irq_enable(struct pci_dev *dev) | |||
495 | return 0; | 495 | return 0; |
496 | } | 496 | } |
497 | 497 | ||
498 | /* FIXME: implement x86/x86_64 version */ | ||
499 | void __attribute__ ((weak)) acpi_unregister_gsi(u32 i) | ||
500 | { | ||
501 | } | ||
502 | |||
503 | void acpi_pci_irq_disable(struct pci_dev *dev) | 498 | void acpi_pci_irq_disable(struct pci_dev *dev) |
504 | { | 499 | { |
505 | struct acpi_prt_entry *entry; | 500 | struct acpi_prt_entry *entry; |
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 40e38a06ba85..7db61b8fa11f 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c | |||
@@ -473,7 +473,7 @@ int acpi_power_resource_register_device(struct device *dev, acpi_handle handle) | |||
473 | return ret; | 473 | return ret; |
474 | 474 | ||
475 | no_power_resource: | 475 | no_power_resource: |
476 | printk(KERN_DEBUG PREFIX "Invalid Power Resource to register!"); | 476 | printk(KERN_DEBUG PREFIX "Invalid Power Resource to register!\n"); |
477 | return -ENODEV; | 477 | return -ENODEV; |
478 | } | 478 | } |
479 | EXPORT_SYMBOL_GPL(acpi_power_resource_register_device); | 479 | EXPORT_SYMBOL_GPL(acpi_power_resource_register_device); |
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c index 27adb090bb30..ef98796b3824 100644 --- a/drivers/acpi/proc.c +++ b/drivers/acpi/proc.c | |||
@@ -362,16 +362,13 @@ acpi_system_write_wakeup_device(struct file *file, | |||
362 | struct list_head *node, *next; | 362 | struct list_head *node, *next; |
363 | char strbuf[5]; | 363 | char strbuf[5]; |
364 | char str[5] = ""; | 364 | char str[5] = ""; |
365 | unsigned int len = count; | ||
366 | 365 | ||
367 | if (len > 4) | 366 | if (count > 4) |
368 | len = 4; | 367 | count = 4; |
369 | if (len < 0) | ||
370 | return -EFAULT; | ||
371 | 368 | ||
372 | if (copy_from_user(strbuf, buffer, len)) | 369 | if (copy_from_user(strbuf, buffer, count)) |
373 | return -EFAULT; | 370 | return -EFAULT; |
374 | strbuf[len] = '\0'; | 371 | strbuf[count] = '\0'; |
375 | sscanf(strbuf, "%s", str); | 372 | sscanf(strbuf, "%s", str); |
376 | 373 | ||
377 | mutex_lock(&acpi_device_lock); | 374 | mutex_lock(&acpi_device_lock); |
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index bd4e5dca3ff7..e83311bf1ebd 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <linux/moduleparam.h> | 44 | #include <linux/moduleparam.h> |
45 | #include <linux/cpuidle.h> | 45 | #include <linux/cpuidle.h> |
46 | #include <linux/slab.h> | 46 | #include <linux/slab.h> |
47 | #include <linux/acpi.h> | ||
47 | 48 | ||
48 | #include <asm/io.h> | 49 | #include <asm/io.h> |
49 | #include <asm/cpu.h> | 50 | #include <asm/cpu.h> |
@@ -282,7 +283,9 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
282 | /* Declared with "Processor" statement; match ProcessorID */ | 283 | /* Declared with "Processor" statement; match ProcessorID */ |
283 | status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer); | 284 | status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer); |
284 | if (ACPI_FAILURE(status)) { | 285 | if (ACPI_FAILURE(status)) { |
285 | printk(KERN_ERR PREFIX "Evaluating processor object\n"); | 286 | dev_err(&device->dev, |
287 | "Failed to evaluate processor object (0x%x)\n", | ||
288 | status); | ||
286 | return -ENODEV; | 289 | return -ENODEV; |
287 | } | 290 | } |
288 | 291 | ||
@@ -301,8 +304,9 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
301 | status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID, | 304 | status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID, |
302 | NULL, &value); | 305 | NULL, &value); |
303 | if (ACPI_FAILURE(status)) { | 306 | if (ACPI_FAILURE(status)) { |
304 | printk(KERN_ERR PREFIX | 307 | dev_err(&device->dev, |
305 | "Evaluating processor _UID [%#x]\n", status); | 308 | "Failed to evaluate processor _UID (0x%x)\n", |
309 | status); | ||
306 | return -ENODEV; | 310 | return -ENODEV; |
307 | } | 311 | } |
308 | device_declaration = 1; | 312 | device_declaration = 1; |
@@ -345,7 +349,7 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
345 | if (!object.processor.pblk_address) | 349 | if (!object.processor.pblk_address) |
346 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n")); | 350 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n")); |
347 | else if (object.processor.pblk_length != 6) | 351 | else if (object.processor.pblk_length != 6) |
348 | printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n", | 352 | dev_err(&device->dev, "Invalid PBLK length [%d]\n", |
349 | object.processor.pblk_length); | 353 | object.processor.pblk_length); |
350 | else { | 354 | else { |
351 | pr->throttling.address = object.processor.pblk_address; | 355 | pr->throttling.address = object.processor.pblk_address; |
@@ -430,8 +434,8 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, | |||
430 | * Initialize missing things | 434 | * Initialize missing things |
431 | */ | 435 | */ |
432 | if (pr->flags.need_hotplug_init) { | 436 | if (pr->flags.need_hotplug_init) { |
433 | printk(KERN_INFO "Will online and init hotplugged " | 437 | pr_info("Will online and init hotplugged CPU: %d\n", |
434 | "CPU: %d\n", pr->id); | 438 | pr->id); |
435 | WARN(acpi_processor_start(pr), "Failed to start CPU:" | 439 | WARN(acpi_processor_start(pr), "Failed to start CPU:" |
436 | " %d\n", pr->id); | 440 | " %d\n", pr->id); |
437 | pr->flags.need_hotplug_init = 0; | 441 | pr->flags.need_hotplug_init = 0; |
@@ -492,14 +496,16 @@ static __ref int acpi_processor_start(struct acpi_processor *pr) | |||
492 | &pr->cdev->device.kobj, | 496 | &pr->cdev->device.kobj, |
493 | "thermal_cooling"); | 497 | "thermal_cooling"); |
494 | if (result) { | 498 | if (result) { |
495 | printk(KERN_ERR PREFIX "Create sysfs link\n"); | 499 | dev_err(&device->dev, |
500 | "Failed to create sysfs link 'thermal_cooling'\n"); | ||
496 | goto err_thermal_unregister; | 501 | goto err_thermal_unregister; |
497 | } | 502 | } |
498 | result = sysfs_create_link(&pr->cdev->device.kobj, | 503 | result = sysfs_create_link(&pr->cdev->device.kobj, |
499 | &device->dev.kobj, | 504 | &device->dev.kobj, |
500 | "device"); | 505 | "device"); |
501 | if (result) { | 506 | if (result) { |
502 | printk(KERN_ERR PREFIX "Create sysfs link\n"); | 507 | dev_err(&pr->cdev->device, |
508 | "Failed to create sysfs link 'device'\n"); | ||
503 | goto err_remove_sysfs_thermal; | 509 | goto err_remove_sysfs_thermal; |
504 | } | 510 | } |
505 | 511 | ||
@@ -561,8 +567,9 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) | |||
561 | */ | 567 | */ |
562 | if (per_cpu(processor_device_array, pr->id) != NULL && | 568 | if (per_cpu(processor_device_array, pr->id) != NULL && |
563 | per_cpu(processor_device_array, pr->id) != device) { | 569 | per_cpu(processor_device_array, pr->id) != device) { |
564 | printk(KERN_WARNING "BIOS reported wrong ACPI id " | 570 | dev_warn(&device->dev, |
565 | "for the processor\n"); | 571 | "BIOS reported wrong ACPI id %d for the processor\n", |
572 | pr->id); | ||
566 | result = -ENODEV; | 573 | result = -ENODEV; |
567 | goto err_free_cpumask; | 574 | goto err_free_cpumask; |
568 | } | 575 | } |
@@ -695,8 +702,8 @@ int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device) | |||
695 | static void acpi_processor_hotplug_notify(acpi_handle handle, | 702 | static void acpi_processor_hotplug_notify(acpi_handle handle, |
696 | u32 event, void *data) | 703 | u32 event, void *data) |
697 | { | 704 | { |
698 | struct acpi_processor *pr; | ||
699 | struct acpi_device *device = NULL; | 705 | struct acpi_device *device = NULL; |
706 | struct acpi_eject_event *ej_event = NULL; | ||
700 | u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */ | 707 | u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */ |
701 | int result; | 708 | int result; |
702 | 709 | ||
@@ -716,7 +723,7 @@ static void acpi_processor_hotplug_notify(acpi_handle handle, | |||
716 | 723 | ||
717 | result = acpi_processor_device_add(handle, &device); | 724 | result = acpi_processor_device_add(handle, &device); |
718 | if (result) { | 725 | if (result) { |
719 | printk(KERN_ERR PREFIX "Unable to add the device\n"); | 726 | acpi_handle_err(handle, "Unable to add the device\n"); |
720 | break; | 727 | break; |
721 | } | 728 | } |
722 | 729 | ||
@@ -728,20 +735,29 @@ static void acpi_processor_hotplug_notify(acpi_handle handle, | |||
728 | "received ACPI_NOTIFY_EJECT_REQUEST\n")); | 735 | "received ACPI_NOTIFY_EJECT_REQUEST\n")); |
729 | 736 | ||
730 | if (acpi_bus_get_device(handle, &device)) { | 737 | if (acpi_bus_get_device(handle, &device)) { |
731 | printk(KERN_ERR PREFIX | 738 | acpi_handle_err(handle, |
732 | "Device don't exist, dropping EJECT\n"); | 739 | "Device don't exist, dropping EJECT\n"); |
733 | break; | 740 | break; |
734 | } | 741 | } |
735 | pr = acpi_driver_data(device); | 742 | if (!acpi_driver_data(device)) { |
736 | if (!pr) { | 743 | acpi_handle_err(handle, |
737 | printk(KERN_ERR PREFIX | 744 | "Driver data is NULL, dropping EJECT\n"); |
738 | "Driver data is NULL, dropping EJECT\n"); | ||
739 | break; | 745 | break; |
740 | } | 746 | } |
741 | 747 | ||
742 | /* REVISIT: update when eject is supported */ | 748 | ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL); |
743 | ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED; | 749 | if (!ej_event) { |
744 | break; | 750 | acpi_handle_err(handle, "No memory, dropping EJECT\n"); |
751 | break; | ||
752 | } | ||
753 | |||
754 | ej_event->handle = handle; | ||
755 | ej_event->event = ACPI_NOTIFY_EJECT_REQUEST; | ||
756 | acpi_os_hotplug_execute(acpi_bus_hot_remove_device, | ||
757 | (void *)ej_event); | ||
758 | |||
759 | /* eject is performed asynchronously */ | ||
760 | return; | ||
745 | 761 | ||
746 | default: | 762 | default: |
747 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 763 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
@@ -841,7 +857,7 @@ static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr) | |||
841 | * and do it when the CPU gets online the first time | 857 | * and do it when the CPU gets online the first time |
842 | * TBD: Cleanup above functions and try to do this more elegant. | 858 | * TBD: Cleanup above functions and try to do this more elegant. |
843 | */ | 859 | */ |
844 | printk(KERN_INFO "CPU %d got hotplugged\n", pr->id); | 860 | pr_info("CPU %d got hotplugged\n", pr->id); |
845 | pr->flags.need_hotplug_init = 1; | 861 | pr->flags.need_hotplug_init = 1; |
846 | 862 | ||
847 | return AE_OK; | 863 | return AE_OK; |
@@ -852,8 +868,22 @@ static int acpi_processor_handle_eject(struct acpi_processor *pr) | |||
852 | if (cpu_online(pr->id)) | 868 | if (cpu_online(pr->id)) |
853 | cpu_down(pr->id); | 869 | cpu_down(pr->id); |
854 | 870 | ||
871 | get_online_cpus(); | ||
872 | /* | ||
873 | * The cpu might become online again at this point. So we check whether | ||
874 | * the cpu has been onlined or not. If the cpu became online, it means | ||
875 | * that someone wants to use the cpu. So acpi_processor_handle_eject() | ||
876 | * returns -EAGAIN. | ||
877 | */ | ||
878 | if (unlikely(cpu_online(pr->id))) { | ||
879 | put_online_cpus(); | ||
880 | pr_warn("Failed to remove CPU %d, because other task " | ||
881 | "brought the CPU back online\n", pr->id); | ||
882 | return -EAGAIN; | ||
883 | } | ||
855 | arch_unregister_cpu(pr->id); | 884 | arch_unregister_cpu(pr->id); |
856 | acpi_unmap_lsapic(pr->id); | 885 | acpi_unmap_lsapic(pr->id); |
886 | put_online_cpus(); | ||
857 | return (0); | 887 | return (0); |
858 | } | 888 | } |
859 | #else | 889 | #else |
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index e8086c725305..f1a5da44591d 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
@@ -735,31 +735,18 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) | |||
735 | static int acpi_idle_enter_c1(struct cpuidle_device *dev, | 735 | static int acpi_idle_enter_c1(struct cpuidle_device *dev, |
736 | struct cpuidle_driver *drv, int index) | 736 | struct cpuidle_driver *drv, int index) |
737 | { | 737 | { |
738 | ktime_t kt1, kt2; | ||
739 | s64 idle_time; | ||
740 | struct acpi_processor *pr; | 738 | struct acpi_processor *pr; |
741 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | 739 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
742 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); | 740 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); |
743 | 741 | ||
744 | pr = __this_cpu_read(processors); | 742 | pr = __this_cpu_read(processors); |
745 | dev->last_residency = 0; | ||
746 | 743 | ||
747 | if (unlikely(!pr)) | 744 | if (unlikely(!pr)) |
748 | return -EINVAL; | 745 | return -EINVAL; |
749 | 746 | ||
750 | local_irq_disable(); | ||
751 | |||
752 | |||
753 | lapic_timer_state_broadcast(pr, cx, 1); | 747 | lapic_timer_state_broadcast(pr, cx, 1); |
754 | kt1 = ktime_get_real(); | ||
755 | acpi_idle_do_entry(cx); | 748 | acpi_idle_do_entry(cx); |
756 | kt2 = ktime_get_real(); | ||
757 | idle_time = ktime_to_us(ktime_sub(kt2, kt1)); | ||
758 | |||
759 | /* Update device last_residency*/ | ||
760 | dev->last_residency = (int)idle_time; | ||
761 | 749 | ||
762 | local_irq_enable(); | ||
763 | lapic_timer_state_broadcast(pr, cx, 0); | 750 | lapic_timer_state_broadcast(pr, cx, 0); |
764 | 751 | ||
765 | return index; | 752 | return index; |
@@ -806,19 +793,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
806 | struct acpi_processor *pr; | 793 | struct acpi_processor *pr; |
807 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | 794 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
808 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); | 795 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); |
809 | ktime_t kt1, kt2; | ||
810 | s64 idle_time_ns; | ||
811 | s64 idle_time; | ||
812 | 796 | ||
813 | pr = __this_cpu_read(processors); | 797 | pr = __this_cpu_read(processors); |
814 | dev->last_residency = 0; | ||
815 | 798 | ||
816 | if (unlikely(!pr)) | 799 | if (unlikely(!pr)) |
817 | return -EINVAL; | 800 | return -EINVAL; |
818 | 801 | ||
819 | local_irq_disable(); | ||
820 | |||
821 | |||
822 | if (cx->entry_method != ACPI_CSTATE_FFH) { | 802 | if (cx->entry_method != ACPI_CSTATE_FFH) { |
823 | current_thread_info()->status &= ~TS_POLLING; | 803 | current_thread_info()->status &= ~TS_POLLING; |
824 | /* | 804 | /* |
@@ -829,7 +809,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
829 | 809 | ||
830 | if (unlikely(need_resched())) { | 810 | if (unlikely(need_resched())) { |
831 | current_thread_info()->status |= TS_POLLING; | 811 | current_thread_info()->status |= TS_POLLING; |
832 | local_irq_enable(); | ||
833 | return -EINVAL; | 812 | return -EINVAL; |
834 | } | 813 | } |
835 | } | 814 | } |
@@ -843,22 +822,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
843 | if (cx->type == ACPI_STATE_C3) | 822 | if (cx->type == ACPI_STATE_C3) |
844 | ACPI_FLUSH_CPU_CACHE(); | 823 | ACPI_FLUSH_CPU_CACHE(); |
845 | 824 | ||
846 | kt1 = ktime_get_real(); | ||
847 | /* Tell the scheduler that we are going deep-idle: */ | 825 | /* Tell the scheduler that we are going deep-idle: */ |
848 | sched_clock_idle_sleep_event(); | 826 | sched_clock_idle_sleep_event(); |
849 | acpi_idle_do_entry(cx); | 827 | acpi_idle_do_entry(cx); |
850 | kt2 = ktime_get_real(); | ||
851 | idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1)); | ||
852 | idle_time = idle_time_ns; | ||
853 | do_div(idle_time, NSEC_PER_USEC); | ||
854 | 828 | ||
855 | /* Update device last_residency*/ | 829 | sched_clock_idle_wakeup_event(0); |
856 | dev->last_residency = (int)idle_time; | ||
857 | 830 | ||
858 | /* Tell the scheduler how much we idled: */ | ||
859 | sched_clock_idle_wakeup_event(idle_time_ns); | ||
860 | |||
861 | local_irq_enable(); | ||
862 | if (cx->entry_method != ACPI_CSTATE_FFH) | 831 | if (cx->entry_method != ACPI_CSTATE_FFH) |
863 | current_thread_info()->status |= TS_POLLING; | 832 | current_thread_info()->status |= TS_POLLING; |
864 | 833 | ||
@@ -883,13 +852,8 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
883 | struct acpi_processor *pr; | 852 | struct acpi_processor *pr; |
884 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | 853 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
885 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); | 854 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); |
886 | ktime_t kt1, kt2; | ||
887 | s64 idle_time_ns; | ||
888 | s64 idle_time; | ||
889 | |||
890 | 855 | ||
891 | pr = __this_cpu_read(processors); | 856 | pr = __this_cpu_read(processors); |
892 | dev->last_residency = 0; | ||
893 | 857 | ||
894 | if (unlikely(!pr)) | 858 | if (unlikely(!pr)) |
895 | return -EINVAL; | 859 | return -EINVAL; |
@@ -899,16 +863,11 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
899 | return drv->states[drv->safe_state_index].enter(dev, | 863 | return drv->states[drv->safe_state_index].enter(dev, |
900 | drv, drv->safe_state_index); | 864 | drv, drv->safe_state_index); |
901 | } else { | 865 | } else { |
902 | local_irq_disable(); | ||
903 | acpi_safe_halt(); | 866 | acpi_safe_halt(); |
904 | local_irq_enable(); | ||
905 | return -EBUSY; | 867 | return -EBUSY; |
906 | } | 868 | } |
907 | } | 869 | } |
908 | 870 | ||
909 | local_irq_disable(); | ||
910 | |||
911 | |||
912 | if (cx->entry_method != ACPI_CSTATE_FFH) { | 871 | if (cx->entry_method != ACPI_CSTATE_FFH) { |
913 | current_thread_info()->status &= ~TS_POLLING; | 872 | current_thread_info()->status &= ~TS_POLLING; |
914 | /* | 873 | /* |
@@ -919,7 +878,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
919 | 878 | ||
920 | if (unlikely(need_resched())) { | 879 | if (unlikely(need_resched())) { |
921 | current_thread_info()->status |= TS_POLLING; | 880 | current_thread_info()->status |= TS_POLLING; |
922 | local_irq_enable(); | ||
923 | return -EINVAL; | 881 | return -EINVAL; |
924 | } | 882 | } |
925 | } | 883 | } |
@@ -934,7 +892,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
934 | */ | 892 | */ |
935 | lapic_timer_state_broadcast(pr, cx, 1); | 893 | lapic_timer_state_broadcast(pr, cx, 1); |
936 | 894 | ||
937 | kt1 = ktime_get_real(); | ||
938 | /* | 895 | /* |
939 | * disable bus master | 896 | * disable bus master |
940 | * bm_check implies we need ARB_DIS | 897 | * bm_check implies we need ARB_DIS |
@@ -965,18 +922,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
965 | c3_cpu_count--; | 922 | c3_cpu_count--; |
966 | raw_spin_unlock(&c3_lock); | 923 | raw_spin_unlock(&c3_lock); |
967 | } | 924 | } |
968 | kt2 = ktime_get_real(); | ||
969 | idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1)); | ||
970 | idle_time = idle_time_ns; | ||
971 | do_div(idle_time, NSEC_PER_USEC); | ||
972 | |||
973 | /* Update device last_residency*/ | ||
974 | dev->last_residency = (int)idle_time; | ||
975 | 925 | ||
976 | /* Tell the scheduler how much we idled: */ | 926 | sched_clock_idle_wakeup_event(0); |
977 | sched_clock_idle_wakeup_event(idle_time_ns); | ||
978 | 927 | ||
979 | local_irq_enable(); | ||
980 | if (cx->entry_method != ACPI_CSTATE_FFH) | 928 | if (cx->entry_method != ACPI_CSTATE_FFH) |
981 | current_thread_info()->status |= TS_POLLING; | 929 | current_thread_info()->status |= TS_POLLING; |
982 | 930 | ||
@@ -987,6 +935,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
987 | struct cpuidle_driver acpi_idle_driver = { | 935 | struct cpuidle_driver acpi_idle_driver = { |
988 | .name = "acpi_idle", | 936 | .name = "acpi_idle", |
989 | .owner = THIS_MODULE, | 937 | .owner = THIS_MODULE, |
938 | .en_core_tk_irqen = 1, | ||
990 | }; | 939 | }; |
991 | 940 | ||
992 | /** | 941 | /** |
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c new file mode 100644 index 000000000000..a3868f6c222a --- /dev/null +++ b/drivers/acpi/resource.c | |||
@@ -0,0 +1,526 @@ | |||
1 | /* | ||
2 | * drivers/acpi/resource.c - ACPI device resources interpretation. | ||
3 | * | ||
4 | * Copyright (C) 2012, Intel Corp. | ||
5 | * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> | ||
6 | * | ||
7 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as published | ||
11 | * by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
21 | * | ||
22 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
23 | */ | ||
24 | |||
25 | #include <linux/acpi.h> | ||
26 | #include <linux/device.h> | ||
27 | #include <linux/export.h> | ||
28 | #include <linux/ioport.h> | ||
29 | #include <linux/slab.h> | ||
30 | |||
31 | #ifdef CONFIG_X86 | ||
32 | #define valid_IRQ(i) (((i) != 0) && ((i) != 2)) | ||
33 | #else | ||
34 | #define valid_IRQ(i) (true) | ||
35 | #endif | ||
36 | |||
37 | static unsigned long acpi_dev_memresource_flags(u64 len, u8 write_protect, | ||
38 | bool window) | ||
39 | { | ||
40 | unsigned long flags = IORESOURCE_MEM; | ||
41 | |||
42 | if (len == 0) | ||
43 | flags |= IORESOURCE_DISABLED; | ||
44 | |||
45 | if (write_protect == ACPI_READ_WRITE_MEMORY) | ||
46 | flags |= IORESOURCE_MEM_WRITEABLE; | ||
47 | |||
48 | if (window) | ||
49 | flags |= IORESOURCE_WINDOW; | ||
50 | |||
51 | return flags; | ||
52 | } | ||
53 | |||
54 | static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len, | ||
55 | u8 write_protect) | ||
56 | { | ||
57 | res->start = start; | ||
58 | res->end = start + len - 1; | ||
59 | res->flags = acpi_dev_memresource_flags(len, write_protect, false); | ||
60 | } | ||
61 | |||
62 | /** | ||
63 | * acpi_dev_resource_memory - Extract ACPI memory resource information. | ||
64 | * @ares: Input ACPI resource object. | ||
65 | * @res: Output generic resource object. | ||
66 | * | ||
67 | * Check if the given ACPI resource object represents a memory resource and | ||
68 | * if that's the case, use the information in it to populate the generic | ||
69 | * resource object pointed to by @res. | ||
70 | */ | ||
71 | bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) | ||
72 | { | ||
73 | struct acpi_resource_memory24 *memory24; | ||
74 | struct acpi_resource_memory32 *memory32; | ||
75 | struct acpi_resource_fixed_memory32 *fixed_memory32; | ||
76 | |||
77 | switch (ares->type) { | ||
78 | case ACPI_RESOURCE_TYPE_MEMORY24: | ||
79 | memory24 = &ares->data.memory24; | ||
80 | acpi_dev_get_memresource(res, memory24->minimum, | ||
81 | memory24->address_length, | ||
82 | memory24->write_protect); | ||
83 | break; | ||
84 | case ACPI_RESOURCE_TYPE_MEMORY32: | ||
85 | memory32 = &ares->data.memory32; | ||
86 | acpi_dev_get_memresource(res, memory32->minimum, | ||
87 | memory32->address_length, | ||
88 | memory32->write_protect); | ||
89 | break; | ||
90 | case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: | ||
91 | fixed_memory32 = &ares->data.fixed_memory32; | ||
92 | acpi_dev_get_memresource(res, fixed_memory32->address, | ||
93 | fixed_memory32->address_length, | ||
94 | fixed_memory32->write_protect); | ||
95 | break; | ||
96 | default: | ||
97 | return false; | ||
98 | } | ||
99 | return true; | ||
100 | } | ||
101 | EXPORT_SYMBOL_GPL(acpi_dev_resource_memory); | ||
102 | |||
103 | static unsigned int acpi_dev_ioresource_flags(u64 start, u64 end, u8 io_decode, | ||
104 | bool window) | ||
105 | { | ||
106 | int flags = IORESOURCE_IO; | ||
107 | |||
108 | if (io_decode == ACPI_DECODE_16) | ||
109 | flags |= IORESOURCE_IO_16BIT_ADDR; | ||
110 | |||
111 | if (start > end || end >= 0x10003) | ||
112 | flags |= IORESOURCE_DISABLED; | ||
113 | |||
114 | if (window) | ||
115 | flags |= IORESOURCE_WINDOW; | ||
116 | |||
117 | return flags; | ||
118 | } | ||
119 | |||
120 | static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len, | ||
121 | u8 io_decode) | ||
122 | { | ||
123 | u64 end = start + len - 1; | ||
124 | |||
125 | res->start = start; | ||
126 | res->end = end; | ||
127 | res->flags = acpi_dev_ioresource_flags(start, end, io_decode, false); | ||
128 | } | ||
129 | |||
130 | /** | ||
131 | * acpi_dev_resource_io - Extract ACPI I/O resource information. | ||
132 | * @ares: Input ACPI resource object. | ||
133 | * @res: Output generic resource object. | ||
134 | * | ||
135 | * Check if the given ACPI resource object represents an I/O resource and | ||
136 | * if that's the case, use the information in it to populate the generic | ||
137 | * resource object pointed to by @res. | ||
138 | */ | ||
139 | bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res) | ||
140 | { | ||
141 | struct acpi_resource_io *io; | ||
142 | struct acpi_resource_fixed_io *fixed_io; | ||
143 | |||
144 | switch (ares->type) { | ||
145 | case ACPI_RESOURCE_TYPE_IO: | ||
146 | io = &ares->data.io; | ||
147 | acpi_dev_get_ioresource(res, io->minimum, | ||
148 | io->address_length, | ||
149 | io->io_decode); | ||
150 | break; | ||
151 | case ACPI_RESOURCE_TYPE_FIXED_IO: | ||
152 | fixed_io = &ares->data.fixed_io; | ||
153 | acpi_dev_get_ioresource(res, fixed_io->address, | ||
154 | fixed_io->address_length, | ||
155 | ACPI_DECODE_10); | ||
156 | break; | ||
157 | default: | ||
158 | return false; | ||
159 | } | ||
160 | return true; | ||
161 | } | ||
162 | EXPORT_SYMBOL_GPL(acpi_dev_resource_io); | ||
163 | |||
164 | /** | ||
165 | * acpi_dev_resource_address_space - Extract ACPI address space information. | ||
166 | * @ares: Input ACPI resource object. | ||
167 | * @res: Output generic resource object. | ||
168 | * | ||
169 | * Check if the given ACPI resource object represents an address space resource | ||
170 | * and if that's the case, use the information in it to populate the generic | ||
171 | * resource object pointed to by @res. | ||
172 | */ | ||
173 | bool acpi_dev_resource_address_space(struct acpi_resource *ares, | ||
174 | struct resource *res) | ||
175 | { | ||
176 | acpi_status status; | ||
177 | struct acpi_resource_address64 addr; | ||
178 | bool window; | ||
179 | u64 len; | ||
180 | u8 io_decode; | ||
181 | |||
182 | switch (ares->type) { | ||
183 | case ACPI_RESOURCE_TYPE_ADDRESS16: | ||
184 | case ACPI_RESOURCE_TYPE_ADDRESS32: | ||
185 | case ACPI_RESOURCE_TYPE_ADDRESS64: | ||
186 | break; | ||
187 | default: | ||
188 | return false; | ||
189 | } | ||
190 | |||
191 | status = acpi_resource_to_address64(ares, &addr); | ||
192 | if (ACPI_FAILURE(status)) | ||
193 | return true; | ||
194 | |||
195 | res->start = addr.minimum; | ||
196 | res->end = addr.maximum; | ||
197 | window = addr.producer_consumer == ACPI_PRODUCER; | ||
198 | |||
199 | switch(addr.resource_type) { | ||
200 | case ACPI_MEMORY_RANGE: | ||
201 | len = addr.maximum - addr.minimum + 1; | ||
202 | res->flags = acpi_dev_memresource_flags(len, | ||
203 | addr.info.mem.write_protect, | ||
204 | window); | ||
205 | break; | ||
206 | case ACPI_IO_RANGE: | ||
207 | io_decode = addr.granularity == 0xfff ? | ||
208 | ACPI_DECODE_10 : ACPI_DECODE_16; | ||
209 | res->flags = acpi_dev_ioresource_flags(addr.minimum, | ||
210 | addr.maximum, | ||
211 | io_decode, window); | ||
212 | break; | ||
213 | case ACPI_BUS_NUMBER_RANGE: | ||
214 | res->flags = IORESOURCE_BUS; | ||
215 | break; | ||
216 | default: | ||
217 | res->flags = 0; | ||
218 | } | ||
219 | |||
220 | return true; | ||
221 | } | ||
222 | EXPORT_SYMBOL_GPL(acpi_dev_resource_address_space); | ||
223 | |||
224 | /** | ||
225 | * acpi_dev_resource_ext_address_space - Extract ACPI address space information. | ||
226 | * @ares: Input ACPI resource object. | ||
227 | * @res: Output generic resource object. | ||
228 | * | ||
229 | * Check if the given ACPI resource object represents an extended address space | ||
230 | * resource and if that's the case, use the information in it to populate the | ||
231 | * generic resource object pointed to by @res. | ||
232 | */ | ||
233 | bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, | ||
234 | struct resource *res) | ||
235 | { | ||
236 | struct acpi_resource_extended_address64 *ext_addr; | ||
237 | bool window; | ||
238 | u64 len; | ||
239 | u8 io_decode; | ||
240 | |||
241 | if (ares->type != ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64) | ||
242 | return false; | ||
243 | |||
244 | ext_addr = &ares->data.ext_address64; | ||
245 | |||
246 | res->start = ext_addr->minimum; | ||
247 | res->end = ext_addr->maximum; | ||
248 | window = ext_addr->producer_consumer == ACPI_PRODUCER; | ||
249 | |||
250 | switch(ext_addr->resource_type) { | ||
251 | case ACPI_MEMORY_RANGE: | ||
252 | len = ext_addr->maximum - ext_addr->minimum + 1; | ||
253 | res->flags = acpi_dev_memresource_flags(len, | ||
254 | ext_addr->info.mem.write_protect, | ||
255 | window); | ||
256 | break; | ||
257 | case ACPI_IO_RANGE: | ||
258 | io_decode = ext_addr->granularity == 0xfff ? | ||
259 | ACPI_DECODE_10 : ACPI_DECODE_16; | ||
260 | res->flags = acpi_dev_ioresource_flags(ext_addr->minimum, | ||
261 | ext_addr->maximum, | ||
262 | io_decode, window); | ||
263 | break; | ||
264 | case ACPI_BUS_NUMBER_RANGE: | ||
265 | res->flags = IORESOURCE_BUS; | ||
266 | break; | ||
267 | default: | ||
268 | res->flags = 0; | ||
269 | } | ||
270 | |||
271 | return true; | ||
272 | } | ||
273 | EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space); | ||
274 | |||
275 | /** | ||
276 | * acpi_dev_irq_flags - Determine IRQ resource flags. | ||
277 | * @triggering: Triggering type as provided by ACPI. | ||
278 | * @polarity: Interrupt polarity as provided by ACPI. | ||
279 | * @shareable: Whether or not the interrupt is shareable. | ||
280 | */ | ||
281 | unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable) | ||
282 | { | ||
283 | unsigned long flags; | ||
284 | |||
285 | if (triggering == ACPI_LEVEL_SENSITIVE) | ||
286 | flags = polarity == ACPI_ACTIVE_LOW ? | ||
287 | IORESOURCE_IRQ_LOWLEVEL : IORESOURCE_IRQ_HIGHLEVEL; | ||
288 | else | ||
289 | flags = polarity == ACPI_ACTIVE_LOW ? | ||
290 | IORESOURCE_IRQ_LOWEDGE : IORESOURCE_IRQ_HIGHEDGE; | ||
291 | |||
292 | if (shareable == ACPI_SHARED) | ||
293 | flags |= IORESOURCE_IRQ_SHAREABLE; | ||
294 | |||
295 | return flags | IORESOURCE_IRQ; | ||
296 | } | ||
297 | EXPORT_SYMBOL_GPL(acpi_dev_irq_flags); | ||
298 | |||
299 | static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi) | ||
300 | { | ||
301 | res->start = gsi; | ||
302 | res->end = gsi; | ||
303 | res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED; | ||
304 | } | ||
305 | |||
306 | static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, | ||
307 | u8 triggering, u8 polarity, u8 shareable) | ||
308 | { | ||
309 | int irq, p, t; | ||
310 | |||
311 | if (!valid_IRQ(gsi)) { | ||
312 | acpi_dev_irqresource_disabled(res, gsi); | ||
313 | return; | ||
314 | } | ||
315 | |||
316 | /* | ||
317 | * In IO-APIC mode, use overrided attribute. Two reasons: | ||
318 | * 1. BIOS bug in DSDT | ||
319 | * 2. BIOS uses IO-APIC mode Interrupt Source Override | ||
320 | */ | ||
321 | if (!acpi_get_override_irq(gsi, &t, &p)) { | ||
322 | u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; | ||
323 | u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; | ||
324 | |||
325 | if (triggering != trig || polarity != pol) { | ||
326 | pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi, | ||
327 | t ? "edge" : "level", p ? "low" : "high"); | ||
328 | triggering = trig; | ||
329 | polarity = pol; | ||
330 | } | ||
331 | } | ||
332 | |||
333 | res->flags = acpi_dev_irq_flags(triggering, polarity, shareable); | ||
334 | irq = acpi_register_gsi(NULL, gsi, triggering, polarity); | ||
335 | if (irq >= 0) { | ||
336 | res->start = irq; | ||
337 | res->end = irq; | ||
338 | } else { | ||
339 | acpi_dev_irqresource_disabled(res, gsi); | ||
340 | } | ||
341 | } | ||
342 | |||
343 | /** | ||
344 | * acpi_dev_resource_interrupt - Extract ACPI interrupt resource information. | ||
345 | * @ares: Input ACPI resource object. | ||
346 | * @index: Index into the array of GSIs represented by the resource. | ||
347 | * @res: Output generic resource object. | ||
348 | * | ||
349 | * Check if the given ACPI resource object represents an interrupt resource | ||
350 | * and @index does not exceed the resource's interrupt count (true is returned | ||
351 | * in that case regardless of the results of the other checks)). If that's the | ||
352 | * case, register the GSI corresponding to @index from the array of interrupts | ||
353 | * represented by the resource and populate the generic resource object pointed | ||
354 | * to by @res accordingly. If the registration of the GSI is not successful, | ||
355 | * IORESOURCE_DISABLED will be set it that object's flags. | ||
356 | */ | ||
357 | bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, | ||
358 | struct resource *res) | ||
359 | { | ||
360 | struct acpi_resource_irq *irq; | ||
361 | struct acpi_resource_extended_irq *ext_irq; | ||
362 | |||
363 | switch (ares->type) { | ||
364 | case ACPI_RESOURCE_TYPE_IRQ: | ||
365 | /* | ||
366 | * Per spec, only one interrupt per descriptor is allowed in | ||
367 | * _CRS, but some firmware violates this, so parse them all. | ||
368 | */ | ||
369 | irq = &ares->data.irq; | ||
370 | if (index >= irq->interrupt_count) { | ||
371 | acpi_dev_irqresource_disabled(res, 0); | ||
372 | return false; | ||
373 | } | ||
374 | acpi_dev_get_irqresource(res, irq->interrupts[index], | ||
375 | irq->triggering, irq->polarity, | ||
376 | irq->sharable); | ||
377 | break; | ||
378 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: | ||
379 | ext_irq = &ares->data.extended_irq; | ||
380 | if (index >= ext_irq->interrupt_count) { | ||
381 | acpi_dev_irqresource_disabled(res, 0); | ||
382 | return false; | ||
383 | } | ||
384 | acpi_dev_get_irqresource(res, ext_irq->interrupts[index], | ||
385 | ext_irq->triggering, ext_irq->polarity, | ||
386 | ext_irq->sharable); | ||
387 | break; | ||
388 | default: | ||
389 | return false; | ||
390 | } | ||
391 | |||
392 | return true; | ||
393 | } | ||
394 | EXPORT_SYMBOL_GPL(acpi_dev_resource_interrupt); | ||
395 | |||
396 | /** | ||
397 | * acpi_dev_free_resource_list - Free resource from %acpi_dev_get_resources(). | ||
398 | * @list: The head of the resource list to free. | ||
399 | */ | ||
400 | void acpi_dev_free_resource_list(struct list_head *list) | ||
401 | { | ||
402 | struct resource_list_entry *rentry, *re; | ||
403 | |||
404 | list_for_each_entry_safe(rentry, re, list, node) { | ||
405 | list_del(&rentry->node); | ||
406 | kfree(rentry); | ||
407 | } | ||
408 | } | ||
409 | EXPORT_SYMBOL_GPL(acpi_dev_free_resource_list); | ||
410 | |||
411 | struct res_proc_context { | ||
412 | struct list_head *list; | ||
413 | int (*preproc)(struct acpi_resource *, void *); | ||
414 | void *preproc_data; | ||
415 | int count; | ||
416 | int error; | ||
417 | }; | ||
418 | |||
419 | static acpi_status acpi_dev_new_resource_entry(struct resource *r, | ||
420 | struct res_proc_context *c) | ||
421 | { | ||
422 | struct resource_list_entry *rentry; | ||
423 | |||
424 | rentry = kmalloc(sizeof(*rentry), GFP_KERNEL); | ||
425 | if (!rentry) { | ||
426 | c->error = -ENOMEM; | ||
427 | return AE_NO_MEMORY; | ||
428 | } | ||
429 | rentry->res = *r; | ||
430 | list_add_tail(&rentry->node, c->list); | ||
431 | c->count++; | ||
432 | return AE_OK; | ||
433 | } | ||
434 | |||
435 | static acpi_status acpi_dev_process_resource(struct acpi_resource *ares, | ||
436 | void *context) | ||
437 | { | ||
438 | struct res_proc_context *c = context; | ||
439 | struct resource r; | ||
440 | int i; | ||
441 | |||
442 | if (c->preproc) { | ||
443 | int ret; | ||
444 | |||
445 | ret = c->preproc(ares, c->preproc_data); | ||
446 | if (ret < 0) { | ||
447 | c->error = ret; | ||
448 | return AE_CTRL_TERMINATE; | ||
449 | } else if (ret > 0) { | ||
450 | return AE_OK; | ||
451 | } | ||
452 | } | ||
453 | |||
454 | memset(&r, 0, sizeof(r)); | ||
455 | |||
456 | if (acpi_dev_resource_memory(ares, &r) | ||
457 | || acpi_dev_resource_io(ares, &r) | ||
458 | || acpi_dev_resource_address_space(ares, &r) | ||
459 | || acpi_dev_resource_ext_address_space(ares, &r)) | ||
460 | return acpi_dev_new_resource_entry(&r, c); | ||
461 | |||
462 | for (i = 0; acpi_dev_resource_interrupt(ares, i, &r); i++) { | ||
463 | acpi_status status; | ||
464 | |||
465 | status = acpi_dev_new_resource_entry(&r, c); | ||
466 | if (ACPI_FAILURE(status)) | ||
467 | return status; | ||
468 | } | ||
469 | |||
470 | return AE_OK; | ||
471 | } | ||
472 | |||
473 | /** | ||
474 | * acpi_dev_get_resources - Get current resources of a device. | ||
475 | * @adev: ACPI device node to get the resources for. | ||
476 | * @list: Head of the resultant list of resources (must be empty). | ||
477 | * @preproc: The caller's preprocessing routine. | ||
478 | * @preproc_data: Pointer passed to the caller's preprocessing routine. | ||
479 | * | ||
480 | * Evaluate the _CRS method for the given device node and process its output by | ||
481 | * (1) executing the @preproc() rountine provided by the caller, passing the | ||
482 | * resource pointer and @preproc_data to it as arguments, for each ACPI resource | ||
483 | * returned and (2) converting all of the returned ACPI resources into struct | ||
484 | * resource objects if possible. If the return value of @preproc() in step (1) | ||
485 | * is different from 0, step (2) is not applied to the given ACPI resource and | ||
486 | * if that value is negative, the whole processing is aborted and that value is | ||
487 | * returned as the final error code. | ||
488 | * | ||
489 | * The resultant struct resource objects are put on the list pointed to by | ||
490 | * @list, that must be empty initially, as members of struct resource_list_entry | ||
491 | * objects. Callers of this routine should use %acpi_dev_free_resource_list() to | ||
492 | * free that list. | ||
493 | * | ||
494 | * The number of resources in the output list is returned on success, an error | ||
495 | * code reflecting the error condition is returned otherwise. | ||
496 | */ | ||
497 | int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, | ||
498 | int (*preproc)(struct acpi_resource *, void *), | ||
499 | void *preproc_data) | ||
500 | { | ||
501 | struct res_proc_context c; | ||
502 | acpi_handle not_used; | ||
503 | acpi_status status; | ||
504 | |||
505 | if (!adev || !adev->handle || !list_empty(list)) | ||
506 | return -EINVAL; | ||
507 | |||
508 | status = acpi_get_handle(adev->handle, METHOD_NAME__CRS, ¬_used); | ||
509 | if (ACPI_FAILURE(status)) | ||
510 | return 0; | ||
511 | |||
512 | c.list = list; | ||
513 | c.preproc = preproc; | ||
514 | c.preproc_data = preproc_data; | ||
515 | c.count = 0; | ||
516 | c.error = 0; | ||
517 | status = acpi_walk_resources(adev->handle, METHOD_NAME__CRS, | ||
518 | acpi_dev_process_resource, &c); | ||
519 | if (ACPI_FAILURE(status)) { | ||
520 | acpi_dev_free_resource_list(list); | ||
521 | return c.error ? c.error : -EIO; | ||
522 | } | ||
523 | |||
524 | return c.count; | ||
525 | } | ||
526 | EXPORT_SYMBOL_GPL(acpi_dev_get_resources); | ||
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 1fcb8678665c..53502d1bbf26 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -29,6 +29,27 @@ extern struct acpi_device *acpi_root; | |||
29 | 29 | ||
30 | static const char *dummy_hid = "device"; | 30 | static const char *dummy_hid = "device"; |
31 | 31 | ||
32 | /* | ||
33 | * The following ACPI IDs are known to be suitable for representing as | ||
34 | * platform devices. | ||
35 | */ | ||
36 | static const struct acpi_device_id acpi_platform_device_ids[] = { | ||
37 | |||
38 | { "PNP0D40" }, | ||
39 | |||
40 | /* Haswell LPSS devices */ | ||
41 | { "INT33C0", 0 }, | ||
42 | { "INT33C1", 0 }, | ||
43 | { "INT33C2", 0 }, | ||
44 | { "INT33C3", 0 }, | ||
45 | { "INT33C4", 0 }, | ||
46 | { "INT33C5", 0 }, | ||
47 | { "INT33C6", 0 }, | ||
48 | { "INT33C7", 0 }, | ||
49 | |||
50 | { } | ||
51 | }; | ||
52 | |||
32 | static LIST_HEAD(acpi_device_list); | 53 | static LIST_HEAD(acpi_device_list); |
33 | static LIST_HEAD(acpi_bus_id_list); | 54 | static LIST_HEAD(acpi_bus_id_list); |
34 | DEFINE_MUTEX(acpi_device_lock); | 55 | DEFINE_MUTEX(acpi_device_lock); |
@@ -97,6 +118,7 @@ void acpi_bus_hot_remove_device(void *context) | |||
97 | struct acpi_eject_event *ej_event = (struct acpi_eject_event *) context; | 118 | struct acpi_eject_event *ej_event = (struct acpi_eject_event *) context; |
98 | struct acpi_device *device; | 119 | struct acpi_device *device; |
99 | acpi_handle handle = ej_event->handle; | 120 | acpi_handle handle = ej_event->handle; |
121 | acpi_handle temp; | ||
100 | struct acpi_object_list arg_list; | 122 | struct acpi_object_list arg_list; |
101 | union acpi_object arg; | 123 | union acpi_object arg; |
102 | acpi_status status = AE_OK; | 124 | acpi_status status = AE_OK; |
@@ -117,13 +139,16 @@ void acpi_bus_hot_remove_device(void *context) | |||
117 | goto err_out; | 139 | goto err_out; |
118 | } | 140 | } |
119 | 141 | ||
142 | /* device has been freed */ | ||
143 | device = NULL; | ||
144 | |||
120 | /* power off device */ | 145 | /* power off device */ |
121 | status = acpi_evaluate_object(handle, "_PS3", NULL, NULL); | 146 | status = acpi_evaluate_object(handle, "_PS3", NULL, NULL); |
122 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) | 147 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) |
123 | printk(KERN_WARNING PREFIX | 148 | printk(KERN_WARNING PREFIX |
124 | "Power-off device failed\n"); | 149 | "Power-off device failed\n"); |
125 | 150 | ||
126 | if (device->flags.lockable) { | 151 | if (ACPI_SUCCESS(acpi_get_handle(handle, "_LCK", &temp))) { |
127 | arg_list.count = 1; | 152 | arg_list.count = 1; |
128 | arg_list.pointer = &arg; | 153 | arg_list.pointer = &arg; |
129 | arg.type = ACPI_TYPE_INTEGER; | 154 | arg.type = ACPI_TYPE_INTEGER; |
@@ -157,6 +182,7 @@ err_out: | |||
157 | kfree(context); | 182 | kfree(context); |
158 | return; | 183 | return; |
159 | } | 184 | } |
185 | EXPORT_SYMBOL(acpi_bus_hot_remove_device); | ||
160 | 186 | ||
161 | static ssize_t | 187 | static ssize_t |
162 | acpi_eject_store(struct device *d, struct device_attribute *attr, | 188 | acpi_eject_store(struct device *d, struct device_attribute *attr, |
@@ -216,6 +242,25 @@ acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *bu | |||
216 | } | 242 | } |
217 | static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL); | 243 | static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL); |
218 | 244 | ||
245 | static ssize_t acpi_device_uid_show(struct device *dev, | ||
246 | struct device_attribute *attr, char *buf) | ||
247 | { | ||
248 | struct acpi_device *acpi_dev = to_acpi_device(dev); | ||
249 | |||
250 | return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id); | ||
251 | } | ||
252 | static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL); | ||
253 | |||
254 | static ssize_t acpi_device_adr_show(struct device *dev, | ||
255 | struct device_attribute *attr, char *buf) | ||
256 | { | ||
257 | struct acpi_device *acpi_dev = to_acpi_device(dev); | ||
258 | |||
259 | return sprintf(buf, "0x%08x\n", | ||
260 | (unsigned int)(acpi_dev->pnp.bus_address)); | ||
261 | } | ||
262 | static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL); | ||
263 | |||
219 | static ssize_t | 264 | static ssize_t |
220 | acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) { | 265 | acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) { |
221 | struct acpi_device *acpi_dev = to_acpi_device(dev); | 266 | struct acpi_device *acpi_dev = to_acpi_device(dev); |
@@ -259,11 +304,21 @@ static ssize_t description_show(struct device *dev, | |||
259 | } | 304 | } |
260 | static DEVICE_ATTR(description, 0444, description_show, NULL); | 305 | static DEVICE_ATTR(description, 0444, description_show, NULL); |
261 | 306 | ||
307 | static ssize_t | ||
308 | acpi_device_sun_show(struct device *dev, struct device_attribute *attr, | ||
309 | char *buf) { | ||
310 | struct acpi_device *acpi_dev = to_acpi_device(dev); | ||
311 | |||
312 | return sprintf(buf, "%lu\n", acpi_dev->pnp.sun); | ||
313 | } | ||
314 | static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL); | ||
315 | |||
262 | static int acpi_device_setup_files(struct acpi_device *dev) | 316 | static int acpi_device_setup_files(struct acpi_device *dev) |
263 | { | 317 | { |
264 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; | 318 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; |
265 | acpi_status status; | 319 | acpi_status status; |
266 | acpi_handle temp; | 320 | acpi_handle temp; |
321 | unsigned long long sun; | ||
267 | int result = 0; | 322 | int result = 0; |
268 | 323 | ||
269 | /* | 324 | /* |
@@ -300,6 +355,21 @@ static int acpi_device_setup_files(struct acpi_device *dev) | |||
300 | goto end; | 355 | goto end; |
301 | } | 356 | } |
302 | 357 | ||
358 | if (dev->flags.bus_address) | ||
359 | result = device_create_file(&dev->dev, &dev_attr_adr); | ||
360 | if (dev->pnp.unique_id) | ||
361 | result = device_create_file(&dev->dev, &dev_attr_uid); | ||
362 | |||
363 | status = acpi_evaluate_integer(dev->handle, "_SUN", NULL, &sun); | ||
364 | if (ACPI_SUCCESS(status)) { | ||
365 | dev->pnp.sun = (unsigned long)sun; | ||
366 | result = device_create_file(&dev->dev, &dev_attr_sun); | ||
367 | if (result) | ||
368 | goto end; | ||
369 | } else { | ||
370 | dev->pnp.sun = (unsigned long)-1; | ||
371 | } | ||
372 | |||
303 | /* | 373 | /* |
304 | * If device has _EJ0, 'eject' file is created that is used to trigger | 374 | * If device has _EJ0, 'eject' file is created that is used to trigger |
305 | * hot-removal function from userland. | 375 | * hot-removal function from userland. |
@@ -331,6 +401,14 @@ static void acpi_device_remove_files(struct acpi_device *dev) | |||
331 | if (ACPI_SUCCESS(status)) | 401 | if (ACPI_SUCCESS(status)) |
332 | device_remove_file(&dev->dev, &dev_attr_eject); | 402 | device_remove_file(&dev->dev, &dev_attr_eject); |
333 | 403 | ||
404 | status = acpi_get_handle(dev->handle, "_SUN", &temp); | ||
405 | if (ACPI_SUCCESS(status)) | ||
406 | device_remove_file(&dev->dev, &dev_attr_sun); | ||
407 | |||
408 | if (dev->pnp.unique_id) | ||
409 | device_remove_file(&dev->dev, &dev_attr_uid); | ||
410 | if (dev->flags.bus_address) | ||
411 | device_remove_file(&dev->dev, &dev_attr_adr); | ||
334 | device_remove_file(&dev->dev, &dev_attr_modalias); | 412 | device_remove_file(&dev->dev, &dev_attr_modalias); |
335 | device_remove_file(&dev->dev, &dev_attr_hid); | 413 | device_remove_file(&dev->dev, &dev_attr_hid); |
336 | if (dev->handle) | 414 | if (dev->handle) |
@@ -340,8 +418,8 @@ static void acpi_device_remove_files(struct acpi_device *dev) | |||
340 | ACPI Bus operations | 418 | ACPI Bus operations |
341 | -------------------------------------------------------------------------- */ | 419 | -------------------------------------------------------------------------- */ |
342 | 420 | ||
343 | int acpi_match_device_ids(struct acpi_device *device, | 421 | static const struct acpi_device_id *__acpi_match_device( |
344 | const struct acpi_device_id *ids) | 422 | struct acpi_device *device, const struct acpi_device_id *ids) |
345 | { | 423 | { |
346 | const struct acpi_device_id *id; | 424 | const struct acpi_device_id *id; |
347 | struct acpi_hardware_id *hwid; | 425 | struct acpi_hardware_id *hwid; |
@@ -351,14 +429,44 @@ int acpi_match_device_ids(struct acpi_device *device, | |||
351 | * driver for it. | 429 | * driver for it. |
352 | */ | 430 | */ |
353 | if (!device->status.present) | 431 | if (!device->status.present) |
354 | return -ENODEV; | 432 | return NULL; |
355 | 433 | ||
356 | for (id = ids; id->id[0]; id++) | 434 | for (id = ids; id->id[0]; id++) |
357 | list_for_each_entry(hwid, &device->pnp.ids, list) | 435 | list_for_each_entry(hwid, &device->pnp.ids, list) |
358 | if (!strcmp((char *) id->id, hwid->id)) | 436 | if (!strcmp((char *) id->id, hwid->id)) |
359 | return 0; | 437 | return id; |
360 | 438 | ||
361 | return -ENOENT; | 439 | return NULL; |
440 | } | ||
441 | |||
442 | /** | ||
443 | * acpi_match_device - Match a struct device against a given list of ACPI IDs | ||
444 | * @ids: Array of struct acpi_device_id object to match against. | ||
445 | * @dev: The device structure to match. | ||
446 | * | ||
447 | * Check if @dev has a valid ACPI handle and if there is a struct acpi_device | ||
448 | * object for that handle and use that object to match against a given list of | ||
449 | * device IDs. | ||
450 | * | ||
451 | * Return a pointer to the first matching ID on success or %NULL on failure. | ||
452 | */ | ||
453 | const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, | ||
454 | const struct device *dev) | ||
455 | { | ||
456 | struct acpi_device *adev; | ||
457 | |||
458 | if (!ids || !ACPI_HANDLE(dev) | ||
459 | || ACPI_FAILURE(acpi_bus_get_device(ACPI_HANDLE(dev), &adev))) | ||
460 | return NULL; | ||
461 | |||
462 | return __acpi_match_device(adev, ids); | ||
463 | } | ||
464 | EXPORT_SYMBOL_GPL(acpi_match_device); | ||
465 | |||
466 | int acpi_match_device_ids(struct acpi_device *device, | ||
467 | const struct acpi_device_id *ids) | ||
468 | { | ||
469 | return __acpi_match_device(device, ids) ? 0 : -ENOENT; | ||
362 | } | 470 | } |
363 | EXPORT_SYMBOL(acpi_match_device_ids); | 471 | EXPORT_SYMBOL(acpi_match_device_ids); |
364 | 472 | ||
@@ -377,6 +485,7 @@ static void acpi_device_release(struct device *dev) | |||
377 | struct acpi_device *acpi_dev = to_acpi_device(dev); | 485 | struct acpi_device *acpi_dev = to_acpi_device(dev); |
378 | 486 | ||
379 | acpi_free_ids(acpi_dev); | 487 | acpi_free_ids(acpi_dev); |
488 | kfree(acpi_dev->pnp.unique_id); | ||
380 | kfree(acpi_dev); | 489 | kfree(acpi_dev); |
381 | } | 490 | } |
382 | 491 | ||
@@ -859,8 +968,8 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle, | |||
859 | static void acpi_bus_set_run_wake_flags(struct acpi_device *device) | 968 | static void acpi_bus_set_run_wake_flags(struct acpi_device *device) |
860 | { | 969 | { |
861 | struct acpi_device_id button_device_ids[] = { | 970 | struct acpi_device_id button_device_ids[] = { |
862 | {"PNP0C0D", 0}, | ||
863 | {"PNP0C0C", 0}, | 971 | {"PNP0C0C", 0}, |
972 | {"PNP0C0D", 0}, | ||
864 | {"PNP0C0E", 0}, | 973 | {"PNP0C0E", 0}, |
865 | {"", 0}, | 974 | {"", 0}, |
866 | }; | 975 | }; |
@@ -872,6 +981,11 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device) | |||
872 | /* Power button, Lid switch always enable wakeup */ | 981 | /* Power button, Lid switch always enable wakeup */ |
873 | if (!acpi_match_device_ids(device, button_device_ids)) { | 982 | if (!acpi_match_device_ids(device, button_device_ids)) { |
874 | device->wakeup.flags.run_wake = 1; | 983 | device->wakeup.flags.run_wake = 1; |
984 | if (!acpi_match_device_ids(device, &button_device_ids[1])) { | ||
985 | /* Do not use Lid/sleep button for S5 wakeup */ | ||
986 | if (device->wakeup.sleep_state == ACPI_STATE_S5) | ||
987 | device->wakeup.sleep_state = ACPI_STATE_S4; | ||
988 | } | ||
875 | device_set_wakeup_capable(&device->dev, true); | 989 | device_set_wakeup_capable(&device->dev, true); |
876 | return; | 990 | return; |
877 | } | 991 | } |
@@ -965,8 +1079,10 @@ static int acpi_bus_get_power_flags(struct acpi_device *device) | |||
965 | * D3hot is only valid if _PR3 present. | 1079 | * D3hot is only valid if _PR3 present. |
966 | */ | 1080 | */ |
967 | if (ps->resources.count || | 1081 | if (ps->resources.count || |
968 | (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT)) | 1082 | (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT)) { |
969 | ps->flags.valid = 1; | 1083 | ps->flags.valid = 1; |
1084 | ps->flags.os_accessible = 1; | ||
1085 | } | ||
970 | 1086 | ||
971 | ps->power = -1; /* Unknown - driver assigned */ | 1087 | ps->power = -1; /* Unknown - driver assigned */ |
972 | ps->latency = -1; /* Unknown - driver assigned */ | 1088 | ps->latency = -1; /* Unknown - driver assigned */ |
@@ -982,6 +1098,11 @@ static int acpi_bus_get_power_flags(struct acpi_device *device) | |||
982 | if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set) | 1098 | if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set) |
983 | device->power.states[ACPI_STATE_D3_COLD].flags.explicit_set = 1; | 1099 | device->power.states[ACPI_STATE_D3_COLD].flags.explicit_set = 1; |
984 | 1100 | ||
1101 | /* Presence of _PS3 or _PRx means we can put the device into D3 cold */ | ||
1102 | if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set || | ||
1103 | device->power.flags.power_resources) | ||
1104 | device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1; | ||
1105 | |||
985 | acpi_bus_init_power(device); | 1106 | acpi_bus_init_power(device); |
986 | 1107 | ||
987 | return 0; | 1108 | return 0; |
@@ -1013,11 +1134,6 @@ static int acpi_bus_get_flags(struct acpi_device *device) | |||
1013 | device->flags.ejectable = 1; | 1134 | device->flags.ejectable = 1; |
1014 | } | 1135 | } |
1015 | 1136 | ||
1016 | /* Presence of _LCK indicates 'lockable' */ | ||
1017 | status = acpi_get_handle(device->handle, "_LCK", &temp); | ||
1018 | if (ACPI_SUCCESS(status)) | ||
1019 | device->flags.lockable = 1; | ||
1020 | |||
1021 | /* Power resources cannot be power manageable. */ | 1137 | /* Power resources cannot be power manageable. */ |
1022 | if (device->device_type == ACPI_BUS_TYPE_POWER) | 1138 | if (device->device_type == ACPI_BUS_TYPE_POWER) |
1023 | return 0; | 1139 | return 0; |
@@ -1185,7 +1301,7 @@ static void acpi_device_set_id(struct acpi_device *device) | |||
1185 | { | 1301 | { |
1186 | acpi_status status; | 1302 | acpi_status status; |
1187 | struct acpi_device_info *info; | 1303 | struct acpi_device_info *info; |
1188 | struct acpica_device_id_list *cid_list; | 1304 | struct acpi_pnp_device_id_list *cid_list; |
1189 | int i; | 1305 | int i; |
1190 | 1306 | ||
1191 | switch (device->device_type) { | 1307 | switch (device->device_type) { |
@@ -1212,6 +1328,9 @@ static void acpi_device_set_id(struct acpi_device *device) | |||
1212 | device->pnp.bus_address = info->address; | 1328 | device->pnp.bus_address = info->address; |
1213 | device->flags.bus_address = 1; | 1329 | device->flags.bus_address = 1; |
1214 | } | 1330 | } |
1331 | if (info->valid & ACPI_VALID_UID) | ||
1332 | device->pnp.unique_id = kstrdup(info->unique_id.string, | ||
1333 | GFP_KERNEL); | ||
1215 | 1334 | ||
1216 | kfree(info); | 1335 | kfree(info); |
1217 | 1336 | ||
@@ -1483,8 +1602,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl, | |||
1483 | */ | 1602 | */ |
1484 | device = NULL; | 1603 | device = NULL; |
1485 | acpi_bus_get_device(handle, &device); | 1604 | acpi_bus_get_device(handle, &device); |
1486 | if (ops->acpi_op_add && !device) | 1605 | if (ops->acpi_op_add && !device) { |
1487 | acpi_add_single_object(&device, handle, type, sta, ops); | 1606 | acpi_add_single_object(&device, handle, type, sta, ops); |
1607 | /* Is the device a known good platform device? */ | ||
1608 | if (device | ||
1609 | && !acpi_match_device_ids(device, acpi_platform_device_ids)) | ||
1610 | acpi_create_platform_device(device); | ||
1611 | } | ||
1488 | 1612 | ||
1489 | if (!device) | 1613 | if (!device) |
1490 | return AE_CTRL_DEPTH; | 1614 | return AE_CTRL_DEPTH; |
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index fdcdbb652915..2fcc67d34b11 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/reboot.h> | 18 | #include <linux/reboot.h> |
19 | #include <linux/acpi.h> | 19 | #include <linux/acpi.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/pm_runtime.h> | ||
22 | 21 | ||
23 | #include <asm/io.h> | 22 | #include <asm/io.h> |
24 | 23 | ||
@@ -81,6 +80,12 @@ static int acpi_sleep_prepare(u32 acpi_state) | |||
81 | 80 | ||
82 | #ifdef CONFIG_ACPI_SLEEP | 81 | #ifdef CONFIG_ACPI_SLEEP |
83 | static u32 acpi_target_sleep_state = ACPI_STATE_S0; | 82 | static u32 acpi_target_sleep_state = ACPI_STATE_S0; |
83 | |||
84 | u32 acpi_target_system_state(void) | ||
85 | { | ||
86 | return acpi_target_sleep_state; | ||
87 | } | ||
88 | |||
84 | static bool pwr_btn_event_pending; | 89 | static bool pwr_btn_event_pending; |
85 | 90 | ||
86 | /* | 91 | /* |
@@ -98,6 +103,21 @@ void __init acpi_nvs_nosave(void) | |||
98 | } | 103 | } |
99 | 104 | ||
100 | /* | 105 | /* |
106 | * The ACPI specification wants us to save NVS memory regions during hibernation | ||
107 | * but says nothing about saving NVS during S3. Not all versions of Windows | ||
108 | * save NVS on S3 suspend either, and it is clear that not all systems need | ||
109 | * NVS to be saved at S3 time. To improve suspend/resume time, allow the | ||
110 | * user to disable saving NVS on S3 if their system does not require it, but | ||
111 | * continue to save/restore NVS for S4 as specified. | ||
112 | */ | ||
113 | static bool nvs_nosave_s3; | ||
114 | |||
115 | void __init acpi_nvs_nosave_s3(void) | ||
116 | { | ||
117 | nvs_nosave_s3 = true; | ||
118 | } | ||
119 | |||
120 | /* | ||
101 | * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the | 121 | * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the |
102 | * user to request that behavior by using the 'acpi_old_suspend_ordering' | 122 | * user to request that behavior by using the 'acpi_old_suspend_ordering' |
103 | * kernel command line option that causes the following variable to be set. | 123 | * kernel command line option that causes the following variable to be set. |
@@ -109,6 +129,180 @@ void __init acpi_old_suspend_ordering(void) | |||
109 | old_suspend_ordering = true; | 129 | old_suspend_ordering = true; |
110 | } | 130 | } |
111 | 131 | ||
132 | static int __init init_old_suspend_ordering(const struct dmi_system_id *d) | ||
133 | { | ||
134 | acpi_old_suspend_ordering(); | ||
135 | return 0; | ||
136 | } | ||
137 | |||
138 | static int __init init_nvs_nosave(const struct dmi_system_id *d) | ||
139 | { | ||
140 | acpi_nvs_nosave(); | ||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | ||
145 | { | ||
146 | .callback = init_old_suspend_ordering, | ||
147 | .ident = "Abit KN9 (nForce4 variant)", | ||
148 | .matches = { | ||
149 | DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"), | ||
150 | DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"), | ||
151 | }, | ||
152 | }, | ||
153 | { | ||
154 | .callback = init_old_suspend_ordering, | ||
155 | .ident = "HP xw4600 Workstation", | ||
156 | .matches = { | ||
157 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
158 | DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"), | ||
159 | }, | ||
160 | }, | ||
161 | { | ||
162 | .callback = init_old_suspend_ordering, | ||
163 | .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)", | ||
164 | .matches = { | ||
165 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."), | ||
166 | DMI_MATCH(DMI_BOARD_NAME, "M2N8L"), | ||
167 | }, | ||
168 | }, | ||
169 | { | ||
170 | .callback = init_old_suspend_ordering, | ||
171 | .ident = "Panasonic CF51-2L", | ||
172 | .matches = { | ||
173 | DMI_MATCH(DMI_BOARD_VENDOR, | ||
174 | "Matsushita Electric Industrial Co.,Ltd."), | ||
175 | DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), | ||
176 | }, | ||
177 | }, | ||
178 | { | ||
179 | .callback = init_nvs_nosave, | ||
180 | .ident = "Sony Vaio VGN-FW21E", | ||
181 | .matches = { | ||
182 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
183 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"), | ||
184 | }, | ||
185 | }, | ||
186 | { | ||
187 | .callback = init_nvs_nosave, | ||
188 | .ident = "Sony Vaio VPCEB17FX", | ||
189 | .matches = { | ||
190 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
191 | DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"), | ||
192 | }, | ||
193 | }, | ||
194 | { | ||
195 | .callback = init_nvs_nosave, | ||
196 | .ident = "Sony Vaio VGN-SR11M", | ||
197 | .matches = { | ||
198 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
199 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"), | ||
200 | }, | ||
201 | }, | ||
202 | { | ||
203 | .callback = init_nvs_nosave, | ||
204 | .ident = "Everex StepNote Series", | ||
205 | .matches = { | ||
206 | DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."), | ||
207 | DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"), | ||
208 | }, | ||
209 | }, | ||
210 | { | ||
211 | .callback = init_nvs_nosave, | ||
212 | .ident = "Sony Vaio VPCEB1Z1E", | ||
213 | .matches = { | ||
214 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
215 | DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"), | ||
216 | }, | ||
217 | }, | ||
218 | { | ||
219 | .callback = init_nvs_nosave, | ||
220 | .ident = "Sony Vaio VGN-NW130D", | ||
221 | .matches = { | ||
222 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
223 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"), | ||
224 | }, | ||
225 | }, | ||
226 | { | ||
227 | .callback = init_nvs_nosave, | ||
228 | .ident = "Sony Vaio VPCCW29FX", | ||
229 | .matches = { | ||
230 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
231 | DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"), | ||
232 | }, | ||
233 | }, | ||
234 | { | ||
235 | .callback = init_nvs_nosave, | ||
236 | .ident = "Averatec AV1020-ED2", | ||
237 | .matches = { | ||
238 | DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"), | ||
239 | DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), | ||
240 | }, | ||
241 | }, | ||
242 | { | ||
243 | .callback = init_old_suspend_ordering, | ||
244 | .ident = "Asus A8N-SLI DELUXE", | ||
245 | .matches = { | ||
246 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), | ||
247 | DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"), | ||
248 | }, | ||
249 | }, | ||
250 | { | ||
251 | .callback = init_old_suspend_ordering, | ||
252 | .ident = "Asus A8N-SLI Premium", | ||
253 | .matches = { | ||
254 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), | ||
255 | DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"), | ||
256 | }, | ||
257 | }, | ||
258 | { | ||
259 | .callback = init_nvs_nosave, | ||
260 | .ident = "Sony Vaio VGN-SR26GN_P", | ||
261 | .matches = { | ||
262 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
263 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"), | ||
264 | }, | ||
265 | }, | ||
266 | { | ||
267 | .callback = init_nvs_nosave, | ||
268 | .ident = "Sony Vaio VPCEB1S1E", | ||
269 | .matches = { | ||
270 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
271 | DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"), | ||
272 | }, | ||
273 | }, | ||
274 | { | ||
275 | .callback = init_nvs_nosave, | ||
276 | .ident = "Sony Vaio VGN-FW520F", | ||
277 | .matches = { | ||
278 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
279 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"), | ||
280 | }, | ||
281 | }, | ||
282 | { | ||
283 | .callback = init_nvs_nosave, | ||
284 | .ident = "Asus K54C", | ||
285 | .matches = { | ||
286 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), | ||
287 | DMI_MATCH(DMI_PRODUCT_NAME, "K54C"), | ||
288 | }, | ||
289 | }, | ||
290 | { | ||
291 | .callback = init_nvs_nosave, | ||
292 | .ident = "Asus K54HR", | ||
293 | .matches = { | ||
294 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), | ||
295 | DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"), | ||
296 | }, | ||
297 | }, | ||
298 | {}, | ||
299 | }; | ||
300 | |||
301 | static void acpi_sleep_dmi_check(void) | ||
302 | { | ||
303 | dmi_check_system(acpisleep_dmi_table); | ||
304 | } | ||
305 | |||
112 | /** | 306 | /** |
113 | * acpi_pm_freeze - Disable the GPEs and suspend EC transactions. | 307 | * acpi_pm_freeze - Disable the GPEs and suspend EC transactions. |
114 | */ | 308 | */ |
@@ -224,6 +418,7 @@ static void acpi_pm_end(void) | |||
224 | } | 418 | } |
225 | #else /* !CONFIG_ACPI_SLEEP */ | 419 | #else /* !CONFIG_ACPI_SLEEP */ |
226 | #define acpi_target_sleep_state ACPI_STATE_S0 | 420 | #define acpi_target_sleep_state ACPI_STATE_S0 |
421 | static inline void acpi_sleep_dmi_check(void) {} | ||
227 | #endif /* CONFIG_ACPI_SLEEP */ | 422 | #endif /* CONFIG_ACPI_SLEEP */ |
228 | 423 | ||
229 | #ifdef CONFIG_SUSPEND | 424 | #ifdef CONFIG_SUSPEND |
@@ -243,7 +438,7 @@ static int acpi_suspend_begin(suspend_state_t pm_state) | |||
243 | u32 acpi_state = acpi_suspend_states[pm_state]; | 438 | u32 acpi_state = acpi_suspend_states[pm_state]; |
244 | int error = 0; | 439 | int error = 0; |
245 | 440 | ||
246 | error = nvs_nosave ? 0 : suspend_nvs_alloc(); | 441 | error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc(); |
247 | if (error) | 442 | if (error) |
248 | return error; | 443 | return error; |
249 | 444 | ||
@@ -382,167 +577,6 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = { | |||
382 | .end = acpi_pm_end, | 577 | .end = acpi_pm_end, |
383 | .recover = acpi_pm_finish, | 578 | .recover = acpi_pm_finish, |
384 | }; | 579 | }; |
385 | |||
386 | static int __init init_old_suspend_ordering(const struct dmi_system_id *d) | ||
387 | { | ||
388 | old_suspend_ordering = true; | ||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | static int __init init_nvs_nosave(const struct dmi_system_id *d) | ||
393 | { | ||
394 | acpi_nvs_nosave(); | ||
395 | return 0; | ||
396 | } | ||
397 | |||
398 | static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | ||
399 | { | ||
400 | .callback = init_old_suspend_ordering, | ||
401 | .ident = "Abit KN9 (nForce4 variant)", | ||
402 | .matches = { | ||
403 | DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"), | ||
404 | DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"), | ||
405 | }, | ||
406 | }, | ||
407 | { | ||
408 | .callback = init_old_suspend_ordering, | ||
409 | .ident = "HP xw4600 Workstation", | ||
410 | .matches = { | ||
411 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
412 | DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"), | ||
413 | }, | ||
414 | }, | ||
415 | { | ||
416 | .callback = init_old_suspend_ordering, | ||
417 | .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)", | ||
418 | .matches = { | ||
419 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."), | ||
420 | DMI_MATCH(DMI_BOARD_NAME, "M2N8L"), | ||
421 | }, | ||
422 | }, | ||
423 | { | ||
424 | .callback = init_old_suspend_ordering, | ||
425 | .ident = "Panasonic CF51-2L", | ||
426 | .matches = { | ||
427 | DMI_MATCH(DMI_BOARD_VENDOR, | ||
428 | "Matsushita Electric Industrial Co.,Ltd."), | ||
429 | DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), | ||
430 | }, | ||
431 | }, | ||
432 | { | ||
433 | .callback = init_nvs_nosave, | ||
434 | .ident = "Sony Vaio VGN-FW21E", | ||
435 | .matches = { | ||
436 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
437 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"), | ||
438 | }, | ||
439 | }, | ||
440 | { | ||
441 | .callback = init_nvs_nosave, | ||
442 | .ident = "Sony Vaio VPCEB17FX", | ||
443 | .matches = { | ||
444 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
445 | DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"), | ||
446 | }, | ||
447 | }, | ||
448 | { | ||
449 | .callback = init_nvs_nosave, | ||
450 | .ident = "Sony Vaio VGN-SR11M", | ||
451 | .matches = { | ||
452 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
453 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"), | ||
454 | }, | ||
455 | }, | ||
456 | { | ||
457 | .callback = init_nvs_nosave, | ||
458 | .ident = "Everex StepNote Series", | ||
459 | .matches = { | ||
460 | DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."), | ||
461 | DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"), | ||
462 | }, | ||
463 | }, | ||
464 | { | ||
465 | .callback = init_nvs_nosave, | ||
466 | .ident = "Sony Vaio VPCEB1Z1E", | ||
467 | .matches = { | ||
468 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
469 | DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"), | ||
470 | }, | ||
471 | }, | ||
472 | { | ||
473 | .callback = init_nvs_nosave, | ||
474 | .ident = "Sony Vaio VGN-NW130D", | ||
475 | .matches = { | ||
476 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
477 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"), | ||
478 | }, | ||
479 | }, | ||
480 | { | ||
481 | .callback = init_nvs_nosave, | ||
482 | .ident = "Sony Vaio VPCCW29FX", | ||
483 | .matches = { | ||
484 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
485 | DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"), | ||
486 | }, | ||
487 | }, | ||
488 | { | ||
489 | .callback = init_nvs_nosave, | ||
490 | .ident = "Averatec AV1020-ED2", | ||
491 | .matches = { | ||
492 | DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"), | ||
493 | DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), | ||
494 | }, | ||
495 | }, | ||
496 | { | ||
497 | .callback = init_old_suspend_ordering, | ||
498 | .ident = "Asus A8N-SLI DELUXE", | ||
499 | .matches = { | ||
500 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), | ||
501 | DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"), | ||
502 | }, | ||
503 | }, | ||
504 | { | ||
505 | .callback = init_old_suspend_ordering, | ||
506 | .ident = "Asus A8N-SLI Premium", | ||
507 | .matches = { | ||
508 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), | ||
509 | DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"), | ||
510 | }, | ||
511 | }, | ||
512 | { | ||
513 | .callback = init_nvs_nosave, | ||
514 | .ident = "Sony Vaio VGN-SR26GN_P", | ||
515 | .matches = { | ||
516 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
517 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"), | ||
518 | }, | ||
519 | }, | ||
520 | { | ||
521 | .callback = init_nvs_nosave, | ||
522 | .ident = "Sony Vaio VGN-FW520F", | ||
523 | .matches = { | ||
524 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
525 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"), | ||
526 | }, | ||
527 | }, | ||
528 | { | ||
529 | .callback = init_nvs_nosave, | ||
530 | .ident = "Asus K54C", | ||
531 | .matches = { | ||
532 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), | ||
533 | DMI_MATCH(DMI_PRODUCT_NAME, "K54C"), | ||
534 | }, | ||
535 | }, | ||
536 | { | ||
537 | .callback = init_nvs_nosave, | ||
538 | .ident = "Asus K54HR", | ||
539 | .matches = { | ||
540 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), | ||
541 | DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"), | ||
542 | }, | ||
543 | }, | ||
544 | {}, | ||
545 | }; | ||
546 | #endif /* CONFIG_SUSPEND */ | 580 | #endif /* CONFIG_SUSPEND */ |
547 | 581 | ||
548 | #ifdef CONFIG_HIBERNATION | 582 | #ifdef CONFIG_HIBERNATION |
@@ -681,177 +715,6 @@ int acpi_suspend(u32 acpi_state) | |||
681 | return -EINVAL; | 715 | return -EINVAL; |
682 | } | 716 | } |
683 | 717 | ||
684 | #ifdef CONFIG_PM | ||
685 | /** | ||
686 | * acpi_pm_device_sleep_state - return preferred power state of ACPI device | ||
687 | * in the system sleep state given by %acpi_target_sleep_state | ||
688 | * @dev: device to examine; its driver model wakeup flags control | ||
689 | * whether it should be able to wake up the system | ||
690 | * @d_min_p: used to store the upper limit of allowed states range | ||
691 | * @d_max_in: specify the lowest allowed states | ||
692 | * Return value: preferred power state of the device on success, -ENODEV | ||
693 | * (ie. if there's no 'struct acpi_device' for @dev) or -EINVAL on failure | ||
694 | * | ||
695 | * Find the lowest power (highest number) ACPI device power state that | ||
696 | * device @dev can be in while the system is in the sleep state represented | ||
697 | * by %acpi_target_sleep_state. If @wake is nonzero, the device should be | ||
698 | * able to wake up the system from this sleep state. If @d_min_p is set, | ||
699 | * the highest power (lowest number) device power state of @dev allowed | ||
700 | * in this system sleep state is stored at the location pointed to by it. | ||
701 | * | ||
702 | * The caller must ensure that @dev is valid before using this function. | ||
703 | * The caller is also responsible for figuring out if the device is | ||
704 | * supposed to be able to wake up the system and passing this information | ||
705 | * via @wake. | ||
706 | */ | ||
707 | |||
708 | int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in) | ||
709 | { | ||
710 | acpi_handle handle = DEVICE_ACPI_HANDLE(dev); | ||
711 | struct acpi_device *adev; | ||
712 | char acpi_method[] = "_SxD"; | ||
713 | unsigned long long d_min, d_max; | ||
714 | |||
715 | if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3) | ||
716 | return -EINVAL; | ||
717 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { | ||
718 | printk(KERN_DEBUG "ACPI handle has no context!\n"); | ||
719 | return -ENODEV; | ||
720 | } | ||
721 | |||
722 | acpi_method[2] = '0' + acpi_target_sleep_state; | ||
723 | /* | ||
724 | * If the sleep state is S0, the lowest limit from ACPI is D3, | ||
725 | * but if the device has _S0W, we will use the value from _S0W | ||
726 | * as the lowest limit from ACPI. Finally, we will constrain | ||
727 | * the lowest limit with the specified one. | ||
728 | */ | ||
729 | d_min = ACPI_STATE_D0; | ||
730 | d_max = ACPI_STATE_D3; | ||
731 | |||
732 | /* | ||
733 | * If present, _SxD methods return the minimum D-state (highest power | ||
734 | * state) we can use for the corresponding S-states. Otherwise, the | ||
735 | * minimum D-state is D0 (ACPI 3.x). | ||
736 | * | ||
737 | * NOTE: We rely on acpi_evaluate_integer() not clobbering the integer | ||
738 | * provided -- that's our fault recovery, we ignore retval. | ||
739 | */ | ||
740 | if (acpi_target_sleep_state > ACPI_STATE_S0) | ||
741 | acpi_evaluate_integer(handle, acpi_method, NULL, &d_min); | ||
742 | |||
743 | /* | ||
744 | * If _PRW says we can wake up the system from the target sleep state, | ||
745 | * the D-state returned by _SxD is sufficient for that (we assume a | ||
746 | * wakeup-aware driver if wake is set). Still, if _SxW exists | ||
747 | * (ACPI 3.x), it should return the maximum (lowest power) D-state that | ||
748 | * can wake the system. _S0W may be valid, too. | ||
749 | */ | ||
750 | if (acpi_target_sleep_state == ACPI_STATE_S0 || | ||
751 | (device_may_wakeup(dev) && adev->wakeup.flags.valid && | ||
752 | adev->wakeup.sleep_state >= acpi_target_sleep_state)) { | ||
753 | acpi_status status; | ||
754 | |||
755 | acpi_method[3] = 'W'; | ||
756 | status = acpi_evaluate_integer(handle, acpi_method, NULL, | ||
757 | &d_max); | ||
758 | if (ACPI_FAILURE(status)) { | ||
759 | if (acpi_target_sleep_state != ACPI_STATE_S0 || | ||
760 | status != AE_NOT_FOUND) | ||
761 | d_max = d_min; | ||
762 | } else if (d_max < d_min) { | ||
763 | /* Warn the user of the broken DSDT */ | ||
764 | printk(KERN_WARNING "ACPI: Wrong value from %s\n", | ||
765 | acpi_method); | ||
766 | /* Sanitize it */ | ||
767 | d_min = d_max; | ||
768 | } | ||
769 | } | ||
770 | |||
771 | if (d_max_in < d_min) | ||
772 | return -EINVAL; | ||
773 | if (d_min_p) | ||
774 | *d_min_p = d_min; | ||
775 | /* constrain d_max with specified lowest limit (max number) */ | ||
776 | if (d_max > d_max_in) { | ||
777 | for (d_max = d_max_in; d_max > d_min; d_max--) { | ||
778 | if (adev->power.states[d_max].flags.valid) | ||
779 | break; | ||
780 | } | ||
781 | } | ||
782 | return d_max; | ||
783 | } | ||
784 | EXPORT_SYMBOL(acpi_pm_device_sleep_state); | ||
785 | #endif /* CONFIG_PM */ | ||
786 | |||
787 | #ifdef CONFIG_PM_SLEEP | ||
788 | /** | ||
789 | * acpi_pm_device_run_wake - Enable/disable wake-up for given device. | ||
790 | * @phys_dev: Device to enable/disable the platform to wake-up the system for. | ||
791 | * @enable: Whether enable or disable the wake-up functionality. | ||
792 | * | ||
793 | * Find the ACPI device object corresponding to @pci_dev and try to | ||
794 | * enable/disable the GPE associated with it. | ||
795 | */ | ||
796 | int acpi_pm_device_run_wake(struct device *phys_dev, bool enable) | ||
797 | { | ||
798 | struct acpi_device *dev; | ||
799 | acpi_handle handle; | ||
800 | |||
801 | if (!device_run_wake(phys_dev)) | ||
802 | return -EINVAL; | ||
803 | |||
804 | handle = DEVICE_ACPI_HANDLE(phys_dev); | ||
805 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) { | ||
806 | dev_dbg(phys_dev, "ACPI handle has no context in %s!\n", | ||
807 | __func__); | ||
808 | return -ENODEV; | ||
809 | } | ||
810 | |||
811 | if (enable) { | ||
812 | acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0); | ||
813 | acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number); | ||
814 | } else { | ||
815 | acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number); | ||
816 | acpi_disable_wakeup_device_power(dev); | ||
817 | } | ||
818 | |||
819 | return 0; | ||
820 | } | ||
821 | EXPORT_SYMBOL(acpi_pm_device_run_wake); | ||
822 | |||
823 | /** | ||
824 | * acpi_pm_device_sleep_wake - enable or disable the system wake-up | ||
825 | * capability of given device | ||
826 | * @dev: device to handle | ||
827 | * @enable: 'true' - enable, 'false' - disable the wake-up capability | ||
828 | */ | ||
829 | int acpi_pm_device_sleep_wake(struct device *dev, bool enable) | ||
830 | { | ||
831 | acpi_handle handle; | ||
832 | struct acpi_device *adev; | ||
833 | int error; | ||
834 | |||
835 | if (!device_can_wakeup(dev)) | ||
836 | return -EINVAL; | ||
837 | |||
838 | handle = DEVICE_ACPI_HANDLE(dev); | ||
839 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { | ||
840 | dev_dbg(dev, "ACPI handle has no context in %s!\n", __func__); | ||
841 | return -ENODEV; | ||
842 | } | ||
843 | |||
844 | error = enable ? | ||
845 | acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) : | ||
846 | acpi_disable_wakeup_device_power(adev); | ||
847 | if (!error) | ||
848 | dev_info(dev, "wake-up capability %s by ACPI\n", | ||
849 | enable ? "enabled" : "disabled"); | ||
850 | |||
851 | return error; | ||
852 | } | ||
853 | #endif /* CONFIG_PM_SLEEP */ | ||
854 | |||
855 | static void acpi_power_off_prepare(void) | 718 | static void acpi_power_off_prepare(void) |
856 | { | 719 | { |
857 | /* Prepare to power off the system */ | 720 | /* Prepare to power off the system */ |
@@ -873,13 +736,13 @@ int __init acpi_sleep_init(void) | |||
873 | u8 type_a, type_b; | 736 | u8 type_a, type_b; |
874 | #ifdef CONFIG_SUSPEND | 737 | #ifdef CONFIG_SUSPEND |
875 | int i = 0; | 738 | int i = 0; |
876 | |||
877 | dmi_check_system(acpisleep_dmi_table); | ||
878 | #endif | 739 | #endif |
879 | 740 | ||
880 | if (acpi_disabled) | 741 | if (acpi_disabled) |
881 | return 0; | 742 | return 0; |
882 | 743 | ||
744 | acpi_sleep_dmi_check(); | ||
745 | |||
883 | sleep_states[ACPI_STATE_S0] = 1; | 746 | sleep_states[ACPI_STATE_S0] = 1; |
884 | printk(KERN_INFO PREFIX "(supports S0"); | 747 | printk(KERN_INFO PREFIX "(supports S0"); |
885 | 748 | ||
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 7c3f98ba4afe..ea61ca9129cd 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c | |||
@@ -476,7 +476,7 @@ static void fixed_event_count(u32 event_number) | |||
476 | return; | 476 | return; |
477 | } | 477 | } |
478 | 478 | ||
479 | static void acpi_gbl_event_handler(u32 event_type, acpi_handle device, | 479 | static void acpi_global_event_handler(u32 event_type, acpi_handle device, |
480 | u32 event_number, void *context) | 480 | u32 event_number, void *context) |
481 | { | 481 | { |
482 | if (event_type == ACPI_EVENT_TYPE_GPE) | 482 | if (event_type == ACPI_EVENT_TYPE_GPE) |
@@ -638,7 +638,7 @@ void acpi_irq_stats_init(void) | |||
638 | if (all_counters == NULL) | 638 | if (all_counters == NULL) |
639 | goto fail; | 639 | goto fail; |
640 | 640 | ||
641 | status = acpi_install_global_event_handler(acpi_gbl_event_handler, NULL); | 641 | status = acpi_install_global_event_handler(acpi_global_event_handler, NULL); |
642 | if (ACPI_FAILURE(status)) | 642 | if (ACPI_FAILURE(status)) |
643 | goto fail; | 643 | goto fail; |
644 | 644 | ||
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 804204d41999..6e8cc16b54c1 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c | |||
@@ -984,6 +984,38 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event) | |||
984 | } | 984 | } |
985 | } | 985 | } |
986 | 986 | ||
987 | /* | ||
988 | * On some platforms, the AML code has dependency about | ||
989 | * the evaluating order of _TMP and _CRT/_HOT/_PSV/_ACx. | ||
990 | * 1. On HP Pavilion G4-1016tx, _TMP must be invoked after | ||
991 | * /_CRT/_HOT/_PSV/_ACx, or else system will be power off. | ||
992 | * 2. On HP Compaq 6715b/6715s, the return value of _PSV is 0 | ||
993 | * if _TMP has never been evaluated. | ||
994 | * | ||
995 | * As this dependency is totally transparent to OS, evaluate | ||
996 | * all of them once, in the order of _CRT/_HOT/_PSV/_ACx, | ||
997 | * _TMP, before they are actually used. | ||
998 | */ | ||
999 | static void acpi_thermal_aml_dependency_fix(struct acpi_thermal *tz) | ||
1000 | { | ||
1001 | acpi_handle handle = tz->device->handle; | ||
1002 | unsigned long long value; | ||
1003 | int i; | ||
1004 | |||
1005 | acpi_evaluate_integer(handle, "_CRT", NULL, &value); | ||
1006 | acpi_evaluate_integer(handle, "_HOT", NULL, &value); | ||
1007 | acpi_evaluate_integer(handle, "_PSV", NULL, &value); | ||
1008 | for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) { | ||
1009 | char name[5] = { '_', 'A', 'C', ('0' + i), '\0' }; | ||
1010 | acpi_status status; | ||
1011 | |||
1012 | status = acpi_evaluate_integer(handle, name, NULL, &value); | ||
1013 | if (status == AE_NOT_FOUND) | ||
1014 | break; | ||
1015 | } | ||
1016 | acpi_evaluate_integer(handle, "_TMP", NULL, &value); | ||
1017 | } | ||
1018 | |||
987 | static int acpi_thermal_get_info(struct acpi_thermal *tz) | 1019 | static int acpi_thermal_get_info(struct acpi_thermal *tz) |
988 | { | 1020 | { |
989 | int result = 0; | 1021 | int result = 0; |
@@ -992,6 +1024,8 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz) | |||
992 | if (!tz) | 1024 | if (!tz) |
993 | return -EINVAL; | 1025 | return -EINVAL; |
994 | 1026 | ||
1027 | acpi_thermal_aml_dependency_fix(tz); | ||
1028 | |||
995 | /* Get trip points [_CRT, _PSV, etc.] (required) */ | 1029 | /* Get trip points [_CRT, _PSV, etc.] (required) */ |
996 | result = acpi_thermal_get_trip_points(tz); | 1030 | result = acpi_thermal_get_trip_points(tz); |
997 | if (result) | 1031 | if (result) |
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 462f7e300363..744371304313 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c | |||
@@ -28,6 +28,8 @@ | |||
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
30 | #include <linux/types.h> | 30 | #include <linux/types.h> |
31 | #include <linux/hardirq.h> | ||
32 | #include <linux/acpi.h> | ||
31 | #include <acpi/acpi_bus.h> | 33 | #include <acpi/acpi_bus.h> |
32 | #include <acpi/acpi_drivers.h> | 34 | #include <acpi/acpi_drivers.h> |
33 | 35 | ||
@@ -457,3 +459,39 @@ acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event, | |||
457 | #endif | 459 | #endif |
458 | } | 460 | } |
459 | EXPORT_SYMBOL(acpi_evaluate_hotplug_ost); | 461 | EXPORT_SYMBOL(acpi_evaluate_hotplug_ost); |
462 | |||
463 | /** | ||
464 | * acpi_handle_printk: Print message with ACPI prefix and object path | ||
465 | * | ||
466 | * This function is called through acpi_handle_<level> macros and prints | ||
467 | * a message with ACPI prefix and object path. This function acquires | ||
468 | * the global namespace mutex to obtain an object path. In interrupt | ||
469 | * context, it shows the object path as <n/a>. | ||
470 | */ | ||
471 | void | ||
472 | acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...) | ||
473 | { | ||
474 | struct va_format vaf; | ||
475 | va_list args; | ||
476 | struct acpi_buffer buffer = { | ||
477 | .length = ACPI_ALLOCATE_BUFFER, | ||
478 | .pointer = NULL | ||
479 | }; | ||
480 | const char *path; | ||
481 | |||
482 | va_start(args, fmt); | ||
483 | vaf.fmt = fmt; | ||
484 | vaf.va = &args; | ||
485 | |||
486 | if (in_interrupt() || | ||
487 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer) != AE_OK) | ||
488 | path = "<n/a>"; | ||
489 | else | ||
490 | path = buffer.pointer; | ||
491 | |||
492 | printk("%sACPI: %s: %pV", level, path, &vaf); | ||
493 | |||
494 | va_end(args); | ||
495 | kfree(buffer.pointer); | ||
496 | } | ||
497 | EXPORT_SYMBOL(acpi_handle_printk); | ||
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 0230cb6cbb3a..ac9a69cd45f5 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
@@ -389,6 +389,12 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d) | |||
389 | return 0; | 389 | return 0; |
390 | } | 390 | } |
391 | 391 | ||
392 | static int video_ignore_initial_backlight(const struct dmi_system_id *d) | ||
393 | { | ||
394 | use_bios_initial_backlight = 0; | ||
395 | return 0; | ||
396 | } | ||
397 | |||
392 | static struct dmi_system_id video_dmi_table[] __initdata = { | 398 | static struct dmi_system_id video_dmi_table[] __initdata = { |
393 | /* | 399 | /* |
394 | * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 | 400 | * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 |
@@ -433,6 +439,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { | |||
433 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), | 439 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), |
434 | }, | 440 | }, |
435 | }, | 441 | }, |
442 | { | ||
443 | .callback = video_ignore_initial_backlight, | ||
444 | .ident = "HP Folio 13-2000", | ||
445 | .matches = { | ||
446 | DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), | ||
447 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"), | ||
448 | }, | ||
449 | }, | ||
436 | {} | 450 | {} |
437 | }; | 451 | }; |
438 | 452 | ||
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index b728880ef10e..4ac2593234e7 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c | |||
@@ -156,6 +156,14 @@ static struct dmi_system_id video_detect_dmi_table[] = { | |||
156 | DMI_MATCH(DMI_BOARD_NAME, "X360"), | 156 | DMI_MATCH(DMI_BOARD_NAME, "X360"), |
157 | }, | 157 | }, |
158 | }, | 158 | }, |
159 | { | ||
160 | .callback = video_detect_force_vendor, | ||
161 | .ident = "Asus UL30VT", | ||
162 | .matches = { | ||
163 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), | ||
164 | DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"), | ||
165 | }, | ||
166 | }, | ||
159 | { }, | 167 | { }, |
160 | }; | 168 | }; |
161 | 169 | ||
diff --git a/drivers/base/core.c b/drivers/base/core.c index abea76c36a4b..150a41580fad 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c | |||
@@ -1180,7 +1180,6 @@ void device_del(struct device *dev) | |||
1180 | if (dev->bus) | 1180 | if (dev->bus) |
1181 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, | 1181 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, |
1182 | BUS_NOTIFY_DEL_DEVICE, dev); | 1182 | BUS_NOTIFY_DEL_DEVICE, dev); |
1183 | device_pm_remove(dev); | ||
1184 | dpm_sysfs_remove(dev); | 1183 | dpm_sysfs_remove(dev); |
1185 | if (parent) | 1184 | if (parent) |
1186 | klist_del(&dev->p->knode_parent); | 1185 | klist_del(&dev->p->knode_parent); |
@@ -1205,6 +1204,7 @@ void device_del(struct device *dev) | |||
1205 | device_remove_file(dev, &uevent_attr); | 1204 | device_remove_file(dev, &uevent_attr); |
1206 | device_remove_attrs(dev); | 1205 | device_remove_attrs(dev); |
1207 | bus_remove_device(dev); | 1206 | bus_remove_device(dev); |
1207 | device_pm_remove(dev); | ||
1208 | driver_deferred_probe_del(dev); | 1208 | driver_deferred_probe_del(dev); |
1209 | 1209 | ||
1210 | /* Notify the platform of the removal, in case they | 1210 | /* Notify the platform of the removal, in case they |
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 72c776f2a1f5..b2ee3bcd5a41 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | #include <linux/pm_runtime.h> | 22 | #include <linux/pm_runtime.h> |
23 | #include <linux/idr.h> | 23 | #include <linux/idr.h> |
24 | #include <linux/acpi.h> | ||
24 | 25 | ||
25 | #include "base.h" | 26 | #include "base.h" |
26 | #include "power/power.h" | 27 | #include "power/power.h" |
@@ -436,6 +437,7 @@ struct platform_device *platform_device_register_full( | |||
436 | goto err_alloc; | 437 | goto err_alloc; |
437 | 438 | ||
438 | pdev->dev.parent = pdevinfo->parent; | 439 | pdev->dev.parent = pdevinfo->parent; |
440 | ACPI_HANDLE_SET(&pdev->dev, pdevinfo->acpi_node.handle); | ||
439 | 441 | ||
440 | if (pdevinfo->dma_mask) { | 442 | if (pdevinfo->dma_mask) { |
441 | /* | 443 | /* |
@@ -466,6 +468,7 @@ struct platform_device *platform_device_register_full( | |||
466 | ret = platform_device_add(pdev); | 468 | ret = platform_device_add(pdev); |
467 | if (ret) { | 469 | if (ret) { |
468 | err: | 470 | err: |
471 | ACPI_HANDLE_SET(&pdev->dev, NULL); | ||
469 | kfree(pdev->dev.dma_mask); | 472 | kfree(pdev->dev.dma_mask); |
470 | 473 | ||
471 | err_alloc: | 474 | err_alloc: |
@@ -481,8 +484,16 @@ static int platform_drv_probe(struct device *_dev) | |||
481 | { | 484 | { |
482 | struct platform_driver *drv = to_platform_driver(_dev->driver); | 485 | struct platform_driver *drv = to_platform_driver(_dev->driver); |
483 | struct platform_device *dev = to_platform_device(_dev); | 486 | struct platform_device *dev = to_platform_device(_dev); |
487 | int ret; | ||
484 | 488 | ||
485 | return drv->probe(dev); | 489 | if (ACPI_HANDLE(_dev)) |
490 | acpi_dev_pm_attach(_dev, true); | ||
491 | |||
492 | ret = drv->probe(dev); | ||
493 | if (ret && ACPI_HANDLE(_dev)) | ||
494 | acpi_dev_pm_detach(_dev, true); | ||
495 | |||
496 | return ret; | ||
486 | } | 497 | } |
487 | 498 | ||
488 | static int platform_drv_probe_fail(struct device *_dev) | 499 | static int platform_drv_probe_fail(struct device *_dev) |
@@ -494,8 +505,13 @@ static int platform_drv_remove(struct device *_dev) | |||
494 | { | 505 | { |
495 | struct platform_driver *drv = to_platform_driver(_dev->driver); | 506 | struct platform_driver *drv = to_platform_driver(_dev->driver); |
496 | struct platform_device *dev = to_platform_device(_dev); | 507 | struct platform_device *dev = to_platform_device(_dev); |
508 | int ret; | ||
509 | |||
510 | ret = drv->remove(dev); | ||
511 | if (ACPI_HANDLE(_dev)) | ||
512 | acpi_dev_pm_detach(_dev, true); | ||
497 | 513 | ||
498 | return drv->remove(dev); | 514 | return ret; |
499 | } | 515 | } |
500 | 516 | ||
501 | static void platform_drv_shutdown(struct device *_dev) | 517 | static void platform_drv_shutdown(struct device *_dev) |
@@ -504,6 +520,8 @@ static void platform_drv_shutdown(struct device *_dev) | |||
504 | struct platform_device *dev = to_platform_device(_dev); | 520 | struct platform_device *dev = to_platform_device(_dev); |
505 | 521 | ||
506 | drv->shutdown(dev); | 522 | drv->shutdown(dev); |
523 | if (ACPI_HANDLE(_dev)) | ||
524 | acpi_dev_pm_detach(_dev, true); | ||
507 | } | 525 | } |
508 | 526 | ||
509 | /** | 527 | /** |
@@ -709,6 +727,10 @@ static int platform_match(struct device *dev, struct device_driver *drv) | |||
709 | if (of_driver_match_device(dev, drv)) | 727 | if (of_driver_match_device(dev, drv)) |
710 | return 1; | 728 | return 1; |
711 | 729 | ||
730 | /* Then try ACPI style match */ | ||
731 | if (acpi_driver_match_device(dev, drv)) | ||
732 | return 1; | ||
733 | |||
712 | /* Then try to match against the id table */ | 734 | /* Then try to match against the id table */ |
713 | if (pdrv->id_table) | 735 | if (pdrv->id_table) |
714 | return platform_match_id(pdrv->id_table, pdev) != NULL; | 736 | return platform_match_id(pdrv->id_table, pdev) != NULL; |
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index eb78e9640c4a..9d8fde709390 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c | |||
@@ -99,7 +99,7 @@ static void __pm_clk_remove(struct pm_clock_entry *ce) | |||
99 | 99 | ||
100 | if (ce->status < PCE_STATUS_ERROR) { | 100 | if (ce->status < PCE_STATUS_ERROR) { |
101 | if (ce->status == PCE_STATUS_ENABLED) | 101 | if (ce->status == PCE_STATUS_ENABLED) |
102 | clk_disable(ce->clk); | 102 | clk_disable_unprepare(ce->clk); |
103 | 103 | ||
104 | if (ce->status >= PCE_STATUS_ACQUIRED) | 104 | if (ce->status >= PCE_STATUS_ACQUIRED) |
105 | clk_put(ce->clk); | 105 | clk_put(ce->clk); |
@@ -396,7 +396,7 @@ static void enable_clock(struct device *dev, const char *con_id) | |||
396 | 396 | ||
397 | clk = clk_get(dev, con_id); | 397 | clk = clk_get(dev, con_id); |
398 | if (!IS_ERR(clk)) { | 398 | if (!IS_ERR(clk)) { |
399 | clk_enable(clk); | 399 | clk_prepare_enable(clk); |
400 | clk_put(clk); | 400 | clk_put(clk); |
401 | dev_info(dev, "Runtime PM disabled, clock forced on.\n"); | 401 | dev_info(dev, "Runtime PM disabled, clock forced on.\n"); |
402 | } | 402 | } |
@@ -413,7 +413,7 @@ static void disable_clock(struct device *dev, const char *con_id) | |||
413 | 413 | ||
414 | clk = clk_get(dev, con_id); | 414 | clk = clk_get(dev, con_id); |
415 | if (!IS_ERR(clk)) { | 415 | if (!IS_ERR(clk)) { |
416 | clk_disable(clk); | 416 | clk_disable_unprepare(clk); |
417 | clk_put(clk); | 417 | clk_put(clk); |
418 | dev_info(dev, "Runtime PM disabled, clock forced off.\n"); | 418 | dev_info(dev, "Runtime PM disabled, clock forced off.\n"); |
419 | } | 419 | } |
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 96b71b6536d6..acc3a8ded29d 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c | |||
@@ -470,10 +470,19 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | |||
470 | return -EBUSY; | 470 | return -EBUSY; |
471 | 471 | ||
472 | not_suspended = 0; | 472 | not_suspended = 0; |
473 | list_for_each_entry(pdd, &genpd->dev_list, list_node) | 473 | list_for_each_entry(pdd, &genpd->dev_list, list_node) { |
474 | enum pm_qos_flags_status stat; | ||
475 | |||
476 | stat = dev_pm_qos_flags(pdd->dev, | ||
477 | PM_QOS_FLAG_NO_POWER_OFF | ||
478 | | PM_QOS_FLAG_REMOTE_WAKEUP); | ||
479 | if (stat > PM_QOS_FLAGS_NONE) | ||
480 | return -EBUSY; | ||
481 | |||
474 | if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) | 482 | if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) |
475 | || pdd->dev->power.irq_safe)) | 483 | || pdd->dev->power.irq_safe)) |
476 | not_suspended++; | 484 | not_suspended++; |
485 | } | ||
477 | 486 | ||
478 | if (not_suspended > genpd->in_progress) | 487 | if (not_suspended > genpd->in_progress) |
479 | return -EBUSY; | 488 | return -EBUSY; |
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index d9468642fc41..50b2831e027d 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/rcupdate.h> | 23 | #include <linux/rcupdate.h> |
24 | #include <linux/opp.h> | 24 | #include <linux/opp.h> |
25 | #include <linux/of.h> | 25 | #include <linux/of.h> |
26 | #include <linux/export.h> | ||
26 | 27 | ||
27 | /* | 28 | /* |
28 | * Internal data structure organization with the OPP layer library is as | 29 | * Internal data structure organization with the OPP layer library is as |
@@ -65,6 +66,7 @@ struct opp { | |||
65 | unsigned long u_volt; | 66 | unsigned long u_volt; |
66 | 67 | ||
67 | struct device_opp *dev_opp; | 68 | struct device_opp *dev_opp; |
69 | struct rcu_head head; | ||
68 | }; | 70 | }; |
69 | 71 | ||
70 | /** | 72 | /** |
@@ -160,6 +162,7 @@ unsigned long opp_get_voltage(struct opp *opp) | |||
160 | 162 | ||
161 | return v; | 163 | return v; |
162 | } | 164 | } |
165 | EXPORT_SYMBOL(opp_get_voltage); | ||
163 | 166 | ||
164 | /** | 167 | /** |
165 | * opp_get_freq() - Gets the frequency corresponding to an available opp | 168 | * opp_get_freq() - Gets the frequency corresponding to an available opp |
@@ -189,6 +192,7 @@ unsigned long opp_get_freq(struct opp *opp) | |||
189 | 192 | ||
190 | return f; | 193 | return f; |
191 | } | 194 | } |
195 | EXPORT_SYMBOL(opp_get_freq); | ||
192 | 196 | ||
193 | /** | 197 | /** |
194 | * opp_get_opp_count() - Get number of opps available in the opp list | 198 | * opp_get_opp_count() - Get number of opps available in the opp list |
@@ -221,6 +225,7 @@ int opp_get_opp_count(struct device *dev) | |||
221 | 225 | ||
222 | return count; | 226 | return count; |
223 | } | 227 | } |
228 | EXPORT_SYMBOL(opp_get_opp_count); | ||
224 | 229 | ||
225 | /** | 230 | /** |
226 | * opp_find_freq_exact() - search for an exact frequency | 231 | * opp_find_freq_exact() - search for an exact frequency |
@@ -230,7 +235,10 @@ int opp_get_opp_count(struct device *dev) | |||
230 | * | 235 | * |
231 | * Searches for exact match in the opp list and returns pointer to the matching | 236 | * Searches for exact match in the opp list and returns pointer to the matching |
232 | * opp if found, else returns ERR_PTR in case of error and should be handled | 237 | * opp if found, else returns ERR_PTR in case of error and should be handled |
233 | * using IS_ERR. | 238 | * using IS_ERR. Error return values can be: |
239 | * EINVAL: for bad pointer | ||
240 | * ERANGE: no match found for search | ||
241 | * ENODEV: if device not found in list of registered devices | ||
234 | * | 242 | * |
235 | * Note: available is a modifier for the search. if available=true, then the | 243 | * Note: available is a modifier for the search. if available=true, then the |
236 | * match is for exact matching frequency and is available in the stored OPP | 244 | * match is for exact matching frequency and is available in the stored OPP |
@@ -249,7 +257,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, | |||
249 | bool available) | 257 | bool available) |
250 | { | 258 | { |
251 | struct device_opp *dev_opp; | 259 | struct device_opp *dev_opp; |
252 | struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); | 260 | struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
253 | 261 | ||
254 | dev_opp = find_device_opp(dev); | 262 | dev_opp = find_device_opp(dev); |
255 | if (IS_ERR(dev_opp)) { | 263 | if (IS_ERR(dev_opp)) { |
@@ -268,6 +276,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, | |||
268 | 276 | ||
269 | return opp; | 277 | return opp; |
270 | } | 278 | } |
279 | EXPORT_SYMBOL(opp_find_freq_exact); | ||
271 | 280 | ||
272 | /** | 281 | /** |
273 | * opp_find_freq_ceil() - Search for an rounded ceil freq | 282 | * opp_find_freq_ceil() - Search for an rounded ceil freq |
@@ -278,7 +287,11 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, | |||
278 | * for a device. | 287 | * for a device. |
279 | * | 288 | * |
280 | * Returns matching *opp and refreshes *freq accordingly, else returns | 289 | * Returns matching *opp and refreshes *freq accordingly, else returns |
281 | * ERR_PTR in case of error and should be handled using IS_ERR. | 290 | * ERR_PTR in case of error and should be handled using IS_ERR. Error return |
291 | * values can be: | ||
292 | * EINVAL: for bad pointer | ||
293 | * ERANGE: no match found for search | ||
294 | * ENODEV: if device not found in list of registered devices | ||
282 | * | 295 | * |
283 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | 296 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu |
284 | * protected pointer. The reason for the same is that the opp pointer which is | 297 | * protected pointer. The reason for the same is that the opp pointer which is |
@@ -289,7 +302,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, | |||
289 | struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) | 302 | struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) |
290 | { | 303 | { |
291 | struct device_opp *dev_opp; | 304 | struct device_opp *dev_opp; |
292 | struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); | 305 | struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
293 | 306 | ||
294 | if (!dev || !freq) { | 307 | if (!dev || !freq) { |
295 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | 308 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); |
@@ -298,7 +311,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) | |||
298 | 311 | ||
299 | dev_opp = find_device_opp(dev); | 312 | dev_opp = find_device_opp(dev); |
300 | if (IS_ERR(dev_opp)) | 313 | if (IS_ERR(dev_opp)) |
301 | return opp; | 314 | return ERR_CAST(dev_opp); |
302 | 315 | ||
303 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { | 316 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { |
304 | if (temp_opp->available && temp_opp->rate >= *freq) { | 317 | if (temp_opp->available && temp_opp->rate >= *freq) { |
@@ -310,6 +323,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) | |||
310 | 323 | ||
311 | return opp; | 324 | return opp; |
312 | } | 325 | } |
326 | EXPORT_SYMBOL(opp_find_freq_ceil); | ||
313 | 327 | ||
314 | /** | 328 | /** |
315 | * opp_find_freq_floor() - Search for a rounded floor freq | 329 | * opp_find_freq_floor() - Search for a rounded floor freq |
@@ -320,7 +334,11 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) | |||
320 | * for a device. | 334 | * for a device. |
321 | * | 335 | * |
322 | * Returns matching *opp and refreshes *freq accordingly, else returns | 336 | * Returns matching *opp and refreshes *freq accordingly, else returns |
323 | * ERR_PTR in case of error and should be handled using IS_ERR. | 337 | * ERR_PTR in case of error and should be handled using IS_ERR. Error return |
338 | * values can be: | ||
339 | * EINVAL: for bad pointer | ||
340 | * ERANGE: no match found for search | ||
341 | * ENODEV: if device not found in list of registered devices | ||
324 | * | 342 | * |
325 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | 343 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu |
326 | * protected pointer. The reason for the same is that the opp pointer which is | 344 | * protected pointer. The reason for the same is that the opp pointer which is |
@@ -331,7 +349,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) | |||
331 | struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) | 349 | struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) |
332 | { | 350 | { |
333 | struct device_opp *dev_opp; | 351 | struct device_opp *dev_opp; |
334 | struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); | 352 | struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
335 | 353 | ||
336 | if (!dev || !freq) { | 354 | if (!dev || !freq) { |
337 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | 355 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); |
@@ -340,7 +358,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) | |||
340 | 358 | ||
341 | dev_opp = find_device_opp(dev); | 359 | dev_opp = find_device_opp(dev); |
342 | if (IS_ERR(dev_opp)) | 360 | if (IS_ERR(dev_opp)) |
343 | return opp; | 361 | return ERR_CAST(dev_opp); |
344 | 362 | ||
345 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { | 363 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { |
346 | if (temp_opp->available) { | 364 | if (temp_opp->available) { |
@@ -356,6 +374,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) | |||
356 | 374 | ||
357 | return opp; | 375 | return opp; |
358 | } | 376 | } |
377 | EXPORT_SYMBOL(opp_find_freq_floor); | ||
359 | 378 | ||
360 | /** | 379 | /** |
361 | * opp_add() - Add an OPP table from a table definitions | 380 | * opp_add() - Add an OPP table from a table definitions |
@@ -512,7 +531,7 @@ static int opp_set_availability(struct device *dev, unsigned long freq, | |||
512 | 531 | ||
513 | list_replace_rcu(&opp->node, &new_opp->node); | 532 | list_replace_rcu(&opp->node, &new_opp->node); |
514 | mutex_unlock(&dev_opp_list_lock); | 533 | mutex_unlock(&dev_opp_list_lock); |
515 | synchronize_rcu(); | 534 | kfree_rcu(opp, head); |
516 | 535 | ||
517 | /* Notify the change of the OPP availability */ | 536 | /* Notify the change of the OPP availability */ |
518 | if (availability_req) | 537 | if (availability_req) |
@@ -522,13 +541,10 @@ static int opp_set_availability(struct device *dev, unsigned long freq, | |||
522 | srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE, | 541 | srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE, |
523 | new_opp); | 542 | new_opp); |
524 | 543 | ||
525 | /* clean up old opp */ | 544 | return 0; |
526 | new_opp = opp; | ||
527 | goto out; | ||
528 | 545 | ||
529 | unlock: | 546 | unlock: |
530 | mutex_unlock(&dev_opp_list_lock); | 547 | mutex_unlock(&dev_opp_list_lock); |
531 | out: | ||
532 | kfree(new_opp); | 548 | kfree(new_opp); |
533 | return r; | 549 | return r; |
534 | } | 550 | } |
@@ -552,6 +568,7 @@ int opp_enable(struct device *dev, unsigned long freq) | |||
552 | { | 568 | { |
553 | return opp_set_availability(dev, freq, true); | 569 | return opp_set_availability(dev, freq, true); |
554 | } | 570 | } |
571 | EXPORT_SYMBOL(opp_enable); | ||
555 | 572 | ||
556 | /** | 573 | /** |
557 | * opp_disable() - Disable a specific OPP | 574 | * opp_disable() - Disable a specific OPP |
@@ -573,6 +590,7 @@ int opp_disable(struct device *dev, unsigned long freq) | |||
573 | { | 590 | { |
574 | return opp_set_availability(dev, freq, false); | 591 | return opp_set_availability(dev, freq, false); |
575 | } | 592 | } |
593 | EXPORT_SYMBOL(opp_disable); | ||
576 | 594 | ||
577 | #ifdef CONFIG_CPU_FREQ | 595 | #ifdef CONFIG_CPU_FREQ |
578 | /** | 596 | /** |
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index 0dbfdf4419af..b16686a0a5a2 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h | |||
@@ -93,8 +93,10 @@ extern void dpm_sysfs_remove(struct device *dev); | |||
93 | extern void rpm_sysfs_remove(struct device *dev); | 93 | extern void rpm_sysfs_remove(struct device *dev); |
94 | extern int wakeup_sysfs_add(struct device *dev); | 94 | extern int wakeup_sysfs_add(struct device *dev); |
95 | extern void wakeup_sysfs_remove(struct device *dev); | 95 | extern void wakeup_sysfs_remove(struct device *dev); |
96 | extern int pm_qos_sysfs_add(struct device *dev); | 96 | extern int pm_qos_sysfs_add_latency(struct device *dev); |
97 | extern void pm_qos_sysfs_remove(struct device *dev); | 97 | extern void pm_qos_sysfs_remove_latency(struct device *dev); |
98 | extern int pm_qos_sysfs_add_flags(struct device *dev); | ||
99 | extern void pm_qos_sysfs_remove_flags(struct device *dev); | ||
98 | 100 | ||
99 | #else /* CONFIG_PM */ | 101 | #else /* CONFIG_PM */ |
100 | 102 | ||
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index fbbd4ed2edf2..ff46387f5308 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/device.h> | 40 | #include <linux/device.h> |
41 | #include <linux/mutex.h> | 41 | #include <linux/mutex.h> |
42 | #include <linux/export.h> | 42 | #include <linux/export.h> |
43 | #include <linux/pm_runtime.h> | ||
43 | 44 | ||
44 | #include "power.h" | 45 | #include "power.h" |
45 | 46 | ||
@@ -48,6 +49,50 @@ static DEFINE_MUTEX(dev_pm_qos_mtx); | |||
48 | static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); | 49 | static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); |
49 | 50 | ||
50 | /** | 51 | /** |
52 | * __dev_pm_qos_flags - Check PM QoS flags for a given device. | ||
53 | * @dev: Device to check the PM QoS flags for. | ||
54 | * @mask: Flags to check against. | ||
55 | * | ||
56 | * This routine must be called with dev->power.lock held. | ||
57 | */ | ||
58 | enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask) | ||
59 | { | ||
60 | struct dev_pm_qos *qos = dev->power.qos; | ||
61 | struct pm_qos_flags *pqf; | ||
62 | s32 val; | ||
63 | |||
64 | if (!qos) | ||
65 | return PM_QOS_FLAGS_UNDEFINED; | ||
66 | |||
67 | pqf = &qos->flags; | ||
68 | if (list_empty(&pqf->list)) | ||
69 | return PM_QOS_FLAGS_UNDEFINED; | ||
70 | |||
71 | val = pqf->effective_flags & mask; | ||
72 | if (val) | ||
73 | return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME; | ||
74 | |||
75 | return PM_QOS_FLAGS_NONE; | ||
76 | } | ||
77 | |||
78 | /** | ||
79 | * dev_pm_qos_flags - Check PM QoS flags for a given device (locked). | ||
80 | * @dev: Device to check the PM QoS flags for. | ||
81 | * @mask: Flags to check against. | ||
82 | */ | ||
83 | enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask) | ||
84 | { | ||
85 | unsigned long irqflags; | ||
86 | enum pm_qos_flags_status ret; | ||
87 | |||
88 | spin_lock_irqsave(&dev->power.lock, irqflags); | ||
89 | ret = __dev_pm_qos_flags(dev, mask); | ||
90 | spin_unlock_irqrestore(&dev->power.lock, irqflags); | ||
91 | |||
92 | return ret; | ||
93 | } | ||
94 | |||
95 | /** | ||
51 | * __dev_pm_qos_read_value - Get PM QoS constraint for a given device. | 96 | * __dev_pm_qos_read_value - Get PM QoS constraint for a given device. |
52 | * @dev: Device to get the PM QoS constraint value for. | 97 | * @dev: Device to get the PM QoS constraint value for. |
53 | * | 98 | * |
@@ -55,9 +100,7 @@ static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); | |||
55 | */ | 100 | */ |
56 | s32 __dev_pm_qos_read_value(struct device *dev) | 101 | s32 __dev_pm_qos_read_value(struct device *dev) |
57 | { | 102 | { |
58 | struct pm_qos_constraints *c = dev->power.constraints; | 103 | return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0; |
59 | |||
60 | return c ? pm_qos_read_value(c) : 0; | ||
61 | } | 104 | } |
62 | 105 | ||
63 | /** | 106 | /** |
@@ -76,30 +119,39 @@ s32 dev_pm_qos_read_value(struct device *dev) | |||
76 | return ret; | 119 | return ret; |
77 | } | 120 | } |
78 | 121 | ||
79 | /* | 122 | /** |
80 | * apply_constraint | 123 | * apply_constraint - Add/modify/remove device PM QoS request. |
81 | * @req: constraint request to apply | 124 | * @req: Constraint request to apply |
82 | * @action: action to perform add/update/remove, of type enum pm_qos_req_action | 125 | * @action: Action to perform (add/update/remove). |
83 | * @value: defines the qos request | 126 | * @value: Value to assign to the QoS request. |
84 | * | 127 | * |
85 | * Internal function to update the constraints list using the PM QoS core | 128 | * Internal function to update the constraints list using the PM QoS core |
86 | * code and if needed call the per-device and the global notification | 129 | * code and if needed call the per-device and the global notification |
87 | * callbacks | 130 | * callbacks |
88 | */ | 131 | */ |
89 | static int apply_constraint(struct dev_pm_qos_request *req, | 132 | static int apply_constraint(struct dev_pm_qos_request *req, |
90 | enum pm_qos_req_action action, int value) | 133 | enum pm_qos_req_action action, s32 value) |
91 | { | 134 | { |
92 | int ret, curr_value; | 135 | struct dev_pm_qos *qos = req->dev->power.qos; |
93 | 136 | int ret; | |
94 | ret = pm_qos_update_target(req->dev->power.constraints, | ||
95 | &req->node, action, value); | ||
96 | 137 | ||
97 | if (ret) { | 138 | switch(req->type) { |
98 | /* Call the global callbacks if needed */ | 139 | case DEV_PM_QOS_LATENCY: |
99 | curr_value = pm_qos_read_value(req->dev->power.constraints); | 140 | ret = pm_qos_update_target(&qos->latency, &req->data.pnode, |
100 | blocking_notifier_call_chain(&dev_pm_notifiers, | 141 | action, value); |
101 | (unsigned long)curr_value, | 142 | if (ret) { |
102 | req); | 143 | value = pm_qos_read_value(&qos->latency); |
144 | blocking_notifier_call_chain(&dev_pm_notifiers, | ||
145 | (unsigned long)value, | ||
146 | req); | ||
147 | } | ||
148 | break; | ||
149 | case DEV_PM_QOS_FLAGS: | ||
150 | ret = pm_qos_update_flags(&qos->flags, &req->data.flr, | ||
151 | action, value); | ||
152 | break; | ||
153 | default: | ||
154 | ret = -EINVAL; | ||
103 | } | 155 | } |
104 | 156 | ||
105 | return ret; | 157 | return ret; |
@@ -114,28 +166,32 @@ static int apply_constraint(struct dev_pm_qos_request *req, | |||
114 | */ | 166 | */ |
115 | static int dev_pm_qos_constraints_allocate(struct device *dev) | 167 | static int dev_pm_qos_constraints_allocate(struct device *dev) |
116 | { | 168 | { |
169 | struct dev_pm_qos *qos; | ||
117 | struct pm_qos_constraints *c; | 170 | struct pm_qos_constraints *c; |
118 | struct blocking_notifier_head *n; | 171 | struct blocking_notifier_head *n; |
119 | 172 | ||
120 | c = kzalloc(sizeof(*c), GFP_KERNEL); | 173 | qos = kzalloc(sizeof(*qos), GFP_KERNEL); |
121 | if (!c) | 174 | if (!qos) |
122 | return -ENOMEM; | 175 | return -ENOMEM; |
123 | 176 | ||
124 | n = kzalloc(sizeof(*n), GFP_KERNEL); | 177 | n = kzalloc(sizeof(*n), GFP_KERNEL); |
125 | if (!n) { | 178 | if (!n) { |
126 | kfree(c); | 179 | kfree(qos); |
127 | return -ENOMEM; | 180 | return -ENOMEM; |
128 | } | 181 | } |
129 | BLOCKING_INIT_NOTIFIER_HEAD(n); | 182 | BLOCKING_INIT_NOTIFIER_HEAD(n); |
130 | 183 | ||
184 | c = &qos->latency; | ||
131 | plist_head_init(&c->list); | 185 | plist_head_init(&c->list); |
132 | c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; | 186 | c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; |
133 | c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; | 187 | c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; |
134 | c->type = PM_QOS_MIN; | 188 | c->type = PM_QOS_MIN; |
135 | c->notifiers = n; | 189 | c->notifiers = n; |
136 | 190 | ||
191 | INIT_LIST_HEAD(&qos->flags.list); | ||
192 | |||
137 | spin_lock_irq(&dev->power.lock); | 193 | spin_lock_irq(&dev->power.lock); |
138 | dev->power.constraints = c; | 194 | dev->power.qos = qos; |
139 | spin_unlock_irq(&dev->power.lock); | 195 | spin_unlock_irq(&dev->power.lock); |
140 | 196 | ||
141 | return 0; | 197 | return 0; |
@@ -151,7 +207,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) | |||
151 | void dev_pm_qos_constraints_init(struct device *dev) | 207 | void dev_pm_qos_constraints_init(struct device *dev) |
152 | { | 208 | { |
153 | mutex_lock(&dev_pm_qos_mtx); | 209 | mutex_lock(&dev_pm_qos_mtx); |
154 | dev->power.constraints = NULL; | 210 | dev->power.qos = NULL; |
155 | dev->power.power_state = PMSG_ON; | 211 | dev->power.power_state = PMSG_ON; |
156 | mutex_unlock(&dev_pm_qos_mtx); | 212 | mutex_unlock(&dev_pm_qos_mtx); |
157 | } | 213 | } |
@@ -164,24 +220,28 @@ void dev_pm_qos_constraints_init(struct device *dev) | |||
164 | */ | 220 | */ |
165 | void dev_pm_qos_constraints_destroy(struct device *dev) | 221 | void dev_pm_qos_constraints_destroy(struct device *dev) |
166 | { | 222 | { |
223 | struct dev_pm_qos *qos; | ||
167 | struct dev_pm_qos_request *req, *tmp; | 224 | struct dev_pm_qos_request *req, *tmp; |
168 | struct pm_qos_constraints *c; | 225 | struct pm_qos_constraints *c; |
226 | struct pm_qos_flags *f; | ||
169 | 227 | ||
170 | /* | 228 | /* |
171 | * If the device's PM QoS resume latency limit has been exposed to user | 229 | * If the device's PM QoS resume latency limit or PM QoS flags have been |
172 | * space, it has to be hidden at this point. | 230 | * exposed to user space, they have to be hidden at this point. |
173 | */ | 231 | */ |
174 | dev_pm_qos_hide_latency_limit(dev); | 232 | dev_pm_qos_hide_latency_limit(dev); |
233 | dev_pm_qos_hide_flags(dev); | ||
175 | 234 | ||
176 | mutex_lock(&dev_pm_qos_mtx); | 235 | mutex_lock(&dev_pm_qos_mtx); |
177 | 236 | ||
178 | dev->power.power_state = PMSG_INVALID; | 237 | dev->power.power_state = PMSG_INVALID; |
179 | c = dev->power.constraints; | 238 | qos = dev->power.qos; |
180 | if (!c) | 239 | if (!qos) |
181 | goto out; | 240 | goto out; |
182 | 241 | ||
183 | /* Flush the constraints list for the device */ | 242 | /* Flush the constraints lists for the device. */ |
184 | plist_for_each_entry_safe(req, tmp, &c->list, node) { | 243 | c = &qos->latency; |
244 | plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { | ||
185 | /* | 245 | /* |
186 | * Update constraints list and call the notification | 246 | * Update constraints list and call the notification |
187 | * callbacks if needed | 247 | * callbacks if needed |
@@ -189,13 +249,18 @@ void dev_pm_qos_constraints_destroy(struct device *dev) | |||
189 | apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); | 249 | apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); |
190 | memset(req, 0, sizeof(*req)); | 250 | memset(req, 0, sizeof(*req)); |
191 | } | 251 | } |
252 | f = &qos->flags; | ||
253 | list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) { | ||
254 | apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); | ||
255 | memset(req, 0, sizeof(*req)); | ||
256 | } | ||
192 | 257 | ||
193 | spin_lock_irq(&dev->power.lock); | 258 | spin_lock_irq(&dev->power.lock); |
194 | dev->power.constraints = NULL; | 259 | dev->power.qos = NULL; |
195 | spin_unlock_irq(&dev->power.lock); | 260 | spin_unlock_irq(&dev->power.lock); |
196 | 261 | ||
197 | kfree(c->notifiers); | 262 | kfree(c->notifiers); |
198 | kfree(c); | 263 | kfree(qos); |
199 | 264 | ||
200 | out: | 265 | out: |
201 | mutex_unlock(&dev_pm_qos_mtx); | 266 | mutex_unlock(&dev_pm_qos_mtx); |
@@ -205,6 +270,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) | |||
205 | * dev_pm_qos_add_request - inserts new qos request into the list | 270 | * dev_pm_qos_add_request - inserts new qos request into the list |
206 | * @dev: target device for the constraint | 271 | * @dev: target device for the constraint |
207 | * @req: pointer to a preallocated handle | 272 | * @req: pointer to a preallocated handle |
273 | * @type: type of the request | ||
208 | * @value: defines the qos request | 274 | * @value: defines the qos request |
209 | * | 275 | * |
210 | * This function inserts a new entry in the device constraints list of | 276 | * This function inserts a new entry in the device constraints list of |
@@ -218,9 +284,12 @@ void dev_pm_qos_constraints_destroy(struct device *dev) | |||
218 | * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory | 284 | * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory |
219 | * to allocate for data structures, -ENODEV if the device has just been removed | 285 | * to allocate for data structures, -ENODEV if the device has just been removed |
220 | * from the system. | 286 | * from the system. |
287 | * | ||
288 | * Callers should ensure that the target device is not RPM_SUSPENDED before | ||
289 | * using this function for requests of type DEV_PM_QOS_FLAGS. | ||
221 | */ | 290 | */ |
222 | int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, | 291 | int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, |
223 | s32 value) | 292 | enum dev_pm_qos_req_type type, s32 value) |
224 | { | 293 | { |
225 | int ret = 0; | 294 | int ret = 0; |
226 | 295 | ||
@@ -235,7 +304,7 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, | |||
235 | 304 | ||
236 | mutex_lock(&dev_pm_qos_mtx); | 305 | mutex_lock(&dev_pm_qos_mtx); |
237 | 306 | ||
238 | if (!dev->power.constraints) { | 307 | if (!dev->power.qos) { |
239 | if (dev->power.power_state.event == PM_EVENT_INVALID) { | 308 | if (dev->power.power_state.event == PM_EVENT_INVALID) { |
240 | /* The device has been removed from the system. */ | 309 | /* The device has been removed from the system. */ |
241 | req->dev = NULL; | 310 | req->dev = NULL; |
@@ -251,8 +320,10 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, | |||
251 | } | 320 | } |
252 | } | 321 | } |
253 | 322 | ||
254 | if (!ret) | 323 | if (!ret) { |
324 | req->type = type; | ||
255 | ret = apply_constraint(req, PM_QOS_ADD_REQ, value); | 325 | ret = apply_constraint(req, PM_QOS_ADD_REQ, value); |
326 | } | ||
256 | 327 | ||
257 | out: | 328 | out: |
258 | mutex_unlock(&dev_pm_qos_mtx); | 329 | mutex_unlock(&dev_pm_qos_mtx); |
@@ -262,6 +333,37 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, | |||
262 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); | 333 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); |
263 | 334 | ||
264 | /** | 335 | /** |
336 | * __dev_pm_qos_update_request - Modify an existing device PM QoS request. | ||
337 | * @req : PM QoS request to modify. | ||
338 | * @new_value: New value to request. | ||
339 | */ | ||
340 | static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, | ||
341 | s32 new_value) | ||
342 | { | ||
343 | s32 curr_value; | ||
344 | int ret = 0; | ||
345 | |||
346 | if (!req->dev->power.qos) | ||
347 | return -ENODEV; | ||
348 | |||
349 | switch(req->type) { | ||
350 | case DEV_PM_QOS_LATENCY: | ||
351 | curr_value = req->data.pnode.prio; | ||
352 | break; | ||
353 | case DEV_PM_QOS_FLAGS: | ||
354 | curr_value = req->data.flr.flags; | ||
355 | break; | ||
356 | default: | ||
357 | return -EINVAL; | ||
358 | } | ||
359 | |||
360 | if (curr_value != new_value) | ||
361 | ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value); | ||
362 | |||
363 | return ret; | ||
364 | } | ||
365 | |||
366 | /** | ||
265 | * dev_pm_qos_update_request - modifies an existing qos request | 367 | * dev_pm_qos_update_request - modifies an existing qos request |
266 | * @req : handle to list element holding a dev_pm_qos request to use | 368 | * @req : handle to list element holding a dev_pm_qos request to use |
267 | * @new_value: defines the qos request | 369 | * @new_value: defines the qos request |
@@ -275,11 +377,13 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); | |||
275 | * 0 if the aggregated constraint value has not changed, | 377 | * 0 if the aggregated constraint value has not changed, |
276 | * -EINVAL in case of wrong parameters, -ENODEV if the device has been | 378 | * -EINVAL in case of wrong parameters, -ENODEV if the device has been |
277 | * removed from the system | 379 | * removed from the system |
380 | * | ||
381 | * Callers should ensure that the target device is not RPM_SUSPENDED before | ||
382 | * using this function for requests of type DEV_PM_QOS_FLAGS. | ||
278 | */ | 383 | */ |
279 | int dev_pm_qos_update_request(struct dev_pm_qos_request *req, | 384 | int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value) |
280 | s32 new_value) | ||
281 | { | 385 | { |
282 | int ret = 0; | 386 | int ret; |
283 | 387 | ||
284 | if (!req) /*guard against callers passing in null */ | 388 | if (!req) /*guard against callers passing in null */ |
285 | return -EINVAL; | 389 | return -EINVAL; |
@@ -289,17 +393,9 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, | |||
289 | return -EINVAL; | 393 | return -EINVAL; |
290 | 394 | ||
291 | mutex_lock(&dev_pm_qos_mtx); | 395 | mutex_lock(&dev_pm_qos_mtx); |
292 | 396 | ret = __dev_pm_qos_update_request(req, new_value); | |
293 | if (req->dev->power.constraints) { | ||
294 | if (new_value != req->node.prio) | ||
295 | ret = apply_constraint(req, PM_QOS_UPDATE_REQ, | ||
296 | new_value); | ||
297 | } else { | ||
298 | /* Return if the device has been removed */ | ||
299 | ret = -ENODEV; | ||
300 | } | ||
301 | |||
302 | mutex_unlock(&dev_pm_qos_mtx); | 397 | mutex_unlock(&dev_pm_qos_mtx); |
398 | |||
303 | return ret; | 399 | return ret; |
304 | } | 400 | } |
305 | EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); | 401 | EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); |
@@ -315,6 +411,9 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); | |||
315 | * 0 if the aggregated constraint value has not changed, | 411 | * 0 if the aggregated constraint value has not changed, |
316 | * -EINVAL in case of wrong parameters, -ENODEV if the device has been | 412 | * -EINVAL in case of wrong parameters, -ENODEV if the device has been |
317 | * removed from the system | 413 | * removed from the system |
414 | * | ||
415 | * Callers should ensure that the target device is not RPM_SUSPENDED before | ||
416 | * using this function for requests of type DEV_PM_QOS_FLAGS. | ||
318 | */ | 417 | */ |
319 | int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) | 418 | int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) |
320 | { | 419 | { |
@@ -329,7 +428,7 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) | |||
329 | 428 | ||
330 | mutex_lock(&dev_pm_qos_mtx); | 429 | mutex_lock(&dev_pm_qos_mtx); |
331 | 430 | ||
332 | if (req->dev->power.constraints) { | 431 | if (req->dev->power.qos) { |
333 | ret = apply_constraint(req, PM_QOS_REMOVE_REQ, | 432 | ret = apply_constraint(req, PM_QOS_REMOVE_REQ, |
334 | PM_QOS_DEFAULT_VALUE); | 433 | PM_QOS_DEFAULT_VALUE); |
335 | memset(req, 0, sizeof(*req)); | 434 | memset(req, 0, sizeof(*req)); |
@@ -362,13 +461,13 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) | |||
362 | 461 | ||
363 | mutex_lock(&dev_pm_qos_mtx); | 462 | mutex_lock(&dev_pm_qos_mtx); |
364 | 463 | ||
365 | if (!dev->power.constraints) | 464 | if (!dev->power.qos) |
366 | ret = dev->power.power_state.event != PM_EVENT_INVALID ? | 465 | ret = dev->power.power_state.event != PM_EVENT_INVALID ? |
367 | dev_pm_qos_constraints_allocate(dev) : -ENODEV; | 466 | dev_pm_qos_constraints_allocate(dev) : -ENODEV; |
368 | 467 | ||
369 | if (!ret) | 468 | if (!ret) |
370 | ret = blocking_notifier_chain_register( | 469 | ret = blocking_notifier_chain_register( |
371 | dev->power.constraints->notifiers, notifier); | 470 | dev->power.qos->latency.notifiers, notifier); |
372 | 471 | ||
373 | mutex_unlock(&dev_pm_qos_mtx); | 472 | mutex_unlock(&dev_pm_qos_mtx); |
374 | return ret; | 473 | return ret; |
@@ -393,9 +492,9 @@ int dev_pm_qos_remove_notifier(struct device *dev, | |||
393 | mutex_lock(&dev_pm_qos_mtx); | 492 | mutex_lock(&dev_pm_qos_mtx); |
394 | 493 | ||
395 | /* Silently return if the constraints object is not present. */ | 494 | /* Silently return if the constraints object is not present. */ |
396 | if (dev->power.constraints) | 495 | if (dev->power.qos) |
397 | retval = blocking_notifier_chain_unregister( | 496 | retval = blocking_notifier_chain_unregister( |
398 | dev->power.constraints->notifiers, | 497 | dev->power.qos->latency.notifiers, |
399 | notifier); | 498 | notifier); |
400 | 499 | ||
401 | mutex_unlock(&dev_pm_qos_mtx); | 500 | mutex_unlock(&dev_pm_qos_mtx); |
@@ -449,7 +548,8 @@ int dev_pm_qos_add_ancestor_request(struct device *dev, | |||
449 | ancestor = ancestor->parent; | 548 | ancestor = ancestor->parent; |
450 | 549 | ||
451 | if (ancestor) | 550 | if (ancestor) |
452 | error = dev_pm_qos_add_request(ancestor, req, value); | 551 | error = dev_pm_qos_add_request(ancestor, req, |
552 | DEV_PM_QOS_LATENCY, value); | ||
453 | 553 | ||
454 | if (error < 0) | 554 | if (error < 0) |
455 | req->dev = NULL; | 555 | req->dev = NULL; |
@@ -459,10 +559,19 @@ int dev_pm_qos_add_ancestor_request(struct device *dev, | |||
459 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); | 559 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); |
460 | 560 | ||
461 | #ifdef CONFIG_PM_RUNTIME | 561 | #ifdef CONFIG_PM_RUNTIME |
462 | static void __dev_pm_qos_drop_user_request(struct device *dev) | 562 | static void __dev_pm_qos_drop_user_request(struct device *dev, |
563 | enum dev_pm_qos_req_type type) | ||
463 | { | 564 | { |
464 | dev_pm_qos_remove_request(dev->power.pq_req); | 565 | switch(type) { |
465 | dev->power.pq_req = NULL; | 566 | case DEV_PM_QOS_LATENCY: |
567 | dev_pm_qos_remove_request(dev->power.qos->latency_req); | ||
568 | dev->power.qos->latency_req = NULL; | ||
569 | break; | ||
570 | case DEV_PM_QOS_FLAGS: | ||
571 | dev_pm_qos_remove_request(dev->power.qos->flags_req); | ||
572 | dev->power.qos->flags_req = NULL; | ||
573 | break; | ||
574 | } | ||
466 | } | 575 | } |
467 | 576 | ||
468 | /** | 577 | /** |
@@ -478,21 +587,21 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) | |||
478 | if (!device_is_registered(dev) || value < 0) | 587 | if (!device_is_registered(dev) || value < 0) |
479 | return -EINVAL; | 588 | return -EINVAL; |
480 | 589 | ||
481 | if (dev->power.pq_req) | 590 | if (dev->power.qos && dev->power.qos->latency_req) |
482 | return -EEXIST; | 591 | return -EEXIST; |
483 | 592 | ||
484 | req = kzalloc(sizeof(*req), GFP_KERNEL); | 593 | req = kzalloc(sizeof(*req), GFP_KERNEL); |
485 | if (!req) | 594 | if (!req) |
486 | return -ENOMEM; | 595 | return -ENOMEM; |
487 | 596 | ||
488 | ret = dev_pm_qos_add_request(dev, req, value); | 597 | ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value); |
489 | if (ret < 0) | 598 | if (ret < 0) |
490 | return ret; | 599 | return ret; |
491 | 600 | ||
492 | dev->power.pq_req = req; | 601 | dev->power.qos->latency_req = req; |
493 | ret = pm_qos_sysfs_add(dev); | 602 | ret = pm_qos_sysfs_add_latency(dev); |
494 | if (ret) | 603 | if (ret) |
495 | __dev_pm_qos_drop_user_request(dev); | 604 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); |
496 | 605 | ||
497 | return ret; | 606 | return ret; |
498 | } | 607 | } |
@@ -504,10 +613,92 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); | |||
504 | */ | 613 | */ |
505 | void dev_pm_qos_hide_latency_limit(struct device *dev) | 614 | void dev_pm_qos_hide_latency_limit(struct device *dev) |
506 | { | 615 | { |
507 | if (dev->power.pq_req) { | 616 | if (dev->power.qos && dev->power.qos->latency_req) { |
508 | pm_qos_sysfs_remove(dev); | 617 | pm_qos_sysfs_remove_latency(dev); |
509 | __dev_pm_qos_drop_user_request(dev); | 618 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); |
510 | } | 619 | } |
511 | } | 620 | } |
512 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); | 621 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); |
622 | |||
623 | /** | ||
624 | * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space. | ||
625 | * @dev: Device whose PM QoS flags are to be exposed to user space. | ||
626 | * @val: Initial values of the flags. | ||
627 | */ | ||
628 | int dev_pm_qos_expose_flags(struct device *dev, s32 val) | ||
629 | { | ||
630 | struct dev_pm_qos_request *req; | ||
631 | int ret; | ||
632 | |||
633 | if (!device_is_registered(dev)) | ||
634 | return -EINVAL; | ||
635 | |||
636 | if (dev->power.qos && dev->power.qos->flags_req) | ||
637 | return -EEXIST; | ||
638 | |||
639 | req = kzalloc(sizeof(*req), GFP_KERNEL); | ||
640 | if (!req) | ||
641 | return -ENOMEM; | ||
642 | |||
643 | pm_runtime_get_sync(dev); | ||
644 | ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val); | ||
645 | if (ret < 0) | ||
646 | goto fail; | ||
647 | |||
648 | dev->power.qos->flags_req = req; | ||
649 | ret = pm_qos_sysfs_add_flags(dev); | ||
650 | if (ret) | ||
651 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); | ||
652 | |||
653 | fail: | ||
654 | pm_runtime_put(dev); | ||
655 | return ret; | ||
656 | } | ||
657 | EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); | ||
658 | |||
659 | /** | ||
660 | * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space. | ||
661 | * @dev: Device whose PM QoS flags are to be hidden from user space. | ||
662 | */ | ||
663 | void dev_pm_qos_hide_flags(struct device *dev) | ||
664 | { | ||
665 | if (dev->power.qos && dev->power.qos->flags_req) { | ||
666 | pm_qos_sysfs_remove_flags(dev); | ||
667 | pm_runtime_get_sync(dev); | ||
668 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); | ||
669 | pm_runtime_put(dev); | ||
670 | } | ||
671 | } | ||
672 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); | ||
673 | |||
674 | /** | ||
675 | * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space. | ||
676 | * @dev: Device to update the PM QoS flags request for. | ||
677 | * @mask: Flags to set/clear. | ||
678 | * @set: Whether to set or clear the flags (true means set). | ||
679 | */ | ||
680 | int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set) | ||
681 | { | ||
682 | s32 value; | ||
683 | int ret; | ||
684 | |||
685 | if (!dev->power.qos || !dev->power.qos->flags_req) | ||
686 | return -EINVAL; | ||
687 | |||
688 | pm_runtime_get_sync(dev); | ||
689 | mutex_lock(&dev_pm_qos_mtx); | ||
690 | |||
691 | value = dev_pm_qos_requested_flags(dev); | ||
692 | if (set) | ||
693 | value |= mask; | ||
694 | else | ||
695 | value &= ~mask; | ||
696 | |||
697 | ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value); | ||
698 | |||
699 | mutex_unlock(&dev_pm_qos_mtx); | ||
700 | pm_runtime_put(dev); | ||
701 | |||
702 | return ret; | ||
703 | } | ||
513 | #endif /* CONFIG_PM_RUNTIME */ | 704 | #endif /* CONFIG_PM_RUNTIME */ |
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index b91dc6f1e914..50d16e3cb0a9 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c | |||
@@ -221,7 +221,7 @@ static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, | |||
221 | static ssize_t pm_qos_latency_show(struct device *dev, | 221 | static ssize_t pm_qos_latency_show(struct device *dev, |
222 | struct device_attribute *attr, char *buf) | 222 | struct device_attribute *attr, char *buf) |
223 | { | 223 | { |
224 | return sprintf(buf, "%d\n", dev->power.pq_req->node.prio); | 224 | return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev)); |
225 | } | 225 | } |
226 | 226 | ||
227 | static ssize_t pm_qos_latency_store(struct device *dev, | 227 | static ssize_t pm_qos_latency_store(struct device *dev, |
@@ -237,12 +237,66 @@ static ssize_t pm_qos_latency_store(struct device *dev, | |||
237 | if (value < 0) | 237 | if (value < 0) |
238 | return -EINVAL; | 238 | return -EINVAL; |
239 | 239 | ||
240 | ret = dev_pm_qos_update_request(dev->power.pq_req, value); | 240 | ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value); |
241 | return ret < 0 ? ret : n; | 241 | return ret < 0 ? ret : n; |
242 | } | 242 | } |
243 | 243 | ||
244 | static DEVICE_ATTR(pm_qos_resume_latency_us, 0644, | 244 | static DEVICE_ATTR(pm_qos_resume_latency_us, 0644, |
245 | pm_qos_latency_show, pm_qos_latency_store); | 245 | pm_qos_latency_show, pm_qos_latency_store); |
246 | |||
247 | static ssize_t pm_qos_no_power_off_show(struct device *dev, | ||
248 | struct device_attribute *attr, | ||
249 | char *buf) | ||
250 | { | ||
251 | return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev) | ||
252 | & PM_QOS_FLAG_NO_POWER_OFF)); | ||
253 | } | ||
254 | |||
255 | static ssize_t pm_qos_no_power_off_store(struct device *dev, | ||
256 | struct device_attribute *attr, | ||
257 | const char *buf, size_t n) | ||
258 | { | ||
259 | int ret; | ||
260 | |||
261 | if (kstrtoint(buf, 0, &ret)) | ||
262 | return -EINVAL; | ||
263 | |||
264 | if (ret != 0 && ret != 1) | ||
265 | return -EINVAL; | ||
266 | |||
267 | ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret); | ||
268 | return ret < 0 ? ret : n; | ||
269 | } | ||
270 | |||
271 | static DEVICE_ATTR(pm_qos_no_power_off, 0644, | ||
272 | pm_qos_no_power_off_show, pm_qos_no_power_off_store); | ||
273 | |||
274 | static ssize_t pm_qos_remote_wakeup_show(struct device *dev, | ||
275 | struct device_attribute *attr, | ||
276 | char *buf) | ||
277 | { | ||
278 | return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev) | ||
279 | & PM_QOS_FLAG_REMOTE_WAKEUP)); | ||
280 | } | ||
281 | |||
282 | static ssize_t pm_qos_remote_wakeup_store(struct device *dev, | ||
283 | struct device_attribute *attr, | ||
284 | const char *buf, size_t n) | ||
285 | { | ||
286 | int ret; | ||
287 | |||
288 | if (kstrtoint(buf, 0, &ret)) | ||
289 | return -EINVAL; | ||
290 | |||
291 | if (ret != 0 && ret != 1) | ||
292 | return -EINVAL; | ||
293 | |||
294 | ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP, ret); | ||
295 | return ret < 0 ? ret : n; | ||
296 | } | ||
297 | |||
298 | static DEVICE_ATTR(pm_qos_remote_wakeup, 0644, | ||
299 | pm_qos_remote_wakeup_show, pm_qos_remote_wakeup_store); | ||
246 | #endif /* CONFIG_PM_RUNTIME */ | 300 | #endif /* CONFIG_PM_RUNTIME */ |
247 | 301 | ||
248 | #ifdef CONFIG_PM_SLEEP | 302 | #ifdef CONFIG_PM_SLEEP |
@@ -564,15 +618,27 @@ static struct attribute_group pm_runtime_attr_group = { | |||
564 | .attrs = runtime_attrs, | 618 | .attrs = runtime_attrs, |
565 | }; | 619 | }; |
566 | 620 | ||
567 | static struct attribute *pm_qos_attrs[] = { | 621 | static struct attribute *pm_qos_latency_attrs[] = { |
568 | #ifdef CONFIG_PM_RUNTIME | 622 | #ifdef CONFIG_PM_RUNTIME |
569 | &dev_attr_pm_qos_resume_latency_us.attr, | 623 | &dev_attr_pm_qos_resume_latency_us.attr, |
570 | #endif /* CONFIG_PM_RUNTIME */ | 624 | #endif /* CONFIG_PM_RUNTIME */ |
571 | NULL, | 625 | NULL, |
572 | }; | 626 | }; |
573 | static struct attribute_group pm_qos_attr_group = { | 627 | static struct attribute_group pm_qos_latency_attr_group = { |
574 | .name = power_group_name, | 628 | .name = power_group_name, |
575 | .attrs = pm_qos_attrs, | 629 | .attrs = pm_qos_latency_attrs, |
630 | }; | ||
631 | |||
632 | static struct attribute *pm_qos_flags_attrs[] = { | ||
633 | #ifdef CONFIG_PM_RUNTIME | ||
634 | &dev_attr_pm_qos_no_power_off.attr, | ||
635 | &dev_attr_pm_qos_remote_wakeup.attr, | ||
636 | #endif /* CONFIG_PM_RUNTIME */ | ||
637 | NULL, | ||
638 | }; | ||
639 | static struct attribute_group pm_qos_flags_attr_group = { | ||
640 | .name = power_group_name, | ||
641 | .attrs = pm_qos_flags_attrs, | ||
576 | }; | 642 | }; |
577 | 643 | ||
578 | int dpm_sysfs_add(struct device *dev) | 644 | int dpm_sysfs_add(struct device *dev) |
@@ -615,14 +681,24 @@ void wakeup_sysfs_remove(struct device *dev) | |||
615 | sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); | 681 | sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); |
616 | } | 682 | } |
617 | 683 | ||
618 | int pm_qos_sysfs_add(struct device *dev) | 684 | int pm_qos_sysfs_add_latency(struct device *dev) |
685 | { | ||
686 | return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group); | ||
687 | } | ||
688 | |||
689 | void pm_qos_sysfs_remove_latency(struct device *dev) | ||
690 | { | ||
691 | sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group); | ||
692 | } | ||
693 | |||
694 | int pm_qos_sysfs_add_flags(struct device *dev) | ||
619 | { | 695 | { |
620 | return sysfs_merge_group(&dev->kobj, &pm_qos_attr_group); | 696 | return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group); |
621 | } | 697 | } |
622 | 698 | ||
623 | void pm_qos_sysfs_remove(struct device *dev) | 699 | void pm_qos_sysfs_remove_flags(struct device *dev) |
624 | { | 700 | { |
625 | sysfs_unmerge_group(&dev->kobj, &pm_qos_attr_group); | 701 | sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group); |
626 | } | 702 | } |
627 | 703 | ||
628 | void rpm_sysfs_remove(struct device *dev) | 704 | void rpm_sysfs_remove(struct device *dev) |
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 5961e6415f08..a0b3661d90b0 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm | |||
@@ -76,3 +76,10 @@ config ARM_EXYNOS5250_CPUFREQ | |||
76 | help | 76 | help |
77 | This adds the CPUFreq driver for Samsung EXYNOS5250 | 77 | This adds the CPUFreq driver for Samsung EXYNOS5250 |
78 | SoC. | 78 | SoC. |
79 | |||
80 | config ARM_SPEAR_CPUFREQ | ||
81 | bool "SPEAr CPUFreq support" | ||
82 | depends on PLAT_SPEAR | ||
83 | default y | ||
84 | help | ||
85 | This adds the CPUFreq driver support for SPEAr SOCs. | ||
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 1bc90e1306d8..1f254ec087c1 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile | |||
@@ -7,8 +7,8 @@ obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o | |||
7 | obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o | 7 | obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o |
8 | obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o | 8 | obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o |
9 | obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o | 9 | obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o |
10 | obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o | 10 | obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o cpufreq_governor.o |
11 | obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o | 11 | obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o cpufreq_governor.o |
12 | 12 | ||
13 | # CPUfreq cross-arch helpers | 13 | # CPUfreq cross-arch helpers |
14 | obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o | 14 | obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o |
@@ -50,6 +50,7 @@ obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o | |||
50 | obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o | 50 | obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o |
51 | obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o | 51 | obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o |
52 | obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o | 52 | obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o |
53 | obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o | ||
53 | 54 | ||
54 | ################################################################################## | 55 | ################################################################################## |
55 | # PowerPC platform drivers | 56 | # PowerPC platform drivers |
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index e9158278c71d..52bf36d599f5 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c | |||
@@ -174,7 +174,7 @@ static struct cpufreq_driver cpu0_cpufreq_driver = { | |||
174 | .attr = cpu0_cpufreq_attr, | 174 | .attr = cpu0_cpufreq_attr, |
175 | }; | 175 | }; |
176 | 176 | ||
177 | static int __devinit cpu0_cpufreq_driver_init(void) | 177 | static int cpu0_cpufreq_driver_init(void) |
178 | { | 178 | { |
179 | struct device_node *np; | 179 | struct device_node *np; |
180 | int ret; | 180 | int ret; |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index fb8a5279c5d8..1f93dbd72355 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -15,6 +15,8 @@ | |||
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
19 | |||
18 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
19 | #include <linux/module.h> | 21 | #include <linux/module.h> |
20 | #include <linux/init.h> | 22 | #include <linux/init.h> |
@@ -127,7 +129,7 @@ static int __init init_cpufreq_transition_notifier_list(void) | |||
127 | pure_initcall(init_cpufreq_transition_notifier_list); | 129 | pure_initcall(init_cpufreq_transition_notifier_list); |
128 | 130 | ||
129 | static int off __read_mostly; | 131 | static int off __read_mostly; |
130 | int cpufreq_disabled(void) | 132 | static int cpufreq_disabled(void) |
131 | { | 133 | { |
132 | return off; | 134 | return off; |
133 | } | 135 | } |
@@ -402,7 +404,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, | |||
402 | static ssize_t store_##file_name \ | 404 | static ssize_t store_##file_name \ |
403 | (struct cpufreq_policy *policy, const char *buf, size_t count) \ | 405 | (struct cpufreq_policy *policy, const char *buf, size_t count) \ |
404 | { \ | 406 | { \ |
405 | unsigned int ret = -EINVAL; \ | 407 | unsigned int ret; \ |
406 | struct cpufreq_policy new_policy; \ | 408 | struct cpufreq_policy new_policy; \ |
407 | \ | 409 | \ |
408 | ret = cpufreq_get_policy(&new_policy, policy->cpu); \ | 410 | ret = cpufreq_get_policy(&new_policy, policy->cpu); \ |
@@ -445,7 +447,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) | |||
445 | else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) | 447 | else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) |
446 | return sprintf(buf, "performance\n"); | 448 | return sprintf(buf, "performance\n"); |
447 | else if (policy->governor) | 449 | else if (policy->governor) |
448 | return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", | 450 | return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", |
449 | policy->governor->name); | 451 | policy->governor->name); |
450 | return -EINVAL; | 452 | return -EINVAL; |
451 | } | 453 | } |
@@ -457,7 +459,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) | |||
457 | static ssize_t store_scaling_governor(struct cpufreq_policy *policy, | 459 | static ssize_t store_scaling_governor(struct cpufreq_policy *policy, |
458 | const char *buf, size_t count) | 460 | const char *buf, size_t count) |
459 | { | 461 | { |
460 | unsigned int ret = -EINVAL; | 462 | unsigned int ret; |
461 | char str_governor[16]; | 463 | char str_governor[16]; |
462 | struct cpufreq_policy new_policy; | 464 | struct cpufreq_policy new_policy; |
463 | 465 | ||
@@ -491,7 +493,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy, | |||
491 | */ | 493 | */ |
492 | static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf) | 494 | static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf) |
493 | { | 495 | { |
494 | return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name); | 496 | return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name); |
495 | } | 497 | } |
496 | 498 | ||
497 | /** | 499 | /** |
@@ -512,7 +514,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, | |||
512 | if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) | 514 | if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) |
513 | - (CPUFREQ_NAME_LEN + 2))) | 515 | - (CPUFREQ_NAME_LEN + 2))) |
514 | goto out; | 516 | goto out; |
515 | i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name); | 517 | i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name); |
516 | } | 518 | } |
517 | out: | 519 | out: |
518 | i += sprintf(&buf[i], "\n"); | 520 | i += sprintf(&buf[i], "\n"); |
@@ -581,7 +583,7 @@ static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) | |||
581 | } | 583 | } |
582 | 584 | ||
583 | /** | 585 | /** |
584 | * show_scaling_driver - show the current cpufreq HW/BIOS limitation | 586 | * show_bios_limit - show the current cpufreq HW/BIOS limitation |
585 | */ | 587 | */ |
586 | static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) | 588 | static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) |
587 | { | 589 | { |
@@ -1468,12 +1470,23 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, | |||
1468 | unsigned int relation) | 1470 | unsigned int relation) |
1469 | { | 1471 | { |
1470 | int retval = -EINVAL; | 1472 | int retval = -EINVAL; |
1473 | unsigned int old_target_freq = target_freq; | ||
1471 | 1474 | ||
1472 | if (cpufreq_disabled()) | 1475 | if (cpufreq_disabled()) |
1473 | return -ENODEV; | 1476 | return -ENODEV; |
1474 | 1477 | ||
1475 | pr_debug("target for CPU %u: %u kHz, relation %u\n", policy->cpu, | 1478 | /* Make sure that target_freq is within supported range */ |
1476 | target_freq, relation); | 1479 | if (target_freq > policy->max) |
1480 | target_freq = policy->max; | ||
1481 | if (target_freq < policy->min) | ||
1482 | target_freq = policy->min; | ||
1483 | |||
1484 | pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", | ||
1485 | policy->cpu, target_freq, relation, old_target_freq); | ||
1486 | |||
1487 | if (target_freq == policy->cur) | ||
1488 | return 0; | ||
1489 | |||
1477 | if (cpu_online(policy->cpu) && cpufreq_driver->target) | 1490 | if (cpu_online(policy->cpu) && cpufreq_driver->target) |
1478 | retval = cpufreq_driver->target(policy, target_freq, relation); | 1491 | retval = cpufreq_driver->target(policy, target_freq, relation); |
1479 | 1492 | ||
@@ -1509,12 +1522,14 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu) | |||
1509 | { | 1522 | { |
1510 | int ret = 0; | 1523 | int ret = 0; |
1511 | 1524 | ||
1525 | if (!(cpu_online(cpu) && cpufreq_driver->getavg)) | ||
1526 | return 0; | ||
1527 | |||
1512 | policy = cpufreq_cpu_get(policy->cpu); | 1528 | policy = cpufreq_cpu_get(policy->cpu); |
1513 | if (!policy) | 1529 | if (!policy) |
1514 | return -EINVAL; | 1530 | return -EINVAL; |
1515 | 1531 | ||
1516 | if (cpu_online(cpu) && cpufreq_driver->getavg) | 1532 | ret = cpufreq_driver->getavg(policy, cpu); |
1517 | ret = cpufreq_driver->getavg(policy, cpu); | ||
1518 | 1533 | ||
1519 | cpufreq_cpu_put(policy); | 1534 | cpufreq_cpu_put(policy); |
1520 | return ret; | 1535 | return ret; |
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index a152af7e1991..64ef737e7e72 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c | |||
@@ -11,83 +11,30 @@ | |||
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/cpufreq.h> | 14 | #include <linux/cpufreq.h> |
18 | #include <linux/cpu.h> | 15 | #include <linux/init.h> |
19 | #include <linux/jiffies.h> | 16 | #include <linux/kernel.h> |
20 | #include <linux/kernel_stat.h> | 17 | #include <linux/kernel_stat.h> |
18 | #include <linux/kobject.h> | ||
19 | #include <linux/module.h> | ||
21 | #include <linux/mutex.h> | 20 | #include <linux/mutex.h> |
22 | #include <linux/hrtimer.h> | 21 | #include <linux/notifier.h> |
23 | #include <linux/tick.h> | 22 | #include <linux/percpu-defs.h> |
24 | #include <linux/ktime.h> | 23 | #include <linux/sysfs.h> |
25 | #include <linux/sched.h> | 24 | #include <linux/types.h> |
26 | 25 | ||
27 | /* | 26 | #include "cpufreq_governor.h" |
28 | * dbs is used in this file as a shortform for demandbased switching | ||
29 | * It helps to keep variable names smaller, simpler | ||
30 | */ | ||
31 | 27 | ||
28 | /* Conservative governor macors */ | ||
32 | #define DEF_FREQUENCY_UP_THRESHOLD (80) | 29 | #define DEF_FREQUENCY_UP_THRESHOLD (80) |
33 | #define DEF_FREQUENCY_DOWN_THRESHOLD (20) | 30 | #define DEF_FREQUENCY_DOWN_THRESHOLD (20) |
34 | |||
35 | /* | ||
36 | * The polling frequency of this governor depends on the capability of | ||
37 | * the processor. Default polling frequency is 1000 times the transition | ||
38 | * latency of the processor. The governor will work on any processor with | ||
39 | * transition latency <= 10mS, using appropriate sampling | ||
40 | * rate. | ||
41 | * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) | ||
42 | * this governor will not work. | ||
43 | * All times here are in uS. | ||
44 | */ | ||
45 | #define MIN_SAMPLING_RATE_RATIO (2) | ||
46 | |||
47 | static unsigned int min_sampling_rate; | ||
48 | |||
49 | #define LATENCY_MULTIPLIER (1000) | ||
50 | #define MIN_LATENCY_MULTIPLIER (100) | ||
51 | #define DEF_SAMPLING_DOWN_FACTOR (1) | 31 | #define DEF_SAMPLING_DOWN_FACTOR (1) |
52 | #define MAX_SAMPLING_DOWN_FACTOR (10) | 32 | #define MAX_SAMPLING_DOWN_FACTOR (10) |
53 | #define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) | ||
54 | |||
55 | static void do_dbs_timer(struct work_struct *work); | ||
56 | |||
57 | struct cpu_dbs_info_s { | ||
58 | cputime64_t prev_cpu_idle; | ||
59 | cputime64_t prev_cpu_wall; | ||
60 | cputime64_t prev_cpu_nice; | ||
61 | struct cpufreq_policy *cur_policy; | ||
62 | struct delayed_work work; | ||
63 | unsigned int down_skip; | ||
64 | unsigned int requested_freq; | ||
65 | int cpu; | ||
66 | unsigned int enable:1; | ||
67 | /* | ||
68 | * percpu mutex that serializes governor limit change with | ||
69 | * do_dbs_timer invocation. We do not want do_dbs_timer to run | ||
70 | * when user is changing the governor or limits. | ||
71 | */ | ||
72 | struct mutex timer_mutex; | ||
73 | }; | ||
74 | static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info); | ||
75 | 33 | ||
76 | static unsigned int dbs_enable; /* number of CPUs using this policy */ | 34 | static struct dbs_data cs_dbs_data; |
35 | static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info); | ||
77 | 36 | ||
78 | /* | 37 | static struct cs_dbs_tuners cs_tuners = { |
79 | * dbs_mutex protects dbs_enable in governor start/stop. | ||
80 | */ | ||
81 | static DEFINE_MUTEX(dbs_mutex); | ||
82 | |||
83 | static struct dbs_tuners { | ||
84 | unsigned int sampling_rate; | ||
85 | unsigned int sampling_down_factor; | ||
86 | unsigned int up_threshold; | ||
87 | unsigned int down_threshold; | ||
88 | unsigned int ignore_nice; | ||
89 | unsigned int freq_step; | ||
90 | } dbs_tuners_ins = { | ||
91 | .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, | 38 | .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, |
92 | .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, | 39 | .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, |
93 | .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, | 40 | .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, |
@@ -95,95 +42,121 @@ static struct dbs_tuners { | |||
95 | .freq_step = 5, | 42 | .freq_step = 5, |
96 | }; | 43 | }; |
97 | 44 | ||
98 | static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) | 45 | /* |
46 | * Every sampling_rate, we check, if current idle time is less than 20% | ||
47 | * (default), then we try to increase frequency Every sampling_rate * | ||
48 | * sampling_down_factor, we check, if current idle time is more than 80%, then | ||
49 | * we try to decrease frequency | ||
50 | * | ||
51 | * Any frequency increase takes it to the maximum frequency. Frequency reduction | ||
52 | * happens at minimum steps of 5% (default) of maximum frequency | ||
53 | */ | ||
54 | static void cs_check_cpu(int cpu, unsigned int load) | ||
99 | { | 55 | { |
100 | u64 idle_time; | 56 | struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); |
101 | u64 cur_wall_time; | 57 | struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; |
102 | u64 busy_time; | 58 | unsigned int freq_target; |
59 | |||
60 | /* | ||
61 | * break out if we 'cannot' reduce the speed as the user might | ||
62 | * want freq_step to be zero | ||
63 | */ | ||
64 | if (cs_tuners.freq_step == 0) | ||
65 | return; | ||
103 | 66 | ||
104 | cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); | 67 | /* Check for frequency increase */ |
68 | if (load > cs_tuners.up_threshold) { | ||
69 | dbs_info->down_skip = 0; | ||
105 | 70 | ||
106 | busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; | 71 | /* if we are already at full speed then break out early */ |
107 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; | 72 | if (dbs_info->requested_freq == policy->max) |
108 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; | 73 | return; |
109 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; | ||
110 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; | ||
111 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; | ||
112 | 74 | ||
113 | idle_time = cur_wall_time - busy_time; | 75 | freq_target = (cs_tuners.freq_step * policy->max) / 100; |
114 | if (wall) | ||
115 | *wall = jiffies_to_usecs(cur_wall_time); | ||
116 | 76 | ||
117 | return jiffies_to_usecs(idle_time); | 77 | /* max freq cannot be less than 100. But who knows.... */ |
78 | if (unlikely(freq_target == 0)) | ||
79 | freq_target = 5; | ||
80 | |||
81 | dbs_info->requested_freq += freq_target; | ||
82 | if (dbs_info->requested_freq > policy->max) | ||
83 | dbs_info->requested_freq = policy->max; | ||
84 | |||
85 | __cpufreq_driver_target(policy, dbs_info->requested_freq, | ||
86 | CPUFREQ_RELATION_H); | ||
87 | return; | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * The optimal frequency is the frequency that is the lowest that can | ||
92 | * support the current CPU usage without triggering the up policy. To be | ||
93 | * safe, we focus 10 points under the threshold. | ||
94 | */ | ||
95 | if (load < (cs_tuners.down_threshold - 10)) { | ||
96 | freq_target = (cs_tuners.freq_step * policy->max) / 100; | ||
97 | |||
98 | dbs_info->requested_freq -= freq_target; | ||
99 | if (dbs_info->requested_freq < policy->min) | ||
100 | dbs_info->requested_freq = policy->min; | ||
101 | |||
102 | /* | ||
103 | * if we cannot reduce the frequency anymore, break out early | ||
104 | */ | ||
105 | if (policy->cur == policy->min) | ||
106 | return; | ||
107 | |||
108 | __cpufreq_driver_target(policy, dbs_info->requested_freq, | ||
109 | CPUFREQ_RELATION_H); | ||
110 | return; | ||
111 | } | ||
118 | } | 112 | } |
119 | 113 | ||
120 | static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) | 114 | static void cs_dbs_timer(struct work_struct *work) |
121 | { | 115 | { |
122 | u64 idle_time = get_cpu_idle_time_us(cpu, NULL); | 116 | struct cs_cpu_dbs_info_s *dbs_info = container_of(work, |
117 | struct cs_cpu_dbs_info_s, cdbs.work.work); | ||
118 | unsigned int cpu = dbs_info->cdbs.cpu; | ||
119 | int delay = delay_for_sampling_rate(cs_tuners.sampling_rate); | ||
123 | 120 | ||
124 | if (idle_time == -1ULL) | 121 | mutex_lock(&dbs_info->cdbs.timer_mutex); |
125 | return get_cpu_idle_time_jiffy(cpu, wall); | ||
126 | else | ||
127 | idle_time += get_cpu_iowait_time_us(cpu, wall); | ||
128 | 122 | ||
129 | return idle_time; | 123 | dbs_check_cpu(&cs_dbs_data, cpu); |
124 | |||
125 | schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay); | ||
126 | mutex_unlock(&dbs_info->cdbs.timer_mutex); | ||
130 | } | 127 | } |
131 | 128 | ||
132 | /* keep track of frequency transitions */ | 129 | static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, |
133 | static int | 130 | void *data) |
134 | dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, | ||
135 | void *data) | ||
136 | { | 131 | { |
137 | struct cpufreq_freqs *freq = data; | 132 | struct cpufreq_freqs *freq = data; |
138 | struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info, | 133 | struct cs_cpu_dbs_info_s *dbs_info = |
139 | freq->cpu); | 134 | &per_cpu(cs_cpu_dbs_info, freq->cpu); |
140 | |||
141 | struct cpufreq_policy *policy; | 135 | struct cpufreq_policy *policy; |
142 | 136 | ||
143 | if (!this_dbs_info->enable) | 137 | if (!dbs_info->enable) |
144 | return 0; | 138 | return 0; |
145 | 139 | ||
146 | policy = this_dbs_info->cur_policy; | 140 | policy = dbs_info->cdbs.cur_policy; |
147 | 141 | ||
148 | /* | 142 | /* |
149 | * we only care if our internally tracked freq moves outside | 143 | * we only care if our internally tracked freq moves outside the 'valid' |
150 | * the 'valid' ranges of freqency available to us otherwise | 144 | * ranges of freqency available to us otherwise we do not change it |
151 | * we do not change it | ||
152 | */ | 145 | */ |
153 | if (this_dbs_info->requested_freq > policy->max | 146 | if (dbs_info->requested_freq > policy->max |
154 | || this_dbs_info->requested_freq < policy->min) | 147 | || dbs_info->requested_freq < policy->min) |
155 | this_dbs_info->requested_freq = freq->new; | 148 | dbs_info->requested_freq = freq->new; |
156 | 149 | ||
157 | return 0; | 150 | return 0; |
158 | } | 151 | } |
159 | 152 | ||
160 | static struct notifier_block dbs_cpufreq_notifier_block = { | ||
161 | .notifier_call = dbs_cpufreq_notifier | ||
162 | }; | ||
163 | |||
164 | /************************** sysfs interface ************************/ | 153 | /************************** sysfs interface ************************/ |
165 | static ssize_t show_sampling_rate_min(struct kobject *kobj, | 154 | static ssize_t show_sampling_rate_min(struct kobject *kobj, |
166 | struct attribute *attr, char *buf) | 155 | struct attribute *attr, char *buf) |
167 | { | 156 | { |
168 | return sprintf(buf, "%u\n", min_sampling_rate); | 157 | return sprintf(buf, "%u\n", cs_dbs_data.min_sampling_rate); |
169 | } | 158 | } |
170 | 159 | ||
171 | define_one_global_ro(sampling_rate_min); | ||
172 | |||
173 | /* cpufreq_conservative Governor Tunables */ | ||
174 | #define show_one(file_name, object) \ | ||
175 | static ssize_t show_##file_name \ | ||
176 | (struct kobject *kobj, struct attribute *attr, char *buf) \ | ||
177 | { \ | ||
178 | return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ | ||
179 | } | ||
180 | show_one(sampling_rate, sampling_rate); | ||
181 | show_one(sampling_down_factor, sampling_down_factor); | ||
182 | show_one(up_threshold, up_threshold); | ||
183 | show_one(down_threshold, down_threshold); | ||
184 | show_one(ignore_nice_load, ignore_nice); | ||
185 | show_one(freq_step, freq_step); | ||
186 | |||
187 | static ssize_t store_sampling_down_factor(struct kobject *a, | 160 | static ssize_t store_sampling_down_factor(struct kobject *a, |
188 | struct attribute *b, | 161 | struct attribute *b, |
189 | const char *buf, size_t count) | 162 | const char *buf, size_t count) |
@@ -195,7 +168,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a, | |||
195 | if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) | 168 | if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) |
196 | return -EINVAL; | 169 | return -EINVAL; |
197 | 170 | ||
198 | dbs_tuners_ins.sampling_down_factor = input; | 171 | cs_tuners.sampling_down_factor = input; |
199 | return count; | 172 | return count; |
200 | } | 173 | } |
201 | 174 | ||
@@ -209,7 +182,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, | |||
209 | if (ret != 1) | 182 | if (ret != 1) |
210 | return -EINVAL; | 183 | return -EINVAL; |
211 | 184 | ||
212 | dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); | 185 | cs_tuners.sampling_rate = max(input, cs_dbs_data.min_sampling_rate); |
213 | return count; | 186 | return count; |
214 | } | 187 | } |
215 | 188 | ||
@@ -220,11 +193,10 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, | |||
220 | int ret; | 193 | int ret; |
221 | ret = sscanf(buf, "%u", &input); | 194 | ret = sscanf(buf, "%u", &input); |
222 | 195 | ||
223 | if (ret != 1 || input > 100 || | 196 | if (ret != 1 || input > 100 || input <= cs_tuners.down_threshold) |
224 | input <= dbs_tuners_ins.down_threshold) | ||
225 | return -EINVAL; | 197 | return -EINVAL; |
226 | 198 | ||
227 | dbs_tuners_ins.up_threshold = input; | 199 | cs_tuners.up_threshold = input; |
228 | return count; | 200 | return count; |
229 | } | 201 | } |
230 | 202 | ||
@@ -237,21 +209,19 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b, | |||
237 | 209 | ||
238 | /* cannot be lower than 11 otherwise freq will not fall */ | 210 | /* cannot be lower than 11 otherwise freq will not fall */ |
239 | if (ret != 1 || input < 11 || input > 100 || | 211 | if (ret != 1 || input < 11 || input > 100 || |
240 | input >= dbs_tuners_ins.up_threshold) | 212 | input >= cs_tuners.up_threshold) |
241 | return -EINVAL; | 213 | return -EINVAL; |
242 | 214 | ||
243 | dbs_tuners_ins.down_threshold = input; | 215 | cs_tuners.down_threshold = input; |
244 | return count; | 216 | return count; |
245 | } | 217 | } |
246 | 218 | ||
247 | static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, | 219 | static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, |
248 | const char *buf, size_t count) | 220 | const char *buf, size_t count) |
249 | { | 221 | { |
250 | unsigned int input; | 222 | unsigned int input, j; |
251 | int ret; | 223 | int ret; |
252 | 224 | ||
253 | unsigned int j; | ||
254 | |||
255 | ret = sscanf(buf, "%u", &input); | 225 | ret = sscanf(buf, "%u", &input); |
256 | if (ret != 1) | 226 | if (ret != 1) |
257 | return -EINVAL; | 227 | return -EINVAL; |
@@ -259,19 +229,20 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, | |||
259 | if (input > 1) | 229 | if (input > 1) |
260 | input = 1; | 230 | input = 1; |
261 | 231 | ||
262 | if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */ | 232 | if (input == cs_tuners.ignore_nice) /* nothing to do */ |
263 | return count; | 233 | return count; |
264 | 234 | ||
265 | dbs_tuners_ins.ignore_nice = input; | 235 | cs_tuners.ignore_nice = input; |
266 | 236 | ||
267 | /* we need to re-evaluate prev_cpu_idle */ | 237 | /* we need to re-evaluate prev_cpu_idle */ |
268 | for_each_online_cpu(j) { | 238 | for_each_online_cpu(j) { |
269 | struct cpu_dbs_info_s *dbs_info; | 239 | struct cs_cpu_dbs_info_s *dbs_info; |
270 | dbs_info = &per_cpu(cs_cpu_dbs_info, j); | 240 | dbs_info = &per_cpu(cs_cpu_dbs_info, j); |
271 | dbs_info->prev_cpu_idle = get_cpu_idle_time(j, | 241 | dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, |
272 | &dbs_info->prev_cpu_wall); | 242 | &dbs_info->cdbs.prev_cpu_wall); |
273 | if (dbs_tuners_ins.ignore_nice) | 243 | if (cs_tuners.ignore_nice) |
274 | dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | 244 | dbs_info->cdbs.prev_cpu_nice = |
245 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | ||
275 | } | 246 | } |
276 | return count; | 247 | return count; |
277 | } | 248 | } |
@@ -289,18 +260,28 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b, | |||
289 | if (input > 100) | 260 | if (input > 100) |
290 | input = 100; | 261 | input = 100; |
291 | 262 | ||
292 | /* no need to test here if freq_step is zero as the user might actually | 263 | /* |
293 | * want this, they would be crazy though :) */ | 264 | * no need to test here if freq_step is zero as the user might actually |
294 | dbs_tuners_ins.freq_step = input; | 265 | * want this, they would be crazy though :) |
266 | */ | ||
267 | cs_tuners.freq_step = input; | ||
295 | return count; | 268 | return count; |
296 | } | 269 | } |
297 | 270 | ||
271 | show_one(cs, sampling_rate, sampling_rate); | ||
272 | show_one(cs, sampling_down_factor, sampling_down_factor); | ||
273 | show_one(cs, up_threshold, up_threshold); | ||
274 | show_one(cs, down_threshold, down_threshold); | ||
275 | show_one(cs, ignore_nice_load, ignore_nice); | ||
276 | show_one(cs, freq_step, freq_step); | ||
277 | |||
298 | define_one_global_rw(sampling_rate); | 278 | define_one_global_rw(sampling_rate); |
299 | define_one_global_rw(sampling_down_factor); | 279 | define_one_global_rw(sampling_down_factor); |
300 | define_one_global_rw(up_threshold); | 280 | define_one_global_rw(up_threshold); |
301 | define_one_global_rw(down_threshold); | 281 | define_one_global_rw(down_threshold); |
302 | define_one_global_rw(ignore_nice_load); | 282 | define_one_global_rw(ignore_nice_load); |
303 | define_one_global_rw(freq_step); | 283 | define_one_global_rw(freq_step); |
284 | define_one_global_ro(sampling_rate_min); | ||
304 | 285 | ||
305 | static struct attribute *dbs_attributes[] = { | 286 | static struct attribute *dbs_attributes[] = { |
306 | &sampling_rate_min.attr, | 287 | &sampling_rate_min.attr, |
@@ -313,283 +294,38 @@ static struct attribute *dbs_attributes[] = { | |||
313 | NULL | 294 | NULL |
314 | }; | 295 | }; |
315 | 296 | ||
316 | static struct attribute_group dbs_attr_group = { | 297 | static struct attribute_group cs_attr_group = { |
317 | .attrs = dbs_attributes, | 298 | .attrs = dbs_attributes, |
318 | .name = "conservative", | 299 | .name = "conservative", |
319 | }; | 300 | }; |
320 | 301 | ||
321 | /************************** sysfs end ************************/ | 302 | /************************** sysfs end ************************/ |
322 | 303 | ||
323 | static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | 304 | define_get_cpu_dbs_routines(cs_cpu_dbs_info); |
324 | { | ||
325 | unsigned int load = 0; | ||
326 | unsigned int max_load = 0; | ||
327 | unsigned int freq_target; | ||
328 | |||
329 | struct cpufreq_policy *policy; | ||
330 | unsigned int j; | ||
331 | |||
332 | policy = this_dbs_info->cur_policy; | ||
333 | |||
334 | /* | ||
335 | * Every sampling_rate, we check, if current idle time is less | ||
336 | * than 20% (default), then we try to increase frequency | ||
337 | * Every sampling_rate*sampling_down_factor, we check, if current | ||
338 | * idle time is more than 80%, then we try to decrease frequency | ||
339 | * | ||
340 | * Any frequency increase takes it to the maximum frequency. | ||
341 | * Frequency reduction happens at minimum steps of | ||
342 | * 5% (default) of maximum frequency | ||
343 | */ | ||
344 | |||
345 | /* Get Absolute Load */ | ||
346 | for_each_cpu(j, policy->cpus) { | ||
347 | struct cpu_dbs_info_s *j_dbs_info; | ||
348 | cputime64_t cur_wall_time, cur_idle_time; | ||
349 | unsigned int idle_time, wall_time; | ||
350 | |||
351 | j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); | ||
352 | |||
353 | cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); | ||
354 | |||
355 | wall_time = (unsigned int) | ||
356 | (cur_wall_time - j_dbs_info->prev_cpu_wall); | ||
357 | j_dbs_info->prev_cpu_wall = cur_wall_time; | ||
358 | |||
359 | idle_time = (unsigned int) | ||
360 | (cur_idle_time - j_dbs_info->prev_cpu_idle); | ||
361 | j_dbs_info->prev_cpu_idle = cur_idle_time; | ||
362 | |||
363 | if (dbs_tuners_ins.ignore_nice) { | ||
364 | u64 cur_nice; | ||
365 | unsigned long cur_nice_jiffies; | ||
366 | |||
367 | cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - | ||
368 | j_dbs_info->prev_cpu_nice; | ||
369 | /* | ||
370 | * Assumption: nice time between sampling periods will | ||
371 | * be less than 2^32 jiffies for 32 bit sys | ||
372 | */ | ||
373 | cur_nice_jiffies = (unsigned long) | ||
374 | cputime64_to_jiffies64(cur_nice); | ||
375 | |||
376 | j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | ||
377 | idle_time += jiffies_to_usecs(cur_nice_jiffies); | ||
378 | } | ||
379 | 305 | ||
380 | if (unlikely(!wall_time || wall_time < idle_time)) | 306 | static struct notifier_block cs_cpufreq_notifier_block = { |
381 | continue; | 307 | .notifier_call = dbs_cpufreq_notifier, |
382 | 308 | }; | |
383 | load = 100 * (wall_time - idle_time) / wall_time; | ||
384 | |||
385 | if (load > max_load) | ||
386 | max_load = load; | ||
387 | } | ||
388 | |||
389 | /* | ||
390 | * break out if we 'cannot' reduce the speed as the user might | ||
391 | * want freq_step to be zero | ||
392 | */ | ||
393 | if (dbs_tuners_ins.freq_step == 0) | ||
394 | return; | ||
395 | |||
396 | /* Check for frequency increase */ | ||
397 | if (max_load > dbs_tuners_ins.up_threshold) { | ||
398 | this_dbs_info->down_skip = 0; | ||
399 | |||
400 | /* if we are already at full speed then break out early */ | ||
401 | if (this_dbs_info->requested_freq == policy->max) | ||
402 | return; | ||
403 | |||
404 | freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; | ||
405 | |||
406 | /* max freq cannot be less than 100. But who knows.... */ | ||
407 | if (unlikely(freq_target == 0)) | ||
408 | freq_target = 5; | ||
409 | |||
410 | this_dbs_info->requested_freq += freq_target; | ||
411 | if (this_dbs_info->requested_freq > policy->max) | ||
412 | this_dbs_info->requested_freq = policy->max; | ||
413 | |||
414 | __cpufreq_driver_target(policy, this_dbs_info->requested_freq, | ||
415 | CPUFREQ_RELATION_H); | ||
416 | return; | ||
417 | } | ||
418 | |||
419 | /* | ||
420 | * The optimal frequency is the frequency that is the lowest that | ||
421 | * can support the current CPU usage without triggering the up | ||
422 | * policy. To be safe, we focus 10 points under the threshold. | ||
423 | */ | ||
424 | if (max_load < (dbs_tuners_ins.down_threshold - 10)) { | ||
425 | freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; | ||
426 | |||
427 | this_dbs_info->requested_freq -= freq_target; | ||
428 | if (this_dbs_info->requested_freq < policy->min) | ||
429 | this_dbs_info->requested_freq = policy->min; | ||
430 | |||
431 | /* | ||
432 | * if we cannot reduce the frequency anymore, break out early | ||
433 | */ | ||
434 | if (policy->cur == policy->min) | ||
435 | return; | ||
436 | |||
437 | __cpufreq_driver_target(policy, this_dbs_info->requested_freq, | ||
438 | CPUFREQ_RELATION_H); | ||
439 | return; | ||
440 | } | ||
441 | } | ||
442 | |||
443 | static void do_dbs_timer(struct work_struct *work) | ||
444 | { | ||
445 | struct cpu_dbs_info_s *dbs_info = | ||
446 | container_of(work, struct cpu_dbs_info_s, work.work); | ||
447 | unsigned int cpu = dbs_info->cpu; | ||
448 | |||
449 | /* We want all CPUs to do sampling nearly on same jiffy */ | ||
450 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); | ||
451 | |||
452 | delay -= jiffies % delay; | ||
453 | |||
454 | mutex_lock(&dbs_info->timer_mutex); | ||
455 | |||
456 | dbs_check_cpu(dbs_info); | ||
457 | |||
458 | schedule_delayed_work_on(cpu, &dbs_info->work, delay); | ||
459 | mutex_unlock(&dbs_info->timer_mutex); | ||
460 | } | ||
461 | |||
462 | static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) | ||
463 | { | ||
464 | /* We want all CPUs to do sampling nearly on same jiffy */ | ||
465 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); | ||
466 | delay -= jiffies % delay; | ||
467 | 309 | ||
468 | dbs_info->enable = 1; | 310 | static struct cs_ops cs_ops = { |
469 | INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer); | 311 | .notifier_block = &cs_cpufreq_notifier_block, |
470 | schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); | 312 | }; |
471 | } | ||
472 | 313 | ||
473 | static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) | 314 | static struct dbs_data cs_dbs_data = { |
474 | { | 315 | .governor = GOV_CONSERVATIVE, |
475 | dbs_info->enable = 0; | 316 | .attr_group = &cs_attr_group, |
476 | cancel_delayed_work_sync(&dbs_info->work); | 317 | .tuners = &cs_tuners, |
477 | } | 318 | .get_cpu_cdbs = get_cpu_cdbs, |
319 | .get_cpu_dbs_info_s = get_cpu_dbs_info_s, | ||
320 | .gov_dbs_timer = cs_dbs_timer, | ||
321 | .gov_check_cpu = cs_check_cpu, | ||
322 | .gov_ops = &cs_ops, | ||
323 | }; | ||
478 | 324 | ||
479 | static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | 325 | static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy, |
480 | unsigned int event) | 326 | unsigned int event) |
481 | { | 327 | { |
482 | unsigned int cpu = policy->cpu; | 328 | return cpufreq_governor_dbs(&cs_dbs_data, policy, event); |
483 | struct cpu_dbs_info_s *this_dbs_info; | ||
484 | unsigned int j; | ||
485 | int rc; | ||
486 | |||
487 | this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); | ||
488 | |||
489 | switch (event) { | ||
490 | case CPUFREQ_GOV_START: | ||
491 | if ((!cpu_online(cpu)) || (!policy->cur)) | ||
492 | return -EINVAL; | ||
493 | |||
494 | mutex_lock(&dbs_mutex); | ||
495 | |||
496 | for_each_cpu(j, policy->cpus) { | ||
497 | struct cpu_dbs_info_s *j_dbs_info; | ||
498 | j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); | ||
499 | j_dbs_info->cur_policy = policy; | ||
500 | |||
501 | j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, | ||
502 | &j_dbs_info->prev_cpu_wall); | ||
503 | if (dbs_tuners_ins.ignore_nice) | ||
504 | j_dbs_info->prev_cpu_nice = | ||
505 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | ||
506 | } | ||
507 | this_dbs_info->cpu = cpu; | ||
508 | this_dbs_info->down_skip = 0; | ||
509 | this_dbs_info->requested_freq = policy->cur; | ||
510 | |||
511 | mutex_init(&this_dbs_info->timer_mutex); | ||
512 | dbs_enable++; | ||
513 | /* | ||
514 | * Start the timerschedule work, when this governor | ||
515 | * is used for first time | ||
516 | */ | ||
517 | if (dbs_enable == 1) { | ||
518 | unsigned int latency; | ||
519 | /* policy latency is in nS. Convert it to uS first */ | ||
520 | latency = policy->cpuinfo.transition_latency / 1000; | ||
521 | if (latency == 0) | ||
522 | latency = 1; | ||
523 | |||
524 | rc = sysfs_create_group(cpufreq_global_kobject, | ||
525 | &dbs_attr_group); | ||
526 | if (rc) { | ||
527 | mutex_unlock(&dbs_mutex); | ||
528 | return rc; | ||
529 | } | ||
530 | |||
531 | /* | ||
532 | * conservative does not implement micro like ondemand | ||
533 | * governor, thus we are bound to jiffes/HZ | ||
534 | */ | ||
535 | min_sampling_rate = | ||
536 | MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); | ||
537 | /* Bring kernel and HW constraints together */ | ||
538 | min_sampling_rate = max(min_sampling_rate, | ||
539 | MIN_LATENCY_MULTIPLIER * latency); | ||
540 | dbs_tuners_ins.sampling_rate = | ||
541 | max(min_sampling_rate, | ||
542 | latency * LATENCY_MULTIPLIER); | ||
543 | |||
544 | cpufreq_register_notifier( | ||
545 | &dbs_cpufreq_notifier_block, | ||
546 | CPUFREQ_TRANSITION_NOTIFIER); | ||
547 | } | ||
548 | mutex_unlock(&dbs_mutex); | ||
549 | |||
550 | dbs_timer_init(this_dbs_info); | ||
551 | |||
552 | break; | ||
553 | |||
554 | case CPUFREQ_GOV_STOP: | ||
555 | dbs_timer_exit(this_dbs_info); | ||
556 | |||
557 | mutex_lock(&dbs_mutex); | ||
558 | dbs_enable--; | ||
559 | mutex_destroy(&this_dbs_info->timer_mutex); | ||
560 | |||
561 | /* | ||
562 | * Stop the timerschedule work, when this governor | ||
563 | * is used for first time | ||
564 | */ | ||
565 | if (dbs_enable == 0) | ||
566 | cpufreq_unregister_notifier( | ||
567 | &dbs_cpufreq_notifier_block, | ||
568 | CPUFREQ_TRANSITION_NOTIFIER); | ||
569 | |||
570 | mutex_unlock(&dbs_mutex); | ||
571 | if (!dbs_enable) | ||
572 | sysfs_remove_group(cpufreq_global_kobject, | ||
573 | &dbs_attr_group); | ||
574 | |||
575 | break; | ||
576 | |||
577 | case CPUFREQ_GOV_LIMITS: | ||
578 | mutex_lock(&this_dbs_info->timer_mutex); | ||
579 | if (policy->max < this_dbs_info->cur_policy->cur) | ||
580 | __cpufreq_driver_target( | ||
581 | this_dbs_info->cur_policy, | ||
582 | policy->max, CPUFREQ_RELATION_H); | ||
583 | else if (policy->min > this_dbs_info->cur_policy->cur) | ||
584 | __cpufreq_driver_target( | ||
585 | this_dbs_info->cur_policy, | ||
586 | policy->min, CPUFREQ_RELATION_L); | ||
587 | dbs_check_cpu(this_dbs_info); | ||
588 | mutex_unlock(&this_dbs_info->timer_mutex); | ||
589 | |||
590 | break; | ||
591 | } | ||
592 | return 0; | ||
593 | } | 329 | } |
594 | 330 | ||
595 | #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE | 331 | #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE |
@@ -597,13 +333,14 @@ static | |||
597 | #endif | 333 | #endif |
598 | struct cpufreq_governor cpufreq_gov_conservative = { | 334 | struct cpufreq_governor cpufreq_gov_conservative = { |
599 | .name = "conservative", | 335 | .name = "conservative", |
600 | .governor = cpufreq_governor_dbs, | 336 | .governor = cs_cpufreq_governor_dbs, |
601 | .max_transition_latency = TRANSITION_LATENCY_LIMIT, | 337 | .max_transition_latency = TRANSITION_LATENCY_LIMIT, |
602 | .owner = THIS_MODULE, | 338 | .owner = THIS_MODULE, |
603 | }; | 339 | }; |
604 | 340 | ||
605 | static int __init cpufreq_gov_dbs_init(void) | 341 | static int __init cpufreq_gov_dbs_init(void) |
606 | { | 342 | { |
343 | mutex_init(&cs_dbs_data.mutex); | ||
607 | return cpufreq_register_governor(&cpufreq_gov_conservative); | 344 | return cpufreq_register_governor(&cpufreq_gov_conservative); |
608 | } | 345 | } |
609 | 346 | ||
@@ -612,7 +349,6 @@ static void __exit cpufreq_gov_dbs_exit(void) | |||
612 | cpufreq_unregister_governor(&cpufreq_gov_conservative); | 349 | cpufreq_unregister_governor(&cpufreq_gov_conservative); |
613 | } | 350 | } |
614 | 351 | ||
615 | |||
616 | MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>"); | 352 | MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>"); |
617 | MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for " | 353 | MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for " |
618 | "Low Latency Frequency Transition capable processors " | 354 | "Low Latency Frequency Transition capable processors " |
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c new file mode 100644 index 000000000000..6c5f1d383cdc --- /dev/null +++ b/drivers/cpufreq/cpufreq_governor.c | |||
@@ -0,0 +1,318 @@ | |||
1 | /* | ||
2 | * drivers/cpufreq/cpufreq_governor.c | ||
3 | * | ||
4 | * CPUFREQ governors common code | ||
5 | * | ||
6 | * Copyright (C) 2001 Russell King | ||
7 | * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>. | ||
8 | * (C) 2003 Jun Nakajima <jun.nakajima@intel.com> | ||
9 | * (C) 2009 Alexander Clouter <alex@digriz.org.uk> | ||
10 | * (c) 2012 Viresh Kumar <viresh.kumar@linaro.org> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | */ | ||
16 | |||
17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
18 | |||
19 | #include <asm/cputime.h> | ||
20 | #include <linux/cpufreq.h> | ||
21 | #include <linux/cpumask.h> | ||
22 | #include <linux/export.h> | ||
23 | #include <linux/kernel_stat.h> | ||
24 | #include <linux/mutex.h> | ||
25 | #include <linux/tick.h> | ||
26 | #include <linux/types.h> | ||
27 | #include <linux/workqueue.h> | ||
28 | |||
29 | #include "cpufreq_governor.h" | ||
30 | |||
31 | static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) | ||
32 | { | ||
33 | u64 idle_time; | ||
34 | u64 cur_wall_time; | ||
35 | u64 busy_time; | ||
36 | |||
37 | cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); | ||
38 | |||
39 | busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; | ||
40 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; | ||
41 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; | ||
42 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; | ||
43 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; | ||
44 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; | ||
45 | |||
46 | idle_time = cur_wall_time - busy_time; | ||
47 | if (wall) | ||
48 | *wall = cputime_to_usecs(cur_wall_time); | ||
49 | |||
50 | return cputime_to_usecs(idle_time); | ||
51 | } | ||
52 | |||
53 | u64 get_cpu_idle_time(unsigned int cpu, u64 *wall) | ||
54 | { | ||
55 | u64 idle_time = get_cpu_idle_time_us(cpu, NULL); | ||
56 | |||
57 | if (idle_time == -1ULL) | ||
58 | return get_cpu_idle_time_jiffy(cpu, wall); | ||
59 | else | ||
60 | idle_time += get_cpu_iowait_time_us(cpu, wall); | ||
61 | |||
62 | return idle_time; | ||
63 | } | ||
64 | EXPORT_SYMBOL_GPL(get_cpu_idle_time); | ||
65 | |||
66 | void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) | ||
67 | { | ||
68 | struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu); | ||
69 | struct od_dbs_tuners *od_tuners = dbs_data->tuners; | ||
70 | struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; | ||
71 | struct cpufreq_policy *policy; | ||
72 | unsigned int max_load = 0; | ||
73 | unsigned int ignore_nice; | ||
74 | unsigned int j; | ||
75 | |||
76 | if (dbs_data->governor == GOV_ONDEMAND) | ||
77 | ignore_nice = od_tuners->ignore_nice; | ||
78 | else | ||
79 | ignore_nice = cs_tuners->ignore_nice; | ||
80 | |||
81 | policy = cdbs->cur_policy; | ||
82 | |||
83 | /* Get Absolute Load (in terms of freq for ondemand gov) */ | ||
84 | for_each_cpu(j, policy->cpus) { | ||
85 | struct cpu_dbs_common_info *j_cdbs; | ||
86 | u64 cur_wall_time, cur_idle_time, cur_iowait_time; | ||
87 | unsigned int idle_time, wall_time, iowait_time; | ||
88 | unsigned int load; | ||
89 | |||
90 | j_cdbs = dbs_data->get_cpu_cdbs(j); | ||
91 | |||
92 | cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); | ||
93 | |||
94 | wall_time = (unsigned int) | ||
95 | (cur_wall_time - j_cdbs->prev_cpu_wall); | ||
96 | j_cdbs->prev_cpu_wall = cur_wall_time; | ||
97 | |||
98 | idle_time = (unsigned int) | ||
99 | (cur_idle_time - j_cdbs->prev_cpu_idle); | ||
100 | j_cdbs->prev_cpu_idle = cur_idle_time; | ||
101 | |||
102 | if (ignore_nice) { | ||
103 | u64 cur_nice; | ||
104 | unsigned long cur_nice_jiffies; | ||
105 | |||
106 | cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - | ||
107 | cdbs->prev_cpu_nice; | ||
108 | /* | ||
109 | * Assumption: nice time between sampling periods will | ||
110 | * be less than 2^32 jiffies for 32 bit sys | ||
111 | */ | ||
112 | cur_nice_jiffies = (unsigned long) | ||
113 | cputime64_to_jiffies64(cur_nice); | ||
114 | |||
115 | cdbs->prev_cpu_nice = | ||
116 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | ||
117 | idle_time += jiffies_to_usecs(cur_nice_jiffies); | ||
118 | } | ||
119 | |||
120 | if (dbs_data->governor == GOV_ONDEMAND) { | ||
121 | struct od_cpu_dbs_info_s *od_j_dbs_info = | ||
122 | dbs_data->get_cpu_dbs_info_s(cpu); | ||
123 | |||
124 | cur_iowait_time = get_cpu_iowait_time_us(j, | ||
125 | &cur_wall_time); | ||
126 | if (cur_iowait_time == -1ULL) | ||
127 | cur_iowait_time = 0; | ||
128 | |||
129 | iowait_time = (unsigned int) (cur_iowait_time - | ||
130 | od_j_dbs_info->prev_cpu_iowait); | ||
131 | od_j_dbs_info->prev_cpu_iowait = cur_iowait_time; | ||
132 | |||
133 | /* | ||
134 | * For the purpose of ondemand, waiting for disk IO is | ||
135 | * an indication that you're performance critical, and | ||
136 | * not that the system is actually idle. So subtract the | ||
137 | * iowait time from the cpu idle time. | ||
138 | */ | ||
139 | if (od_tuners->io_is_busy && idle_time >= iowait_time) | ||
140 | idle_time -= iowait_time; | ||
141 | } | ||
142 | |||
143 | if (unlikely(!wall_time || wall_time < idle_time)) | ||
144 | continue; | ||
145 | |||
146 | load = 100 * (wall_time - idle_time) / wall_time; | ||
147 | |||
148 | if (dbs_data->governor == GOV_ONDEMAND) { | ||
149 | int freq_avg = __cpufreq_driver_getavg(policy, j); | ||
150 | if (freq_avg <= 0) | ||
151 | freq_avg = policy->cur; | ||
152 | |||
153 | load *= freq_avg; | ||
154 | } | ||
155 | |||
156 | if (load > max_load) | ||
157 | max_load = load; | ||
158 | } | ||
159 | |||
160 | dbs_data->gov_check_cpu(cpu, max_load); | ||
161 | } | ||
162 | EXPORT_SYMBOL_GPL(dbs_check_cpu); | ||
163 | |||
164 | static inline void dbs_timer_init(struct dbs_data *dbs_data, | ||
165 | struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate) | ||
166 | { | ||
167 | int delay = delay_for_sampling_rate(sampling_rate); | ||
168 | |||
169 | INIT_DEFERRABLE_WORK(&cdbs->work, dbs_data->gov_dbs_timer); | ||
170 | schedule_delayed_work_on(cdbs->cpu, &cdbs->work, delay); | ||
171 | } | ||
172 | |||
/* Stop the sampling work and wait for an in-flight invocation to finish. */
static inline void dbs_timer_exit(struct cpu_dbs_common_info *cdbs)
{
	cancel_delayed_work_sync(&cdbs->work);
}
177 | |||
/*
 * cpufreq_governor_dbs - common GOV_START/GOV_STOP/GOV_LIMITS handling for
 * the ondemand and conservative governors.
 * @dbs_data: per-governor data (tuners, callbacks, enable count, mutex).
 * @policy: cpufreq policy the event applies to.
 * @event: CPUFREQ_GOV_START, CPUFREQ_GOV_STOP or CPUFREQ_GOV_LIMITS.
 *
 * Returns 0 on success, -EINVAL when started on an offline CPU or a policy
 * without a current frequency, or the sysfs_create_group() error code.
 */
int cpufreq_governor_dbs(struct dbs_data *dbs_data,
		struct cpufreq_policy *policy, unsigned int event)
{
	struct od_cpu_dbs_info_s *od_dbs_info = NULL;
	struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	struct cpu_dbs_common_info *cpu_cdbs;
	unsigned int *sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
	int rc;

	cpu_cdbs = dbs_data->get_cpu_cdbs(cpu);

	/*
	 * dbs_data->tuners points at either od_dbs_tuners or cs_dbs_tuners;
	 * pick the view that matches the governor owning this dbs_data.
	 */
	if (dbs_data->governor == GOV_CONSERVATIVE) {
		cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
		sampling_rate = &cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice;
	} else {
		od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
		sampling_rate = &od_tuners->sampling_rate;
		ignore_nice = od_tuners->ignore_nice;
	}

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		/* dbs_data->mutex protects the enable count and one-time init. */
		mutex_lock(&dbs_data->mutex);

		dbs_data->enable++;
		cpu_cdbs->cpu = cpu;
		/* Seed per-CPU idle/wall/nice baselines for load sampling. */
		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_common_info *j_cdbs;
			j_cdbs = dbs_data->get_cpu_cdbs(j);

			j_cdbs->cur_policy = policy;
			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
					&j_cdbs->prev_cpu_wall);
			if (ignore_nice)
				j_cdbs->prev_cpu_nice =
					kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		}

		/*
		 * Global (sysfs group, notifier, min sampling rate) setup is
		 * only done the first time this governor is started; later
		 * starts skip straight to the per-policy initialization.
		 */
		if (dbs_data->enable != 1)
			goto second_time;

		rc = sysfs_create_group(cpufreq_global_kobject,
				dbs_data->attr_group);
		if (rc) {
			mutex_unlock(&dbs_data->mutex);
			return rc;
		}

		/* policy latency is in nS. Convert it to uS first */
		latency = policy->cpuinfo.transition_latency / 1000;
		if (latency == 0)
			latency = 1;

		/*
		 * conservative does not implement micro like ondemand
		 * governor, thus we are bound to jiffies/HZ
		 */
		if (dbs_data->governor == GOV_CONSERVATIVE) {
			struct cs_ops *ops = dbs_data->gov_ops;

			/* Track external frequency changes (see cs notifier). */
			cpufreq_register_notifier(ops->notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);

			dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
				jiffies_to_usecs(10);
		} else {
			struct od_ops *ops = dbs_data->gov_ops;

			od_tuners->io_is_busy = ops->io_busy();
		}

		/* Bring kernel and HW constraints together */
		dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
				MIN_LATENCY_MULTIPLIER * latency);
		*sampling_rate = max(dbs_data->min_sampling_rate, latency *
				LATENCY_MULTIPLIER);

second_time:
		/* Per-policy governor state, (re)set on every start. */
		if (dbs_data->governor == GOV_CONSERVATIVE) {
			cs_dbs_info->down_skip = 0;
			cs_dbs_info->enable = 1;
			cs_dbs_info->requested_freq = policy->cur;
		} else {
			struct od_ops *ops = dbs_data->gov_ops;
			od_dbs_info->rate_mult = 1;
			od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
			ops->powersave_bias_init_cpu(cpu);
		}
		mutex_unlock(&dbs_data->mutex);

		/* Timer is armed outside the mutex; it takes timer_mutex. */
		mutex_init(&cpu_cdbs->timer_mutex);
		dbs_timer_init(dbs_data, cpu_cdbs, *sampling_rate);
		break;

	case CPUFREQ_GOV_STOP:
		if (dbs_data->governor == GOV_CONSERVATIVE)
			cs_dbs_info->enable = 0;

		/* Stop sampling before tearing down its mutex below. */
		dbs_timer_exit(cpu_cdbs);

		mutex_lock(&dbs_data->mutex);
		mutex_destroy(&cpu_cdbs->timer_mutex);
		dbs_data->enable--;
		/* Last user gone: undo the one-time GOV_START setup. */
		if (!dbs_data->enable) {
			/*
			 * gov_ops is only dereferenced as cs_ops under the
			 * GOV_CONSERVATIVE check below; for ondemand this
			 * pointer is fetched but never used.
			 */
			struct cs_ops *ops = dbs_data->gov_ops;

			sysfs_remove_group(cpufreq_global_kobject,
					dbs_data->attr_group);
			if (dbs_data->governor == GOV_CONSERVATIVE)
				cpufreq_unregister_notifier(ops->notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
		}
		mutex_unlock(&dbs_data->mutex);

		break;

	case CPUFREQ_GOV_LIMITS:
		/* Clamp the running frequency into the new [min, max] range. */
		mutex_lock(&cpu_cdbs->timer_mutex);
		if (policy->max < cpu_cdbs->cur_policy->cur)
			__cpufreq_driver_target(cpu_cdbs->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > cpu_cdbs->cur_policy->cur)
			__cpufreq_driver_target(cpu_cdbs->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		dbs_check_cpu(dbs_data, cpu);
		mutex_unlock(&cpu_cdbs->timer_mutex);
		break;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h new file mode 100644 index 000000000000..f6616540c53d --- /dev/null +++ b/drivers/cpufreq/cpufreq_governor.h | |||
@@ -0,0 +1,176 @@ | |||
/*
 * drivers/cpufreq/cpufreq_governor.h
 *
 * Header file for CPUFreq governors common code
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *           (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *           (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _CPUFREQ_GOVERNER_H
#define _CPUFREQ_GOVERNER_H

#include <linux/cpufreq.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/sysfs.h>

/*
 * The polling frequency depends on the capability of the processor. Default
 * polling frequency is 1000 times the transition latency of the processor. The
 * governor will work on any processor with transition latency <= 10mS, using
 * appropriate sampling rate.
 *
 * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
 * this governor will not work. All times here are in uS.
 */
#define MIN_SAMPLING_RATE_RATIO		(2)
#define LATENCY_MULTIPLIER		(1000)
#define MIN_LATENCY_MULTIPLIER		(100)
#define TRANSITION_LATENCY_LIMIT	(10 * 1000 * 1000)

/* Ondemand sampling types: regular sample vs powersave-bias sub-sample */
enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};

/* Macro creating sysfs show routines for a governor's tuners */
#define show_one(_gov, file_name, object)			\
static ssize_t show_##file_name					\
(struct kobject *kobj, struct attribute *attr, char *buf)	\
{								\
	return sprintf(buf, "%u\n", _gov##_tuners.object);	\
}

/* Generates the get_cpu_cdbs()/get_cpu_dbs_info_s() accessors for a
 * governor's per-cpu dbs_info variable. */
#define define_get_cpu_dbs_routines(_dbs_info)			\
static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu)	\
{								\
	return &per_cpu(_dbs_info, cpu).cdbs;			\
}								\
								\
static void *get_cpu_dbs_info_s(int cpu)			\
{								\
	return &per_cpu(_dbs_info, cpu);			\
}

/*
 * Abbreviations:
 * dbs: used as a shortform for demand based switching. It helps to keep
 *	variable names smaller, simpler
 * cdbs: common dbs
 * od_*: On-demand governor
 * cs_*: Conservative governor
 */

/* Per cpu structures */
struct cpu_dbs_common_info {
	int cpu;			/* CPU this bookkeeping belongs to */
	u64 prev_cpu_idle;		/* idle time at last sample, in us */
	u64 prev_cpu_wall;		/* wall time at last sample, in us */
	u64 prev_cpu_nice;		/* kcpustat nice time at last sample */
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;	/* periodic sampling work */
	/*
	 * percpu mutex that serializes governor limit change with gov_dbs_timer
	 * invocation. We do not want gov_dbs_timer to run when user is changing
	 * the governor or limits.
	 */
	struct mutex timer_mutex;
};

/* Ondemand per-cpu state, embedding the common part */
struct od_cpu_dbs_info_s {
	struct cpu_dbs_common_info cdbs;
	u64 prev_cpu_iowait;		/* iowait time at last sample */
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;		/* powersave-bias low frequency */
	unsigned int freq_lo_jiffies;	/* time to spend at freq_lo */
	unsigned int freq_hi_jiffies;	/* time to spend at the high freq */
	unsigned int rate_mult;		/* sampling_down_factor multiplier */
	unsigned int sample_type:1;	/* OD_NORMAL_SAMPLE or OD_SUB_SAMPLE */
};

/* Conservative per-cpu state, embedding the common part */
struct cs_cpu_dbs_info_s {
	struct cpu_dbs_common_info cdbs;
	unsigned int down_skip;
	unsigned int requested_freq;
	unsigned int enable:1;
};

/* Governors' sysfs tunables */
struct od_dbs_tuners {
	unsigned int ignore_nice;
	unsigned int sampling_rate;
	unsigned int sampling_down_factor;
	unsigned int up_threshold;
	unsigned int down_differential;
	unsigned int powersave_bias;
	unsigned int io_is_busy;
};

struct cs_dbs_tuners {
	unsigned int ignore_nice;
	unsigned int sampling_rate;
	unsigned int sampling_down_factor;
	unsigned int up_threshold;
	unsigned int down_threshold;
	unsigned int freq_step;
};

/* Per-governor data, shared by all policies using that governor */
struct dbs_data {
	/* Common across governors */
#define GOV_ONDEMAND		0
#define GOV_CONSERVATIVE	1
	int governor;			/* GOV_ONDEMAND or GOV_CONSERVATIVE */
	unsigned int min_sampling_rate;
	unsigned int enable;		/* number of CPUs using this policy */
	struct attribute_group *attr_group;
	void *tuners;			/* od_dbs_tuners or cs_dbs_tuners */

	/* dbs_mutex protects dbs_enable in governor start/stop */
	struct mutex mutex;

	struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
	void *(*get_cpu_dbs_info_s)(int cpu);
	void (*gov_dbs_timer)(struct work_struct *work);
	void (*gov_check_cpu)(int cpu, unsigned int load);

	/* Governor specific ops, see below */
	void *gov_ops;
};

/* Governor specific ops, will be passed to dbs_data->gov_ops */
struct od_ops {
	int (*io_busy)(void);
	void (*powersave_bias_init_cpu)(int cpu);
	unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
			unsigned int freq_next, unsigned int relation);
	void (*freq_increase)(struct cpufreq_policy *p, unsigned int freq);
};

struct cs_ops {
	struct notifier_block *notifier_block;
};

/* Convert a sampling rate in us to a work delay in jiffies. */
static inline int delay_for_sampling_rate(unsigned int sampling_rate)
{
	int delay = usecs_to_jiffies(sampling_rate);

	/* We want all CPUs to do sampling nearly on same jiffy */
	if (num_online_cpus() > 1)
		delay -= jiffies % delay;

	return delay;
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall);
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
int cpufreq_governor_dbs(struct dbs_data *dbs_data,
		struct cpufreq_policy *policy, unsigned int event);
#endif /* _CPUFREQ_GOVERNER_H */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 396322f2a83f..7731f7c7e79a 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c | |||
@@ -10,24 +10,23 @@ | |||
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/kernel.h> | 13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
14 | #include <linux/module.h> | 14 | |
15 | #include <linux/init.h> | ||
16 | #include <linux/cpufreq.h> | 15 | #include <linux/cpufreq.h> |
17 | #include <linux/cpu.h> | 16 | #include <linux/init.h> |
18 | #include <linux/jiffies.h> | 17 | #include <linux/kernel.h> |
19 | #include <linux/kernel_stat.h> | 18 | #include <linux/kernel_stat.h> |
19 | #include <linux/kobject.h> | ||
20 | #include <linux/module.h> | ||
20 | #include <linux/mutex.h> | 21 | #include <linux/mutex.h> |
21 | #include <linux/hrtimer.h> | 22 | #include <linux/percpu-defs.h> |
23 | #include <linux/sysfs.h> | ||
22 | #include <linux/tick.h> | 24 | #include <linux/tick.h> |
23 | #include <linux/ktime.h> | 25 | #include <linux/types.h> |
24 | #include <linux/sched.h> | ||
25 | 26 | ||
26 | /* | 27 | #include "cpufreq_governor.h" |
27 | * dbs is used in this file as a shortform for demandbased switching | ||
28 | * It helps to keep variable names smaller, simpler | ||
29 | */ | ||
30 | 28 | ||
29 | /* On-demand governor macros */ | ||
31 | #define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) | 30 | #define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) |
32 | #define DEF_FREQUENCY_UP_THRESHOLD (80) | 31 | #define DEF_FREQUENCY_UP_THRESHOLD (80) |
33 | #define DEF_SAMPLING_DOWN_FACTOR (1) | 32 | #define DEF_SAMPLING_DOWN_FACTOR (1) |
@@ -38,80 +37,14 @@ | |||
38 | #define MIN_FREQUENCY_UP_THRESHOLD (11) | 37 | #define MIN_FREQUENCY_UP_THRESHOLD (11) |
39 | #define MAX_FREQUENCY_UP_THRESHOLD (100) | 38 | #define MAX_FREQUENCY_UP_THRESHOLD (100) |
40 | 39 | ||
41 | /* | 40 | static struct dbs_data od_dbs_data; |
42 | * The polling frequency of this governor depends on the capability of | 41 | static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info); |
43 | * the processor. Default polling frequency is 1000 times the transition | ||
44 | * latency of the processor. The governor will work on any processor with | ||
45 | * transition latency <= 10mS, using appropriate sampling | ||
46 | * rate. | ||
47 | * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) | ||
48 | * this governor will not work. | ||
49 | * All times here are in uS. | ||
50 | */ | ||
51 | #define MIN_SAMPLING_RATE_RATIO (2) | ||
52 | |||
53 | static unsigned int min_sampling_rate; | ||
54 | |||
55 | #define LATENCY_MULTIPLIER (1000) | ||
56 | #define MIN_LATENCY_MULTIPLIER (100) | ||
57 | #define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) | ||
58 | |||
59 | static void do_dbs_timer(struct work_struct *work); | ||
60 | static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | ||
61 | unsigned int event); | ||
62 | 42 | ||
63 | #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND | 43 | #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND |
64 | static | 44 | static struct cpufreq_governor cpufreq_gov_ondemand; |
65 | #endif | 45 | #endif |
66 | struct cpufreq_governor cpufreq_gov_ondemand = { | ||
67 | .name = "ondemand", | ||
68 | .governor = cpufreq_governor_dbs, | ||
69 | .max_transition_latency = TRANSITION_LATENCY_LIMIT, | ||
70 | .owner = THIS_MODULE, | ||
71 | }; | ||
72 | 46 | ||
73 | /* Sampling types */ | 47 | static struct od_dbs_tuners od_tuners = { |
74 | enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; | ||
75 | |||
76 | struct cpu_dbs_info_s { | ||
77 | cputime64_t prev_cpu_idle; | ||
78 | cputime64_t prev_cpu_iowait; | ||
79 | cputime64_t prev_cpu_wall; | ||
80 | cputime64_t prev_cpu_nice; | ||
81 | struct cpufreq_policy *cur_policy; | ||
82 | struct delayed_work work; | ||
83 | struct cpufreq_frequency_table *freq_table; | ||
84 | unsigned int freq_lo; | ||
85 | unsigned int freq_lo_jiffies; | ||
86 | unsigned int freq_hi_jiffies; | ||
87 | unsigned int rate_mult; | ||
88 | int cpu; | ||
89 | unsigned int sample_type:1; | ||
90 | /* | ||
91 | * percpu mutex that serializes governor limit change with | ||
92 | * do_dbs_timer invocation. We do not want do_dbs_timer to run | ||
93 | * when user is changing the governor or limits. | ||
94 | */ | ||
95 | struct mutex timer_mutex; | ||
96 | }; | ||
97 | static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); | ||
98 | |||
99 | static unsigned int dbs_enable; /* number of CPUs using this policy */ | ||
100 | |||
101 | /* | ||
102 | * dbs_mutex protects dbs_enable in governor start/stop. | ||
103 | */ | ||
104 | static DEFINE_MUTEX(dbs_mutex); | ||
105 | |||
106 | static struct dbs_tuners { | ||
107 | unsigned int sampling_rate; | ||
108 | unsigned int up_threshold; | ||
109 | unsigned int down_differential; | ||
110 | unsigned int ignore_nice; | ||
111 | unsigned int sampling_down_factor; | ||
112 | unsigned int powersave_bias; | ||
113 | unsigned int io_is_busy; | ||
114 | } dbs_tuners_ins = { | ||
115 | .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, | 48 | .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, |
116 | .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, | 49 | .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, |
117 | .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, | 50 | .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, |
@@ -119,48 +52,35 @@ static struct dbs_tuners { | |||
119 | .powersave_bias = 0, | 52 | .powersave_bias = 0, |
120 | }; | 53 | }; |
121 | 54 | ||
122 | static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) | 55 | static void ondemand_powersave_bias_init_cpu(int cpu) |
123 | { | ||
124 | u64 idle_time; | ||
125 | u64 cur_wall_time; | ||
126 | u64 busy_time; | ||
127 | |||
128 | cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); | ||
129 | |||
130 | busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; | ||
131 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; | ||
132 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; | ||
133 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; | ||
134 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; | ||
135 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; | ||
136 | |||
137 | idle_time = cur_wall_time - busy_time; | ||
138 | if (wall) | ||
139 | *wall = jiffies_to_usecs(cur_wall_time); | ||
140 | |||
141 | return jiffies_to_usecs(idle_time); | ||
142 | } | ||
143 | |||
144 | static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) | ||
145 | { | 56 | { |
146 | u64 idle_time = get_cpu_idle_time_us(cpu, NULL); | 57 | struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); |
147 | |||
148 | if (idle_time == -1ULL) | ||
149 | return get_cpu_idle_time_jiffy(cpu, wall); | ||
150 | else | ||
151 | idle_time += get_cpu_iowait_time_us(cpu, wall); | ||
152 | 58 | ||
153 | return idle_time; | 59 | dbs_info->freq_table = cpufreq_frequency_get_table(cpu); |
60 | dbs_info->freq_lo = 0; | ||
154 | } | 61 | } |
155 | 62 | ||
156 | static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) | 63 | /* |
64 | * Not all CPUs want IO time to be accounted as busy; this depends on how | ||
65 | * efficient idling at a higher frequency/voltage is. | ||
66 | * Pavel Machek says this is not so for various generations of AMD and old | ||
67 | * Intel systems. | ||
68 | * Mike Chan (android.com) claims this is also not true for ARM. | ||
69 | * Because of this, whitelist specific known (series) of CPUs by default, and | ||
70 | * leave all others up to the user. | ||
71 | */ | ||
72 | static int should_io_be_busy(void) | ||
157 | { | 73 | { |
158 | u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); | 74 | #if defined(CONFIG_X86) |
159 | 75 | /* | |
160 | if (iowait_time == -1ULL) | 76 | * For Intel, Core 2 (model 15) and later have an efficient idle. |
161 | return 0; | 77 | */ |
162 | 78 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | |
163 | return iowait_time; | 79 | boot_cpu_data.x86 == 6 && |
80 | boot_cpu_data.x86_model >= 15) | ||
81 | return 1; | ||
82 | #endif | ||
83 | return 0; | ||
164 | } | 84 | } |
165 | 85 | ||
166 | /* | 86 | /* |
@@ -169,14 +89,13 @@ static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wal | |||
169 | * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. | 89 | * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. |
170 | */ | 90 | */ |
171 | static unsigned int powersave_bias_target(struct cpufreq_policy *policy, | 91 | static unsigned int powersave_bias_target(struct cpufreq_policy *policy, |
172 | unsigned int freq_next, | 92 | unsigned int freq_next, unsigned int relation) |
173 | unsigned int relation) | ||
174 | { | 93 | { |
175 | unsigned int freq_req, freq_reduc, freq_avg; | 94 | unsigned int freq_req, freq_reduc, freq_avg; |
176 | unsigned int freq_hi, freq_lo; | 95 | unsigned int freq_hi, freq_lo; |
177 | unsigned int index = 0; | 96 | unsigned int index = 0; |
178 | unsigned int jiffies_total, jiffies_hi, jiffies_lo; | 97 | unsigned int jiffies_total, jiffies_hi, jiffies_lo; |
179 | struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, | 98 | struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, |
180 | policy->cpu); | 99 | policy->cpu); |
181 | 100 | ||
182 | if (!dbs_info->freq_table) { | 101 | if (!dbs_info->freq_table) { |
@@ -188,7 +107,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, | |||
188 | cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, | 107 | cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, |
189 | relation, &index); | 108 | relation, &index); |
190 | freq_req = dbs_info->freq_table[index].frequency; | 109 | freq_req = dbs_info->freq_table[index].frequency; |
191 | freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000; | 110 | freq_reduc = freq_req * od_tuners.powersave_bias / 1000; |
192 | freq_avg = freq_req - freq_reduc; | 111 | freq_avg = freq_req - freq_reduc; |
193 | 112 | ||
194 | /* Find freq bounds for freq_avg in freq_table */ | 113 | /* Find freq bounds for freq_avg in freq_table */ |
@@ -207,7 +126,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, | |||
207 | dbs_info->freq_lo_jiffies = 0; | 126 | dbs_info->freq_lo_jiffies = 0; |
208 | return freq_lo; | 127 | return freq_lo; |
209 | } | 128 | } |
210 | jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); | 129 | jiffies_total = usecs_to_jiffies(od_tuners.sampling_rate); |
211 | jiffies_hi = (freq_avg - freq_lo) * jiffies_total; | 130 | jiffies_hi = (freq_avg - freq_lo) * jiffies_total; |
212 | jiffies_hi += ((freq_hi - freq_lo) / 2); | 131 | jiffies_hi += ((freq_hi - freq_lo) / 2); |
213 | jiffies_hi /= (freq_hi - freq_lo); | 132 | jiffies_hi /= (freq_hi - freq_lo); |
@@ -218,13 +137,6 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, | |||
218 | return freq_hi; | 137 | return freq_hi; |
219 | } | 138 | } |
220 | 139 | ||
221 | static void ondemand_powersave_bias_init_cpu(int cpu) | ||
222 | { | ||
223 | struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); | ||
224 | dbs_info->freq_table = cpufreq_frequency_get_table(cpu); | ||
225 | dbs_info->freq_lo = 0; | ||
226 | } | ||
227 | |||
228 | static void ondemand_powersave_bias_init(void) | 140 | static void ondemand_powersave_bias_init(void) |
229 | { | 141 | { |
230 | int i; | 142 | int i; |
@@ -233,83 +145,173 @@ static void ondemand_powersave_bias_init(void) | |||
233 | } | 145 | } |
234 | } | 146 | } |
235 | 147 | ||
236 | /************************** sysfs interface ************************/ | 148 | static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) |
149 | { | ||
150 | if (od_tuners.powersave_bias) | ||
151 | freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); | ||
152 | else if (p->cur == p->max) | ||
153 | return; | ||
237 | 154 | ||
238 | static ssize_t show_sampling_rate_min(struct kobject *kobj, | 155 | __cpufreq_driver_target(p, freq, od_tuners.powersave_bias ? |
239 | struct attribute *attr, char *buf) | 156 | CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); |
157 | } | ||
158 | |||
159 | /* | ||
160 | * Every sampling_rate, we check, if current idle time is less than 20% | ||
161 | * (default), then we try to increase frequency Every sampling_rate, we look for | ||
162 | * a the lowest frequency which can sustain the load while keeping idle time | ||
163 | * over 30%. If such a frequency exist, we try to decrease to this frequency. | ||
164 | * | ||
165 | * Any frequency increase takes it to the maximum frequency. Frequency reduction | ||
166 | * happens at minimum steps of 5% (default) of current frequency | ||
167 | */ | ||
168 | static void od_check_cpu(int cpu, unsigned int load_freq) | ||
240 | { | 169 | { |
241 | return sprintf(buf, "%u\n", min_sampling_rate); | 170 | struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); |
171 | struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; | ||
172 | |||
173 | dbs_info->freq_lo = 0; | ||
174 | |||
175 | /* Check for frequency increase */ | ||
176 | if (load_freq > od_tuners.up_threshold * policy->cur) { | ||
177 | /* If switching to max speed, apply sampling_down_factor */ | ||
178 | if (policy->cur < policy->max) | ||
179 | dbs_info->rate_mult = | ||
180 | od_tuners.sampling_down_factor; | ||
181 | dbs_freq_increase(policy, policy->max); | ||
182 | return; | ||
183 | } | ||
184 | |||
185 | /* Check for frequency decrease */ | ||
186 | /* if we cannot reduce the frequency anymore, break out early */ | ||
187 | if (policy->cur == policy->min) | ||
188 | return; | ||
189 | |||
190 | /* | ||
191 | * The optimal frequency is the frequency that is the lowest that can | ||
192 | * support the current CPU usage without triggering the up policy. To be | ||
193 | * safe, we focus 10 points under the threshold. | ||
194 | */ | ||
195 | if (load_freq < (od_tuners.up_threshold - od_tuners.down_differential) * | ||
196 | policy->cur) { | ||
197 | unsigned int freq_next; | ||
198 | freq_next = load_freq / (od_tuners.up_threshold - | ||
199 | od_tuners.down_differential); | ||
200 | |||
201 | /* No longer fully busy, reset rate_mult */ | ||
202 | dbs_info->rate_mult = 1; | ||
203 | |||
204 | if (freq_next < policy->min) | ||
205 | freq_next = policy->min; | ||
206 | |||
207 | if (!od_tuners.powersave_bias) { | ||
208 | __cpufreq_driver_target(policy, freq_next, | ||
209 | CPUFREQ_RELATION_L); | ||
210 | } else { | ||
211 | int freq = powersave_bias_target(policy, freq_next, | ||
212 | CPUFREQ_RELATION_L); | ||
213 | __cpufreq_driver_target(policy, freq, | ||
214 | CPUFREQ_RELATION_L); | ||
215 | } | ||
216 | } | ||
242 | } | 217 | } |
243 | 218 | ||
244 | define_one_global_ro(sampling_rate_min); | 219 | static void od_dbs_timer(struct work_struct *work) |
220 | { | ||
221 | struct od_cpu_dbs_info_s *dbs_info = | ||
222 | container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work); | ||
223 | unsigned int cpu = dbs_info->cdbs.cpu; | ||
224 | int delay, sample_type = dbs_info->sample_type; | ||
225 | |||
226 | mutex_lock(&dbs_info->cdbs.timer_mutex); | ||
227 | |||
228 | /* Common NORMAL_SAMPLE setup */ | ||
229 | dbs_info->sample_type = OD_NORMAL_SAMPLE; | ||
230 | if (sample_type == OD_SUB_SAMPLE) { | ||
231 | delay = dbs_info->freq_lo_jiffies; | ||
232 | __cpufreq_driver_target(dbs_info->cdbs.cur_policy, | ||
233 | dbs_info->freq_lo, CPUFREQ_RELATION_H); | ||
234 | } else { | ||
235 | dbs_check_cpu(&od_dbs_data, cpu); | ||
236 | if (dbs_info->freq_lo) { | ||
237 | /* Setup timer for SUB_SAMPLE */ | ||
238 | dbs_info->sample_type = OD_SUB_SAMPLE; | ||
239 | delay = dbs_info->freq_hi_jiffies; | ||
240 | } else { | ||
241 | delay = delay_for_sampling_rate(od_tuners.sampling_rate | ||
242 | * dbs_info->rate_mult); | ||
243 | } | ||
244 | } | ||
245 | |||
246 | schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay); | ||
247 | mutex_unlock(&dbs_info->cdbs.timer_mutex); | ||
248 | } | ||
249 | |||
250 | /************************** sysfs interface ************************/ | ||
245 | 251 | ||
246 | /* cpufreq_ondemand Governor Tunables */ | 252 | static ssize_t show_sampling_rate_min(struct kobject *kobj, |
247 | #define show_one(file_name, object) \ | 253 | struct attribute *attr, char *buf) |
248 | static ssize_t show_##file_name \ | 254 | { |
249 | (struct kobject *kobj, struct attribute *attr, char *buf) \ | 255 | return sprintf(buf, "%u\n", od_dbs_data.min_sampling_rate); |
250 | { \ | ||
251 | return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ | ||
252 | } | 256 | } |
253 | show_one(sampling_rate, sampling_rate); | ||
254 | show_one(io_is_busy, io_is_busy); | ||
255 | show_one(up_threshold, up_threshold); | ||
256 | show_one(sampling_down_factor, sampling_down_factor); | ||
257 | show_one(ignore_nice_load, ignore_nice); | ||
258 | show_one(powersave_bias, powersave_bias); | ||
259 | 257 | ||
260 | /** | 258 | /** |
261 | * update_sampling_rate - update sampling rate effective immediately if needed. | 259 | * update_sampling_rate - update sampling rate effective immediately if needed. |
262 | * @new_rate: new sampling rate | 260 | * @new_rate: new sampling rate |
263 | * | 261 | * |
264 | * If new rate is smaller than the old, simply updaing | 262 | * If new rate is smaller than the old, simply updaing |
265 | * dbs_tuners_int.sampling_rate might not be appropriate. For example, | 263 | * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the |
266 | * if the original sampling_rate was 1 second and the requested new sampling | 264 | * original sampling_rate was 1 second and the requested new sampling rate is 10 |
267 | * rate is 10 ms because the user needs immediate reaction from ondemand | 265 | * ms because the user needs immediate reaction from ondemand governor, but not |
268 | * governor, but not sure if higher frequency will be required or not, | 266 | * sure if higher frequency will be required or not, then, the governor may |
269 | * then, the governor may change the sampling rate too late; up to 1 second | 267 | * change the sampling rate too late; up to 1 second later. Thus, if we are |
270 | * later. Thus, if we are reducing the sampling rate, we need to make the | 268 | * reducing the sampling rate, we need to make the new value effective |
271 | * new value effective immediately. | 269 | * immediately. |
272 | */ | 270 | */ |
273 | static void update_sampling_rate(unsigned int new_rate) | 271 | static void update_sampling_rate(unsigned int new_rate) |
274 | { | 272 | { |
275 | int cpu; | 273 | int cpu; |
276 | 274 | ||
277 | dbs_tuners_ins.sampling_rate = new_rate | 275 | od_tuners.sampling_rate = new_rate = max(new_rate, |
278 | = max(new_rate, min_sampling_rate); | 276 | od_dbs_data.min_sampling_rate); |
279 | 277 | ||
280 | for_each_online_cpu(cpu) { | 278 | for_each_online_cpu(cpu) { |
281 | struct cpufreq_policy *policy; | 279 | struct cpufreq_policy *policy; |
282 | struct cpu_dbs_info_s *dbs_info; | 280 | struct od_cpu_dbs_info_s *dbs_info; |
283 | unsigned long next_sampling, appointed_at; | 281 | unsigned long next_sampling, appointed_at; |
284 | 282 | ||
285 | policy = cpufreq_cpu_get(cpu); | 283 | policy = cpufreq_cpu_get(cpu); |
286 | if (!policy) | 284 | if (!policy) |
287 | continue; | 285 | continue; |
286 | if (policy->governor != &cpufreq_gov_ondemand) { | ||
287 | cpufreq_cpu_put(policy); | ||
288 | continue; | ||
289 | } | ||
288 | dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu); | 290 | dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu); |
289 | cpufreq_cpu_put(policy); | 291 | cpufreq_cpu_put(policy); |
290 | 292 | ||
291 | mutex_lock(&dbs_info->timer_mutex); | 293 | mutex_lock(&dbs_info->cdbs.timer_mutex); |
292 | 294 | ||
293 | if (!delayed_work_pending(&dbs_info->work)) { | 295 | if (!delayed_work_pending(&dbs_info->cdbs.work)) { |
294 | mutex_unlock(&dbs_info->timer_mutex); | 296 | mutex_unlock(&dbs_info->cdbs.timer_mutex); |
295 | continue; | 297 | continue; |
296 | } | 298 | } |
297 | 299 | ||
298 | next_sampling = jiffies + usecs_to_jiffies(new_rate); | 300 | next_sampling = jiffies + usecs_to_jiffies(new_rate); |
299 | appointed_at = dbs_info->work.timer.expires; | 301 | appointed_at = dbs_info->cdbs.work.timer.expires; |
300 | |||
301 | 302 | ||
302 | if (time_before(next_sampling, appointed_at)) { | 303 | if (time_before(next_sampling, appointed_at)) { |
303 | 304 | ||
304 | mutex_unlock(&dbs_info->timer_mutex); | 305 | mutex_unlock(&dbs_info->cdbs.timer_mutex); |
305 | cancel_delayed_work_sync(&dbs_info->work); | 306 | cancel_delayed_work_sync(&dbs_info->cdbs.work); |
306 | mutex_lock(&dbs_info->timer_mutex); | 307 | mutex_lock(&dbs_info->cdbs.timer_mutex); |
307 | 308 | ||
308 | schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, | 309 | schedule_delayed_work_on(dbs_info->cdbs.cpu, |
309 | usecs_to_jiffies(new_rate)); | 310 | &dbs_info->cdbs.work, |
311 | usecs_to_jiffies(new_rate)); | ||
310 | 312 | ||
311 | } | 313 | } |
312 | mutex_unlock(&dbs_info->timer_mutex); | 314 | mutex_unlock(&dbs_info->cdbs.timer_mutex); |
313 | } | 315 | } |
314 | } | 316 | } |
315 | 317 | ||
@@ -334,7 +336,7 @@ static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, | |||
334 | ret = sscanf(buf, "%u", &input); | 336 | ret = sscanf(buf, "%u", &input); |
335 | if (ret != 1) | 337 | if (ret != 1) |
336 | return -EINVAL; | 338 | return -EINVAL; |
337 | dbs_tuners_ins.io_is_busy = !!input; | 339 | od_tuners.io_is_busy = !!input; |
338 | return count; | 340 | return count; |
339 | } | 341 | } |
340 | 342 | ||
@@ -349,7 +351,7 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, | |||
349 | input < MIN_FREQUENCY_UP_THRESHOLD) { | 351 | input < MIN_FREQUENCY_UP_THRESHOLD) { |
350 | return -EINVAL; | 352 | return -EINVAL; |
351 | } | 353 | } |
352 | dbs_tuners_ins.up_threshold = input; | 354 | od_tuners.up_threshold = input; |
353 | return count; | 355 | return count; |
354 | } | 356 | } |
355 | 357 | ||
@@ -362,12 +364,12 @@ static ssize_t store_sampling_down_factor(struct kobject *a, | |||
362 | 364 | ||
363 | if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) | 365 | if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) |
364 | return -EINVAL; | 366 | return -EINVAL; |
365 | dbs_tuners_ins.sampling_down_factor = input; | 367 | od_tuners.sampling_down_factor = input; |
366 | 368 | ||
367 | /* Reset down sampling multiplier in case it was active */ | 369 | /* Reset down sampling multiplier in case it was active */ |
368 | for_each_online_cpu(j) { | 370 | for_each_online_cpu(j) { |
369 | struct cpu_dbs_info_s *dbs_info; | 371 | struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, |
370 | dbs_info = &per_cpu(od_cpu_dbs_info, j); | 372 | j); |
371 | dbs_info->rate_mult = 1; | 373 | dbs_info->rate_mult = 1; |
372 | } | 374 | } |
373 | return count; | 375 | return count; |
@@ -388,19 +390,20 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, | |||
388 | if (input > 1) | 390 | if (input > 1) |
389 | input = 1; | 391 | input = 1; |
390 | 392 | ||
391 | if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ | 393 | if (input == od_tuners.ignore_nice) { /* nothing to do */ |
392 | return count; | 394 | return count; |
393 | } | 395 | } |
394 | dbs_tuners_ins.ignore_nice = input; | 396 | od_tuners.ignore_nice = input; |
395 | 397 | ||
396 | /* we need to re-evaluate prev_cpu_idle */ | 398 | /* we need to re-evaluate prev_cpu_idle */ |
397 | for_each_online_cpu(j) { | 399 | for_each_online_cpu(j) { |
398 | struct cpu_dbs_info_s *dbs_info; | 400 | struct od_cpu_dbs_info_s *dbs_info; |
399 | dbs_info = &per_cpu(od_cpu_dbs_info, j); | 401 | dbs_info = &per_cpu(od_cpu_dbs_info, j); |
400 | dbs_info->prev_cpu_idle = get_cpu_idle_time(j, | 402 | dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, |
401 | &dbs_info->prev_cpu_wall); | 403 | &dbs_info->cdbs.prev_cpu_wall); |
402 | if (dbs_tuners_ins.ignore_nice) | 404 | if (od_tuners.ignore_nice) |
403 | dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | 405 | dbs_info->cdbs.prev_cpu_nice = |
406 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | ||
404 | 407 | ||
405 | } | 408 | } |
406 | return count; | 409 | return count; |
@@ -419,17 +422,25 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, | |||
419 | if (input > 1000) | 422 | if (input > 1000) |
420 | input = 1000; | 423 | input = 1000; |
421 | 424 | ||
422 | dbs_tuners_ins.powersave_bias = input; | 425 | od_tuners.powersave_bias = input; |
423 | ondemand_powersave_bias_init(); | 426 | ondemand_powersave_bias_init(); |
424 | return count; | 427 | return count; |
425 | } | 428 | } |
426 | 429 | ||
430 | show_one(od, sampling_rate, sampling_rate); | ||
431 | show_one(od, io_is_busy, io_is_busy); | ||
432 | show_one(od, up_threshold, up_threshold); | ||
433 | show_one(od, sampling_down_factor, sampling_down_factor); | ||
434 | show_one(od, ignore_nice_load, ignore_nice); | ||
435 | show_one(od, powersave_bias, powersave_bias); | ||
436 | |||
427 | define_one_global_rw(sampling_rate); | 437 | define_one_global_rw(sampling_rate); |
428 | define_one_global_rw(io_is_busy); | 438 | define_one_global_rw(io_is_busy); |
429 | define_one_global_rw(up_threshold); | 439 | define_one_global_rw(up_threshold); |
430 | define_one_global_rw(sampling_down_factor); | 440 | define_one_global_rw(sampling_down_factor); |
431 | define_one_global_rw(ignore_nice_load); | 441 | define_one_global_rw(ignore_nice_load); |
432 | define_one_global_rw(powersave_bias); | 442 | define_one_global_rw(powersave_bias); |
443 | define_one_global_ro(sampling_rate_min); | ||
433 | 444 | ||
434 | static struct attribute *dbs_attributes[] = { | 445 | static struct attribute *dbs_attributes[] = { |
435 | &sampling_rate_min.attr, | 446 | &sampling_rate_min.attr, |
@@ -442,354 +453,71 @@ static struct attribute *dbs_attributes[] = { | |||
442 | NULL | 453 | NULL |
443 | }; | 454 | }; |
444 | 455 | ||
445 | static struct attribute_group dbs_attr_group = { | 456 | static struct attribute_group od_attr_group = { |
446 | .attrs = dbs_attributes, | 457 | .attrs = dbs_attributes, |
447 | .name = "ondemand", | 458 | .name = "ondemand", |
448 | }; | 459 | }; |
449 | 460 | ||
450 | /************************** sysfs end ************************/ | 461 | /************************** sysfs end ************************/ |
451 | 462 | ||
452 | static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) | 463 | define_get_cpu_dbs_routines(od_cpu_dbs_info); |
453 | { | ||
454 | if (dbs_tuners_ins.powersave_bias) | ||
455 | freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); | ||
456 | else if (p->cur == p->max) | ||
457 | return; | ||
458 | |||
459 | __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ? | ||
460 | CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); | ||
461 | } | ||
462 | |||
463 | static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | ||
464 | { | ||
465 | unsigned int max_load_freq; | ||
466 | |||
467 | struct cpufreq_policy *policy; | ||
468 | unsigned int j; | ||
469 | |||
470 | this_dbs_info->freq_lo = 0; | ||
471 | policy = this_dbs_info->cur_policy; | ||
472 | |||
473 | /* | ||
474 | * Every sampling_rate, we check, if current idle time is less | ||
475 | * than 20% (default), then we try to increase frequency | ||
476 | * Every sampling_rate, we look for a the lowest | ||
477 | * frequency which can sustain the load while keeping idle time over | ||
478 | * 30%. If such a frequency exist, we try to decrease to this frequency. | ||
479 | * | ||
480 | * Any frequency increase takes it to the maximum frequency. | ||
481 | * Frequency reduction happens at minimum steps of | ||
482 | * 5% (default) of current frequency | ||
483 | */ | ||
484 | |||
485 | /* Get Absolute Load - in terms of freq */ | ||
486 | max_load_freq = 0; | ||
487 | |||
488 | for_each_cpu(j, policy->cpus) { | ||
489 | struct cpu_dbs_info_s *j_dbs_info; | ||
490 | cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; | ||
491 | unsigned int idle_time, wall_time, iowait_time; | ||
492 | unsigned int load, load_freq; | ||
493 | int freq_avg; | ||
494 | |||
495 | j_dbs_info = &per_cpu(od_cpu_dbs_info, j); | ||
496 | |||
497 | cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); | ||
498 | cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); | ||
499 | |||
500 | wall_time = (unsigned int) | ||
501 | (cur_wall_time - j_dbs_info->prev_cpu_wall); | ||
502 | j_dbs_info->prev_cpu_wall = cur_wall_time; | ||
503 | |||
504 | idle_time = (unsigned int) | ||
505 | (cur_idle_time - j_dbs_info->prev_cpu_idle); | ||
506 | j_dbs_info->prev_cpu_idle = cur_idle_time; | ||
507 | |||
508 | iowait_time = (unsigned int) | ||
509 | (cur_iowait_time - j_dbs_info->prev_cpu_iowait); | ||
510 | j_dbs_info->prev_cpu_iowait = cur_iowait_time; | ||
511 | |||
512 | if (dbs_tuners_ins.ignore_nice) { | ||
513 | u64 cur_nice; | ||
514 | unsigned long cur_nice_jiffies; | ||
515 | |||
516 | cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - | ||
517 | j_dbs_info->prev_cpu_nice; | ||
518 | /* | ||
519 | * Assumption: nice time between sampling periods will | ||
520 | * be less than 2^32 jiffies for 32 bit sys | ||
521 | */ | ||
522 | cur_nice_jiffies = (unsigned long) | ||
523 | cputime64_to_jiffies64(cur_nice); | ||
524 | |||
525 | j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | ||
526 | idle_time += jiffies_to_usecs(cur_nice_jiffies); | ||
527 | } | ||
528 | |||
529 | /* | ||
530 | * For the purpose of ondemand, waiting for disk IO is an | ||
531 | * indication that you're performance critical, and not that | ||
532 | * the system is actually idle. So subtract the iowait time | ||
533 | * from the cpu idle time. | ||
534 | */ | ||
535 | |||
536 | if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) | ||
537 | idle_time -= iowait_time; | ||
538 | |||
539 | if (unlikely(!wall_time || wall_time < idle_time)) | ||
540 | continue; | ||
541 | |||
542 | load = 100 * (wall_time - idle_time) / wall_time; | ||
543 | |||
544 | freq_avg = __cpufreq_driver_getavg(policy, j); | ||
545 | if (freq_avg <= 0) | ||
546 | freq_avg = policy->cur; | ||
547 | |||
548 | load_freq = load * freq_avg; | ||
549 | if (load_freq > max_load_freq) | ||
550 | max_load_freq = load_freq; | ||
551 | } | ||
552 | 464 | ||
553 | /* Check for frequency increase */ | 465 | static struct od_ops od_ops = { |
554 | if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) { | 466 | .io_busy = should_io_be_busy, |
555 | /* If switching to max speed, apply sampling_down_factor */ | 467 | .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu, |
556 | if (policy->cur < policy->max) | 468 | .powersave_bias_target = powersave_bias_target, |
557 | this_dbs_info->rate_mult = | 469 | .freq_increase = dbs_freq_increase, |
558 | dbs_tuners_ins.sampling_down_factor; | 470 | }; |
559 | dbs_freq_increase(policy, policy->max); | ||
560 | return; | ||
561 | } | ||
562 | |||
563 | /* Check for frequency decrease */ | ||
564 | /* if we cannot reduce the frequency anymore, break out early */ | ||
565 | if (policy->cur == policy->min) | ||
566 | return; | ||
567 | |||
568 | /* | ||
569 | * The optimal frequency is the frequency that is the lowest that | ||
570 | * can support the current CPU usage without triggering the up | ||
571 | * policy. To be safe, we focus 10 points under the threshold. | ||
572 | */ | ||
573 | if (max_load_freq < | ||
574 | (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * | ||
575 | policy->cur) { | ||
576 | unsigned int freq_next; | ||
577 | freq_next = max_load_freq / | ||
578 | (dbs_tuners_ins.up_threshold - | ||
579 | dbs_tuners_ins.down_differential); | ||
580 | |||
581 | /* No longer fully busy, reset rate_mult */ | ||
582 | this_dbs_info->rate_mult = 1; | ||
583 | |||
584 | if (freq_next < policy->min) | ||
585 | freq_next = policy->min; | ||
586 | |||
587 | if (!dbs_tuners_ins.powersave_bias) { | ||
588 | __cpufreq_driver_target(policy, freq_next, | ||
589 | CPUFREQ_RELATION_L); | ||
590 | } else { | ||
591 | int freq = powersave_bias_target(policy, freq_next, | ||
592 | CPUFREQ_RELATION_L); | ||
593 | __cpufreq_driver_target(policy, freq, | ||
594 | CPUFREQ_RELATION_L); | ||
595 | } | ||
596 | } | ||
597 | } | ||
598 | |||
599 | static void do_dbs_timer(struct work_struct *work) | ||
600 | { | ||
601 | struct cpu_dbs_info_s *dbs_info = | ||
602 | container_of(work, struct cpu_dbs_info_s, work.work); | ||
603 | unsigned int cpu = dbs_info->cpu; | ||
604 | int sample_type = dbs_info->sample_type; | ||
605 | |||
606 | int delay; | ||
607 | |||
608 | mutex_lock(&dbs_info->timer_mutex); | ||
609 | |||
610 | /* Common NORMAL_SAMPLE setup */ | ||
611 | dbs_info->sample_type = DBS_NORMAL_SAMPLE; | ||
612 | if (!dbs_tuners_ins.powersave_bias || | ||
613 | sample_type == DBS_NORMAL_SAMPLE) { | ||
614 | dbs_check_cpu(dbs_info); | ||
615 | if (dbs_info->freq_lo) { | ||
616 | /* Setup timer for SUB_SAMPLE */ | ||
617 | dbs_info->sample_type = DBS_SUB_SAMPLE; | ||
618 | delay = dbs_info->freq_hi_jiffies; | ||
619 | } else { | ||
620 | /* We want all CPUs to do sampling nearly on | ||
621 | * same jiffy | ||
622 | */ | ||
623 | delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate | ||
624 | * dbs_info->rate_mult); | ||
625 | |||
626 | if (num_online_cpus() > 1) | ||
627 | delay -= jiffies % delay; | ||
628 | } | ||
629 | } else { | ||
630 | __cpufreq_driver_target(dbs_info->cur_policy, | ||
631 | dbs_info->freq_lo, CPUFREQ_RELATION_H); | ||
632 | delay = dbs_info->freq_lo_jiffies; | ||
633 | } | ||
634 | schedule_delayed_work_on(cpu, &dbs_info->work, delay); | ||
635 | mutex_unlock(&dbs_info->timer_mutex); | ||
636 | } | ||
637 | |||
638 | static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) | ||
639 | { | ||
640 | /* We want all CPUs to do sampling nearly on same jiffy */ | ||
641 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); | ||
642 | |||
643 | if (num_online_cpus() > 1) | ||
644 | delay -= jiffies % delay; | ||
645 | 471 | ||
646 | dbs_info->sample_type = DBS_NORMAL_SAMPLE; | 472 | static struct dbs_data od_dbs_data = { |
647 | INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer); | 473 | .governor = GOV_ONDEMAND, |
648 | schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); | 474 | .attr_group = &od_attr_group, |
649 | } | 475 | .tuners = &od_tuners, |
476 | .get_cpu_cdbs = get_cpu_cdbs, | ||
477 | .get_cpu_dbs_info_s = get_cpu_dbs_info_s, | ||
478 | .gov_dbs_timer = od_dbs_timer, | ||
479 | .gov_check_cpu = od_check_cpu, | ||
480 | .gov_ops = &od_ops, | ||
481 | }; | ||
650 | 482 | ||
651 | static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) | 483 | static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy, |
484 | unsigned int event) | ||
652 | { | 485 | { |
653 | cancel_delayed_work_sync(&dbs_info->work); | 486 | return cpufreq_governor_dbs(&od_dbs_data, policy, event); |
654 | } | 487 | } |
655 | 488 | ||
656 | /* | 489 | #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND |
657 | * Not all CPUs want IO time to be accounted as busy; this dependson how | 490 | static |
658 | * efficient idling at a higher frequency/voltage is. | ||
659 | * Pavel Machek says this is not so for various generations of AMD and old | ||
660 | * Intel systems. | ||
661 | * Mike Chan (androidlcom) calis this is also not true for ARM. | ||
662 | * Because of this, whitelist specific known (series) of CPUs by default, and | ||
663 | * leave all others up to the user. | ||
664 | */ | ||
665 | static int should_io_be_busy(void) | ||
666 | { | ||
667 | #if defined(CONFIG_X86) | ||
668 | /* | ||
669 | * For Intel, Core 2 (model 15) andl later have an efficient idle. | ||
670 | */ | ||
671 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | ||
672 | boot_cpu_data.x86 == 6 && | ||
673 | boot_cpu_data.x86_model >= 15) | ||
674 | return 1; | ||
675 | #endif | 491 | #endif |
676 | return 0; | 492 | struct cpufreq_governor cpufreq_gov_ondemand = { |
677 | } | 493 | .name = "ondemand", |
678 | 494 | .governor = od_cpufreq_governor_dbs, | |
679 | static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | 495 | .max_transition_latency = TRANSITION_LATENCY_LIMIT, |
680 | unsigned int event) | 496 | .owner = THIS_MODULE, |
681 | { | 497 | }; |
682 | unsigned int cpu = policy->cpu; | ||
683 | struct cpu_dbs_info_s *this_dbs_info; | ||
684 | unsigned int j; | ||
685 | int rc; | ||
686 | |||
687 | this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); | ||
688 | |||
689 | switch (event) { | ||
690 | case CPUFREQ_GOV_START: | ||
691 | if ((!cpu_online(cpu)) || (!policy->cur)) | ||
692 | return -EINVAL; | ||
693 | |||
694 | mutex_lock(&dbs_mutex); | ||
695 | |||
696 | dbs_enable++; | ||
697 | for_each_cpu(j, policy->cpus) { | ||
698 | struct cpu_dbs_info_s *j_dbs_info; | ||
699 | j_dbs_info = &per_cpu(od_cpu_dbs_info, j); | ||
700 | j_dbs_info->cur_policy = policy; | ||
701 | |||
702 | j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, | ||
703 | &j_dbs_info->prev_cpu_wall); | ||
704 | if (dbs_tuners_ins.ignore_nice) | ||
705 | j_dbs_info->prev_cpu_nice = | ||
706 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | ||
707 | } | ||
708 | this_dbs_info->cpu = cpu; | ||
709 | this_dbs_info->rate_mult = 1; | ||
710 | ondemand_powersave_bias_init_cpu(cpu); | ||
711 | /* | ||
712 | * Start the timerschedule work, when this governor | ||
713 | * is used for first time | ||
714 | */ | ||
715 | if (dbs_enable == 1) { | ||
716 | unsigned int latency; | ||
717 | |||
718 | rc = sysfs_create_group(cpufreq_global_kobject, | ||
719 | &dbs_attr_group); | ||
720 | if (rc) { | ||
721 | mutex_unlock(&dbs_mutex); | ||
722 | return rc; | ||
723 | } | ||
724 | |||
725 | /* policy latency is in nS. Convert it to uS first */ | ||
726 | latency = policy->cpuinfo.transition_latency / 1000; | ||
727 | if (latency == 0) | ||
728 | latency = 1; | ||
729 | /* Bring kernel and HW constraints together */ | ||
730 | min_sampling_rate = max(min_sampling_rate, | ||
731 | MIN_LATENCY_MULTIPLIER * latency); | ||
732 | dbs_tuners_ins.sampling_rate = | ||
733 | max(min_sampling_rate, | ||
734 | latency * LATENCY_MULTIPLIER); | ||
735 | dbs_tuners_ins.io_is_busy = should_io_be_busy(); | ||
736 | } | ||
737 | mutex_unlock(&dbs_mutex); | ||
738 | |||
739 | mutex_init(&this_dbs_info->timer_mutex); | ||
740 | dbs_timer_init(this_dbs_info); | ||
741 | break; | ||
742 | |||
743 | case CPUFREQ_GOV_STOP: | ||
744 | dbs_timer_exit(this_dbs_info); | ||
745 | |||
746 | mutex_lock(&dbs_mutex); | ||
747 | mutex_destroy(&this_dbs_info->timer_mutex); | ||
748 | dbs_enable--; | ||
749 | mutex_unlock(&dbs_mutex); | ||
750 | if (!dbs_enable) | ||
751 | sysfs_remove_group(cpufreq_global_kobject, | ||
752 | &dbs_attr_group); | ||
753 | |||
754 | break; | ||
755 | |||
756 | case CPUFREQ_GOV_LIMITS: | ||
757 | mutex_lock(&this_dbs_info->timer_mutex); | ||
758 | if (policy->max < this_dbs_info->cur_policy->cur) | ||
759 | __cpufreq_driver_target(this_dbs_info->cur_policy, | ||
760 | policy->max, CPUFREQ_RELATION_H); | ||
761 | else if (policy->min > this_dbs_info->cur_policy->cur) | ||
762 | __cpufreq_driver_target(this_dbs_info->cur_policy, | ||
763 | policy->min, CPUFREQ_RELATION_L); | ||
764 | dbs_check_cpu(this_dbs_info); | ||
765 | mutex_unlock(&this_dbs_info->timer_mutex); | ||
766 | break; | ||
767 | } | ||
768 | return 0; | ||
769 | } | ||
770 | 498 | ||
771 | static int __init cpufreq_gov_dbs_init(void) | 499 | static int __init cpufreq_gov_dbs_init(void) |
772 | { | 500 | { |
773 | u64 idle_time; | 501 | u64 idle_time; |
774 | int cpu = get_cpu(); | 502 | int cpu = get_cpu(); |
775 | 503 | ||
504 | mutex_init(&od_dbs_data.mutex); | ||
776 | idle_time = get_cpu_idle_time_us(cpu, NULL); | 505 | idle_time = get_cpu_idle_time_us(cpu, NULL); |
777 | put_cpu(); | 506 | put_cpu(); |
778 | if (idle_time != -1ULL) { | 507 | if (idle_time != -1ULL) { |
779 | /* Idle micro accounting is supported. Use finer thresholds */ | 508 | /* Idle micro accounting is supported. Use finer thresholds */ |
780 | dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; | 509 | od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; |
781 | dbs_tuners_ins.down_differential = | 510 | od_tuners.down_differential = MICRO_FREQUENCY_DOWN_DIFFERENTIAL; |
782 | MICRO_FREQUENCY_DOWN_DIFFERENTIAL; | ||
783 | /* | 511 | /* |
784 | * In nohz/micro accounting case we set the minimum frequency | 512 | * In nohz/micro accounting case we set the minimum frequency |
785 | * not depending on HZ, but fixed (very low). The deferred | 513 | * not depending on HZ, but fixed (very low). The deferred |
786 | * timer might skip some samples if idle/sleeping as needed. | 514 | * timer might skip some samples if idle/sleeping as needed. |
787 | */ | 515 | */ |
788 | min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; | 516 | od_dbs_data.min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; |
789 | } else { | 517 | } else { |
790 | /* For correct statistics, we need 10 ticks for each measure */ | 518 | /* For correct statistics, we need 10 ticks for each measure */ |
791 | min_sampling_rate = | 519 | od_dbs_data.min_sampling_rate = MIN_SAMPLING_RATE_RATIO * |
792 | MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); | 520 | jiffies_to_usecs(10); |
793 | } | 521 | } |
794 | 522 | ||
795 | return cpufreq_register_governor(&cpufreq_gov_ondemand); | 523 | return cpufreq_register_governor(&cpufreq_gov_ondemand); |
@@ -800,7 +528,6 @@ static void __exit cpufreq_gov_dbs_exit(void) | |||
800 | cpufreq_unregister_governor(&cpufreq_gov_ondemand); | 528 | cpufreq_unregister_governor(&cpufreq_gov_ondemand); |
801 | } | 529 | } |
802 | 530 | ||
803 | |||
804 | MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>"); | 531 | MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>"); |
805 | MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>"); | 532 | MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>"); |
806 | MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for " | 533 | MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for " |
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c index f13a8a9af6a1..ceee06849b91 100644 --- a/drivers/cpufreq/cpufreq_performance.c +++ b/drivers/cpufreq/cpufreq_performance.c | |||
@@ -10,6 +10,8 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
14 | |||
13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
14 | #include <linux/module.h> | 16 | #include <linux/module.h> |
15 | #include <linux/cpufreq.h> | 17 | #include <linux/cpufreq.h> |
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c index 4c2eb512f2bc..2d948a171155 100644 --- a/drivers/cpufreq/cpufreq_powersave.c +++ b/drivers/cpufreq/cpufreq_powersave.c | |||
@@ -10,6 +10,8 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
14 | |||
13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
14 | #include <linux/module.h> | 16 | #include <linux/module.h> |
15 | #include <linux/cpufreq.h> | 17 | #include <linux/cpufreq.h> |
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 399831690fed..e40e50809644 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c | |||
@@ -37,7 +37,7 @@ struct cpufreq_stats { | |||
37 | unsigned int max_state; | 37 | unsigned int max_state; |
38 | unsigned int state_num; | 38 | unsigned int state_num; |
39 | unsigned int last_index; | 39 | unsigned int last_index; |
40 | cputime64_t *time_in_state; | 40 | u64 *time_in_state; |
41 | unsigned int *freq_table; | 41 | unsigned int *freq_table; |
42 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | 42 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS |
43 | unsigned int *trans_table; | 43 | unsigned int *trans_table; |
@@ -223,7 +223,7 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy, | |||
223 | count++; | 223 | count++; |
224 | } | 224 | } |
225 | 225 | ||
226 | alloc_size = count * sizeof(int) + count * sizeof(cputime64_t); | 226 | alloc_size = count * sizeof(int) + count * sizeof(u64); |
227 | 227 | ||
228 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | 228 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS |
229 | alloc_size += count * count * sizeof(int); | 229 | alloc_size += count * count * sizeof(int); |
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index bedac1aa9be3..c8c3d293cc57 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c | |||
@@ -11,6 +11,8 @@ | |||
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
15 | |||
14 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
15 | #include <linux/module.h> | 17 | #include <linux/module.h> |
16 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index af2d81e10f71..7012ea8bf1e7 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c | |||
@@ -31,13 +31,13 @@ static unsigned int locking_frequency; | |||
31 | static bool frequency_locked; | 31 | static bool frequency_locked; |
32 | static DEFINE_MUTEX(cpufreq_lock); | 32 | static DEFINE_MUTEX(cpufreq_lock); |
33 | 33 | ||
34 | int exynos_verify_speed(struct cpufreq_policy *policy) | 34 | static int exynos_verify_speed(struct cpufreq_policy *policy) |
35 | { | 35 | { |
36 | return cpufreq_frequency_table_verify(policy, | 36 | return cpufreq_frequency_table_verify(policy, |
37 | exynos_info->freq_table); | 37 | exynos_info->freq_table); |
38 | } | 38 | } |
39 | 39 | ||
40 | unsigned int exynos_getspeed(unsigned int cpu) | 40 | static unsigned int exynos_getspeed(unsigned int cpu) |
41 | { | 41 | { |
42 | return clk_get_rate(exynos_info->cpu_clk) / 1000; | 42 | return clk_get_rate(exynos_info->cpu_clk) / 1000; |
43 | } | 43 | } |
@@ -100,7 +100,8 @@ static int exynos_target(struct cpufreq_policy *policy, | |||
100 | } | 100 | } |
101 | arm_volt = volt_table[index]; | 101 | arm_volt = volt_table[index]; |
102 | 102 | ||
103 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 103 | for_each_cpu(freqs.cpu, policy->cpus) |
104 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
104 | 105 | ||
105 | /* When the new frequency is higher than current frequency */ | 106 | /* When the new frequency is higher than current frequency */ |
106 | if ((freqs.new > freqs.old) && !safe_arm_volt) { | 107 | if ((freqs.new > freqs.old) && !safe_arm_volt) { |
@@ -115,7 +116,8 @@ static int exynos_target(struct cpufreq_policy *policy, | |||
115 | if (freqs.new != freqs.old) | 116 | if (freqs.new != freqs.old) |
116 | exynos_info->set_freq(old_index, index); | 117 | exynos_info->set_freq(old_index, index); |
117 | 118 | ||
118 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 119 | for_each_cpu(freqs.cpu, policy->cpus) |
120 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
119 | 121 | ||
120 | /* When the new frequency is lower than current frequency */ | 122 | /* When the new frequency is lower than current frequency */ |
121 | if ((freqs.new < freqs.old) || | 123 | if ((freqs.new < freqs.old) || |
@@ -235,6 +237,7 @@ static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
235 | cpumask_copy(policy->related_cpus, cpu_possible_mask); | 237 | cpumask_copy(policy->related_cpus, cpu_possible_mask); |
236 | cpumask_copy(policy->cpus, cpu_online_mask); | 238 | cpumask_copy(policy->cpus, cpu_online_mask); |
237 | } else { | 239 | } else { |
240 | policy->shared_type = CPUFREQ_SHARED_TYPE_ANY; | ||
238 | cpumask_setall(policy->cpus); | 241 | cpumask_setall(policy->cpus); |
239 | } | 242 | } |
240 | 243 | ||
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index 90431cb92804..49cda256efb2 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c | |||
@@ -9,6 +9,8 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
12 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
13 | #include <linux/module.h> | 15 | #include <linux/module.h> |
14 | #include <linux/init.h> | 16 | #include <linux/init.h> |
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 53ddbc760af7..f1fa500ac105 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c | |||
@@ -930,7 +930,7 @@ static int __cpuinit longhaul_cpu_init(struct cpufreq_policy *policy) | |||
930 | return 0; | 930 | return 0; |
931 | } | 931 | } |
932 | 932 | ||
933 | static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy) | 933 | static int longhaul_cpu_exit(struct cpufreq_policy *policy) |
934 | { | 934 | { |
935 | cpufreq_frequency_table_put_attr(policy->cpu); | 935 | cpufreq_frequency_table_put_attr(policy->cpu); |
936 | return 0; | 936 | return 0; |
@@ -946,7 +946,7 @@ static struct cpufreq_driver longhaul_driver = { | |||
946 | .target = longhaul_target, | 946 | .target = longhaul_target, |
947 | .get = longhaul_get, | 947 | .get = longhaul_get, |
948 | .init = longhaul_cpu_init, | 948 | .init = longhaul_cpu_init, |
949 | .exit = __devexit_p(longhaul_cpu_exit), | 949 | .exit = longhaul_cpu_exit, |
950 | .name = "longhaul", | 950 | .name = "longhaul", |
951 | .owner = THIS_MODULE, | 951 | .owner = THIS_MODULE, |
952 | .attr = longhaul_attr, | 952 | .attr = longhaul_attr, |
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index e3ebb4fa2c3e..056faf6af1a9 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c | |||
@@ -1186,7 +1186,7 @@ err_out: | |||
1186 | return -ENODEV; | 1186 | return -ENODEV; |
1187 | } | 1187 | } |
1188 | 1188 | ||
1189 | static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol) | 1189 | static int powernowk8_cpu_exit(struct cpufreq_policy *pol) |
1190 | { | 1190 | { |
1191 | struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); | 1191 | struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); |
1192 | 1192 | ||
@@ -1242,7 +1242,7 @@ static struct cpufreq_driver cpufreq_amd64_driver = { | |||
1242 | .target = powernowk8_target, | 1242 | .target = powernowk8_target, |
1243 | .bios_limit = acpi_processor_get_bios_limit, | 1243 | .bios_limit = acpi_processor_get_bios_limit, |
1244 | .init = powernowk8_cpu_init, | 1244 | .init = powernowk8_cpu_init, |
1245 | .exit = __devexit_p(powernowk8_cpu_exit), | 1245 | .exit = powernowk8_cpu_exit, |
1246 | .get = powernowk8_get, | 1246 | .get = powernowk8_get, |
1247 | .name = "powernow-k8", | 1247 | .name = "powernow-k8", |
1248 | .owner = THIS_MODULE, | 1248 | .owner = THIS_MODULE, |
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c new file mode 100644 index 000000000000..4575cfe41755 --- /dev/null +++ b/drivers/cpufreq/spear-cpufreq.c | |||
@@ -0,0 +1,291 @@ | |||
1 | /* | ||
2 | * drivers/cpufreq/spear-cpufreq.c | ||
3 | * | ||
4 | * CPU Frequency Scaling for SPEAr platform | ||
5 | * | ||
6 | * Copyright (C) 2012 ST Microelectronics | ||
7 | * Deepak Sikri <deepak.sikri@st.com> | ||
8 | * | ||
9 | * This file is licensed under the terms of the GNU General Public | ||
10 | * License version 2. This program is licensed "as is" without any | ||
11 | * warranty of any kind, whether express or implied. | ||
12 | */ | ||
13 | |||
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
15 | |||
16 | #include <linux/clk.h> | ||
17 | #include <linux/cpufreq.h> | ||
18 | #include <linux/err.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/of.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/types.h> | ||
24 | |||
25 | /* SPEAr CPUFreq driver data structure */ | ||
26 | static struct { | ||
27 | struct clk *clk; | ||
28 | unsigned int transition_latency; | ||
29 | struct cpufreq_frequency_table *freq_tbl; | ||
30 | u32 cnt; | ||
31 | } spear_cpufreq; | ||
32 | |||
33 | int spear_cpufreq_verify(struct cpufreq_policy *policy) | ||
34 | { | ||
35 | return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl); | ||
36 | } | ||
37 | |||
38 | static unsigned int spear_cpufreq_get(unsigned int cpu) | ||
39 | { | ||
40 | return clk_get_rate(spear_cpufreq.clk) / 1000; | ||
41 | } | ||
42 | |||
43 | static struct clk *spear1340_cpu_get_possible_parent(unsigned long newfreq) | ||
44 | { | ||
45 | struct clk *sys_pclk; | ||
46 | int pclk; | ||
47 | /* | ||
48 | * In SPEAr1340, cpu clk's parent sys clk can take input from | ||
49 | * following sources | ||
50 | */ | ||
51 | const char *sys_clk_src[] = { | ||
52 | "sys_syn_clk", | ||
53 | "pll1_clk", | ||
54 | "pll2_clk", | ||
55 | "pll3_clk", | ||
56 | }; | ||
57 | |||
58 | /* | ||
59 | * As sys clk can have multiple source with their own range | ||
60 | * limitation so we choose possible sources accordingly | ||
61 | */ | ||
62 | if (newfreq <= 300000000) | ||
63 | pclk = 0; /* src is sys_syn_clk */ | ||
64 | else if (newfreq > 300000000 && newfreq <= 500000000) | ||
65 | pclk = 3; /* src is pll3_clk */ | ||
66 | else if (newfreq == 600000000) | ||
67 | pclk = 1; /* src is pll1_clk */ | ||
68 | else | ||
69 | return ERR_PTR(-EINVAL); | ||
70 | |||
71 | /* Get parent to sys clock */ | ||
72 | sys_pclk = clk_get(NULL, sys_clk_src[pclk]); | ||
73 | if (IS_ERR(sys_pclk)) | ||
74 | pr_err("Failed to get %s clock\n", sys_clk_src[pclk]); | ||
75 | |||
76 | return sys_pclk; | ||
77 | } | ||
78 | |||
79 | /* | ||
80 | * In SPEAr1340, we cannot use newfreq directly because we need to actually | ||
81 | * access a source clock (clk) which might not be ancestor of cpu at present. | ||
82 | * Hence in SPEAr1340 we would operate on source clock directly before switching | ||
83 | * cpu clock to it. | ||
84 | */ | ||
85 | static int spear1340_set_cpu_rate(struct clk *sys_pclk, unsigned long newfreq) | ||
86 | { | ||
87 | struct clk *sys_clk; | ||
88 | int ret = 0; | ||
89 | |||
90 | sys_clk = clk_get_parent(spear_cpufreq.clk); | ||
91 | if (IS_ERR(sys_clk)) { | ||
92 | pr_err("failed to get cpu's parent (sys) clock\n"); | ||
93 | return PTR_ERR(sys_clk); | ||
94 | } | ||
95 | |||
96 | /* Set the rate of the source clock before changing the parent */ | ||
97 | ret = clk_set_rate(sys_pclk, newfreq); | ||
98 | if (ret) { | ||
99 | pr_err("Failed to set sys clk rate to %lu\n", newfreq); | ||
100 | return ret; | ||
101 | } | ||
102 | |||
103 | ret = clk_set_parent(sys_clk, sys_pclk); | ||
104 | if (ret) { | ||
105 | pr_err("Failed to set sys clk parent\n"); | ||
106 | return ret; | ||
107 | } | ||
108 | |||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | static int spear_cpufreq_target(struct cpufreq_policy *policy, | ||
113 | unsigned int target_freq, unsigned int relation) | ||
114 | { | ||
115 | struct cpufreq_freqs freqs; | ||
116 | unsigned long newfreq; | ||
117 | struct clk *srcclk; | ||
118 | int index, ret, mult = 1; | ||
119 | |||
120 | if (cpufreq_frequency_table_target(policy, spear_cpufreq.freq_tbl, | ||
121 | target_freq, relation, &index)) | ||
122 | return -EINVAL; | ||
123 | |||
124 | freqs.cpu = policy->cpu; | ||
125 | freqs.old = spear_cpufreq_get(0); | ||
126 | |||
127 | newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000; | ||
128 | if (of_machine_is_compatible("st,spear1340")) { | ||
129 | /* | ||
130 | * SPEAr1340 is special in the sense that due to the possibility | ||
131 | * of multiple clock sources for cpu clk's parent we can have | ||
132 | * different clock source for different frequency of cpu clk. | ||
133 | * Hence we need to choose one from amongst these possible clock | ||
134 | * sources. | ||
135 | */ | ||
136 | srcclk = spear1340_cpu_get_possible_parent(newfreq); | ||
137 | if (IS_ERR(srcclk)) { | ||
138 | pr_err("Failed to get src clk\n"); | ||
139 | return PTR_ERR(srcclk); | ||
140 | } | ||
141 | |||
142 | /* SPEAr1340: src clk is always 2 * intended cpu clk */ | ||
143 | mult = 2; | ||
144 | } else { | ||
145 | /* | ||
146 | * src clock to be altered is ancestor of cpu clock. Hence we | ||
147 | * can directly work on cpu clk | ||
148 | */ | ||
149 | srcclk = spear_cpufreq.clk; | ||
150 | } | ||
151 | |||
152 | newfreq = clk_round_rate(srcclk, newfreq * mult); | ||
153 | if (newfreq < 0) { | ||
154 | pr_err("clk_round_rate failed for cpu src clock\n"); | ||
155 | return newfreq; | ||
156 | } | ||
157 | |||
158 | freqs.new = newfreq / 1000; | ||
159 | freqs.new /= mult; | ||
160 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
161 | |||
162 | if (mult == 2) | ||
163 | ret = spear1340_set_cpu_rate(srcclk, newfreq); | ||
164 | else | ||
165 | ret = clk_set_rate(spear_cpufreq.clk, newfreq); | ||
166 | |||
167 | /* Get current rate after clk_set_rate, in case of failure */ | ||
168 | if (ret) { | ||
169 | pr_err("CPU Freq: cpu clk_set_rate failed: %d\n", ret); | ||
170 | freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000; | ||
171 | } | ||
172 | |||
173 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
174 | return ret; | ||
175 | } | ||
176 | |||
177 | static int spear_cpufreq_init(struct cpufreq_policy *policy) | ||
178 | { | ||
179 | int ret; | ||
180 | |||
181 | ret = cpufreq_frequency_table_cpuinfo(policy, spear_cpufreq.freq_tbl); | ||
182 | if (ret) { | ||
183 | pr_err("cpufreq_frequency_table_cpuinfo() failed"); | ||
184 | return ret; | ||
185 | } | ||
186 | |||
187 | cpufreq_frequency_table_get_attr(spear_cpufreq.freq_tbl, policy->cpu); | ||
188 | policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency; | ||
189 | policy->cur = spear_cpufreq_get(0); | ||
190 | |||
191 | cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); | ||
192 | cpumask_copy(policy->related_cpus, policy->cpus); | ||
193 | |||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | static int spear_cpufreq_exit(struct cpufreq_policy *policy) | ||
198 | { | ||
199 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
200 | return 0; | ||
201 | } | ||
202 | |||
203 | static struct freq_attr *spear_cpufreq_attr[] = { | ||
204 | &cpufreq_freq_attr_scaling_available_freqs, | ||
205 | NULL, | ||
206 | }; | ||
207 | |||
208 | static struct cpufreq_driver spear_cpufreq_driver = { | ||
209 | .name = "cpufreq-spear", | ||
210 | .flags = CPUFREQ_STICKY, | ||
211 | .verify = spear_cpufreq_verify, | ||
212 | .target = spear_cpufreq_target, | ||
213 | .get = spear_cpufreq_get, | ||
214 | .init = spear_cpufreq_init, | ||
215 | .exit = spear_cpufreq_exit, | ||
216 | .attr = spear_cpufreq_attr, | ||
217 | }; | ||
218 | |||
219 | static int spear_cpufreq_driver_init(void) | ||
220 | { | ||
221 | struct device_node *np; | ||
222 | const struct property *prop; | ||
223 | struct cpufreq_frequency_table *freq_tbl; | ||
224 | const __be32 *val; | ||
225 | int cnt, i, ret; | ||
226 | |||
227 | np = of_find_node_by_path("/cpus/cpu@0"); | ||
228 | if (!np) { | ||
229 | pr_err("No cpu node found"); | ||
230 | return -ENODEV; | ||
231 | } | ||
232 | |||
233 | if (of_property_read_u32(np, "clock-latency", | ||
234 | &spear_cpufreq.transition_latency)) | ||
235 | spear_cpufreq.transition_latency = CPUFREQ_ETERNAL; | ||
236 | |||
237 | prop = of_find_property(np, "cpufreq_tbl", NULL); | ||
238 | if (!prop || !prop->value) { | ||
239 | pr_err("Invalid cpufreq_tbl"); | ||
240 | ret = -ENODEV; | ||
241 | goto out_put_node; | ||
242 | } | ||
243 | |||
244 | cnt = prop->length / sizeof(u32); | ||
245 | val = prop->value; | ||
246 | |||
247 | freq_tbl = kmalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL); | ||
248 | if (!freq_tbl) { | ||
249 | ret = -ENOMEM; | ||
250 | goto out_put_node; | ||
251 | } | ||
252 | |||
253 | for (i = 0; i < cnt; i++) { | ||
254 | freq_tbl[i].index = i; | ||
255 | freq_tbl[i].frequency = be32_to_cpup(val++); | ||
256 | } | ||
257 | |||
258 | freq_tbl[i].index = i; | ||
259 | freq_tbl[i].frequency = CPUFREQ_TABLE_END; | ||
260 | |||
261 | spear_cpufreq.freq_tbl = freq_tbl; | ||
262 | |||
263 | of_node_put(np); | ||
264 | |||
265 | spear_cpufreq.clk = clk_get(NULL, "cpu_clk"); | ||
266 | if (IS_ERR(spear_cpufreq.clk)) { | ||
267 | pr_err("Unable to get CPU clock\n"); | ||
268 | ret = PTR_ERR(spear_cpufreq.clk); | ||
269 | goto out_put_mem; | ||
270 | } | ||
271 | |||
272 | ret = cpufreq_register_driver(&spear_cpufreq_driver); | ||
273 | if (!ret) | ||
274 | return 0; | ||
275 | |||
276 | pr_err("failed register driver: %d\n", ret); | ||
277 | clk_put(spear_cpufreq.clk); | ||
278 | |||
279 | out_put_mem: | ||
280 | kfree(freq_tbl); | ||
281 | return ret; | ||
282 | |||
283 | out_put_node: | ||
284 | of_node_put(np); | ||
285 | return ret; | ||
286 | } | ||
287 | late_initcall(spear_cpufreq_driver_init); | ||
288 | |||
289 | MODULE_AUTHOR("Deepak Sikri <deepak.sikri@st.com>"); | ||
290 | MODULE_DESCRIPTION("SPEAr CPUFreq driver"); | ||
291 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig index a76b689e553b..234ae651b38f 100644 --- a/drivers/cpuidle/Kconfig +++ b/drivers/cpuidle/Kconfig | |||
@@ -9,6 +9,15 @@ config CPU_IDLE | |||
9 | 9 | ||
10 | If you're using an ACPI-enabled platform, you should say Y here. | 10 | If you're using an ACPI-enabled platform, you should say Y here. |
11 | 11 | ||
12 | config CPU_IDLE_MULTIPLE_DRIVERS | ||
13 | bool "Support multiple cpuidle drivers" | ||
14 | depends on CPU_IDLE | ||
15 | default n | ||
16 | help | ||
17 | Allows the cpuidle framework to use different drivers for each CPU. | ||
18 | This is useful if you have a system with different CPU latencies and | ||
19 | states. If unsure say N. | ||
20 | |||
12 | config CPU_IDLE_GOV_LADDER | 21 | config CPU_IDLE_GOV_LADDER |
13 | bool | 22 | bool |
14 | depends on CPU_IDLE | 23 | depends on CPU_IDLE |
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 7f15b8514a18..8df53dd8dbe1 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
@@ -68,7 +68,7 @@ static cpuidle_enter_t cpuidle_enter_ops; | |||
68 | int cpuidle_play_dead(void) | 68 | int cpuidle_play_dead(void) |
69 | { | 69 | { |
70 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | 70 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); |
71 | struct cpuidle_driver *drv = cpuidle_get_driver(); | 71 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); |
72 | int i, dead_state = -1; | 72 | int i, dead_state = -1; |
73 | int power_usage = -1; | 73 | int power_usage = -1; |
74 | 74 | ||
@@ -109,8 +109,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, | |||
109 | /* This can be moved to within driver enter routine | 109 | /* This can be moved to within driver enter routine |
110 | * but that results in multiple copies of same code. | 110 | * but that results in multiple copies of same code. |
111 | */ | 111 | */ |
112 | dev->states_usage[entered_state].time += | 112 | dev->states_usage[entered_state].time += dev->last_residency; |
113 | (unsigned long long)dev->last_residency; | ||
114 | dev->states_usage[entered_state].usage++; | 113 | dev->states_usage[entered_state].usage++; |
115 | } else { | 114 | } else { |
116 | dev->last_residency = 0; | 115 | dev->last_residency = 0; |
@@ -128,7 +127,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, | |||
128 | int cpuidle_idle_call(void) | 127 | int cpuidle_idle_call(void) |
129 | { | 128 | { |
130 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | 129 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); |
131 | struct cpuidle_driver *drv = cpuidle_get_driver(); | 130 | struct cpuidle_driver *drv; |
132 | int next_state, entered_state; | 131 | int next_state, entered_state; |
133 | 132 | ||
134 | if (off) | 133 | if (off) |
@@ -141,9 +140,15 @@ int cpuidle_idle_call(void) | |||
141 | if (!dev || !dev->enabled) | 140 | if (!dev || !dev->enabled) |
142 | return -EBUSY; | 141 | return -EBUSY; |
143 | 142 | ||
143 | drv = cpuidle_get_cpu_driver(dev); | ||
144 | |||
144 | /* ask the governor for the next state */ | 145 | /* ask the governor for the next state */ |
145 | next_state = cpuidle_curr_governor->select(drv, dev); | 146 | next_state = cpuidle_curr_governor->select(drv, dev); |
146 | if (need_resched()) { | 147 | if (need_resched()) { |
148 | dev->last_residency = 0; | ||
149 | /* give the governor an opportunity to reflect on the outcome */ | ||
150 | if (cpuidle_curr_governor->reflect) | ||
151 | cpuidle_curr_governor->reflect(dev, next_state); | ||
147 | local_irq_enable(); | 152 | local_irq_enable(); |
148 | return 0; | 153 | return 0; |
149 | } | 154 | } |
@@ -308,15 +313,19 @@ static void poll_idle_init(struct cpuidle_driver *drv) {} | |||
308 | int cpuidle_enable_device(struct cpuidle_device *dev) | 313 | int cpuidle_enable_device(struct cpuidle_device *dev) |
309 | { | 314 | { |
310 | int ret, i; | 315 | int ret, i; |
311 | struct cpuidle_driver *drv = cpuidle_get_driver(); | 316 | struct cpuidle_driver *drv; |
312 | 317 | ||
313 | if (!dev) | 318 | if (!dev) |
314 | return -EINVAL; | 319 | return -EINVAL; |
315 | 320 | ||
316 | if (dev->enabled) | 321 | if (dev->enabled) |
317 | return 0; | 322 | return 0; |
323 | |||
324 | drv = cpuidle_get_cpu_driver(dev); | ||
325 | |||
318 | if (!drv || !cpuidle_curr_governor) | 326 | if (!drv || !cpuidle_curr_governor) |
319 | return -EIO; | 327 | return -EIO; |
328 | |||
320 | if (!dev->state_count) | 329 | if (!dev->state_count) |
321 | dev->state_count = drv->state_count; | 330 | dev->state_count = drv->state_count; |
322 | 331 | ||
@@ -331,7 +340,8 @@ int cpuidle_enable_device(struct cpuidle_device *dev) | |||
331 | 340 | ||
332 | poll_idle_init(drv); | 341 | poll_idle_init(drv); |
333 | 342 | ||
334 | if ((ret = cpuidle_add_state_sysfs(dev))) | 343 | ret = cpuidle_add_device_sysfs(dev); |
344 | if (ret) | ||
335 | return ret; | 345 | return ret; |
336 | 346 | ||
337 | if (cpuidle_curr_governor->enable && | 347 | if (cpuidle_curr_governor->enable && |
@@ -352,7 +362,7 @@ int cpuidle_enable_device(struct cpuidle_device *dev) | |||
352 | return 0; | 362 | return 0; |
353 | 363 | ||
354 | fail_sysfs: | 364 | fail_sysfs: |
355 | cpuidle_remove_state_sysfs(dev); | 365 | cpuidle_remove_device_sysfs(dev); |
356 | 366 | ||
357 | return ret; | 367 | return ret; |
358 | } | 368 | } |
@@ -368,17 +378,20 @@ EXPORT_SYMBOL_GPL(cpuidle_enable_device); | |||
368 | */ | 378 | */ |
369 | void cpuidle_disable_device(struct cpuidle_device *dev) | 379 | void cpuidle_disable_device(struct cpuidle_device *dev) |
370 | { | 380 | { |
381 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | ||
382 | |||
371 | if (!dev || !dev->enabled) | 383 | if (!dev || !dev->enabled) |
372 | return; | 384 | return; |
373 | if (!cpuidle_get_driver() || !cpuidle_curr_governor) | 385 | |
386 | if (!drv || !cpuidle_curr_governor) | ||
374 | return; | 387 | return; |
375 | 388 | ||
376 | dev->enabled = 0; | 389 | dev->enabled = 0; |
377 | 390 | ||
378 | if (cpuidle_curr_governor->disable) | 391 | if (cpuidle_curr_governor->disable) |
379 | cpuidle_curr_governor->disable(cpuidle_get_driver(), dev); | 392 | cpuidle_curr_governor->disable(drv, dev); |
380 | 393 | ||
381 | cpuidle_remove_state_sysfs(dev); | 394 | cpuidle_remove_device_sysfs(dev); |
382 | enabled_devices--; | 395 | enabled_devices--; |
383 | } | 396 | } |
384 | 397 | ||
@@ -394,17 +407,14 @@ EXPORT_SYMBOL_GPL(cpuidle_disable_device); | |||
394 | static int __cpuidle_register_device(struct cpuidle_device *dev) | 407 | static int __cpuidle_register_device(struct cpuidle_device *dev) |
395 | { | 408 | { |
396 | int ret; | 409 | int ret; |
397 | struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); | 410 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); |
398 | struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); | ||
399 | 411 | ||
400 | if (!try_module_get(cpuidle_driver->owner)) | 412 | if (!try_module_get(drv->owner)) |
401 | return -EINVAL; | 413 | return -EINVAL; |
402 | 414 | ||
403 | init_completion(&dev->kobj_unregister); | ||
404 | |||
405 | per_cpu(cpuidle_devices, dev->cpu) = dev; | 415 | per_cpu(cpuidle_devices, dev->cpu) = dev; |
406 | list_add(&dev->device_list, &cpuidle_detected_devices); | 416 | list_add(&dev->device_list, &cpuidle_detected_devices); |
407 | ret = cpuidle_add_sysfs(cpu_dev); | 417 | ret = cpuidle_add_sysfs(dev); |
408 | if (ret) | 418 | if (ret) |
409 | goto err_sysfs; | 419 | goto err_sysfs; |
410 | 420 | ||
@@ -416,12 +426,11 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) | |||
416 | return 0; | 426 | return 0; |
417 | 427 | ||
418 | err_coupled: | 428 | err_coupled: |
419 | cpuidle_remove_sysfs(cpu_dev); | 429 | cpuidle_remove_sysfs(dev); |
420 | wait_for_completion(&dev->kobj_unregister); | ||
421 | err_sysfs: | 430 | err_sysfs: |
422 | list_del(&dev->device_list); | 431 | list_del(&dev->device_list); |
423 | per_cpu(cpuidle_devices, dev->cpu) = NULL; | 432 | per_cpu(cpuidle_devices, dev->cpu) = NULL; |
424 | module_put(cpuidle_driver->owner); | 433 | module_put(drv->owner); |
425 | return ret; | 434 | return ret; |
426 | } | 435 | } |
427 | 436 | ||
@@ -460,8 +469,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device); | |||
460 | */ | 469 | */ |
461 | void cpuidle_unregister_device(struct cpuidle_device *dev) | 470 | void cpuidle_unregister_device(struct cpuidle_device *dev) |
462 | { | 471 | { |
463 | struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); | 472 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); |
464 | struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); | ||
465 | 473 | ||
466 | if (dev->registered == 0) | 474 | if (dev->registered == 0) |
467 | return; | 475 | return; |
@@ -470,16 +478,15 @@ void cpuidle_unregister_device(struct cpuidle_device *dev) | |||
470 | 478 | ||
471 | cpuidle_disable_device(dev); | 479 | cpuidle_disable_device(dev); |
472 | 480 | ||
473 | cpuidle_remove_sysfs(cpu_dev); | 481 | cpuidle_remove_sysfs(dev); |
474 | list_del(&dev->device_list); | 482 | list_del(&dev->device_list); |
475 | wait_for_completion(&dev->kobj_unregister); | ||
476 | per_cpu(cpuidle_devices, dev->cpu) = NULL; | 483 | per_cpu(cpuidle_devices, dev->cpu) = NULL; |
477 | 484 | ||
478 | cpuidle_coupled_unregister_device(dev); | 485 | cpuidle_coupled_unregister_device(dev); |
479 | 486 | ||
480 | cpuidle_resume_and_unlock(); | 487 | cpuidle_resume_and_unlock(); |
481 | 488 | ||
482 | module_put(cpuidle_driver->owner); | 489 | module_put(drv->owner); |
483 | } | 490 | } |
484 | 491 | ||
485 | EXPORT_SYMBOL_GPL(cpuidle_unregister_device); | 492 | EXPORT_SYMBOL_GPL(cpuidle_unregister_device); |
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h index 76e7f696ad8c..ee97e9672ecf 100644 --- a/drivers/cpuidle/cpuidle.h +++ b/drivers/cpuidle/cpuidle.h | |||
@@ -5,8 +5,6 @@ | |||
5 | #ifndef __DRIVER_CPUIDLE_H | 5 | #ifndef __DRIVER_CPUIDLE_H |
6 | #define __DRIVER_CPUIDLE_H | 6 | #define __DRIVER_CPUIDLE_H |
7 | 7 | ||
8 | #include <linux/device.h> | ||
9 | |||
10 | /* For internal use only */ | 8 | /* For internal use only */ |
11 | extern struct cpuidle_governor *cpuidle_curr_governor; | 9 | extern struct cpuidle_governor *cpuidle_curr_governor; |
12 | extern struct list_head cpuidle_governors; | 10 | extern struct list_head cpuidle_governors; |
@@ -25,12 +23,15 @@ extern void cpuidle_uninstall_idle_handler(void); | |||
25 | extern int cpuidle_switch_governor(struct cpuidle_governor *gov); | 23 | extern int cpuidle_switch_governor(struct cpuidle_governor *gov); |
26 | 24 | ||
27 | /* sysfs */ | 25 | /* sysfs */ |
26 | |||
27 | struct device; | ||
28 | |||
28 | extern int cpuidle_add_interface(struct device *dev); | 29 | extern int cpuidle_add_interface(struct device *dev); |
29 | extern void cpuidle_remove_interface(struct device *dev); | 30 | extern void cpuidle_remove_interface(struct device *dev); |
30 | extern int cpuidle_add_state_sysfs(struct cpuidle_device *device); | 31 | extern int cpuidle_add_device_sysfs(struct cpuidle_device *device); |
31 | extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device); | 32 | extern void cpuidle_remove_device_sysfs(struct cpuidle_device *device); |
32 | extern int cpuidle_add_sysfs(struct device *dev); | 33 | extern int cpuidle_add_sysfs(struct cpuidle_device *dev); |
33 | extern void cpuidle_remove_sysfs(struct device *dev); | 34 | extern void cpuidle_remove_sysfs(struct cpuidle_device *dev); |
34 | 35 | ||
35 | #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED | 36 | #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED |
36 | bool cpuidle_state_is_coupled(struct cpuidle_device *dev, | 37 | bool cpuidle_state_is_coupled(struct cpuidle_device *dev, |
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index 87db3877fead..3af841fb397a 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c | |||
@@ -14,9 +14,10 @@ | |||
14 | 14 | ||
15 | #include "cpuidle.h" | 15 | #include "cpuidle.h" |
16 | 16 | ||
17 | static struct cpuidle_driver *cpuidle_curr_driver; | ||
18 | DEFINE_SPINLOCK(cpuidle_driver_lock); | 17 | DEFINE_SPINLOCK(cpuidle_driver_lock); |
19 | int cpuidle_driver_refcount; | 18 | |
19 | static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu); | ||
20 | static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu); | ||
20 | 21 | ||
21 | static void set_power_states(struct cpuidle_driver *drv) | 22 | static void set_power_states(struct cpuidle_driver *drv) |
22 | { | 23 | { |
@@ -40,11 +41,15 @@ static void set_power_states(struct cpuidle_driver *drv) | |||
40 | drv->states[i].power_usage = -1 - i; | 41 | drv->states[i].power_usage = -1 - i; |
41 | } | 42 | } |
42 | 43 | ||
43 | /** | 44 | static void __cpuidle_driver_init(struct cpuidle_driver *drv) |
44 | * cpuidle_register_driver - registers a driver | 45 | { |
45 | * @drv: the driver | 46 | drv->refcnt = 0; |
46 | */ | 47 | |
47 | int cpuidle_register_driver(struct cpuidle_driver *drv) | 48 | if (!drv->power_specified) |
49 | set_power_states(drv); | ||
50 | } | ||
51 | |||
52 | static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu) | ||
48 | { | 53 | { |
49 | if (!drv || !drv->state_count) | 54 | if (!drv || !drv->state_count) |
50 | return -EINVAL; | 55 | return -EINVAL; |
@@ -52,31 +57,145 @@ int cpuidle_register_driver(struct cpuidle_driver *drv) | |||
52 | if (cpuidle_disabled()) | 57 | if (cpuidle_disabled()) |
53 | return -ENODEV; | 58 | return -ENODEV; |
54 | 59 | ||
55 | spin_lock(&cpuidle_driver_lock); | 60 | if (__cpuidle_get_cpu_driver(cpu)) |
56 | if (cpuidle_curr_driver) { | ||
57 | spin_unlock(&cpuidle_driver_lock); | ||
58 | return -EBUSY; | 61 | return -EBUSY; |
62 | |||
63 | __cpuidle_driver_init(drv); | ||
64 | |||
65 | __cpuidle_set_cpu_driver(drv, cpu); | ||
66 | |||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | static void __cpuidle_unregister_driver(struct cpuidle_driver *drv, int cpu) | ||
71 | { | ||
72 | if (drv != __cpuidle_get_cpu_driver(cpu)) | ||
73 | return; | ||
74 | |||
75 | if (!WARN_ON(drv->refcnt > 0)) | ||
76 | __cpuidle_set_cpu_driver(NULL, cpu); | ||
77 | } | ||
78 | |||
79 | #ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS | ||
80 | |||
81 | static DEFINE_PER_CPU(struct cpuidle_driver *, cpuidle_drivers); | ||
82 | |||
83 | static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu) | ||
84 | { | ||
85 | per_cpu(cpuidle_drivers, cpu) = drv; | ||
86 | } | ||
87 | |||
88 | static struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu) | ||
89 | { | ||
90 | return per_cpu(cpuidle_drivers, cpu); | ||
91 | } | ||
92 | |||
93 | static void __cpuidle_unregister_all_cpu_driver(struct cpuidle_driver *drv) | ||
94 | { | ||
95 | int cpu; | ||
96 | for_each_present_cpu(cpu) | ||
97 | __cpuidle_unregister_driver(drv, cpu); | ||
98 | } | ||
99 | |||
100 | static int __cpuidle_register_all_cpu_driver(struct cpuidle_driver *drv) | ||
101 | { | ||
102 | int ret = 0; | ||
103 | int i, cpu; | ||
104 | |||
105 | for_each_present_cpu(cpu) { | ||
106 | ret = __cpuidle_register_driver(drv, cpu); | ||
107 | if (ret) | ||
108 | break; | ||
59 | } | 109 | } |
60 | 110 | ||
61 | if (!drv->power_specified) | 111 | if (ret) |
62 | set_power_states(drv); | 112 | for_each_present_cpu(i) { |
113 | if (i == cpu) | ||
114 | break; | ||
115 | __cpuidle_unregister_driver(drv, i); | ||
116 | } | ||
63 | 117 | ||
64 | cpuidle_curr_driver = drv; | ||
65 | 118 | ||
119 | return ret; | ||
120 | } | ||
121 | |||
122 | int cpuidle_register_cpu_driver(struct cpuidle_driver *drv, int cpu) | ||
123 | { | ||
124 | int ret; | ||
125 | |||
126 | spin_lock(&cpuidle_driver_lock); | ||
127 | ret = __cpuidle_register_driver(drv, cpu); | ||
66 | spin_unlock(&cpuidle_driver_lock); | 128 | spin_unlock(&cpuidle_driver_lock); |
67 | 129 | ||
68 | return 0; | 130 | return ret; |
131 | } | ||
132 | |||
133 | void cpuidle_unregister_cpu_driver(struct cpuidle_driver *drv, int cpu) | ||
134 | { | ||
135 | spin_lock(&cpuidle_driver_lock); | ||
136 | __cpuidle_unregister_driver(drv, cpu); | ||
137 | spin_unlock(&cpuidle_driver_lock); | ||
138 | } | ||
139 | |||
140 | /** | ||
141 | * cpuidle_register_driver - registers a driver | ||
142 | * @drv: the driver | ||
143 | */ | ||
144 | int cpuidle_register_driver(struct cpuidle_driver *drv) | ||
145 | { | ||
146 | int ret; | ||
147 | |||
148 | spin_lock(&cpuidle_driver_lock); | ||
149 | ret = __cpuidle_register_all_cpu_driver(drv); | ||
150 | spin_unlock(&cpuidle_driver_lock); | ||
151 | |||
152 | return ret; | ||
69 | } | 153 | } |
70 | EXPORT_SYMBOL_GPL(cpuidle_register_driver); | 154 | EXPORT_SYMBOL_GPL(cpuidle_register_driver); |
71 | 155 | ||
72 | /** | 156 | /** |
73 | * cpuidle_get_driver - return the current driver | 157 | * cpuidle_unregister_driver - unregisters a driver |
158 | * @drv: the driver | ||
74 | */ | 159 | */ |
75 | struct cpuidle_driver *cpuidle_get_driver(void) | 160 | void cpuidle_unregister_driver(struct cpuidle_driver *drv) |
161 | { | ||
162 | spin_lock(&cpuidle_driver_lock); | ||
163 | __cpuidle_unregister_all_cpu_driver(drv); | ||
164 | spin_unlock(&cpuidle_driver_lock); | ||
165 | } | ||
166 | EXPORT_SYMBOL_GPL(cpuidle_unregister_driver); | ||
167 | |||
168 | #else | ||
169 | |||
170 | static struct cpuidle_driver *cpuidle_curr_driver; | ||
171 | |||
172 | static inline void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu) | ||
173 | { | ||
174 | cpuidle_curr_driver = drv; | ||
175 | } | ||
176 | |||
177 | static inline struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu) | ||
76 | { | 178 | { |
77 | return cpuidle_curr_driver; | 179 | return cpuidle_curr_driver; |
78 | } | 180 | } |
79 | EXPORT_SYMBOL_GPL(cpuidle_get_driver); | 181 | |
182 | /** | ||
183 | * cpuidle_register_driver - registers a driver | ||
184 | * @drv: the driver | ||
185 | */ | ||
186 | int cpuidle_register_driver(struct cpuidle_driver *drv) | ||
187 | { | ||
188 | int ret, cpu; | ||
189 | |||
190 | cpu = get_cpu(); | ||
191 | spin_lock(&cpuidle_driver_lock); | ||
192 | ret = __cpuidle_register_driver(drv, cpu); | ||
193 | spin_unlock(&cpuidle_driver_lock); | ||
194 | put_cpu(); | ||
195 | |||
196 | return ret; | ||
197 | } | ||
198 | EXPORT_SYMBOL_GPL(cpuidle_register_driver); | ||
80 | 199 | ||
81 | /** | 200 | /** |
82 | * cpuidle_unregister_driver - unregisters a driver | 201 | * cpuidle_unregister_driver - unregisters a driver |
@@ -84,20 +203,50 @@ EXPORT_SYMBOL_GPL(cpuidle_get_driver); | |||
84 | */ | 203 | */ |
85 | void cpuidle_unregister_driver(struct cpuidle_driver *drv) | 204 | void cpuidle_unregister_driver(struct cpuidle_driver *drv) |
86 | { | 205 | { |
87 | if (drv != cpuidle_curr_driver) { | 206 | int cpu; |
88 | WARN(1, "invalid cpuidle_unregister_driver(%s)\n", | ||
89 | drv->name); | ||
90 | return; | ||
91 | } | ||
92 | 207 | ||
208 | cpu = get_cpu(); | ||
93 | spin_lock(&cpuidle_driver_lock); | 209 | spin_lock(&cpuidle_driver_lock); |
210 | __cpuidle_unregister_driver(drv, cpu); | ||
211 | spin_unlock(&cpuidle_driver_lock); | ||
212 | put_cpu(); | ||
213 | } | ||
214 | EXPORT_SYMBOL_GPL(cpuidle_unregister_driver); | ||
215 | #endif | ||
216 | |||
217 | /** | ||
218 | * cpuidle_get_driver - return the current driver | ||
219 | */ | ||
220 | struct cpuidle_driver *cpuidle_get_driver(void) | ||
221 | { | ||
222 | struct cpuidle_driver *drv; | ||
223 | int cpu; | ||
94 | 224 | ||
95 | if (!WARN_ON(cpuidle_driver_refcount > 0)) | 225 | cpu = get_cpu(); |
96 | cpuidle_curr_driver = NULL; | 226 | drv = __cpuidle_get_cpu_driver(cpu); |
227 | put_cpu(); | ||
97 | 228 | ||
229 | return drv; | ||
230 | } | ||
231 | EXPORT_SYMBOL_GPL(cpuidle_get_driver); | ||
232 | |||
233 | /** | ||
234 | * cpuidle_get_cpu_driver - return the driver tied with a cpu | ||
235 | */ | ||
236 | struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev) | ||
237 | { | ||
238 | struct cpuidle_driver *drv; | ||
239 | |||
240 | if (!dev) | ||
241 | return NULL; | ||
242 | |||
243 | spin_lock(&cpuidle_driver_lock); | ||
244 | drv = __cpuidle_get_cpu_driver(dev->cpu); | ||
98 | spin_unlock(&cpuidle_driver_lock); | 245 | spin_unlock(&cpuidle_driver_lock); |
246 | |||
247 | return drv; | ||
99 | } | 248 | } |
100 | EXPORT_SYMBOL_GPL(cpuidle_unregister_driver); | 249 | EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver); |
101 | 250 | ||
102 | struct cpuidle_driver *cpuidle_driver_ref(void) | 251 | struct cpuidle_driver *cpuidle_driver_ref(void) |
103 | { | 252 | { |
@@ -105,8 +254,8 @@ struct cpuidle_driver *cpuidle_driver_ref(void) | |||
105 | 254 | ||
106 | spin_lock(&cpuidle_driver_lock); | 255 | spin_lock(&cpuidle_driver_lock); |
107 | 256 | ||
108 | drv = cpuidle_curr_driver; | 257 | drv = cpuidle_get_driver(); |
109 | cpuidle_driver_refcount++; | 258 | drv->refcnt++; |
110 | 259 | ||
111 | spin_unlock(&cpuidle_driver_lock); | 260 | spin_unlock(&cpuidle_driver_lock); |
112 | return drv; | 261 | return drv; |
@@ -114,10 +263,12 @@ struct cpuidle_driver *cpuidle_driver_ref(void) | |||
114 | 263 | ||
115 | void cpuidle_driver_unref(void) | 264 | void cpuidle_driver_unref(void) |
116 | { | 265 | { |
266 | struct cpuidle_driver *drv = cpuidle_get_driver(); | ||
267 | |||
117 | spin_lock(&cpuidle_driver_lock); | 268 | spin_lock(&cpuidle_driver_lock); |
118 | 269 | ||
119 | if (!WARN_ON(cpuidle_driver_refcount <= 0)) | 270 | if (drv && !WARN_ON(drv->refcnt <= 0)) |
120 | cpuidle_driver_refcount--; | 271 | drv->refcnt--; |
121 | 272 | ||
122 | spin_unlock(&cpuidle_driver_lock); | 273 | spin_unlock(&cpuidle_driver_lock); |
123 | } | 274 | } |
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 5b1f2c372c1f..bd40b943b6db 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
@@ -28,6 +28,13 @@ | |||
28 | #define MAX_INTERESTING 50000 | 28 | #define MAX_INTERESTING 50000 |
29 | #define STDDEV_THRESH 400 | 29 | #define STDDEV_THRESH 400 |
30 | 30 | ||
31 | /* 60 * 60 > STDDEV_THRESH * INTERVALS = 400 * 8 */ | ||
32 | #define MAX_DEVIATION 60 | ||
33 | |||
34 | static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer); | ||
35 | static DEFINE_PER_CPU(int, hrtimer_status); | ||
36 | /* menu hrtimer mode */ | ||
37 | enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL}; | ||
31 | 38 | ||
32 | /* | 39 | /* |
33 | * Concepts and ideas behind the menu governor | 40 | * Concepts and ideas behind the menu governor |
@@ -109,6 +116,13 @@ | |||
109 | * | 116 | * |
110 | */ | 117 | */ |
111 | 118 | ||
119 | /* | ||
120 | * The C-state residency is so long that is is worthwhile to exit | ||
121 | * from the shallow C-state and re-enter into a deeper C-state. | ||
122 | */ | ||
123 | static unsigned int perfect_cstate_ms __read_mostly = 30; | ||
124 | module_param(perfect_cstate_ms, uint, 0000); | ||
125 | |||
112 | struct menu_device { | 126 | struct menu_device { |
113 | int last_state_idx; | 127 | int last_state_idx; |
114 | int needs_update; | 128 | int needs_update; |
@@ -191,40 +205,102 @@ static u64 div_round64(u64 dividend, u32 divisor) | |||
191 | return div_u64(dividend + (divisor / 2), divisor); | 205 | return div_u64(dividend + (divisor / 2), divisor); |
192 | } | 206 | } |
193 | 207 | ||
208 | /* Cancel the hrtimer if it is not triggered yet */ | ||
209 | void menu_hrtimer_cancel(void) | ||
210 | { | ||
211 | int cpu = smp_processor_id(); | ||
212 | struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu); | ||
213 | |||
214 | /* The timer is still not time out*/ | ||
215 | if (per_cpu(hrtimer_status, cpu)) { | ||
216 | hrtimer_cancel(hrtmr); | ||
217 | per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP; | ||
218 | } | ||
219 | } | ||
220 | EXPORT_SYMBOL_GPL(menu_hrtimer_cancel); | ||
221 | |||
222 | /* Call back for hrtimer is triggered */ | ||
223 | static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer) | ||
224 | { | ||
225 | int cpu = smp_processor_id(); | ||
226 | struct menu_device *data = &per_cpu(menu_devices, cpu); | ||
227 | |||
228 | /* In general case, the expected residency is much larger than | ||
229 | * deepest C-state target residency, but prediction logic still | ||
230 | * predicts a small predicted residency, so the prediction | ||
231 | * history is totally broken if the timer is triggered. | ||
232 | * So reset the correction factor. | ||
233 | */ | ||
234 | if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL) | ||
235 | data->correction_factor[data->bucket] = RESOLUTION * DECAY; | ||
236 | |||
237 | per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP; | ||
238 | |||
239 | return HRTIMER_NORESTART; | ||
240 | } | ||
241 | |||
194 | /* | 242 | /* |
195 | * Try detecting repeating patterns by keeping track of the last 8 | 243 | * Try detecting repeating patterns by keeping track of the last 8 |
196 | * intervals, and checking if the standard deviation of that set | 244 | * intervals, and checking if the standard deviation of that set |
197 | * of points is below a threshold. If it is... then use the | 245 | * of points is below a threshold. If it is... then use the |
198 | * average of these 8 points as the estimated value. | 246 | * average of these 8 points as the estimated value. |
199 | */ | 247 | */ |
200 | static void detect_repeating_patterns(struct menu_device *data) | 248 | static u32 get_typical_interval(struct menu_device *data) |
201 | { | 249 | { |
202 | int i; | 250 | int i = 0, divisor = 0; |
203 | uint64_t avg = 0; | 251 | uint64_t max = 0, avg = 0, stddev = 0; |
204 | uint64_t stddev = 0; /* contains the square of the std deviation */ | 252 | int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */ |
205 | 253 | unsigned int ret = 0; | |
206 | /* first calculate average and standard deviation of the past */ | ||
207 | for (i = 0; i < INTERVALS; i++) | ||
208 | avg += data->intervals[i]; | ||
209 | avg = avg / INTERVALS; | ||
210 | 254 | ||
211 | /* if the avg is beyond the known next tick, it's worthless */ | 255 | again: |
212 | if (avg > data->expected_us) | ||
213 | return; | ||
214 | 256 | ||
215 | for (i = 0; i < INTERVALS; i++) | 257 | /* first calculate average and standard deviation of the past */ |
216 | stddev += (data->intervals[i] - avg) * | 258 | max = avg = divisor = stddev = 0; |
217 | (data->intervals[i] - avg); | 259 | for (i = 0; i < INTERVALS; i++) { |
218 | 260 | int64_t value = data->intervals[i]; | |
219 | stddev = stddev / INTERVALS; | 261 | if (value <= thresh) { |
262 | avg += value; | ||
263 | divisor++; | ||
264 | if (value > max) | ||
265 | max = value; | ||
266 | } | ||
267 | } | ||
268 | do_div(avg, divisor); | ||
220 | 269 | ||
270 | for (i = 0; i < INTERVALS; i++) { | ||
271 | int64_t value = data->intervals[i]; | ||
272 | if (value <= thresh) { | ||
273 | int64_t diff = value - avg; | ||
274 | stddev += diff * diff; | ||
275 | } | ||
276 | } | ||
277 | do_div(stddev, divisor); | ||
278 | stddev = int_sqrt(stddev); | ||
221 | /* | 279 | /* |
222 | * now.. if stddev is small.. then assume we have a | 280 | * If we have outliers to the upside in our distribution, discard |
223 | * repeating pattern and predict we keep doing this. | 281 | * those by setting the threshold to exclude these outliers, then |
282 | * calculate the average and standard deviation again. Once we get | ||
283 | * down to the bottom 3/4 of our samples, stop excluding samples. | ||
284 | * | ||
285 | * This can deal with workloads that have long pauses interspersed | ||
286 | * with sporadic activity with a bunch of short pauses. | ||
287 | * | ||
288 | * The typical interval is obtained when standard deviation is small | ||
289 | * or standard deviation is small compared to the average interval. | ||
224 | */ | 290 | */ |
225 | 291 | if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3)) | |
226 | if (avg && stddev < STDDEV_THRESH) | 292 | || stddev <= 20) { |
227 | data->predicted_us = avg; | 293 | data->predicted_us = avg; |
294 | ret = 1; | ||
295 | return ret; | ||
296 | |||
297 | } else if ((divisor * 4) > INTERVALS * 3) { | ||
298 | /* Exclude the max interval */ | ||
299 | thresh = max - 1; | ||
300 | goto again; | ||
301 | } | ||
302 | |||
303 | return ret; | ||
228 | } | 304 | } |
229 | 305 | ||
230 | /** | 306 | /** |
@@ -240,6 +316,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
240 | int i; | 316 | int i; |
241 | int multiplier; | 317 | int multiplier; |
242 | struct timespec t; | 318 | struct timespec t; |
319 | int repeat = 0, low_predicted = 0; | ||
320 | int cpu = smp_processor_id(); | ||
321 | struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu); | ||
243 | 322 | ||
244 | if (data->needs_update) { | 323 | if (data->needs_update) { |
245 | menu_update(drv, dev); | 324 | menu_update(drv, dev); |
@@ -274,7 +353,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
274 | data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket], | 353 | data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket], |
275 | RESOLUTION * DECAY); | 354 | RESOLUTION * DECAY); |
276 | 355 | ||
277 | detect_repeating_patterns(data); | 356 | repeat = get_typical_interval(data); |
278 | 357 | ||
279 | /* | 358 | /* |
280 | * We want to default to C1 (hlt), not to busy polling | 359 | * We want to default to C1 (hlt), not to busy polling |
@@ -295,8 +374,10 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
295 | 374 | ||
296 | if (s->disabled || su->disable) | 375 | if (s->disabled || su->disable) |
297 | continue; | 376 | continue; |
298 | if (s->target_residency > data->predicted_us) | 377 | if (s->target_residency > data->predicted_us) { |
378 | low_predicted = 1; | ||
299 | continue; | 379 | continue; |
380 | } | ||
300 | if (s->exit_latency > latency_req) | 381 | if (s->exit_latency > latency_req) |
301 | continue; | 382 | continue; |
302 | if (s->exit_latency * multiplier > data->predicted_us) | 383 | if (s->exit_latency * multiplier > data->predicted_us) |
@@ -309,6 +390,44 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
309 | } | 390 | } |
310 | } | 391 | } |
311 | 392 | ||
393 | /* not deepest C-state chosen for low predicted residency */ | ||
394 | if (low_predicted) { | ||
395 | unsigned int timer_us = 0; | ||
396 | unsigned int perfect_us = 0; | ||
397 | |||
398 | /* | ||
399 | * Set a timer to detect whether this sleep is much | ||
400 | * longer than repeat mode predicted. If the timer | ||
401 | * triggers, the code will evaluate whether to put | ||
402 | * the CPU into a deeper C-state. | ||
403 | * The timer is cancelled on CPU wakeup. | ||
404 | */ | ||
405 | timer_us = 2 * (data->predicted_us + MAX_DEVIATION); | ||
406 | |||
407 | perfect_us = perfect_cstate_ms * 1000; | ||
408 | |||
409 | if (repeat && (4 * timer_us < data->expected_us)) { | ||
410 | RCU_NONIDLE(hrtimer_start(hrtmr, | ||
411 | ns_to_ktime(1000 * timer_us), | ||
412 | HRTIMER_MODE_REL_PINNED)); | ||
413 | /* In repeat case, menu hrtimer is started */ | ||
414 | per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT; | ||
415 | } else if (perfect_us < data->expected_us) { | ||
416 | /* | ||
417 | * The next timer is long. This could be because | ||
418 | * we did not make a useful prediction. | ||
419 | * In that case, it makes sense to re-enter | ||
420 | * into a deeper C-state after some time. | ||
421 | */ | ||
422 | RCU_NONIDLE(hrtimer_start(hrtmr, | ||
423 | ns_to_ktime(1000 * timer_us), | ||
424 | HRTIMER_MODE_REL_PINNED)); | ||
425 | /* In general case, menu hrtimer is started */ | ||
426 | per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL; | ||
427 | } | ||
428 | |||
429 | } | ||
430 | |||
312 | return data->last_state_idx; | 431 | return data->last_state_idx; |
313 | } | 432 | } |
314 | 433 | ||
@@ -399,6 +518,9 @@ static int menu_enable_device(struct cpuidle_driver *drv, | |||
399 | struct cpuidle_device *dev) | 518 | struct cpuidle_device *dev) |
400 | { | 519 | { |
401 | struct menu_device *data = &per_cpu(menu_devices, dev->cpu); | 520 | struct menu_device *data = &per_cpu(menu_devices, dev->cpu); |
521 | struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu); | ||
522 | hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
523 | t->function = menu_hrtimer_notify; | ||
402 | 524 | ||
403 | memset(data, 0, sizeof(struct menu_device)); | 525 | memset(data, 0, sizeof(struct menu_device)); |
404 | 526 | ||
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 5f809e337b89..340942946106 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/cpu.h> | 13 | #include <linux/cpu.h> |
14 | #include <linux/capability.h> | 14 | #include <linux/capability.h> |
15 | #include <linux/device.h> | ||
15 | 16 | ||
16 | #include "cpuidle.h" | 17 | #include "cpuidle.h" |
17 | 18 | ||
@@ -297,6 +298,13 @@ static struct attribute *cpuidle_state_default_attrs[] = { | |||
297 | NULL | 298 | NULL |
298 | }; | 299 | }; |
299 | 300 | ||
301 | struct cpuidle_state_kobj { | ||
302 | struct cpuidle_state *state; | ||
303 | struct cpuidle_state_usage *state_usage; | ||
304 | struct completion kobj_unregister; | ||
305 | struct kobject kobj; | ||
306 | }; | ||
307 | |||
300 | #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj) | 308 | #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj) |
301 | #define kobj_to_state(k) (kobj_to_state_obj(k)->state) | 309 | #define kobj_to_state(k) (kobj_to_state_obj(k)->state) |
302 | #define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage) | 310 | #define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage) |
@@ -356,17 +364,17 @@ static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i) | |||
356 | } | 364 | } |
357 | 365 | ||
358 | /** | 366 | /** |
359 | * cpuidle_add_driver_sysfs - adds driver-specific sysfs attributes | 367 | * cpuidle_add_state_sysfs - adds cpuidle states sysfs attributes |
360 | * @device: the target device | 368 | * @device: the target device |
361 | */ | 369 | */ |
362 | int cpuidle_add_state_sysfs(struct cpuidle_device *device) | 370 | static int cpuidle_add_state_sysfs(struct cpuidle_device *device) |
363 | { | 371 | { |
364 | int i, ret = -ENOMEM; | 372 | int i, ret = -ENOMEM; |
365 | struct cpuidle_state_kobj *kobj; | 373 | struct cpuidle_state_kobj *kobj; |
366 | struct cpuidle_driver *drv = cpuidle_get_driver(); | 374 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device); |
367 | 375 | ||
368 | /* state statistics */ | 376 | /* state statistics */ |
369 | for (i = 0; i < device->state_count; i++) { | 377 | for (i = 0; i < drv->state_count; i++) { |
370 | kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); | 378 | kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); |
371 | if (!kobj) | 379 | if (!kobj) |
372 | goto error_state; | 380 | goto error_state; |
@@ -374,8 +382,8 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device) | |||
374 | kobj->state_usage = &device->states_usage[i]; | 382 | kobj->state_usage = &device->states_usage[i]; |
375 | init_completion(&kobj->kobj_unregister); | 383 | init_completion(&kobj->kobj_unregister); |
376 | 384 | ||
377 | ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj, | 385 | ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, |
378 | "state%d", i); | 386 | &device->kobj, "state%d", i); |
379 | if (ret) { | 387 | if (ret) { |
380 | kfree(kobj); | 388 | kfree(kobj); |
381 | goto error_state; | 389 | goto error_state; |
@@ -393,10 +401,10 @@ error_state: | |||
393 | } | 401 | } |
394 | 402 | ||
395 | /** | 403 | /** |
396 | * cpuidle_remove_driver_sysfs - removes driver-specific sysfs attributes | 404 | * cpuidle_remove_driver_sysfs - removes the cpuidle states sysfs attributes |
397 | * @device: the target device | 405 | * @device: the target device |
398 | */ | 406 | */ |
399 | void cpuidle_remove_state_sysfs(struct cpuidle_device *device) | 407 | static void cpuidle_remove_state_sysfs(struct cpuidle_device *device) |
400 | { | 408 | { |
401 | int i; | 409 | int i; |
402 | 410 | ||
@@ -404,17 +412,179 @@ void cpuidle_remove_state_sysfs(struct cpuidle_device *device) | |||
404 | cpuidle_free_state_kobj(device, i); | 412 | cpuidle_free_state_kobj(device, i); |
405 | } | 413 | } |
406 | 414 | ||
415 | #ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS | ||
416 | #define kobj_to_driver_kobj(k) container_of(k, struct cpuidle_driver_kobj, kobj) | ||
417 | #define attr_to_driver_attr(a) container_of(a, struct cpuidle_driver_attr, attr) | ||
418 | |||
419 | #define define_one_driver_ro(_name, show) \ | ||
420 | static struct cpuidle_driver_attr attr_driver_##_name = \ | ||
421 | __ATTR(_name, 0644, show, NULL) | ||
422 | |||
423 | struct cpuidle_driver_kobj { | ||
424 | struct cpuidle_driver *drv; | ||
425 | struct completion kobj_unregister; | ||
426 | struct kobject kobj; | ||
427 | }; | ||
428 | |||
429 | struct cpuidle_driver_attr { | ||
430 | struct attribute attr; | ||
431 | ssize_t (*show)(struct cpuidle_driver *, char *); | ||
432 | ssize_t (*store)(struct cpuidle_driver *, const char *, size_t); | ||
433 | }; | ||
434 | |||
435 | static ssize_t show_driver_name(struct cpuidle_driver *drv, char *buf) | ||
436 | { | ||
437 | ssize_t ret; | ||
438 | |||
439 | spin_lock(&cpuidle_driver_lock); | ||
440 | ret = sprintf(buf, "%s\n", drv ? drv->name : "none"); | ||
441 | spin_unlock(&cpuidle_driver_lock); | ||
442 | |||
443 | return ret; | ||
444 | } | ||
445 | |||
446 | static void cpuidle_driver_sysfs_release(struct kobject *kobj) | ||
447 | { | ||
448 | struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj); | ||
449 | complete(&driver_kobj->kobj_unregister); | ||
450 | } | ||
451 | |||
452 | static ssize_t cpuidle_driver_show(struct kobject *kobj, struct attribute * attr, | ||
453 | char * buf) | ||
454 | { | ||
455 | int ret = -EIO; | ||
456 | struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj); | ||
457 | struct cpuidle_driver_attr *dattr = attr_to_driver_attr(attr); | ||
458 | |||
459 | if (dattr->show) | ||
460 | ret = dattr->show(driver_kobj->drv, buf); | ||
461 | |||
462 | return ret; | ||
463 | } | ||
464 | |||
465 | static ssize_t cpuidle_driver_store(struct kobject *kobj, struct attribute *attr, | ||
466 | const char *buf, size_t size) | ||
467 | { | ||
468 | int ret = -EIO; | ||
469 | struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj); | ||
470 | struct cpuidle_driver_attr *dattr = attr_to_driver_attr(attr); | ||
471 | |||
472 | if (dattr->store) | ||
473 | ret = dattr->store(driver_kobj->drv, buf, size); | ||
474 | |||
475 | return ret; | ||
476 | } | ||
477 | |||
478 | define_one_driver_ro(name, show_driver_name); | ||
479 | |||
480 | static const struct sysfs_ops cpuidle_driver_sysfs_ops = { | ||
481 | .show = cpuidle_driver_show, | ||
482 | .store = cpuidle_driver_store, | ||
483 | }; | ||
484 | |||
485 | static struct attribute *cpuidle_driver_default_attrs[] = { | ||
486 | &attr_driver_name.attr, | ||
487 | NULL | ||
488 | }; | ||
489 | |||
490 | static struct kobj_type ktype_driver_cpuidle = { | ||
491 | .sysfs_ops = &cpuidle_driver_sysfs_ops, | ||
492 | .default_attrs = cpuidle_driver_default_attrs, | ||
493 | .release = cpuidle_driver_sysfs_release, | ||
494 | }; | ||
495 | |||
496 | /** | ||
497 | * cpuidle_add_driver_sysfs - adds the driver name sysfs attribute | ||
498 | * @device: the target device | ||
499 | */ | ||
500 | static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) | ||
501 | { | ||
502 | struct cpuidle_driver_kobj *kdrv; | ||
503 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | ||
504 | int ret; | ||
505 | |||
506 | kdrv = kzalloc(sizeof(*kdrv), GFP_KERNEL); | ||
507 | if (!kdrv) | ||
508 | return -ENOMEM; | ||
509 | |||
510 | kdrv->drv = drv; | ||
511 | init_completion(&kdrv->kobj_unregister); | ||
512 | |||
513 | ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle, | ||
514 | &dev->kobj, "driver"); | ||
515 | if (ret) { | ||
516 | kfree(kdrv); | ||
517 | return ret; | ||
518 | } | ||
519 | |||
520 | kobject_uevent(&kdrv->kobj, KOBJ_ADD); | ||
521 | dev->kobj_driver = kdrv; | ||
522 | |||
523 | return ret; | ||
524 | } | ||
525 | |||
526 | /** | ||
527 | * cpuidle_remove_driver_sysfs - removes the driver name sysfs attribute | ||
528 | * @device: the target device | ||
529 | */ | ||
530 | static void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev) | ||
531 | { | ||
532 | struct cpuidle_driver_kobj *kdrv = dev->kobj_driver; | ||
533 | kobject_put(&kdrv->kobj); | ||
534 | wait_for_completion(&kdrv->kobj_unregister); | ||
535 | kfree(kdrv); | ||
536 | } | ||
537 | #else | ||
538 | static inline int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) | ||
539 | { | ||
540 | return 0; | ||
541 | } | ||
542 | |||
543 | static inline void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev) | ||
544 | { | ||
545 | ; | ||
546 | } | ||
547 | #endif | ||
548 | |||
549 | /** | ||
550 | * cpuidle_add_device_sysfs - adds device specific sysfs attributes | ||
551 | * @device: the target device | ||
552 | */ | ||
553 | int cpuidle_add_device_sysfs(struct cpuidle_device *device) | ||
554 | { | ||
555 | int ret; | ||
556 | |||
557 | ret = cpuidle_add_state_sysfs(device); | ||
558 | if (ret) | ||
559 | return ret; | ||
560 | |||
561 | ret = cpuidle_add_driver_sysfs(device); | ||
562 | if (ret) | ||
563 | cpuidle_remove_state_sysfs(device); | ||
564 | return ret; | ||
565 | } | ||
566 | |||
567 | /** | ||
568 | * cpuidle_remove_device_sysfs : removes device specific sysfs attributes | ||
569 | * @device : the target device | ||
570 | */ | ||
571 | void cpuidle_remove_device_sysfs(struct cpuidle_device *device) | ||
572 | { | ||
573 | cpuidle_remove_driver_sysfs(device); | ||
574 | cpuidle_remove_state_sysfs(device); | ||
575 | } | ||
576 | |||
407 | /** | 577 | /** |
408 | * cpuidle_add_sysfs - creates a sysfs instance for the target device | 578 | * cpuidle_add_sysfs - creates a sysfs instance for the target device |
409 | * @dev: the target device | 579 | * @dev: the target device |
410 | */ | 580 | */ |
411 | int cpuidle_add_sysfs(struct device *cpu_dev) | 581 | int cpuidle_add_sysfs(struct cpuidle_device *dev) |
412 | { | 582 | { |
413 | int cpu = cpu_dev->id; | 583 | struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); |
414 | struct cpuidle_device *dev; | ||
415 | int error; | 584 | int error; |
416 | 585 | ||
417 | dev = per_cpu(cpuidle_devices, cpu); | 586 | init_completion(&dev->kobj_unregister); |
587 | |||
418 | error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &cpu_dev->kobj, | 588 | error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &cpu_dev->kobj, |
419 | "cpuidle"); | 589 | "cpuidle"); |
420 | if (!error) | 590 | if (!error) |
@@ -426,11 +596,8 @@ int cpuidle_add_sysfs(struct device *cpu_dev) | |||
426 | * cpuidle_remove_sysfs - deletes a sysfs instance on the target device | 596 | * cpuidle_remove_sysfs - deletes a sysfs instance on the target device |
427 | * @dev: the target device | 597 | * @dev: the target device |
428 | */ | 598 | */ |
429 | void cpuidle_remove_sysfs(struct device *cpu_dev) | 599 | void cpuidle_remove_sysfs(struct cpuidle_device *dev) |
430 | { | 600 | { |
431 | int cpu = cpu_dev->id; | ||
432 | struct cpuidle_device *dev; | ||
433 | |||
434 | dev = per_cpu(cpuidle_devices, cpu); | ||
435 | kobject_put(&dev->kobj); | 601 | kobject_put(&dev->kobj); |
602 | wait_for_completion(&dev->kobj_unregister); | ||
436 | } | 603 | } |
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index f6b0a6e2ea50..0f079be13305 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig | |||
@@ -30,7 +30,7 @@ if PM_DEVFREQ | |||
30 | comment "DEVFREQ Governors" | 30 | comment "DEVFREQ Governors" |
31 | 31 | ||
32 | config DEVFREQ_GOV_SIMPLE_ONDEMAND | 32 | config DEVFREQ_GOV_SIMPLE_ONDEMAND |
33 | bool "Simple Ondemand" | 33 | tristate "Simple Ondemand" |
34 | help | 34 | help |
35 | Chooses frequency based on the recent load on the device. Works | 35 | Chooses frequency based on the recent load on the device. Works |
36 | similar as ONDEMAND governor of CPUFREQ does. A device with | 36 | similar as ONDEMAND governor of CPUFREQ does. A device with |
@@ -39,7 +39,7 @@ config DEVFREQ_GOV_SIMPLE_ONDEMAND | |||
39 | values to the governor with data field at devfreq_add_device(). | 39 | values to the governor with data field at devfreq_add_device(). |
40 | 40 | ||
41 | config DEVFREQ_GOV_PERFORMANCE | 41 | config DEVFREQ_GOV_PERFORMANCE |
42 | bool "Performance" | 42 | tristate "Performance" |
43 | help | 43 | help |
44 | Sets the frequency at the maximum available frequency. | 44 | Sets the frequency at the maximum available frequency. |
45 | This governor always returns UINT_MAX as frequency so that | 45 | This governor always returns UINT_MAX as frequency so that |
@@ -47,7 +47,7 @@ config DEVFREQ_GOV_PERFORMANCE | |||
47 | at any time. | 47 | at any time. |
48 | 48 | ||
49 | config DEVFREQ_GOV_POWERSAVE | 49 | config DEVFREQ_GOV_POWERSAVE |
50 | bool "Powersave" | 50 | tristate "Powersave" |
51 | help | 51 | help |
52 | Sets the frequency at the minimum available frequency. | 52 | Sets the frequency at the minimum available frequency. |
53 | This governor always returns 0 as frequency so that | 53 | This governor always returns 0 as frequency so that |
@@ -55,7 +55,7 @@ config DEVFREQ_GOV_POWERSAVE | |||
55 | at any time. | 55 | at any time. |
56 | 56 | ||
57 | config DEVFREQ_GOV_USERSPACE | 57 | config DEVFREQ_GOV_USERSPACE |
58 | bool "Userspace" | 58 | tristate "Userspace" |
59 | help | 59 | help |
60 | Sets the frequency at the user specified one. | 60 | Sets the frequency at the user specified one. |
61 | This governor returns the user configured frequency if there | 61 | This governor returns the user configured frequency if there |
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index b146d76f04cf..53766f39aadd 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c | |||
@@ -27,21 +27,17 @@ | |||
27 | #include <linux/hrtimer.h> | 27 | #include <linux/hrtimer.h> |
28 | #include "governor.h" | 28 | #include "governor.h" |
29 | 29 | ||
30 | struct class *devfreq_class; | 30 | static struct class *devfreq_class; |
31 | 31 | ||
32 | /* | 32 | /* |
33 | * devfreq_work periodically monitors every registered device. | 33 | * devfreq core provides delayed work based load monitoring helper |
34 | * The minimum polling interval is one jiffy. The polling interval is | 34 | * functions. Governors can use these or can implement their own |
35 | * determined by the minimum polling period among all polling devfreq | 35 | * monitoring mechanism. |
36 | * devices. The resolution of polling interval is one jiffy. | ||
37 | */ | 36 | */ |
38 | static bool polling; | ||
39 | static struct workqueue_struct *devfreq_wq; | 37 | static struct workqueue_struct *devfreq_wq; |
40 | static struct delayed_work devfreq_work; | ||
41 | |||
42 | /* wait removing if this is to be removed */ | ||
43 | static struct devfreq *wait_remove_device; | ||
44 | 38 | ||
39 | /* The list of all device-devfreq governors */ | ||
40 | static LIST_HEAD(devfreq_governor_list); | ||
45 | /* The list of all device-devfreq */ | 41 | /* The list of all device-devfreq */ |
46 | static LIST_HEAD(devfreq_list); | 42 | static LIST_HEAD(devfreq_list); |
47 | static DEFINE_MUTEX(devfreq_list_lock); | 43 | static DEFINE_MUTEX(devfreq_list_lock); |
@@ -73,6 +69,79 @@ static struct devfreq *find_device_devfreq(struct device *dev) | |||
73 | } | 69 | } |
74 | 70 | ||
75 | /** | 71 | /** |
72 | * devfreq_get_freq_level() - Lookup freq_table for the frequency | ||
73 | * @devfreq: the devfreq instance | ||
74 | * @freq: the target frequency | ||
75 | */ | ||
76 | static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq) | ||
77 | { | ||
78 | int lev; | ||
79 | |||
80 | for (lev = 0; lev < devfreq->profile->max_state; lev++) | ||
81 | if (freq == devfreq->profile->freq_table[lev]) | ||
82 | return lev; | ||
83 | |||
84 | return -EINVAL; | ||
85 | } | ||
86 | |||
87 | /** | ||
88 | * devfreq_update_status() - Update statistics of devfreq behavior | ||
89 | * @devfreq: the devfreq instance | ||
90 | * @freq: the update target frequency | ||
91 | */ | ||
92 | static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) | ||
93 | { | ||
94 | int lev, prev_lev; | ||
95 | unsigned long cur_time; | ||
96 | |||
97 | lev = devfreq_get_freq_level(devfreq, freq); | ||
98 | if (lev < 0) | ||
99 | return lev; | ||
100 | |||
101 | cur_time = jiffies; | ||
102 | devfreq->time_in_state[lev] += | ||
103 | cur_time - devfreq->last_stat_updated; | ||
104 | if (freq != devfreq->previous_freq) { | ||
105 | prev_lev = devfreq_get_freq_level(devfreq, | ||
106 | devfreq->previous_freq); | ||
107 | devfreq->trans_table[(prev_lev * | ||
108 | devfreq->profile->max_state) + lev]++; | ||
109 | devfreq->total_trans++; | ||
110 | } | ||
111 | devfreq->last_stat_updated = cur_time; | ||
112 | |||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | /** | ||
117 | * find_devfreq_governor() - find devfreq governor from name | ||
118 | * @name: name of the governor | ||
119 | * | ||
120 | * Search the list of devfreq governors and return the matched | ||
121 | * governor's pointer. devfreq_list_lock should be held by the caller. | ||
122 | */ | ||
123 | static struct devfreq_governor *find_devfreq_governor(const char *name) | ||
124 | { | ||
125 | struct devfreq_governor *tmp_governor; | ||
126 | |||
127 | if (unlikely(IS_ERR_OR_NULL(name))) { | ||
128 | pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); | ||
129 | return ERR_PTR(-EINVAL); | ||
130 | } | ||
131 | WARN(!mutex_is_locked(&devfreq_list_lock), | ||
132 | "devfreq_list_lock must be locked."); | ||
133 | |||
134 | list_for_each_entry(tmp_governor, &devfreq_governor_list, node) { | ||
135 | if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN)) | ||
136 | return tmp_governor; | ||
137 | } | ||
138 | |||
139 | return ERR_PTR(-ENODEV); | ||
140 | } | ||
141 | |||
142 | /* Load monitoring helper functions for governors use */ | ||
143 | |||
144 | /** | ||
76 | * update_devfreq() - Reevaluate the device and configure frequency. | 145 | * update_devfreq() - Reevaluate the device and configure frequency. |
77 | * @devfreq: the devfreq instance. | 146 | * @devfreq: the devfreq instance. |
78 | * | 147 | * |
@@ -90,6 +159,9 @@ int update_devfreq(struct devfreq *devfreq) | |||
90 | return -EINVAL; | 159 | return -EINVAL; |
91 | } | 160 | } |
92 | 161 | ||
162 | if (!devfreq->governor) | ||
163 | return -EINVAL; | ||
164 | |||
93 | /* Reevaluate the proper frequency */ | 165 | /* Reevaluate the proper frequency */ |
94 | err = devfreq->governor->get_target_freq(devfreq, &freq); | 166 | err = devfreq->governor->get_target_freq(devfreq, &freq); |
95 | if (err) | 167 | if (err) |
@@ -116,16 +188,173 @@ int update_devfreq(struct devfreq *devfreq) | |||
116 | if (err) | 188 | if (err) |
117 | return err; | 189 | return err; |
118 | 190 | ||
191 | if (devfreq->profile->freq_table) | ||
192 | if (devfreq_update_status(devfreq, freq)) | ||
193 | dev_err(&devfreq->dev, | ||
194 | "Couldn't update frequency transition information.\n"); | ||
195 | |||
119 | devfreq->previous_freq = freq; | 196 | devfreq->previous_freq = freq; |
120 | return err; | 197 | return err; |
121 | } | 198 | } |
199 | EXPORT_SYMBOL(update_devfreq); | ||
200 | |||
201 | /** | ||
202 | * devfreq_monitor() - Periodically poll devfreq objects. | ||
203 | * @work: the work struct used to run devfreq_monitor periodically. | ||
204 | * | ||
205 | */ | ||
206 | static void devfreq_monitor(struct work_struct *work) | ||
207 | { | ||
208 | int err; | ||
209 | struct devfreq *devfreq = container_of(work, | ||
210 | struct devfreq, work.work); | ||
211 | |||
212 | mutex_lock(&devfreq->lock); | ||
213 | err = update_devfreq(devfreq); | ||
214 | if (err) | ||
215 | dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err); | ||
216 | |||
217 | queue_delayed_work(devfreq_wq, &devfreq->work, | ||
218 | msecs_to_jiffies(devfreq->profile->polling_ms)); | ||
219 | mutex_unlock(&devfreq->lock); | ||
220 | } | ||
221 | |||
222 | /** | ||
223 | * devfreq_monitor_start() - Start load monitoring of devfreq instance | ||
224 | * @devfreq: the devfreq instance. | ||
225 | * | ||
226 | * Helper function for starting devfreq device load monitoing. By | ||
227 | * default delayed work based monitoring is supported. Function | ||
228 | * to be called from governor in response to DEVFREQ_GOV_START | ||
229 | * event when device is added to devfreq framework. | ||
230 | */ | ||
231 | void devfreq_monitor_start(struct devfreq *devfreq) | ||
232 | { | ||
233 | INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor); | ||
234 | if (devfreq->profile->polling_ms) | ||
235 | queue_delayed_work(devfreq_wq, &devfreq->work, | ||
236 | msecs_to_jiffies(devfreq->profile->polling_ms)); | ||
237 | } | ||
238 | EXPORT_SYMBOL(devfreq_monitor_start); | ||
239 | |||
240 | /** | ||
241 | * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance | ||
242 | * @devfreq: the devfreq instance. | ||
243 | * | ||
244 | * Helper function to stop devfreq device load monitoing. Function | ||
245 | * to be called from governor in response to DEVFREQ_GOV_STOP | ||
246 | * event when device is removed from devfreq framework. | ||
247 | */ | ||
248 | void devfreq_monitor_stop(struct devfreq *devfreq) | ||
249 | { | ||
250 | cancel_delayed_work_sync(&devfreq->work); | ||
251 | } | ||
252 | EXPORT_SYMBOL(devfreq_monitor_stop); | ||
253 | |||
254 | /** | ||
255 | * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance | ||
256 | * @devfreq: the devfreq instance. | ||
257 | * | ||
258 | * Helper function to suspend devfreq device load monitoing. Function | ||
259 | * to be called from governor in response to DEVFREQ_GOV_SUSPEND | ||
260 | * event or when polling interval is set to zero. | ||
261 | * | ||
262 | * Note: Though this function is same as devfreq_monitor_stop(), | ||
263 | * intentionally kept separate to provide hooks for collecting | ||
264 | * transition statistics. | ||
265 | */ | ||
266 | void devfreq_monitor_suspend(struct devfreq *devfreq) | ||
267 | { | ||
268 | mutex_lock(&devfreq->lock); | ||
269 | if (devfreq->stop_polling) { | ||
270 | mutex_unlock(&devfreq->lock); | ||
271 | return; | ||
272 | } | ||
273 | |||
274 | devfreq->stop_polling = true; | ||
275 | mutex_unlock(&devfreq->lock); | ||
276 | cancel_delayed_work_sync(&devfreq->work); | ||
277 | } | ||
278 | EXPORT_SYMBOL(devfreq_monitor_suspend); | ||
279 | |||
280 | /** | ||
281 | * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance | ||
282 | * @devfreq: the devfreq instance. | ||
283 | * | ||
284 | * Helper function to resume devfreq device load monitoing. Function | ||
285 | * to be called from governor in response to DEVFREQ_GOV_RESUME | ||
286 | * event or when polling interval is set to non-zero. | ||
287 | */ | ||
288 | void devfreq_monitor_resume(struct devfreq *devfreq) | ||
289 | { | ||
290 | mutex_lock(&devfreq->lock); | ||
291 | if (!devfreq->stop_polling) | ||
292 | goto out; | ||
293 | |||
294 | if (!delayed_work_pending(&devfreq->work) && | ||
295 | devfreq->profile->polling_ms) | ||
296 | queue_delayed_work(devfreq_wq, &devfreq->work, | ||
297 | msecs_to_jiffies(devfreq->profile->polling_ms)); | ||
298 | devfreq->stop_polling = false; | ||
299 | |||
300 | out: | ||
301 | mutex_unlock(&devfreq->lock); | ||
302 | } | ||
303 | EXPORT_SYMBOL(devfreq_monitor_resume); | ||
304 | |||
305 | /** | ||
306 | * devfreq_interval_update() - Update device devfreq monitoring interval | ||
307 | * @devfreq: the devfreq instance. | ||
308 | * @delay: new polling interval to be set. | ||
309 | * | ||
310 | * Helper function to set new load monitoring polling interval. Function | ||
311 | * to be called from governor in response to DEVFREQ_GOV_INTERVAL event. | ||
312 | */ | ||
313 | void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay) | ||
314 | { | ||
315 | unsigned int cur_delay = devfreq->profile->polling_ms; | ||
316 | unsigned int new_delay = *delay; | ||
317 | |||
318 | mutex_lock(&devfreq->lock); | ||
319 | devfreq->profile->polling_ms = new_delay; | ||
320 | |||
321 | if (devfreq->stop_polling) | ||
322 | goto out; | ||
323 | |||
324 | /* if new delay is zero, stop polling */ | ||
325 | if (!new_delay) { | ||
326 | mutex_unlock(&devfreq->lock); | ||
327 | cancel_delayed_work_sync(&devfreq->work); | ||
328 | return; | ||
329 | } | ||
330 | |||
331 | /* if current delay is zero, start polling with new delay */ | ||
332 | if (!cur_delay) { | ||
333 | queue_delayed_work(devfreq_wq, &devfreq->work, | ||
334 | msecs_to_jiffies(devfreq->profile->polling_ms)); | ||
335 | goto out; | ||
336 | } | ||
337 | |||
338 | /* if current delay is greater than new delay, restart polling */ | ||
339 | if (cur_delay > new_delay) { | ||
340 | mutex_unlock(&devfreq->lock); | ||
341 | cancel_delayed_work_sync(&devfreq->work); | ||
342 | mutex_lock(&devfreq->lock); | ||
343 | if (!devfreq->stop_polling) | ||
344 | queue_delayed_work(devfreq_wq, &devfreq->work, | ||
345 | msecs_to_jiffies(devfreq->profile->polling_ms)); | ||
346 | } | ||
347 | out: | ||
348 | mutex_unlock(&devfreq->lock); | ||
349 | } | ||
350 | EXPORT_SYMBOL(devfreq_interval_update); | ||
122 | 351 | ||
123 | /** | 352 | /** |
124 | * devfreq_notifier_call() - Notify that the device frequency requirements | 353 | * devfreq_notifier_call() - Notify that the device frequency requirements |
125 | * has been changed out of devfreq framework. | 354 | * has been changed out of devfreq framework. |
126 | * @nb the notifier_block (supposed to be devfreq->nb) | 355 | * @nb: the notifier_block (supposed to be devfreq->nb) |
127 | * @type not used | 356 | * @type: not used |
128 | * @devp not used | 357 | * @devp: not used |
129 | * | 358 | * |
130 | * Called by a notifier that uses devfreq->nb. | 359 | * Called by a notifier that uses devfreq->nb. |
131 | */ | 360 | */ |
@@ -143,59 +372,34 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type, | |||
143 | } | 372 | } |
144 | 373 | ||
145 | /** | 374 | /** |
146 | * _remove_devfreq() - Remove devfreq from the device. | 375 | * _remove_devfreq() - Remove devfreq from the list and release its resources. |
147 | * @devfreq: the devfreq struct | 376 | * @devfreq: the devfreq struct |
148 | * @skip: skip calling device_unregister(). | 377 | * @skip: skip calling device_unregister(). |
149 | * | ||
150 | * Note that the caller should lock devfreq->lock before calling | ||
151 | * this. _remove_devfreq() will unlock it and free devfreq | ||
152 | * internally. devfreq_list_lock should be locked by the caller | ||
153 | * as well (not relased at return) | ||
154 | * | ||
155 | * Lock usage: | ||
156 | * devfreq->lock: locked before call. | ||
157 | * unlocked at return (and freed) | ||
158 | * devfreq_list_lock: locked before call. | ||
159 | * kept locked at return. | ||
160 | * if devfreq is centrally polled. | ||
161 | * | ||
162 | * Freed memory: | ||
163 | * devfreq | ||
164 | */ | 378 | */ |
165 | static void _remove_devfreq(struct devfreq *devfreq, bool skip) | 379 | static void _remove_devfreq(struct devfreq *devfreq, bool skip) |
166 | { | 380 | { |
167 | if (!mutex_is_locked(&devfreq->lock)) { | 381 | mutex_lock(&devfreq_list_lock); |
168 | WARN(true, "devfreq->lock must be locked by the caller.\n"); | 382 | if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) { |
169 | return; | 383 | mutex_unlock(&devfreq_list_lock); |
170 | } | 384 | dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n"); |
171 | if (!devfreq->governor->no_central_polling && | ||
172 | !mutex_is_locked(&devfreq_list_lock)) { | ||
173 | WARN(true, "devfreq_list_lock must be locked by the caller.\n"); | ||
174 | return; | 385 | return; |
175 | } | 386 | } |
387 | list_del(&devfreq->node); | ||
388 | mutex_unlock(&devfreq_list_lock); | ||
176 | 389 | ||
177 | if (devfreq->being_removed) | 390 | if (devfreq->governor) |
178 | return; | 391 | devfreq->governor->event_handler(devfreq, |
179 | 392 | DEVFREQ_GOV_STOP, NULL); | |
180 | devfreq->being_removed = true; | ||
181 | 393 | ||
182 | if (devfreq->profile->exit) | 394 | if (devfreq->profile->exit) |
183 | devfreq->profile->exit(devfreq->dev.parent); | 395 | devfreq->profile->exit(devfreq->dev.parent); |
184 | 396 | ||
185 | if (devfreq->governor->exit) | ||
186 | devfreq->governor->exit(devfreq); | ||
187 | |||
188 | if (!skip && get_device(&devfreq->dev)) { | 397 | if (!skip && get_device(&devfreq->dev)) { |
189 | device_unregister(&devfreq->dev); | 398 | device_unregister(&devfreq->dev); |
190 | put_device(&devfreq->dev); | 399 | put_device(&devfreq->dev); |
191 | } | 400 | } |
192 | 401 | ||
193 | if (!devfreq->governor->no_central_polling) | ||
194 | list_del(&devfreq->node); | ||
195 | |||
196 | mutex_unlock(&devfreq->lock); | ||
197 | mutex_destroy(&devfreq->lock); | 402 | mutex_destroy(&devfreq->lock); |
198 | |||
199 | kfree(devfreq); | 403 | kfree(devfreq); |
200 | } | 404 | } |
201 | 405 | ||
@@ -210,163 +414,39 @@ static void _remove_devfreq(struct devfreq *devfreq, bool skip) | |||
210 | static void devfreq_dev_release(struct device *dev) | 414 | static void devfreq_dev_release(struct device *dev) |
211 | { | 415 | { |
212 | struct devfreq *devfreq = to_devfreq(dev); | 416 | struct devfreq *devfreq = to_devfreq(dev); |
213 | bool central_polling = !devfreq->governor->no_central_polling; | ||
214 | |||
215 | /* | ||
216 | * If devfreq_dev_release() was called by device_unregister() of | ||
217 | * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and | ||
218 | * being_removed is already set. This also partially checks the case | ||
219 | * where devfreq_dev_release() is called from a thread other than | ||
220 | * the one called _remove_devfreq(); however, this case is | ||
221 | * dealt completely with another following being_removed check. | ||
222 | * | ||
223 | * Because being_removed is never being | ||
224 | * unset, we do not need to worry about race conditions on | ||
225 | * being_removed. | ||
226 | */ | ||
227 | if (devfreq->being_removed) | ||
228 | return; | ||
229 | |||
230 | if (central_polling) | ||
231 | mutex_lock(&devfreq_list_lock); | ||
232 | |||
233 | mutex_lock(&devfreq->lock); | ||
234 | 417 | ||
235 | /* | ||
236 | * Check being_removed flag again for the case where | ||
237 | * devfreq_dev_release() was called in a thread other than the one | ||
238 | * possibly called _remove_devfreq(). | ||
239 | */ | ||
240 | if (devfreq->being_removed) { | ||
241 | mutex_unlock(&devfreq->lock); | ||
242 | goto out; | ||
243 | } | ||
244 | |||
245 | /* devfreq->lock is unlocked and removed in _removed_devfreq() */ | ||
246 | _remove_devfreq(devfreq, true); | 418 | _remove_devfreq(devfreq, true); |
247 | |||
248 | out: | ||
249 | if (central_polling) | ||
250 | mutex_unlock(&devfreq_list_lock); | ||
251 | } | ||
252 | |||
253 | /** | ||
254 | * devfreq_monitor() - Periodically poll devfreq objects. | ||
255 | * @work: the work struct used to run devfreq_monitor periodically. | ||
256 | * | ||
257 | */ | ||
258 | static void devfreq_monitor(struct work_struct *work) | ||
259 | { | ||
260 | static unsigned long last_polled_at; | ||
261 | struct devfreq *devfreq, *tmp; | ||
262 | int error; | ||
263 | unsigned long jiffies_passed; | ||
264 | unsigned long next_jiffies = ULONG_MAX, now = jiffies; | ||
265 | struct device *dev; | ||
266 | |||
267 | /* Initially last_polled_at = 0, polling every device at bootup */ | ||
268 | jiffies_passed = now - last_polled_at; | ||
269 | last_polled_at = now; | ||
270 | if (jiffies_passed == 0) | ||
271 | jiffies_passed = 1; | ||
272 | |||
273 | mutex_lock(&devfreq_list_lock); | ||
274 | list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) { | ||
275 | mutex_lock(&devfreq->lock); | ||
276 | dev = devfreq->dev.parent; | ||
277 | |||
278 | /* Do not remove tmp for a while */ | ||
279 | wait_remove_device = tmp; | ||
280 | |||
281 | if (devfreq->governor->no_central_polling || | ||
282 | devfreq->next_polling == 0) { | ||
283 | mutex_unlock(&devfreq->lock); | ||
284 | continue; | ||
285 | } | ||
286 | mutex_unlock(&devfreq_list_lock); | ||
287 | |||
288 | /* | ||
289 | * Reduce more next_polling if devfreq_wq took an extra | ||
290 | * delay. (i.e., CPU has been idled.) | ||
291 | */ | ||
292 | if (devfreq->next_polling <= jiffies_passed) { | ||
293 | error = update_devfreq(devfreq); | ||
294 | |||
295 | /* Remove a devfreq with an error. */ | ||
296 | if (error && error != -EAGAIN) { | ||
297 | |||
298 | dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n", | ||
299 | error, devfreq->governor->name); | ||
300 | |||
301 | /* | ||
302 | * Unlock devfreq before locking the list | ||
303 | * in order to avoid deadlock with | ||
304 | * find_device_devfreq or others | ||
305 | */ | ||
306 | mutex_unlock(&devfreq->lock); | ||
307 | mutex_lock(&devfreq_list_lock); | ||
308 | /* Check if devfreq is already removed */ | ||
309 | if (IS_ERR(find_device_devfreq(dev))) | ||
310 | continue; | ||
311 | mutex_lock(&devfreq->lock); | ||
312 | /* This unlocks devfreq->lock and free it */ | ||
313 | _remove_devfreq(devfreq, false); | ||
314 | continue; | ||
315 | } | ||
316 | devfreq->next_polling = devfreq->polling_jiffies; | ||
317 | } else { | ||
318 | devfreq->next_polling -= jiffies_passed; | ||
319 | } | ||
320 | |||
321 | if (devfreq->next_polling) | ||
322 | next_jiffies = (next_jiffies > devfreq->next_polling) ? | ||
323 | devfreq->next_polling : next_jiffies; | ||
324 | |||
325 | mutex_unlock(&devfreq->lock); | ||
326 | mutex_lock(&devfreq_list_lock); | ||
327 | } | ||
328 | wait_remove_device = NULL; | ||
329 | mutex_unlock(&devfreq_list_lock); | ||
330 | |||
331 | if (next_jiffies > 0 && next_jiffies < ULONG_MAX) { | ||
332 | polling = true; | ||
333 | queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies); | ||
334 | } else { | ||
335 | polling = false; | ||
336 | } | ||
337 | } | 419 | } |
338 | 420 | ||
339 | /** | 421 | /** |
340 | * devfreq_add_device() - Add devfreq feature to the device | 422 | * devfreq_add_device() - Add devfreq feature to the device |
341 | * @dev: the device to add devfreq feature. | 423 | * @dev: the device to add devfreq feature. |
342 | * @profile: device-specific profile to run devfreq. | 424 | * @profile: device-specific profile to run devfreq. |
343 | * @governor: the policy to choose frequency. | 425 | * @governor_name: name of the policy to choose frequency. |
344 | * @data: private data for the governor. The devfreq framework does not | 426 | * @data: private data for the governor. The devfreq framework does not |
345 | * touch this value. | 427 | * touch this value. |
346 | */ | 428 | */ |
347 | struct devfreq *devfreq_add_device(struct device *dev, | 429 | struct devfreq *devfreq_add_device(struct device *dev, |
348 | struct devfreq_dev_profile *profile, | 430 | struct devfreq_dev_profile *profile, |
349 | const struct devfreq_governor *governor, | 431 | const char *governor_name, |
350 | void *data) | 432 | void *data) |
351 | { | 433 | { |
352 | struct devfreq *devfreq; | 434 | struct devfreq *devfreq; |
435 | struct devfreq_governor *governor; | ||
353 | int err = 0; | 436 | int err = 0; |
354 | 437 | ||
355 | if (!dev || !profile || !governor) { | 438 | if (!dev || !profile || !governor_name) { |
356 | dev_err(dev, "%s: Invalid parameters.\n", __func__); | 439 | dev_err(dev, "%s: Invalid parameters.\n", __func__); |
357 | return ERR_PTR(-EINVAL); | 440 | return ERR_PTR(-EINVAL); |
358 | } | 441 | } |
359 | 442 | ||
360 | 443 | mutex_lock(&devfreq_list_lock); | |
361 | if (!governor->no_central_polling) { | 444 | devfreq = find_device_devfreq(dev); |
362 | mutex_lock(&devfreq_list_lock); | 445 | mutex_unlock(&devfreq_list_lock); |
363 | devfreq = find_device_devfreq(dev); | 446 | if (!IS_ERR(devfreq)) { |
364 | mutex_unlock(&devfreq_list_lock); | 447 | dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__); |
365 | if (!IS_ERR(devfreq)) { | 448 | err = -EINVAL; |
366 | dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__); | 449 | goto err_out; |
367 | err = -EINVAL; | ||
368 | goto err_out; | ||
369 | } | ||
370 | } | 450 | } |
371 | 451 | ||
372 | devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL); | 452 | devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL); |
@@ -383,92 +463,316 @@ struct devfreq *devfreq_add_device(struct device *dev, | |||
383 | devfreq->dev.class = devfreq_class; | 463 | devfreq->dev.class = devfreq_class; |
384 | devfreq->dev.release = devfreq_dev_release; | 464 | devfreq->dev.release = devfreq_dev_release; |
385 | devfreq->profile = profile; | 465 | devfreq->profile = profile; |
386 | devfreq->governor = governor; | 466 | strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN); |
387 | devfreq->previous_freq = profile->initial_freq; | 467 | devfreq->previous_freq = profile->initial_freq; |
388 | devfreq->data = data; | 468 | devfreq->data = data; |
389 | devfreq->next_polling = devfreq->polling_jiffies | ||
390 | = msecs_to_jiffies(devfreq->profile->polling_ms); | ||
391 | devfreq->nb.notifier_call = devfreq_notifier_call; | 469 | devfreq->nb.notifier_call = devfreq_notifier_call; |
392 | 470 | ||
471 | devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) * | ||
472 | devfreq->profile->max_state * | ||
473 | devfreq->profile->max_state, | ||
474 | GFP_KERNEL); | ||
475 | devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) * | ||
476 | devfreq->profile->max_state, | ||
477 | GFP_KERNEL); | ||
478 | devfreq->last_stat_updated = jiffies; | ||
479 | |||
393 | dev_set_name(&devfreq->dev, dev_name(dev)); | 480 | dev_set_name(&devfreq->dev, dev_name(dev)); |
394 | err = device_register(&devfreq->dev); | 481 | err = device_register(&devfreq->dev); |
395 | if (err) { | 482 | if (err) { |
396 | put_device(&devfreq->dev); | 483 | put_device(&devfreq->dev); |
484 | mutex_unlock(&devfreq->lock); | ||
397 | goto err_dev; | 485 | goto err_dev; |
398 | } | 486 | } |
399 | 487 | ||
400 | if (governor->init) | ||
401 | err = governor->init(devfreq); | ||
402 | if (err) | ||
403 | goto err_init; | ||
404 | |||
405 | mutex_unlock(&devfreq->lock); | 488 | mutex_unlock(&devfreq->lock); |
406 | 489 | ||
407 | if (governor->no_central_polling) | ||
408 | goto out; | ||
409 | |||
410 | mutex_lock(&devfreq_list_lock); | 490 | mutex_lock(&devfreq_list_lock); |
411 | |||
412 | list_add(&devfreq->node, &devfreq_list); | 491 | list_add(&devfreq->node, &devfreq_list); |
413 | 492 | ||
414 | if (devfreq_wq && devfreq->next_polling && !polling) { | 493 | governor = find_devfreq_governor(devfreq->governor_name); |
415 | polling = true; | 494 | if (!IS_ERR(governor)) |
416 | queue_delayed_work(devfreq_wq, &devfreq_work, | 495 | devfreq->governor = governor; |
417 | devfreq->next_polling); | 496 | if (devfreq->governor) |
418 | } | 497 | err = devfreq->governor->event_handler(devfreq, |
498 | DEVFREQ_GOV_START, NULL); | ||
419 | mutex_unlock(&devfreq_list_lock); | 499 | mutex_unlock(&devfreq_list_lock); |
420 | out: | 500 | if (err) { |
501 | dev_err(dev, "%s: Unable to start governor for the device\n", | ||
502 | __func__); | ||
503 | goto err_init; | ||
504 | } | ||
505 | |||
421 | return devfreq; | 506 | return devfreq; |
422 | 507 | ||
423 | err_init: | 508 | err_init: |
509 | list_del(&devfreq->node); | ||
424 | device_unregister(&devfreq->dev); | 510 | device_unregister(&devfreq->dev); |
425 | err_dev: | 511 | err_dev: |
426 | mutex_unlock(&devfreq->lock); | ||
427 | kfree(devfreq); | 512 | kfree(devfreq); |
428 | err_out: | 513 | err_out: |
429 | return ERR_PTR(err); | 514 | return ERR_PTR(err); |
430 | } | 515 | } |
516 | EXPORT_SYMBOL(devfreq_add_device); | ||
431 | 517 | ||
432 | /** | 518 | /** |
433 | * devfreq_remove_device() - Remove devfreq feature from a device. | 519 | * devfreq_remove_device() - Remove devfreq feature from a device. |
434 | * @devfreq the devfreq instance to be removed | 520 | * @devfreq: the devfreq instance to be removed |
435 | */ | 521 | */ |
436 | int devfreq_remove_device(struct devfreq *devfreq) | 522 | int devfreq_remove_device(struct devfreq *devfreq) |
437 | { | 523 | { |
438 | bool central_polling; | 524 | if (!devfreq) |
525 | return -EINVAL; | ||
526 | |||
527 | _remove_devfreq(devfreq, false); | ||
439 | 528 | ||
529 | return 0; | ||
530 | } | ||
531 | EXPORT_SYMBOL(devfreq_remove_device); | ||
532 | |||
533 | /** | ||
534 | * devfreq_suspend_device() - Suspend devfreq of a device. | ||
535 | * @devfreq: the devfreq instance to be suspended | ||
536 | */ | ||
537 | int devfreq_suspend_device(struct devfreq *devfreq) | ||
538 | { | ||
440 | if (!devfreq) | 539 | if (!devfreq) |
441 | return -EINVAL; | 540 | return -EINVAL; |
442 | 541 | ||
443 | central_polling = !devfreq->governor->no_central_polling; | 542 | if (!devfreq->governor) |
543 | return 0; | ||
544 | |||
545 | return devfreq->governor->event_handler(devfreq, | ||
546 | DEVFREQ_GOV_SUSPEND, NULL); | ||
547 | } | ||
548 | EXPORT_SYMBOL(devfreq_suspend_device); | ||
549 | |||
550 | /** | ||
551 | * devfreq_resume_device() - Resume devfreq of a device. | ||
552 | * @devfreq: the devfreq instance to be resumed | ||
553 | */ | ||
554 | int devfreq_resume_device(struct devfreq *devfreq) | ||
555 | { | ||
556 | if (!devfreq) | ||
557 | return -EINVAL; | ||
558 | |||
559 | if (!devfreq->governor) | ||
560 | return 0; | ||
561 | |||
562 | return devfreq->governor->event_handler(devfreq, | ||
563 | DEVFREQ_GOV_RESUME, NULL); | ||
564 | } | ||
565 | EXPORT_SYMBOL(devfreq_resume_device); | ||
566 | |||
567 | /** | ||
568 | * devfreq_add_governor() - Add devfreq governor | ||
569 | * @governor: the devfreq governor to be added | ||
570 | */ | ||
571 | int devfreq_add_governor(struct devfreq_governor *governor) | ||
572 | { | ||
573 | struct devfreq_governor *g; | ||
574 | struct devfreq *devfreq; | ||
575 | int err = 0; | ||
576 | |||
577 | if (!governor) { | ||
578 | pr_err("%s: Invalid parameters.\n", __func__); | ||
579 | return -EINVAL; | ||
580 | } | ||
581 | |||
582 | mutex_lock(&devfreq_list_lock); | ||
583 | g = find_devfreq_governor(governor->name); | ||
584 | if (!IS_ERR(g)) { | ||
585 | pr_err("%s: governor %s already registered\n", __func__, | ||
586 | g->name); | ||
587 | err = -EINVAL; | ||
588 | goto err_out; | ||
589 | } | ||
444 | 590 | ||
445 | if (central_polling) { | 591 | list_add(&governor->node, &devfreq_governor_list); |
446 | mutex_lock(&devfreq_list_lock); | 592 | |
447 | while (wait_remove_device == devfreq) { | 593 | list_for_each_entry(devfreq, &devfreq_list, node) { |
448 | mutex_unlock(&devfreq_list_lock); | 594 | int ret = 0; |
449 | schedule(); | 595 | struct device *dev = devfreq->dev.parent; |
450 | mutex_lock(&devfreq_list_lock); | 596 | |
597 | if (!strncmp(devfreq->governor_name, governor->name, | ||
598 | DEVFREQ_NAME_LEN)) { | ||
599 | /* The following should never occur */ | ||
600 | if (devfreq->governor) { | ||
601 | dev_warn(dev, | ||
602 | "%s: Governor %s already present\n", | ||
603 | __func__, devfreq->governor->name); | ||
604 | ret = devfreq->governor->event_handler(devfreq, | ||
605 | DEVFREQ_GOV_STOP, NULL); | ||
606 | if (ret) { | ||
607 | dev_warn(dev, | ||
608 | "%s: Governor %s stop = %d\n", | ||
609 | __func__, | ||
610 | devfreq->governor->name, ret); | ||
611 | } | ||
612 | /* Fall through */ | ||
613 | } | ||
614 | devfreq->governor = governor; | ||
615 | ret = devfreq->governor->event_handler(devfreq, | ||
616 | DEVFREQ_GOV_START, NULL); | ||
617 | if (ret) { | ||
618 | dev_warn(dev, "%s: Governor %s start=%d\n", | ||
619 | __func__, devfreq->governor->name, | ||
620 | ret); | ||
621 | } | ||
451 | } | 622 | } |
452 | } | 623 | } |
453 | 624 | ||
454 | mutex_lock(&devfreq->lock); | 625 | err_out: |
455 | _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */ | 626 | mutex_unlock(&devfreq_list_lock); |
456 | 627 | ||
457 | if (central_polling) | 628 | return err; |
458 | mutex_unlock(&devfreq_list_lock); | 629 | } |
630 | EXPORT_SYMBOL(devfreq_add_governor); | ||
459 | 631 | ||
460 | return 0; | 632 | /** |
633 | * devfreq_remove_device() - Remove devfreq feature from a device. | ||
634 | * @governor: the devfreq governor to be removed | ||
635 | */ | ||
636 | int devfreq_remove_governor(struct devfreq_governor *governor) | ||
637 | { | ||
638 | struct devfreq_governor *g; | ||
639 | struct devfreq *devfreq; | ||
640 | int err = 0; | ||
641 | |||
642 | if (!governor) { | ||
643 | pr_err("%s: Invalid parameters.\n", __func__); | ||
644 | return -EINVAL; | ||
645 | } | ||
646 | |||
647 | mutex_lock(&devfreq_list_lock); | ||
648 | g = find_devfreq_governor(governor->name); | ||
649 | if (IS_ERR(g)) { | ||
650 | pr_err("%s: governor %s not registered\n", __func__, | ||
651 | governor->name); | ||
652 | err = PTR_ERR(g); | ||
653 | goto err_out; | ||
654 | } | ||
655 | list_for_each_entry(devfreq, &devfreq_list, node) { | ||
656 | int ret; | ||
657 | struct device *dev = devfreq->dev.parent; | ||
658 | |||
659 | if (!strncmp(devfreq->governor_name, governor->name, | ||
660 | DEVFREQ_NAME_LEN)) { | ||
661 | /* we should have a devfreq governor! */ | ||
662 | if (!devfreq->governor) { | ||
663 | dev_warn(dev, "%s: Governor %s NOT present\n", | ||
664 | __func__, governor->name); | ||
665 | continue; | ||
666 | /* Fall through */ | ||
667 | } | ||
668 | ret = devfreq->governor->event_handler(devfreq, | ||
669 | DEVFREQ_GOV_STOP, NULL); | ||
670 | if (ret) { | ||
671 | dev_warn(dev, "%s: Governor %s stop=%d\n", | ||
672 | __func__, devfreq->governor->name, | ||
673 | ret); | ||
674 | } | ||
675 | devfreq->governor = NULL; | ||
676 | } | ||
677 | } | ||
678 | |||
679 | list_del(&governor->node); | ||
680 | err_out: | ||
681 | mutex_unlock(&devfreq_list_lock); | ||
682 | |||
683 | return err; | ||
461 | } | 684 | } |
685 | EXPORT_SYMBOL(devfreq_remove_governor); | ||
462 | 686 | ||
463 | static ssize_t show_governor(struct device *dev, | 687 | static ssize_t show_governor(struct device *dev, |
464 | struct device_attribute *attr, char *buf) | 688 | struct device_attribute *attr, char *buf) |
465 | { | 689 | { |
690 | if (!to_devfreq(dev)->governor) | ||
691 | return -EINVAL; | ||
692 | |||
466 | return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name); | 693 | return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name); |
467 | } | 694 | } |
468 | 695 | ||
696 | static ssize_t store_governor(struct device *dev, struct device_attribute *attr, | ||
697 | const char *buf, size_t count) | ||
698 | { | ||
699 | struct devfreq *df = to_devfreq(dev); | ||
700 | int ret; | ||
701 | char str_governor[DEVFREQ_NAME_LEN + 1]; | ||
702 | struct devfreq_governor *governor; | ||
703 | |||
704 | ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor); | ||
705 | if (ret != 1) | ||
706 | return -EINVAL; | ||
707 | |||
708 | mutex_lock(&devfreq_list_lock); | ||
709 | governor = find_devfreq_governor(str_governor); | ||
710 | if (IS_ERR(governor)) { | ||
711 | ret = PTR_ERR(governor); | ||
712 | goto out; | ||
713 | } | ||
714 | if (df->governor == governor) | ||
715 | goto out; | ||
716 | |||
717 | if (df->governor) { | ||
718 | ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL); | ||
719 | if (ret) { | ||
720 | dev_warn(dev, "%s: Governor %s not stopped(%d)\n", | ||
721 | __func__, df->governor->name, ret); | ||
722 | goto out; | ||
723 | } | ||
724 | } | ||
725 | df->governor = governor; | ||
726 | strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN); | ||
727 | ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL); | ||
728 | if (ret) | ||
729 | dev_warn(dev, "%s: Governor %s not started(%d)\n", | ||
730 | __func__, df->governor->name, ret); | ||
731 | out: | ||
732 | mutex_unlock(&devfreq_list_lock); | ||
733 | |||
734 | if (!ret) | ||
735 | ret = count; | ||
736 | return ret; | ||
737 | } | ||
738 | static ssize_t show_available_governors(struct device *d, | ||
739 | struct device_attribute *attr, | ||
740 | char *buf) | ||
741 | { | ||
742 | struct devfreq_governor *tmp_governor; | ||
743 | ssize_t count = 0; | ||
744 | |||
745 | mutex_lock(&devfreq_list_lock); | ||
746 | list_for_each_entry(tmp_governor, &devfreq_governor_list, node) | ||
747 | count += scnprintf(&buf[count], (PAGE_SIZE - count - 2), | ||
748 | "%s ", tmp_governor->name); | ||
749 | mutex_unlock(&devfreq_list_lock); | ||
750 | |||
751 | /* Truncate the trailing space */ | ||
752 | if (count) | ||
753 | count--; | ||
754 | |||
755 | count += sprintf(&buf[count], "\n"); | ||
756 | |||
757 | return count; | ||
758 | } | ||
759 | |||
469 | static ssize_t show_freq(struct device *dev, | 760 | static ssize_t show_freq(struct device *dev, |
470 | struct device_attribute *attr, char *buf) | 761 | struct device_attribute *attr, char *buf) |
471 | { | 762 | { |
763 | unsigned long freq; | ||
764 | struct devfreq *devfreq = to_devfreq(dev); | ||
765 | |||
766 | if (devfreq->profile->get_cur_freq && | ||
767 | !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq)) | ||
768 | return sprintf(buf, "%lu\n", freq); | ||
769 | |||
770 | return sprintf(buf, "%lu\n", devfreq->previous_freq); | ||
771 | } | ||
772 | |||
773 | static ssize_t show_target_freq(struct device *dev, | ||
774 | struct device_attribute *attr, char *buf) | ||
775 | { | ||
472 | return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq); | 776 | return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq); |
473 | } | 777 | } |
474 | 778 | ||
@@ -486,39 +790,19 @@ static ssize_t store_polling_interval(struct device *dev, | |||
486 | unsigned int value; | 790 | unsigned int value; |
487 | int ret; | 791 | int ret; |
488 | 792 | ||
793 | if (!df->governor) | ||
794 | return -EINVAL; | ||
795 | |||
489 | ret = sscanf(buf, "%u", &value); | 796 | ret = sscanf(buf, "%u", &value); |
490 | if (ret != 1) | 797 | if (ret != 1) |
491 | goto out; | 798 | return -EINVAL; |
492 | |||
493 | mutex_lock(&df->lock); | ||
494 | df->profile->polling_ms = value; | ||
495 | df->next_polling = df->polling_jiffies | ||
496 | = msecs_to_jiffies(value); | ||
497 | mutex_unlock(&df->lock); | ||
498 | 799 | ||
800 | df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value); | ||
499 | ret = count; | 801 | ret = count; |
500 | 802 | ||
501 | if (df->governor->no_central_polling) | ||
502 | goto out; | ||
503 | |||
504 | mutex_lock(&devfreq_list_lock); | ||
505 | if (df->next_polling > 0 && !polling) { | ||
506 | polling = true; | ||
507 | queue_delayed_work(devfreq_wq, &devfreq_work, | ||
508 | df->next_polling); | ||
509 | } | ||
510 | mutex_unlock(&devfreq_list_lock); | ||
511 | out: | ||
512 | return ret; | 803 | return ret; |
513 | } | 804 | } |
514 | 805 | ||
515 | static ssize_t show_central_polling(struct device *dev, | ||
516 | struct device_attribute *attr, char *buf) | ||
517 | { | ||
518 | return sprintf(buf, "%d\n", | ||
519 | !to_devfreq(dev)->governor->no_central_polling); | ||
520 | } | ||
521 | |||
522 | static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr, | 806 | static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr, |
523 | const char *buf, size_t count) | 807 | const char *buf, size_t count) |
524 | { | 808 | { |
@@ -529,7 +813,7 @@ static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr, | |||
529 | 813 | ||
530 | ret = sscanf(buf, "%lu", &value); | 814 | ret = sscanf(buf, "%lu", &value); |
531 | if (ret != 1) | 815 | if (ret != 1) |
532 | goto out; | 816 | return -EINVAL; |
533 | 817 | ||
534 | mutex_lock(&df->lock); | 818 | mutex_lock(&df->lock); |
535 | max = df->max_freq; | 819 | max = df->max_freq; |
@@ -543,7 +827,6 @@ static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr, | |||
543 | ret = count; | 827 | ret = count; |
544 | unlock: | 828 | unlock: |
545 | mutex_unlock(&df->lock); | 829 | mutex_unlock(&df->lock); |
546 | out: | ||
547 | return ret; | 830 | return ret; |
548 | } | 831 | } |
549 | 832 | ||
@@ -563,7 +846,7 @@ static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr, | |||
563 | 846 | ||
564 | ret = sscanf(buf, "%lu", &value); | 847 | ret = sscanf(buf, "%lu", &value); |
565 | if (ret != 1) | 848 | if (ret != 1) |
566 | goto out; | 849 | return -EINVAL; |
567 | 850 | ||
568 | mutex_lock(&df->lock); | 851 | mutex_lock(&df->lock); |
569 | min = df->min_freq; | 852 | min = df->min_freq; |
@@ -577,7 +860,6 @@ static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr, | |||
577 | ret = count; | 860 | ret = count; |
578 | unlock: | 861 | unlock: |
579 | mutex_unlock(&df->lock); | 862 | mutex_unlock(&df->lock); |
580 | out: | ||
581 | return ret; | 863 | return ret; |
582 | } | 864 | } |
583 | 865 | ||
@@ -587,34 +869,92 @@ static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr, | |||
587 | return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq); | 869 | return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq); |
588 | } | 870 | } |
589 | 871 | ||
872 | static ssize_t show_available_freqs(struct device *d, | ||
873 | struct device_attribute *attr, | ||
874 | char *buf) | ||
875 | { | ||
876 | struct devfreq *df = to_devfreq(d); | ||
877 | struct device *dev = df->dev.parent; | ||
878 | struct opp *opp; | ||
879 | ssize_t count = 0; | ||
880 | unsigned long freq = 0; | ||
881 | |||
882 | rcu_read_lock(); | ||
883 | do { | ||
884 | opp = opp_find_freq_ceil(dev, &freq); | ||
885 | if (IS_ERR(opp)) | ||
886 | break; | ||
887 | |||
888 | count += scnprintf(&buf[count], (PAGE_SIZE - count - 2), | ||
889 | "%lu ", freq); | ||
890 | freq++; | ||
891 | } while (1); | ||
892 | rcu_read_unlock(); | ||
893 | |||
894 | /* Truncate the trailing space */ | ||
895 | if (count) | ||
896 | count--; | ||
897 | |||
898 | count += sprintf(&buf[count], "\n"); | ||
899 | |||
900 | return count; | ||
901 | } | ||
902 | |||
903 | static ssize_t show_trans_table(struct device *dev, struct device_attribute *attr, | ||
904 | char *buf) | ||
905 | { | ||
906 | struct devfreq *devfreq = to_devfreq(dev); | ||
907 | ssize_t len; | ||
908 | int i, j, err; | ||
909 | unsigned int max_state = devfreq->profile->max_state; | ||
910 | |||
911 | err = devfreq_update_status(devfreq, devfreq->previous_freq); | ||
912 | if (err) | ||
913 | return 0; | ||
914 | |||
915 | len = sprintf(buf, " From : To\n"); | ||
916 | len += sprintf(buf + len, " :"); | ||
917 | for (i = 0; i < max_state; i++) | ||
918 | len += sprintf(buf + len, "%8u", | ||
919 | devfreq->profile->freq_table[i]); | ||
920 | |||
921 | len += sprintf(buf + len, " time(ms)\n"); | ||
922 | |||
923 | for (i = 0; i < max_state; i++) { | ||
924 | if (devfreq->profile->freq_table[i] | ||
925 | == devfreq->previous_freq) { | ||
926 | len += sprintf(buf + len, "*"); | ||
927 | } else { | ||
928 | len += sprintf(buf + len, " "); | ||
929 | } | ||
930 | len += sprintf(buf + len, "%8u:", | ||
931 | devfreq->profile->freq_table[i]); | ||
932 | for (j = 0; j < max_state; j++) | ||
933 | len += sprintf(buf + len, "%8u", | ||
934 | devfreq->trans_table[(i * max_state) + j]); | ||
935 | len += sprintf(buf + len, "%10u\n", | ||
936 | jiffies_to_msecs(devfreq->time_in_state[i])); | ||
937 | } | ||
938 | |||
939 | len += sprintf(buf + len, "Total transition : %u\n", | ||
940 | devfreq->total_trans); | ||
941 | return len; | ||
942 | } | ||
943 | |||
590 | static struct device_attribute devfreq_attrs[] = { | 944 | static struct device_attribute devfreq_attrs[] = { |
591 | __ATTR(governor, S_IRUGO, show_governor, NULL), | 945 | __ATTR(governor, S_IRUGO | S_IWUSR, show_governor, store_governor), |
946 | __ATTR(available_governors, S_IRUGO, show_available_governors, NULL), | ||
592 | __ATTR(cur_freq, S_IRUGO, show_freq, NULL), | 947 | __ATTR(cur_freq, S_IRUGO, show_freq, NULL), |
593 | __ATTR(central_polling, S_IRUGO, show_central_polling, NULL), | 948 | __ATTR(available_frequencies, S_IRUGO, show_available_freqs, NULL), |
949 | __ATTR(target_freq, S_IRUGO, show_target_freq, NULL), | ||
594 | __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval, | 950 | __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval, |
595 | store_polling_interval), | 951 | store_polling_interval), |
596 | __ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq), | 952 | __ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq), |
597 | __ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq), | 953 | __ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq), |
954 | __ATTR(trans_stat, S_IRUGO, show_trans_table, NULL), | ||
598 | { }, | 955 | { }, |
599 | }; | 956 | }; |
600 | 957 | ||
601 | /** | ||
602 | * devfreq_start_polling() - Initialize data structure for devfreq framework and | ||
603 | * start polling registered devfreq devices. | ||
604 | */ | ||
605 | static int __init devfreq_start_polling(void) | ||
606 | { | ||
607 | mutex_lock(&devfreq_list_lock); | ||
608 | polling = false; | ||
609 | devfreq_wq = create_freezable_workqueue("devfreq_wq"); | ||
610 | INIT_DEFERRABLE_WORK(&devfreq_work, devfreq_monitor); | ||
611 | mutex_unlock(&devfreq_list_lock); | ||
612 | |||
613 | devfreq_monitor(&devfreq_work.work); | ||
614 | return 0; | ||
615 | } | ||
616 | late_initcall(devfreq_start_polling); | ||
617 | |||
618 | static int __init devfreq_init(void) | 958 | static int __init devfreq_init(void) |
619 | { | 959 | { |
620 | devfreq_class = class_create(THIS_MODULE, "devfreq"); | 960 | devfreq_class = class_create(THIS_MODULE, "devfreq"); |
@@ -622,7 +962,15 @@ static int __init devfreq_init(void) | |||
622 | pr_err("%s: couldn't create class\n", __FILE__); | 962 | pr_err("%s: couldn't create class\n", __FILE__); |
623 | return PTR_ERR(devfreq_class); | 963 | return PTR_ERR(devfreq_class); |
624 | } | 964 | } |
965 | |||
966 | devfreq_wq = create_freezable_workqueue("devfreq_wq"); | ||
967 | if (IS_ERR(devfreq_wq)) { | ||
968 | class_destroy(devfreq_class); | ||
969 | pr_err("%s: couldn't create workqueue\n", __FILE__); | ||
970 | return PTR_ERR(devfreq_wq); | ||
971 | } | ||
625 | devfreq_class->dev_attrs = devfreq_attrs; | 972 | devfreq_class->dev_attrs = devfreq_attrs; |
973 | |||
626 | return 0; | 974 | return 0; |
627 | } | 975 | } |
628 | subsys_initcall(devfreq_init); | 976 | subsys_initcall(devfreq_init); |
@@ -630,6 +978,7 @@ subsys_initcall(devfreq_init); | |||
630 | static void __exit devfreq_exit(void) | 978 | static void __exit devfreq_exit(void) |
631 | { | 979 | { |
632 | class_destroy(devfreq_class); | 980 | class_destroy(devfreq_class); |
981 | destroy_workqueue(devfreq_wq); | ||
633 | } | 982 | } |
634 | module_exit(devfreq_exit); | 983 | module_exit(devfreq_exit); |
635 | 984 | ||
@@ -641,9 +990,9 @@ module_exit(devfreq_exit); | |||
641 | /** | 990 | /** |
642 | * devfreq_recommended_opp() - Helper function to get proper OPP for the | 991 | * devfreq_recommended_opp() - Helper function to get proper OPP for the |
643 | * freq value given to target callback. | 992 | * freq value given to target callback. |
644 | * @dev The devfreq user device. (parent of devfreq) | 993 | * @dev: The devfreq user device. (parent of devfreq) |
645 | * @freq The frequency given to target function | 994 | * @freq: The frequency given to target function |
646 | * @flags Flags handed from devfreq framework. | 995 | * @flags: Flags handed from devfreq framework. |
647 | * | 996 | * |
648 | */ | 997 | */ |
649 | struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, | 998 | struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, |
@@ -656,14 +1005,14 @@ struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, | |||
656 | opp = opp_find_freq_floor(dev, freq); | 1005 | opp = opp_find_freq_floor(dev, freq); |
657 | 1006 | ||
658 | /* If not available, use the closest opp */ | 1007 | /* If not available, use the closest opp */ |
659 | if (opp == ERR_PTR(-ENODEV)) | 1008 | if (opp == ERR_PTR(-ERANGE)) |
660 | opp = opp_find_freq_ceil(dev, freq); | 1009 | opp = opp_find_freq_ceil(dev, freq); |
661 | } else { | 1010 | } else { |
662 | /* The freq is an lower bound. opp should be higher */ | 1011 | /* The freq is an lower bound. opp should be higher */ |
663 | opp = opp_find_freq_ceil(dev, freq); | 1012 | opp = opp_find_freq_ceil(dev, freq); |
664 | 1013 | ||
665 | /* If not available, use the closest opp */ | 1014 | /* If not available, use the closest opp */ |
666 | if (opp == ERR_PTR(-ENODEV)) | 1015 | if (opp == ERR_PTR(-ERANGE)) |
667 | opp = opp_find_freq_floor(dev, freq); | 1016 | opp = opp_find_freq_floor(dev, freq); |
668 | } | 1017 | } |
669 | 1018 | ||
@@ -674,35 +1023,49 @@ struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, | |||
674 | * devfreq_register_opp_notifier() - Helper function to get devfreq notified | 1023 | * devfreq_register_opp_notifier() - Helper function to get devfreq notified |
675 | * for any changes in the OPP availability | 1024 | * for any changes in the OPP availability |
676 | * changes | 1025 | * changes |
677 | * @dev The devfreq user device. (parent of devfreq) | 1026 | * @dev: The devfreq user device. (parent of devfreq) |
678 | * @devfreq The devfreq object. | 1027 | * @devfreq: The devfreq object. |
679 | */ | 1028 | */ |
680 | int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq) | 1029 | int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq) |
681 | { | 1030 | { |
682 | struct srcu_notifier_head *nh = opp_get_notifier(dev); | 1031 | struct srcu_notifier_head *nh; |
1032 | int ret = 0; | ||
683 | 1033 | ||
1034 | rcu_read_lock(); | ||
1035 | nh = opp_get_notifier(dev); | ||
684 | if (IS_ERR(nh)) | 1036 | if (IS_ERR(nh)) |
685 | return PTR_ERR(nh); | 1037 | ret = PTR_ERR(nh); |
686 | return srcu_notifier_chain_register(nh, &devfreq->nb); | 1038 | rcu_read_unlock(); |
1039 | if (!ret) | ||
1040 | ret = srcu_notifier_chain_register(nh, &devfreq->nb); | ||
1041 | |||
1042 | return ret; | ||
687 | } | 1043 | } |
688 | 1044 | ||
689 | /** | 1045 | /** |
690 | * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq | 1046 | * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq |
691 | * notified for any changes in the OPP | 1047 | * notified for any changes in the OPP |
692 | * availability changes anymore. | 1048 | * availability changes anymore. |
693 | * @dev The devfreq user device. (parent of devfreq) | 1049 | * @dev: The devfreq user device. (parent of devfreq) |
694 | * @devfreq The devfreq object. | 1050 | * @devfreq: The devfreq object. |
695 | * | 1051 | * |
696 | * At exit() callback of devfreq_dev_profile, this must be included if | 1052 | * At exit() callback of devfreq_dev_profile, this must be included if |
697 | * devfreq_recommended_opp is used. | 1053 | * devfreq_recommended_opp is used. |
698 | */ | 1054 | */ |
699 | int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq) | 1055 | int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq) |
700 | { | 1056 | { |
701 | struct srcu_notifier_head *nh = opp_get_notifier(dev); | 1057 | struct srcu_notifier_head *nh; |
1058 | int ret = 0; | ||
702 | 1059 | ||
1060 | rcu_read_lock(); | ||
1061 | nh = opp_get_notifier(dev); | ||
703 | if (IS_ERR(nh)) | 1062 | if (IS_ERR(nh)) |
704 | return PTR_ERR(nh); | 1063 | ret = PTR_ERR(nh); |
705 | return srcu_notifier_chain_unregister(nh, &devfreq->nb); | 1064 | rcu_read_unlock(); |
1065 | if (!ret) | ||
1066 | ret = srcu_notifier_chain_unregister(nh, &devfreq->nb); | ||
1067 | |||
1068 | return ret; | ||
706 | } | 1069 | } |
707 | 1070 | ||
708 | MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); | 1071 | MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); |
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c index 88ddc77a9bb1..741837208716 100644 --- a/drivers/devfreq/exynos4_bus.c +++ b/drivers/devfreq/exynos4_bus.c | |||
@@ -987,7 +987,7 @@ static __devinit int exynos4_busfreq_probe(struct platform_device *pdev) | |||
987 | struct device *dev = &pdev->dev; | 987 | struct device *dev = &pdev->dev; |
988 | int err = 0; | 988 | int err = 0; |
989 | 989 | ||
990 | data = kzalloc(sizeof(struct busfreq_data), GFP_KERNEL); | 990 | data = devm_kzalloc(&pdev->dev, sizeof(struct busfreq_data), GFP_KERNEL); |
991 | if (data == NULL) { | 991 | if (data == NULL) { |
992 | dev_err(dev, "Cannot allocate memory.\n"); | 992 | dev_err(dev, "Cannot allocate memory.\n"); |
993 | return -ENOMEM; | 993 | return -ENOMEM; |
@@ -1012,31 +1012,26 @@ static __devinit int exynos4_busfreq_probe(struct platform_device *pdev) | |||
1012 | err = -EINVAL; | 1012 | err = -EINVAL; |
1013 | } | 1013 | } |
1014 | if (err) | 1014 | if (err) |
1015 | goto err_regulator; | 1015 | return err; |
1016 | 1016 | ||
1017 | data->vdd_int = regulator_get(dev, "vdd_int"); | 1017 | data->vdd_int = devm_regulator_get(dev, "vdd_int"); |
1018 | if (IS_ERR(data->vdd_int)) { | 1018 | if (IS_ERR(data->vdd_int)) { |
1019 | dev_err(dev, "Cannot get the regulator \"vdd_int\"\n"); | 1019 | dev_err(dev, "Cannot get the regulator \"vdd_int\"\n"); |
1020 | err = PTR_ERR(data->vdd_int); | 1020 | return PTR_ERR(data->vdd_int); |
1021 | goto err_regulator; | ||
1022 | } | 1021 | } |
1023 | if (data->type == TYPE_BUSF_EXYNOS4x12) { | 1022 | if (data->type == TYPE_BUSF_EXYNOS4x12) { |
1024 | data->vdd_mif = regulator_get(dev, "vdd_mif"); | 1023 | data->vdd_mif = devm_regulator_get(dev, "vdd_mif"); |
1025 | if (IS_ERR(data->vdd_mif)) { | 1024 | if (IS_ERR(data->vdd_mif)) { |
1026 | dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n"); | 1025 | dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n"); |
1027 | err = PTR_ERR(data->vdd_mif); | 1026 | return PTR_ERR(data->vdd_mif); |
1028 | regulator_put(data->vdd_int); | ||
1029 | goto err_regulator; | ||
1030 | |||
1031 | } | 1027 | } |
1032 | } | 1028 | } |
1033 | 1029 | ||
1034 | opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq); | 1030 | opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq); |
1035 | if (IS_ERR(opp)) { | 1031 | if (IS_ERR(opp)) { |
1036 | dev_err(dev, "Invalid initial frequency %lu kHz.\n", | 1032 | dev_err(dev, "Invalid initial frequency %lu kHz.\n", |
1037 | exynos4_devfreq_profile.initial_freq); | 1033 | exynos4_devfreq_profile.initial_freq); |
1038 | err = PTR_ERR(opp); | 1034 | return PTR_ERR(opp); |
1039 | goto err_opp_add; | ||
1040 | } | 1035 | } |
1041 | data->curr_opp = opp; | 1036 | data->curr_opp = opp; |
1042 | 1037 | ||
@@ -1045,30 +1040,20 @@ static __devinit int exynos4_busfreq_probe(struct platform_device *pdev) | |||
1045 | busfreq_mon_reset(data); | 1040 | busfreq_mon_reset(data); |
1046 | 1041 | ||
1047 | data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile, | 1042 | data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile, |
1048 | &devfreq_simple_ondemand, NULL); | 1043 | "simple_ondemand", NULL); |
1049 | if (IS_ERR(data->devfreq)) { | 1044 | if (IS_ERR(data->devfreq)) |
1050 | err = PTR_ERR(data->devfreq); | 1045 | return PTR_ERR(data->devfreq); |
1051 | goto err_opp_add; | ||
1052 | } | ||
1053 | 1046 | ||
1054 | devfreq_register_opp_notifier(dev, data->devfreq); | 1047 | devfreq_register_opp_notifier(dev, data->devfreq); |
1055 | 1048 | ||
1056 | err = register_pm_notifier(&data->pm_notifier); | 1049 | err = register_pm_notifier(&data->pm_notifier); |
1057 | if (err) { | 1050 | if (err) { |
1058 | dev_err(dev, "Failed to setup pm notifier\n"); | 1051 | dev_err(dev, "Failed to setup pm notifier\n"); |
1059 | goto err_devfreq_add; | 1052 | devfreq_remove_device(data->devfreq); |
1053 | return err; | ||
1060 | } | 1054 | } |
1061 | 1055 | ||
1062 | return 0; | 1056 | return 0; |
1063 | err_devfreq_add: | ||
1064 | devfreq_remove_device(data->devfreq); | ||
1065 | err_opp_add: | ||
1066 | if (data->vdd_mif) | ||
1067 | regulator_put(data->vdd_mif); | ||
1068 | regulator_put(data->vdd_int); | ||
1069 | err_regulator: | ||
1070 | kfree(data); | ||
1071 | return err; | ||
1072 | } | 1057 | } |
1073 | 1058 | ||
1074 | static __devexit int exynos4_busfreq_remove(struct platform_device *pdev) | 1059 | static __devexit int exynos4_busfreq_remove(struct platform_device *pdev) |
@@ -1077,10 +1062,6 @@ static __devexit int exynos4_busfreq_remove(struct platform_device *pdev) | |||
1077 | 1062 | ||
1078 | unregister_pm_notifier(&data->pm_notifier); | 1063 | unregister_pm_notifier(&data->pm_notifier); |
1079 | devfreq_remove_device(data->devfreq); | 1064 | devfreq_remove_device(data->devfreq); |
1080 | regulator_put(data->vdd_int); | ||
1081 | if (data->vdd_mif) | ||
1082 | regulator_put(data->vdd_mif); | ||
1083 | kfree(data); | ||
1084 | 1065 | ||
1085 | return 0; | 1066 | return 0; |
1086 | } | 1067 | } |
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h index ea7f13c58ded..fad7d6321978 100644 --- a/drivers/devfreq/governor.h +++ b/drivers/devfreq/governor.h | |||
@@ -18,7 +18,24 @@ | |||
18 | 18 | ||
19 | #define to_devfreq(DEV) container_of((DEV), struct devfreq, dev) | 19 | #define to_devfreq(DEV) container_of((DEV), struct devfreq, dev) |
20 | 20 | ||
21 | /* Devfreq events */ | ||
22 | #define DEVFREQ_GOV_START 0x1 | ||
23 | #define DEVFREQ_GOV_STOP 0x2 | ||
24 | #define DEVFREQ_GOV_INTERVAL 0x3 | ||
25 | #define DEVFREQ_GOV_SUSPEND 0x4 | ||
26 | #define DEVFREQ_GOV_RESUME 0x5 | ||
27 | |||
21 | /* Caution: devfreq->lock must be locked before calling update_devfreq */ | 28 | /* Caution: devfreq->lock must be locked before calling update_devfreq */ |
22 | extern int update_devfreq(struct devfreq *devfreq); | 29 | extern int update_devfreq(struct devfreq *devfreq); |
23 | 30 | ||
31 | extern void devfreq_monitor_start(struct devfreq *devfreq); | ||
32 | extern void devfreq_monitor_stop(struct devfreq *devfreq); | ||
33 | extern void devfreq_monitor_suspend(struct devfreq *devfreq); | ||
34 | extern void devfreq_monitor_resume(struct devfreq *devfreq); | ||
35 | extern void devfreq_interval_update(struct devfreq *devfreq, | ||
36 | unsigned int *delay); | ||
37 | |||
38 | extern int devfreq_add_governor(struct devfreq_governor *governor); | ||
39 | extern int devfreq_remove_governor(struct devfreq_governor *governor); | ||
40 | |||
24 | #endif /* _GOVERNOR_H */ | 41 | #endif /* _GOVERNOR_H */ |
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c index af75ddd4f158..c72f942f30a8 100644 --- a/drivers/devfreq/governor_performance.c +++ b/drivers/devfreq/governor_performance.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/devfreq.h> | 12 | #include <linux/devfreq.h> |
13 | #include <linux/module.h> | ||
13 | #include "governor.h" | 14 | #include "governor.h" |
14 | 15 | ||
15 | static int devfreq_performance_func(struct devfreq *df, | 16 | static int devfreq_performance_func(struct devfreq *df, |
@@ -26,14 +27,41 @@ static int devfreq_performance_func(struct devfreq *df, | |||
26 | return 0; | 27 | return 0; |
27 | } | 28 | } |
28 | 29 | ||
29 | static int performance_init(struct devfreq *devfreq) | 30 | static int devfreq_performance_handler(struct devfreq *devfreq, |
31 | unsigned int event, void *data) | ||
30 | { | 32 | { |
31 | return update_devfreq(devfreq); | 33 | int ret = 0; |
34 | |||
35 | if (event == DEVFREQ_GOV_START) { | ||
36 | mutex_lock(&devfreq->lock); | ||
37 | ret = update_devfreq(devfreq); | ||
38 | mutex_unlock(&devfreq->lock); | ||
39 | } | ||
40 | |||
41 | return ret; | ||
32 | } | 42 | } |
33 | 43 | ||
34 | const struct devfreq_governor devfreq_performance = { | 44 | static struct devfreq_governor devfreq_performance = { |
35 | .name = "performance", | 45 | .name = "performance", |
36 | .init = performance_init, | ||
37 | .get_target_freq = devfreq_performance_func, | 46 | .get_target_freq = devfreq_performance_func, |
38 | .no_central_polling = true, | 47 | .event_handler = devfreq_performance_handler, |
39 | }; | 48 | }; |
49 | |||
50 | static int __init devfreq_performance_init(void) | ||
51 | { | ||
52 | return devfreq_add_governor(&devfreq_performance); | ||
53 | } | ||
54 | subsys_initcall(devfreq_performance_init); | ||
55 | |||
56 | static void __exit devfreq_performance_exit(void) | ||
57 | { | ||
58 | int ret; | ||
59 | |||
60 | ret = devfreq_remove_governor(&devfreq_performance); | ||
61 | if (ret) | ||
62 | pr_err("%s: failed remove governor %d\n", __func__, ret); | ||
63 | |||
64 | return; | ||
65 | } | ||
66 | module_exit(devfreq_performance_exit); | ||
67 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c index fec0cdbd2477..0c6bed567e6d 100644 --- a/drivers/devfreq/governor_powersave.c +++ b/drivers/devfreq/governor_powersave.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/devfreq.h> | 12 | #include <linux/devfreq.h> |
13 | #include <linux/module.h> | ||
13 | #include "governor.h" | 14 | #include "governor.h" |
14 | 15 | ||
15 | static int devfreq_powersave_func(struct devfreq *df, | 16 | static int devfreq_powersave_func(struct devfreq *df, |
@@ -23,14 +24,41 @@ static int devfreq_powersave_func(struct devfreq *df, | |||
23 | return 0; | 24 | return 0; |
24 | } | 25 | } |
25 | 26 | ||
26 | static int powersave_init(struct devfreq *devfreq) | 27 | static int devfreq_powersave_handler(struct devfreq *devfreq, |
28 | unsigned int event, void *data) | ||
27 | { | 29 | { |
28 | return update_devfreq(devfreq); | 30 | int ret = 0; |
31 | |||
32 | if (event == DEVFREQ_GOV_START) { | ||
33 | mutex_lock(&devfreq->lock); | ||
34 | ret = update_devfreq(devfreq); | ||
35 | mutex_unlock(&devfreq->lock); | ||
36 | } | ||
37 | |||
38 | return ret; | ||
29 | } | 39 | } |
30 | 40 | ||
31 | const struct devfreq_governor devfreq_powersave = { | 41 | static struct devfreq_governor devfreq_powersave = { |
32 | .name = "powersave", | 42 | .name = "powersave", |
33 | .init = powersave_init, | ||
34 | .get_target_freq = devfreq_powersave_func, | 43 | .get_target_freq = devfreq_powersave_func, |
35 | .no_central_polling = true, | 44 | .event_handler = devfreq_powersave_handler, |
36 | }; | 45 | }; |
46 | |||
47 | static int __init devfreq_powersave_init(void) | ||
48 | { | ||
49 | return devfreq_add_governor(&devfreq_powersave); | ||
50 | } | ||
51 | subsys_initcall(devfreq_powersave_init); | ||
52 | |||
53 | static void __exit devfreq_powersave_exit(void) | ||
54 | { | ||
55 | int ret; | ||
56 | |||
57 | ret = devfreq_remove_governor(&devfreq_powersave); | ||
58 | if (ret) | ||
59 | pr_err("%s: failed remove governor %d\n", __func__, ret); | ||
60 | |||
61 | return; | ||
62 | } | ||
63 | module_exit(devfreq_powersave_exit); | ||
64 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c index a2e3eae79011..0720ba84ca92 100644 --- a/drivers/devfreq/governor_simpleondemand.c +++ b/drivers/devfreq/governor_simpleondemand.c | |||
@@ -10,8 +10,10 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/module.h> | ||
13 | #include <linux/devfreq.h> | 14 | #include <linux/devfreq.h> |
14 | #include <linux/math64.h> | 15 | #include <linux/math64.h> |
16 | #include "governor.h" | ||
15 | 17 | ||
16 | /* Default constants for DevFreq-Simple-Ondemand (DFSO) */ | 18 | /* Default constants for DevFreq-Simple-Ondemand (DFSO) */ |
17 | #define DFSO_UPTHRESHOLD (90) | 19 | #define DFSO_UPTHRESHOLD (90) |
@@ -88,7 +90,58 @@ static int devfreq_simple_ondemand_func(struct devfreq *df, | |||
88 | return 0; | 90 | return 0; |
89 | } | 91 | } |
90 | 92 | ||
91 | const struct devfreq_governor devfreq_simple_ondemand = { | 93 | static int devfreq_simple_ondemand_handler(struct devfreq *devfreq, |
94 | unsigned int event, void *data) | ||
95 | { | ||
96 | switch (event) { | ||
97 | case DEVFREQ_GOV_START: | ||
98 | devfreq_monitor_start(devfreq); | ||
99 | break; | ||
100 | |||
101 | case DEVFREQ_GOV_STOP: | ||
102 | devfreq_monitor_stop(devfreq); | ||
103 | break; | ||
104 | |||
105 | case DEVFREQ_GOV_INTERVAL: | ||
106 | devfreq_interval_update(devfreq, (unsigned int *)data); | ||
107 | break; | ||
108 | |||
109 | case DEVFREQ_GOV_SUSPEND: | ||
110 | devfreq_monitor_suspend(devfreq); | ||
111 | break; | ||
112 | |||
113 | case DEVFREQ_GOV_RESUME: | ||
114 | devfreq_monitor_resume(devfreq); | ||
115 | break; | ||
116 | |||
117 | default: | ||
118 | break; | ||
119 | } | ||
120 | |||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | static struct devfreq_governor devfreq_simple_ondemand = { | ||
92 | .name = "simple_ondemand", | 125 | .name = "simple_ondemand", |
93 | .get_target_freq = devfreq_simple_ondemand_func, | 126 | .get_target_freq = devfreq_simple_ondemand_func, |
127 | .event_handler = devfreq_simple_ondemand_handler, | ||
94 | }; | 128 | }; |
129 | |||
130 | static int __init devfreq_simple_ondemand_init(void) | ||
131 | { | ||
132 | return devfreq_add_governor(&devfreq_simple_ondemand); | ||
133 | } | ||
134 | subsys_initcall(devfreq_simple_ondemand_init); | ||
135 | |||
136 | static void __exit devfreq_simple_ondemand_exit(void) | ||
137 | { | ||
138 | int ret; | ||
139 | |||
140 | ret = devfreq_remove_governor(&devfreq_simple_ondemand); | ||
141 | if (ret) | ||
142 | pr_err("%s: failed remove governor %d\n", __func__, ret); | ||
143 | |||
144 | return; | ||
145 | } | ||
146 | module_exit(devfreq_simple_ondemand_exit); | ||
147 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c index 0681246fc89d..35de6e83c1fe 100644 --- a/drivers/devfreq/governor_userspace.c +++ b/drivers/devfreq/governor_userspace.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/devfreq.h> | 14 | #include <linux/devfreq.h> |
15 | #include <linux/pm.h> | 15 | #include <linux/pm.h> |
16 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
17 | #include <linux/module.h> | ||
17 | #include "governor.h" | 18 | #include "governor.h" |
18 | 19 | ||
19 | struct userspace_data { | 20 | struct userspace_data { |
@@ -116,10 +117,46 @@ static void userspace_exit(struct devfreq *devfreq) | |||
116 | devfreq->data = NULL; | 117 | devfreq->data = NULL; |
117 | } | 118 | } |
118 | 119 | ||
119 | const struct devfreq_governor devfreq_userspace = { | 120 | static int devfreq_userspace_handler(struct devfreq *devfreq, |
121 | unsigned int event, void *data) | ||
122 | { | ||
123 | int ret = 0; | ||
124 | |||
125 | switch (event) { | ||
126 | case DEVFREQ_GOV_START: | ||
127 | ret = userspace_init(devfreq); | ||
128 | break; | ||
129 | case DEVFREQ_GOV_STOP: | ||
130 | userspace_exit(devfreq); | ||
131 | break; | ||
132 | default: | ||
133 | break; | ||
134 | } | ||
135 | |||
136 | return ret; | ||
137 | } | ||
138 | |||
139 | static struct devfreq_governor devfreq_userspace = { | ||
120 | .name = "userspace", | 140 | .name = "userspace", |
121 | .get_target_freq = devfreq_userspace_func, | 141 | .get_target_freq = devfreq_userspace_func, |
122 | .init = userspace_init, | 142 | .event_handler = devfreq_userspace_handler, |
123 | .exit = userspace_exit, | ||
124 | .no_central_polling = true, | ||
125 | }; | 143 | }; |
144 | |||
145 | static int __init devfreq_userspace_init(void) | ||
146 | { | ||
147 | return devfreq_add_governor(&devfreq_userspace); | ||
148 | } | ||
149 | subsys_initcall(devfreq_userspace_init); | ||
150 | |||
151 | static void __exit devfreq_userspace_exit(void) | ||
152 | { | ||
153 | int ret; | ||
154 | |||
155 | ret = devfreq_remove_governor(&devfreq_userspace); | ||
156 | if (ret) | ||
157 | pr_err("%s: failed remove governor %d\n", __func__, ret); | ||
158 | |||
159 | return; | ||
160 | } | ||
161 | module_exit(devfreq_userspace_exit); | ||
162 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 47150f5ded04..f16557690cfd 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
@@ -49,6 +49,10 @@ config OF_GPIO | |||
49 | def_bool y | 49 | def_bool y |
50 | depends on OF | 50 | depends on OF |
51 | 51 | ||
52 | config GPIO_ACPI | ||
53 | def_bool y | ||
54 | depends on ACPI | ||
55 | |||
52 | config DEBUG_GPIO | 56 | config DEBUG_GPIO |
53 | bool "Debug GPIO calls" | 57 | bool "Debug GPIO calls" |
54 | depends on DEBUG_KERNEL | 58 | depends on DEBUG_KERNEL |
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 9aeed6707326..420dbaca05f1 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile | |||
@@ -4,6 +4,7 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG | |||
4 | 4 | ||
5 | obj-$(CONFIG_GPIOLIB) += gpiolib.o devres.o | 5 | obj-$(CONFIG_GPIOLIB) += gpiolib.o devres.o |
6 | obj-$(CONFIG_OF_GPIO) += gpiolib-of.o | 6 | obj-$(CONFIG_OF_GPIO) += gpiolib-of.o |
7 | obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o | ||
7 | 8 | ||
8 | # Device drivers. Generally keep list sorted alphabetically | 9 | # Device drivers. Generally keep list sorted alphabetically |
9 | obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o | 10 | obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o |
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c new file mode 100644 index 000000000000..cbad6e908d30 --- /dev/null +++ b/drivers/gpio/gpiolib-acpi.c | |||
@@ -0,0 +1,54 @@ | |||
1 | /* | ||
2 | * ACPI helpers for GPIO API | ||
3 | * | ||
4 | * Copyright (C) 2012, Intel Corporation | ||
5 | * Authors: Mathias Nyman <mathias.nyman@linux.intel.com> | ||
6 | * Mika Westerberg <mika.westerberg@linux.intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/errno.h> | ||
14 | #include <linux/gpio.h> | ||
15 | #include <linux/export.h> | ||
16 | #include <linux/acpi_gpio.h> | ||
17 | #include <linux/acpi.h> | ||
18 | |||
19 | static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) | ||
20 | { | ||
21 | if (!gc->dev) | ||
22 | return false; | ||
23 | |||
24 | return ACPI_HANDLE(gc->dev) == data; | ||
25 | } | ||
26 | |||
27 | /** | ||
28 | * acpi_get_gpio() - Translate ACPI GPIO pin to GPIO number usable with GPIO API | ||
29 | * @path: ACPI GPIO controller full path name, (e.g. "\\_SB.GPO1") | ||
30 | * @pin: ACPI GPIO pin number (0-based, controller-relative) | ||
31 | * | ||
32 | * Returns GPIO number to use with Linux generic GPIO API, or errno error value | ||
33 | */ | ||
34 | |||
35 | int acpi_get_gpio(char *path, int pin) | ||
36 | { | ||
37 | struct gpio_chip *chip; | ||
38 | acpi_handle handle; | ||
39 | acpi_status status; | ||
40 | |||
41 | status = acpi_get_handle(NULL, path, &handle); | ||
42 | if (ACPI_FAILURE(status)) | ||
43 | return -ENODEV; | ||
44 | |||
45 | chip = gpiochip_find(handle, acpi_gpiochip_find); | ||
46 | if (!chip) | ||
47 | return -ENODEV; | ||
48 | |||
49 | if (!gpio_is_valid(chip->base + pin)) | ||
50 | return -EINVAL; | ||
51 | |||
52 | return chip->base + pin; | ||
53 | } | ||
54 | EXPORT_SYMBOL_GPL(acpi_get_gpio); | ||
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index a7edf987a339..e388590b44ab 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/irqflags.h> | 39 | #include <linux/irqflags.h> |
40 | #include <linux/rwsem.h> | 40 | #include <linux/rwsem.h> |
41 | #include <linux/pm_runtime.h> | 41 | #include <linux/pm_runtime.h> |
42 | #include <linux/acpi.h> | ||
42 | #include <asm/uaccess.h> | 43 | #include <asm/uaccess.h> |
43 | 44 | ||
44 | #include "i2c-core.h" | 45 | #include "i2c-core.h" |
@@ -78,6 +79,10 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv) | |||
78 | if (of_driver_match_device(dev, drv)) | 79 | if (of_driver_match_device(dev, drv)) |
79 | return 1; | 80 | return 1; |
80 | 81 | ||
82 | /* Then ACPI style match */ | ||
83 | if (acpi_driver_match_device(dev, drv)) | ||
84 | return 1; | ||
85 | |||
81 | driver = to_i2c_driver(drv); | 86 | driver = to_i2c_driver(drv); |
82 | /* match on an id table if there is one */ | 87 | /* match on an id table if there is one */ |
83 | if (driver->id_table) | 88 | if (driver->id_table) |
@@ -539,6 +544,7 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info) | |||
539 | client->dev.bus = &i2c_bus_type; | 544 | client->dev.bus = &i2c_bus_type; |
540 | client->dev.type = &i2c_client_type; | 545 | client->dev.type = &i2c_client_type; |
541 | client->dev.of_node = info->of_node; | 546 | client->dev.of_node = info->of_node; |
547 | ACPI_HANDLE_SET(&client->dev, info->acpi_node.handle); | ||
542 | 548 | ||
543 | /* For 10-bit clients, add an arbitrary offset to avoid collisions */ | 549 | /* For 10-bit clients, add an arbitrary offset to avoid collisions */ |
544 | dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), | 550 | dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), |
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index b0f6b4c8ee14..c49c04d9c2b0 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
@@ -56,7 +56,6 @@ | |||
56 | #include <linux/kernel.h> | 56 | #include <linux/kernel.h> |
57 | #include <linux/cpuidle.h> | 57 | #include <linux/cpuidle.h> |
58 | #include <linux/clockchips.h> | 58 | #include <linux/clockchips.h> |
59 | #include <linux/hrtimer.h> /* ktime_get_real() */ | ||
60 | #include <trace/events/power.h> | 59 | #include <trace/events/power.h> |
61 | #include <linux/sched.h> | 60 | #include <linux/sched.h> |
62 | #include <linux/notifier.h> | 61 | #include <linux/notifier.h> |
@@ -72,6 +71,7 @@ | |||
72 | static struct cpuidle_driver intel_idle_driver = { | 71 | static struct cpuidle_driver intel_idle_driver = { |
73 | .name = "intel_idle", | 72 | .name = "intel_idle", |
74 | .owner = THIS_MODULE, | 73 | .owner = THIS_MODULE, |
74 | .en_core_tk_irqen = 1, | ||
75 | }; | 75 | }; |
76 | /* intel_idle.max_cstate=0 disables driver */ | 76 | /* intel_idle.max_cstate=0 disables driver */ |
77 | static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1; | 77 | static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1; |
@@ -281,8 +281,6 @@ static int intel_idle(struct cpuidle_device *dev, | |||
281 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | 281 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
282 | unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage); | 282 | unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage); |
283 | unsigned int cstate; | 283 | unsigned int cstate; |
284 | ktime_t kt_before, kt_after; | ||
285 | s64 usec_delta; | ||
286 | int cpu = smp_processor_id(); | 284 | int cpu = smp_processor_id(); |
287 | 285 | ||
288 | cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; | 286 | cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; |
@@ -297,8 +295,6 @@ static int intel_idle(struct cpuidle_device *dev, | |||
297 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) | 295 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) |
298 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); | 296 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); |
299 | 297 | ||
300 | kt_before = ktime_get_real(); | ||
301 | |||
302 | stop_critical_timings(); | 298 | stop_critical_timings(); |
303 | if (!need_resched()) { | 299 | if (!need_resched()) { |
304 | 300 | ||
@@ -310,17 +306,9 @@ static int intel_idle(struct cpuidle_device *dev, | |||
310 | 306 | ||
311 | start_critical_timings(); | 307 | start_critical_timings(); |
312 | 308 | ||
313 | kt_after = ktime_get_real(); | ||
314 | usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before)); | ||
315 | |||
316 | local_irq_enable(); | ||
317 | |||
318 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) | 309 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) |
319 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); | 310 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); |
320 | 311 | ||
321 | /* Update cpuidle counters */ | ||
322 | dev->last_residency = (int)usec_delta; | ||
323 | |||
324 | return index; | 312 | return index; |
325 | } | 313 | } |
326 | 314 | ||
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 83eb1e06ff76..bebbe167fd89 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig | |||
@@ -81,6 +81,18 @@ config MMC_RICOH_MMC | |||
81 | 81 | ||
82 | If unsure, say Y. | 82 | If unsure, say Y. |
83 | 83 | ||
84 | config MMC_SDHCI_ACPI | ||
85 | tristate "SDHCI support for ACPI enumerated SDHCI controllers" | ||
86 | depends on MMC_SDHCI && ACPI | ||
87 | help | ||
88 | This selects support for ACPI enumerated SDHCI controllers, | ||
89 | identified by ACPI Compatibility ID PNP0D40 or specific | ||
90 | ACPI Hardware IDs. | ||
91 | |||
92 | If you have a controller with this interface, say Y or M here. | ||
93 | |||
94 | If unsure, say N. | ||
95 | |||
84 | config MMC_SDHCI_PLTFM | 96 | config MMC_SDHCI_PLTFM |
85 | tristate "SDHCI platform and OF driver helper" | 97 | tristate "SDHCI platform and OF driver helper" |
86 | depends on MMC_SDHCI | 98 | depends on MMC_SDHCI |
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 39d5e1234709..c5eddc1b4833 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile | |||
@@ -9,6 +9,7 @@ obj-$(CONFIG_MMC_MXS) += mxs-mmc.o | |||
9 | obj-$(CONFIG_MMC_SDHCI) += sdhci.o | 9 | obj-$(CONFIG_MMC_SDHCI) += sdhci.o |
10 | obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o | 10 | obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o |
11 | obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o | 11 | obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o |
12 | obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o | ||
12 | obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o | 13 | obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o |
13 | obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o | 14 | obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o |
14 | obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o | 15 | obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o |
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c new file mode 100644 index 000000000000..12b0a78497f6 --- /dev/null +++ b/drivers/mmc/host/sdhci-acpi.c | |||
@@ -0,0 +1,312 @@ | |||
1 | /* | ||
2 | * Secure Digital Host Controller Interface ACPI driver. | ||
3 | * | ||
4 | * Copyright (c) 2012, Intel Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along with | ||
16 | * this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #include <linux/init.h> | ||
22 | #include <linux/export.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/device.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/ioport.h> | ||
27 | #include <linux/io.h> | ||
28 | #include <linux/dma-mapping.h> | ||
29 | #include <linux/compiler.h> | ||
30 | #include <linux/stddef.h> | ||
31 | #include <linux/bitops.h> | ||
32 | #include <linux/types.h> | ||
33 | #include <linux/err.h> | ||
34 | #include <linux/interrupt.h> | ||
35 | #include <linux/acpi.h> | ||
36 | #include <linux/pm.h> | ||
37 | #include <linux/pm_runtime.h> | ||
38 | |||
39 | #include <linux/mmc/host.h> | ||
40 | #include <linux/mmc/pm.h> | ||
41 | #include <linux/mmc/sdhci.h> | ||
42 | |||
43 | #include "sdhci.h" | ||
44 | |||
45 | enum { | ||
46 | SDHCI_ACPI_SD_CD = BIT(0), | ||
47 | SDHCI_ACPI_RUNTIME_PM = BIT(1), | ||
48 | }; | ||
49 | |||
50 | struct sdhci_acpi_chip { | ||
51 | const struct sdhci_ops *ops; | ||
52 | unsigned int quirks; | ||
53 | unsigned int quirks2; | ||
54 | unsigned long caps; | ||
55 | unsigned int caps2; | ||
56 | mmc_pm_flag_t pm_caps; | ||
57 | }; | ||
58 | |||
59 | struct sdhci_acpi_slot { | ||
60 | const struct sdhci_acpi_chip *chip; | ||
61 | unsigned int quirks; | ||
62 | unsigned int quirks2; | ||
63 | unsigned long caps; | ||
64 | unsigned int caps2; | ||
65 | mmc_pm_flag_t pm_caps; | ||
66 | unsigned int flags; | ||
67 | }; | ||
68 | |||
69 | struct sdhci_acpi_host { | ||
70 | struct sdhci_host *host; | ||
71 | const struct sdhci_acpi_slot *slot; | ||
72 | struct platform_device *pdev; | ||
73 | bool use_runtime_pm; | ||
74 | }; | ||
75 | |||
76 | static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag) | ||
77 | { | ||
78 | return c->slot && (c->slot->flags & flag); | ||
79 | } | ||
80 | |||
81 | static int sdhci_acpi_enable_dma(struct sdhci_host *host) | ||
82 | { | ||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | static const struct sdhci_ops sdhci_acpi_ops_dflt = { | ||
87 | .enable_dma = sdhci_acpi_enable_dma, | ||
88 | }; | ||
89 | |||
90 | static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { | ||
91 | .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, | ||
92 | .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD, | ||
93 | .flags = SDHCI_ACPI_RUNTIME_PM, | ||
94 | .pm_caps = MMC_PM_KEEP_POWER, | ||
95 | }; | ||
96 | |||
97 | static const struct acpi_device_id sdhci_acpi_ids[] = { | ||
98 | { "INT33C6", (kernel_ulong_t)&sdhci_acpi_slot_int_sdio }, | ||
99 | { "PNP0D40" }, | ||
100 | { }, | ||
101 | }; | ||
102 | MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids); | ||
103 | |||
104 | static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid) | ||
105 | { | ||
106 | const struct acpi_device_id *id; | ||
107 | |||
108 | for (id = sdhci_acpi_ids; id->id[0]; id++) | ||
109 | if (!strcmp(id->id, hid)) | ||
110 | return (const struct sdhci_acpi_slot *)id->driver_data; | ||
111 | return NULL; | ||
112 | } | ||
113 | |||
114 | static int __devinit sdhci_acpi_probe(struct platform_device *pdev) | ||
115 | { | ||
116 | struct device *dev = &pdev->dev; | ||
117 | acpi_handle handle = ACPI_HANDLE(dev); | ||
118 | struct acpi_device *device; | ||
119 | struct sdhci_acpi_host *c; | ||
120 | struct sdhci_host *host; | ||
121 | struct resource *iomem; | ||
122 | resource_size_t len; | ||
123 | const char *hid; | ||
124 | int err; | ||
125 | |||
126 | if (acpi_bus_get_device(handle, &device)) | ||
127 | return -ENODEV; | ||
128 | |||
129 | if (acpi_bus_get_status(device) || !device->status.present) | ||
130 | return -ENODEV; | ||
131 | |||
132 | hid = acpi_device_hid(device); | ||
133 | |||
134 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
135 | if (!iomem) | ||
136 | return -ENOMEM; | ||
137 | |||
138 | len = resource_size(iomem); | ||
139 | if (len < 0x100) | ||
140 | dev_err(dev, "Invalid iomem size!\n"); | ||
141 | |||
142 | if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev))) | ||
143 | return -ENOMEM; | ||
144 | |||
145 | host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host)); | ||
146 | if (IS_ERR(host)) | ||
147 | return PTR_ERR(host); | ||
148 | |||
149 | c = sdhci_priv(host); | ||
150 | c->host = host; | ||
151 | c->slot = sdhci_acpi_get_slot(hid); | ||
152 | c->pdev = pdev; | ||
153 | c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM); | ||
154 | |||
155 | platform_set_drvdata(pdev, c); | ||
156 | |||
157 | host->hw_name = "ACPI"; | ||
158 | host->ops = &sdhci_acpi_ops_dflt; | ||
159 | host->irq = platform_get_irq(pdev, 0); | ||
160 | |||
161 | host->ioaddr = devm_ioremap_nocache(dev, iomem->start, | ||
162 | resource_size(iomem)); | ||
163 | if (host->ioaddr == NULL) { | ||
164 | err = -ENOMEM; | ||
165 | goto err_free; | ||
166 | } | ||
167 | |||
168 | if (!dev->dma_mask) { | ||
169 | u64 dma_mask; | ||
170 | |||
171 | if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) { | ||
172 | /* 64-bit DMA is not supported at present */ | ||
173 | dma_mask = DMA_BIT_MASK(32); | ||
174 | } else { | ||
175 | dma_mask = DMA_BIT_MASK(32); | ||
176 | } | ||
177 | |||
178 | dev->dma_mask = &dev->coherent_dma_mask; | ||
179 | dev->coherent_dma_mask = dma_mask; | ||
180 | } | ||
181 | |||
182 | if (c->slot) { | ||
183 | if (c->slot->chip) { | ||
184 | host->ops = c->slot->chip->ops; | ||
185 | host->quirks |= c->slot->chip->quirks; | ||
186 | host->quirks2 |= c->slot->chip->quirks2; | ||
187 | host->mmc->caps |= c->slot->chip->caps; | ||
188 | host->mmc->caps2 |= c->slot->chip->caps2; | ||
189 | host->mmc->pm_caps |= c->slot->chip->pm_caps; | ||
190 | } | ||
191 | host->quirks |= c->slot->quirks; | ||
192 | host->quirks2 |= c->slot->quirks2; | ||
193 | host->mmc->caps |= c->slot->caps; | ||
194 | host->mmc->caps2 |= c->slot->caps2; | ||
195 | host->mmc->pm_caps |= c->slot->pm_caps; | ||
196 | } | ||
197 | |||
198 | err = sdhci_add_host(host); | ||
199 | if (err) | ||
200 | goto err_free; | ||
201 | |||
202 | if (c->use_runtime_pm) { | ||
203 | pm_suspend_ignore_children(dev, 1); | ||
204 | pm_runtime_set_autosuspend_delay(dev, 50); | ||
205 | pm_runtime_use_autosuspend(dev); | ||
206 | pm_runtime_enable(dev); | ||
207 | } | ||
208 | |||
209 | return 0; | ||
210 | |||
211 | err_free: | ||
212 | platform_set_drvdata(pdev, NULL); | ||
213 | sdhci_free_host(c->host); | ||
214 | return err; | ||
215 | } | ||
216 | |||
217 | static int __devexit sdhci_acpi_remove(struct platform_device *pdev) | ||
218 | { | ||
219 | struct sdhci_acpi_host *c = platform_get_drvdata(pdev); | ||
220 | struct device *dev = &pdev->dev; | ||
221 | int dead; | ||
222 | |||
223 | if (c->use_runtime_pm) { | ||
224 | pm_runtime_get_sync(dev); | ||
225 | pm_runtime_disable(dev); | ||
226 | pm_runtime_put_noidle(dev); | ||
227 | } | ||
228 | |||
229 | dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0); | ||
230 | sdhci_remove_host(c->host, dead); | ||
231 | platform_set_drvdata(pdev, NULL); | ||
232 | sdhci_free_host(c->host); | ||
233 | |||
234 | return 0; | ||
235 | } | ||
236 | |||
237 | #ifdef CONFIG_PM_SLEEP | ||
238 | |||
239 | static int sdhci_acpi_suspend(struct device *dev) | ||
240 | { | ||
241 | struct sdhci_acpi_host *c = dev_get_drvdata(dev); | ||
242 | |||
243 | return sdhci_suspend_host(c->host); | ||
244 | } | ||
245 | |||
246 | static int sdhci_acpi_resume(struct device *dev) | ||
247 | { | ||
248 | struct sdhci_acpi_host *c = dev_get_drvdata(dev); | ||
249 | |||
250 | return sdhci_resume_host(c->host); | ||
251 | } | ||
252 | |||
253 | #else | ||
254 | |||
255 | #define sdhci_acpi_suspend NULL | ||
256 | #define sdhci_acpi_resume NULL | ||
257 | |||
258 | #endif | ||
259 | |||
260 | #ifdef CONFIG_PM_RUNTIME | ||
261 | |||
262 | static int sdhci_acpi_runtime_suspend(struct device *dev) | ||
263 | { | ||
264 | struct sdhci_acpi_host *c = dev_get_drvdata(dev); | ||
265 | |||
266 | return sdhci_runtime_suspend_host(c->host); | ||
267 | } | ||
268 | |||
269 | static int sdhci_acpi_runtime_resume(struct device *dev) | ||
270 | { | ||
271 | struct sdhci_acpi_host *c = dev_get_drvdata(dev); | ||
272 | |||
273 | return sdhci_runtime_resume_host(c->host); | ||
274 | } | ||
275 | |||
276 | static int sdhci_acpi_runtime_idle(struct device *dev) | ||
277 | { | ||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | #else | ||
282 | |||
283 | #define sdhci_acpi_runtime_suspend NULL | ||
284 | #define sdhci_acpi_runtime_resume NULL | ||
285 | #define sdhci_acpi_runtime_idle NULL | ||
286 | |||
287 | #endif | ||
288 | |||
289 | static const struct dev_pm_ops sdhci_acpi_pm_ops = { | ||
290 | .suspend = sdhci_acpi_suspend, | ||
291 | .resume = sdhci_acpi_resume, | ||
292 | .runtime_suspend = sdhci_acpi_runtime_suspend, | ||
293 | .runtime_resume = sdhci_acpi_runtime_resume, | ||
294 | .runtime_idle = sdhci_acpi_runtime_idle, | ||
295 | }; | ||
296 | |||
297 | static struct platform_driver sdhci_acpi_driver = { | ||
298 | .driver = { | ||
299 | .name = "sdhci-acpi", | ||
300 | .owner = THIS_MODULE, | ||
301 | .acpi_match_table = sdhci_acpi_ids, | ||
302 | .pm = &sdhci_acpi_pm_ops, | ||
303 | }, | ||
304 | .probe = sdhci_acpi_probe, | ||
305 | .remove = __devexit_p(sdhci_acpi_remove), | ||
306 | }; | ||
307 | |||
308 | module_platform_driver(sdhci_acpi_driver); | ||
309 | |||
310 | MODULE_DESCRIPTION("Secure Digital Host Controller Interface ACPI driver"); | ||
311 | MODULE_AUTHOR("Adrian Hunter"); | ||
312 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index 4fbfe96e37a1..f48ac5d80bbf 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c | |||
@@ -727,7 +727,9 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr) | |||
727 | 727 | ||
728 | if (!flctl->qos_request) { | 728 | if (!flctl->qos_request) { |
729 | ret = dev_pm_qos_add_request(&flctl->pdev->dev, | 729 | ret = dev_pm_qos_add_request(&flctl->pdev->dev, |
730 | &flctl->pm_qos, 100); | 730 | &flctl->pm_qos, |
731 | DEV_PM_QOS_LATENCY, | ||
732 | 100); | ||
731 | if (ret < 0) | 733 | if (ret < 0) |
732 | dev_err(&flctl->pdev->dev, | 734 | dev_err(&flctl->pdev->dev, |
733 | "PM QoS request failed: %d\n", ret); | 735 | "PM QoS request failed: %d\n", ret); |
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index c5792d622dc4..1af4008182fd 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -17,10 +17,9 @@ | |||
17 | 17 | ||
18 | #include <linux/pci-acpi.h> | 18 | #include <linux/pci-acpi.h> |
19 | #include <linux/pm_runtime.h> | 19 | #include <linux/pm_runtime.h> |
20 | #include <linux/pm_qos.h> | ||
20 | #include "pci.h" | 21 | #include "pci.h" |
21 | 22 | ||
22 | static DEFINE_MUTEX(pci_acpi_pm_notify_mtx); | ||
23 | |||
24 | /** | 23 | /** |
25 | * pci_acpi_wake_bus - Wake-up notification handler for root buses. | 24 | * pci_acpi_wake_bus - Wake-up notification handler for root buses. |
26 | * @handle: ACPI handle of a device the notification is for. | 25 | * @handle: ACPI handle of a device the notification is for. |
@@ -68,67 +67,6 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) | |||
68 | } | 67 | } |
69 | 68 | ||
70 | /** | 69 | /** |
71 | * add_pm_notifier - Register PM notifier for given ACPI device. | ||
72 | * @dev: ACPI device to add the notifier for. | ||
73 | * @context: PCI device or bus to check for PME status if an event is signaled. | ||
74 | * | ||
75 | * NOTE: @dev need not be a run-wake or wake-up device to be a valid source of | ||
76 | * PM wake-up events. For example, wake-up events may be generated for bridges | ||
77 | * if one of the devices below the bridge is signaling PME, even if the bridge | ||
78 | * itself doesn't have a wake-up GPE associated with it. | ||
79 | */ | ||
80 | static acpi_status add_pm_notifier(struct acpi_device *dev, | ||
81 | acpi_notify_handler handler, | ||
82 | void *context) | ||
83 | { | ||
84 | acpi_status status = AE_ALREADY_EXISTS; | ||
85 | |||
86 | mutex_lock(&pci_acpi_pm_notify_mtx); | ||
87 | |||
88 | if (dev->wakeup.flags.notifier_present) | ||
89 | goto out; | ||
90 | |||
91 | status = acpi_install_notify_handler(dev->handle, | ||
92 | ACPI_SYSTEM_NOTIFY, | ||
93 | handler, context); | ||
94 | if (ACPI_FAILURE(status)) | ||
95 | goto out; | ||
96 | |||
97 | dev->wakeup.flags.notifier_present = true; | ||
98 | |||
99 | out: | ||
100 | mutex_unlock(&pci_acpi_pm_notify_mtx); | ||
101 | return status; | ||
102 | } | ||
103 | |||
104 | /** | ||
105 | * remove_pm_notifier - Unregister PM notifier from given ACPI device. | ||
106 | * @dev: ACPI device to remove the notifier from. | ||
107 | */ | ||
108 | static acpi_status remove_pm_notifier(struct acpi_device *dev, | ||
109 | acpi_notify_handler handler) | ||
110 | { | ||
111 | acpi_status status = AE_BAD_PARAMETER; | ||
112 | |||
113 | mutex_lock(&pci_acpi_pm_notify_mtx); | ||
114 | |||
115 | if (!dev->wakeup.flags.notifier_present) | ||
116 | goto out; | ||
117 | |||
118 | status = acpi_remove_notify_handler(dev->handle, | ||
119 | ACPI_SYSTEM_NOTIFY, | ||
120 | handler); | ||
121 | if (ACPI_FAILURE(status)) | ||
122 | goto out; | ||
123 | |||
124 | dev->wakeup.flags.notifier_present = false; | ||
125 | |||
126 | out: | ||
127 | mutex_unlock(&pci_acpi_pm_notify_mtx); | ||
128 | return status; | ||
129 | } | ||
130 | |||
131 | /** | ||
132 | * pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus. | 70 | * pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus. |
133 | * @dev: ACPI device to add the notifier for. | 71 | * @dev: ACPI device to add the notifier for. |
134 | * @pci_bus: PCI bus to walk checking for PME status if an event is signaled. | 72 | * @pci_bus: PCI bus to walk checking for PME status if an event is signaled. |
@@ -136,7 +74,7 @@ static acpi_status remove_pm_notifier(struct acpi_device *dev, | |||
136 | acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, | 74 | acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, |
137 | struct pci_bus *pci_bus) | 75 | struct pci_bus *pci_bus) |
138 | { | 76 | { |
139 | return add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus); | 77 | return acpi_add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus); |
140 | } | 78 | } |
141 | 79 | ||
142 | /** | 80 | /** |
@@ -145,7 +83,7 @@ acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, | |||
145 | */ | 83 | */ |
146 | acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev) | 84 | acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev) |
147 | { | 85 | { |
148 | return remove_pm_notifier(dev, pci_acpi_wake_bus); | 86 | return acpi_remove_pm_notifier(dev, pci_acpi_wake_bus); |
149 | } | 87 | } |
150 | 88 | ||
151 | /** | 89 | /** |
@@ -156,7 +94,7 @@ acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev) | |||
156 | acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, | 94 | acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, |
157 | struct pci_dev *pci_dev) | 95 | struct pci_dev *pci_dev) |
158 | { | 96 | { |
159 | return add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev); | 97 | return acpi_add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev); |
160 | } | 98 | } |
161 | 99 | ||
162 | /** | 100 | /** |
@@ -165,7 +103,7 @@ acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, | |||
165 | */ | 103 | */ |
166 | acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) | 104 | acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) |
167 | { | 105 | { |
168 | return remove_pm_notifier(dev, pci_acpi_wake_dev); | 106 | return acpi_remove_pm_notifier(dev, pci_acpi_wake_dev); |
169 | } | 107 | } |
170 | 108 | ||
171 | phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle) | 109 | phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle) |
@@ -257,11 +195,16 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
257 | return -ENODEV; | 195 | return -ENODEV; |
258 | 196 | ||
259 | switch (state) { | 197 | switch (state) { |
198 | case PCI_D3cold: | ||
199 | if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) == | ||
200 | PM_QOS_FLAGS_ALL) { | ||
201 | error = -EBUSY; | ||
202 | break; | ||
203 | } | ||
260 | case PCI_D0: | 204 | case PCI_D0: |
261 | case PCI_D1: | 205 | case PCI_D1: |
262 | case PCI_D2: | 206 | case PCI_D2: |
263 | case PCI_D3hot: | 207 | case PCI_D3hot: |
264 | case PCI_D3cold: | ||
265 | error = acpi_bus_set_power(handle, state_conv[state]); | 208 | error = acpi_bus_set_power(handle, state_conv[state]); |
266 | } | 209 | } |
267 | 210 | ||
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h index fa4e0a5db3f8..ffd53e3eb92f 100644 --- a/drivers/pnp/base.h +++ b/drivers/pnp/base.h | |||
@@ -159,6 +159,8 @@ struct pnp_resource { | |||
159 | 159 | ||
160 | void pnp_free_resource(struct pnp_resource *pnp_res); | 160 | void pnp_free_resource(struct pnp_resource *pnp_res); |
161 | 161 | ||
162 | struct pnp_resource *pnp_add_resource(struct pnp_dev *dev, | ||
163 | struct resource *res); | ||
162 | struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq, | 164 | struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq, |
163 | int flags); | 165 | int flags); |
164 | struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma, | 166 | struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma, |
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 26b5d4b18dd7..72e822e17d47 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c | |||
@@ -58,7 +58,7 @@ static inline int __init is_exclusive_device(struct acpi_device *dev) | |||
58 | if (!(('0' <= (c) && (c) <= '9') || ('A' <= (c) && (c) <= 'F'))) \ | 58 | if (!(('0' <= (c) && (c) <= '9') || ('A' <= (c) && (c) <= 'F'))) \ |
59 | return 0 | 59 | return 0 |
60 | #define TEST_ALPHA(c) \ | 60 | #define TEST_ALPHA(c) \ |
61 | if (!('@' <= (c) || (c) <= 'Z')) \ | 61 | if (!('A' <= (c) && (c) <= 'Z')) \ |
62 | return 0 | 62 | return 0 |
63 | static int __init ispnpidacpi(const char *id) | 63 | static int __init ispnpidacpi(const char *id) |
64 | { | 64 | { |
@@ -95,6 +95,9 @@ static int pnpacpi_set_resources(struct pnp_dev *dev) | |||
95 | return -ENODEV; | 95 | return -ENODEV; |
96 | } | 96 | } |
97 | 97 | ||
98 | if (WARN_ON_ONCE(acpi_dev != dev->data)) | ||
99 | dev->data = acpi_dev; | ||
100 | |||
98 | ret = pnpacpi_build_resource_template(dev, &buffer); | 101 | ret = pnpacpi_build_resource_template(dev, &buffer); |
99 | if (ret) | 102 | if (ret) |
100 | return ret; | 103 | return ret; |
@@ -242,6 +245,10 @@ static int __init pnpacpi_add_device(struct acpi_device *device) | |||
242 | char *pnpid; | 245 | char *pnpid; |
243 | struct acpi_hardware_id *id; | 246 | struct acpi_hardware_id *id; |
244 | 247 | ||
248 | /* Skip devices that are already bound */ | ||
249 | if (device->physical_node_count) | ||
250 | return 0; | ||
251 | |||
245 | /* | 252 | /* |
246 | * If a PnPacpi device is not present , the device | 253 | * If a PnPacpi device is not present , the device |
247 | * driver should not be loaded. | 254 | * driver should not be loaded. |
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index 5be4a392a3ae..b8f4ea7b27fc 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c | |||
@@ -28,37 +28,6 @@ | |||
28 | #include "../base.h" | 28 | #include "../base.h" |
29 | #include "pnpacpi.h" | 29 | #include "pnpacpi.h" |
30 | 30 | ||
31 | #ifdef CONFIG_IA64 | ||
32 | #define valid_IRQ(i) (1) | ||
33 | #else | ||
34 | #define valid_IRQ(i) (((i) != 0) && ((i) != 2)) | ||
35 | #endif | ||
36 | |||
37 | /* | ||
38 | * Allocated Resources | ||
39 | */ | ||
40 | static int irq_flags(int triggering, int polarity, int shareable) | ||
41 | { | ||
42 | int flags; | ||
43 | |||
44 | if (triggering == ACPI_LEVEL_SENSITIVE) { | ||
45 | if (polarity == ACPI_ACTIVE_LOW) | ||
46 | flags = IORESOURCE_IRQ_LOWLEVEL; | ||
47 | else | ||
48 | flags = IORESOURCE_IRQ_HIGHLEVEL; | ||
49 | } else { | ||
50 | if (polarity == ACPI_ACTIVE_LOW) | ||
51 | flags = IORESOURCE_IRQ_LOWEDGE; | ||
52 | else | ||
53 | flags = IORESOURCE_IRQ_HIGHEDGE; | ||
54 | } | ||
55 | |||
56 | if (shareable == ACPI_SHARED) | ||
57 | flags |= IORESOURCE_IRQ_SHAREABLE; | ||
58 | |||
59 | return flags; | ||
60 | } | ||
61 | |||
62 | static void decode_irq_flags(struct pnp_dev *dev, int flags, int *triggering, | 31 | static void decode_irq_flags(struct pnp_dev *dev, int flags, int *triggering, |
63 | int *polarity, int *shareable) | 32 | int *polarity, int *shareable) |
64 | { | 33 | { |
@@ -94,45 +63,6 @@ static void decode_irq_flags(struct pnp_dev *dev, int flags, int *triggering, | |||
94 | *shareable = ACPI_EXCLUSIVE; | 63 | *shareable = ACPI_EXCLUSIVE; |
95 | } | 64 | } |
96 | 65 | ||
97 | static void pnpacpi_parse_allocated_irqresource(struct pnp_dev *dev, | ||
98 | u32 gsi, int triggering, | ||
99 | int polarity, int shareable) | ||
100 | { | ||
101 | int irq, flags; | ||
102 | int p, t; | ||
103 | |||
104 | if (!valid_IRQ(gsi)) { | ||
105 | pnp_add_irq_resource(dev, gsi, IORESOURCE_DISABLED); | ||
106 | return; | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * in IO-APIC mode, use overrided attribute. Two reasons: | ||
111 | * 1. BIOS bug in DSDT | ||
112 | * 2. BIOS uses IO-APIC mode Interrupt Source Override | ||
113 | */ | ||
114 | if (!acpi_get_override_irq(gsi, &t, &p)) { | ||
115 | t = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; | ||
116 | p = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; | ||
117 | |||
118 | if (triggering != t || polarity != p) { | ||
119 | dev_warn(&dev->dev, "IRQ %d override to %s, %s\n", | ||
120 | gsi, t ? "edge":"level", p ? "low":"high"); | ||
121 | triggering = t; | ||
122 | polarity = p; | ||
123 | } | ||
124 | } | ||
125 | |||
126 | flags = irq_flags(triggering, polarity, shareable); | ||
127 | irq = acpi_register_gsi(&dev->dev, gsi, triggering, polarity); | ||
128 | if (irq >= 0) | ||
129 | pcibios_penalize_isa_irq(irq, 1); | ||
130 | else | ||
131 | flags |= IORESOURCE_DISABLED; | ||
132 | |||
133 | pnp_add_irq_resource(dev, irq, flags); | ||
134 | } | ||
135 | |||
136 | static int dma_flags(struct pnp_dev *dev, int type, int bus_master, | 66 | static int dma_flags(struct pnp_dev *dev, int type, int bus_master, |
137 | int transfer) | 67 | int transfer) |
138 | { | 68 | { |
@@ -177,21 +107,16 @@ static int dma_flags(struct pnp_dev *dev, int type, int bus_master, | |||
177 | return flags; | 107 | return flags; |
178 | } | 108 | } |
179 | 109 | ||
180 | static void pnpacpi_parse_allocated_ioresource(struct pnp_dev *dev, u64 start, | 110 | /* |
181 | u64 len, int io_decode, | 111 | * Allocated Resources |
182 | int window) | 112 | */ |
183 | { | ||
184 | int flags = 0; | ||
185 | u64 end = start + len - 1; | ||
186 | 113 | ||
187 | if (io_decode == ACPI_DECODE_16) | 114 | static void pnpacpi_add_irqresource(struct pnp_dev *dev, struct resource *r) |
188 | flags |= IORESOURCE_IO_16BIT_ADDR; | 115 | { |
189 | if (len == 0 || end >= 0x10003) | 116 | if (!(r->flags & IORESOURCE_DISABLED)) |
190 | flags |= IORESOURCE_DISABLED; | 117 | pcibios_penalize_isa_irq(r->start, 1); |
191 | if (window) | ||
192 | flags |= IORESOURCE_WINDOW; | ||
193 | 118 | ||
194 | pnp_add_io_resource(dev, start, end, flags); | 119 | pnp_add_resource(dev, r); |
195 | } | 120 | } |
196 | 121 | ||
197 | /* | 122 | /* |
@@ -249,130 +174,49 @@ static void pnpacpi_parse_allocated_vendor(struct pnp_dev *dev, | |||
249 | } | 174 | } |
250 | } | 175 | } |
251 | 176 | ||
252 | static void pnpacpi_parse_allocated_memresource(struct pnp_dev *dev, | ||
253 | u64 start, u64 len, | ||
254 | int write_protect, int window) | ||
255 | { | ||
256 | int flags = 0; | ||
257 | u64 end = start + len - 1; | ||
258 | |||
259 | if (len == 0) | ||
260 | flags |= IORESOURCE_DISABLED; | ||
261 | if (write_protect == ACPI_READ_WRITE_MEMORY) | ||
262 | flags |= IORESOURCE_MEM_WRITEABLE; | ||
263 | if (window) | ||
264 | flags |= IORESOURCE_WINDOW; | ||
265 | |||
266 | pnp_add_mem_resource(dev, start, end, flags); | ||
267 | } | ||
268 | |||
269 | static void pnpacpi_parse_allocated_busresource(struct pnp_dev *dev, | ||
270 | u64 start, u64 len) | ||
271 | { | ||
272 | u64 end = start + len - 1; | ||
273 | |||
274 | pnp_add_bus_resource(dev, start, end); | ||
275 | } | ||
276 | |||
277 | static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev, | ||
278 | struct acpi_resource *res) | ||
279 | { | ||
280 | struct acpi_resource_address64 addr, *p = &addr; | ||
281 | acpi_status status; | ||
282 | int window; | ||
283 | u64 len; | ||
284 | |||
285 | status = acpi_resource_to_address64(res, p); | ||
286 | if (!ACPI_SUCCESS(status)) { | ||
287 | dev_warn(&dev->dev, "failed to convert resource type %d\n", | ||
288 | res->type); | ||
289 | return; | ||
290 | } | ||
291 | |||
292 | /* Windows apparently computes length rather than using _LEN */ | ||
293 | len = p->maximum - p->minimum + 1; | ||
294 | window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; | ||
295 | |||
296 | if (p->resource_type == ACPI_MEMORY_RANGE) | ||
297 | pnpacpi_parse_allocated_memresource(dev, p->minimum, len, | ||
298 | p->info.mem.write_protect, window); | ||
299 | else if (p->resource_type == ACPI_IO_RANGE) | ||
300 | pnpacpi_parse_allocated_ioresource(dev, p->minimum, len, | ||
301 | p->granularity == 0xfff ? ACPI_DECODE_10 : | ||
302 | ACPI_DECODE_16, window); | ||
303 | else if (p->resource_type == ACPI_BUS_NUMBER_RANGE) | ||
304 | pnpacpi_parse_allocated_busresource(dev, p->minimum, len); | ||
305 | } | ||
306 | |||
307 | static void pnpacpi_parse_allocated_ext_address_space(struct pnp_dev *dev, | ||
308 | struct acpi_resource *res) | ||
309 | { | ||
310 | struct acpi_resource_extended_address64 *p = &res->data.ext_address64; | ||
311 | int window; | ||
312 | u64 len; | ||
313 | |||
314 | /* Windows apparently computes length rather than using _LEN */ | ||
315 | len = p->maximum - p->minimum + 1; | ||
316 | window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; | ||
317 | |||
318 | if (p->resource_type == ACPI_MEMORY_RANGE) | ||
319 | pnpacpi_parse_allocated_memresource(dev, p->minimum, len, | ||
320 | p->info.mem.write_protect, window); | ||
321 | else if (p->resource_type == ACPI_IO_RANGE) | ||
322 | pnpacpi_parse_allocated_ioresource(dev, p->minimum, len, | ||
323 | p->granularity == 0xfff ? ACPI_DECODE_10 : | ||
324 | ACPI_DECODE_16, window); | ||
325 | else if (p->resource_type == ACPI_BUS_NUMBER_RANGE) | ||
326 | pnpacpi_parse_allocated_busresource(dev, p->minimum, len); | ||
327 | } | ||
328 | |||
329 | static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | 177 | static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, |
330 | void *data) | 178 | void *data) |
331 | { | 179 | { |
332 | struct pnp_dev *dev = data; | 180 | struct pnp_dev *dev = data; |
333 | struct acpi_resource_irq *irq; | ||
334 | struct acpi_resource_dma *dma; | 181 | struct acpi_resource_dma *dma; |
335 | struct acpi_resource_io *io; | ||
336 | struct acpi_resource_fixed_io *fixed_io; | ||
337 | struct acpi_resource_vendor_typed *vendor_typed; | 182 | struct acpi_resource_vendor_typed *vendor_typed; |
338 | struct acpi_resource_memory24 *memory24; | 183 | struct resource r; |
339 | struct acpi_resource_memory32 *memory32; | ||
340 | struct acpi_resource_fixed_memory32 *fixed_memory32; | ||
341 | struct acpi_resource_extended_irq *extended_irq; | ||
342 | int i, flags; | 184 | int i, flags; |
343 | 185 | ||
344 | switch (res->type) { | 186 | if (acpi_dev_resource_memory(res, &r) |
345 | case ACPI_RESOURCE_TYPE_IRQ: | 187 | || acpi_dev_resource_io(res, &r) |
346 | /* | 188 | || acpi_dev_resource_address_space(res, &r) |
347 | * Per spec, only one interrupt per descriptor is allowed in | 189 | || acpi_dev_resource_ext_address_space(res, &r)) { |
348 | * _CRS, but some firmware violates this, so parse them all. | 190 | pnp_add_resource(dev, &r); |
349 | */ | 191 | return AE_OK; |
350 | irq = &res->data.irq; | 192 | } |
351 | if (irq->interrupt_count == 0) | ||
352 | pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED); | ||
353 | else { | ||
354 | for (i = 0; i < irq->interrupt_count; i++) { | ||
355 | pnpacpi_parse_allocated_irqresource(dev, | ||
356 | irq->interrupts[i], | ||
357 | irq->triggering, | ||
358 | irq->polarity, | ||
359 | irq->sharable); | ||
360 | } | ||
361 | 193 | ||
194 | r.flags = 0; | ||
195 | if (acpi_dev_resource_interrupt(res, 0, &r)) { | ||
196 | pnpacpi_add_irqresource(dev, &r); | ||
197 | for (i = 1; acpi_dev_resource_interrupt(res, i, &r); i++) | ||
198 | pnpacpi_add_irqresource(dev, &r); | ||
199 | |||
200 | if (i > 1) { | ||
362 | /* | 201 | /* |
363 | * The IRQ encoder puts a single interrupt in each | 202 | * The IRQ encoder puts a single interrupt in each |
364 | * descriptor, so if a _CRS descriptor has more than | 203 | * descriptor, so if a _CRS descriptor has more than |
365 | * one interrupt, we won't be able to re-encode it. | 204 | * one interrupt, we won't be able to re-encode it. |
366 | */ | 205 | */ |
367 | if (pnp_can_write(dev) && irq->interrupt_count > 1) { | 206 | if (pnp_can_write(dev)) { |
368 | dev_warn(&dev->dev, "multiple interrupts in " | 207 | dev_warn(&dev->dev, "multiple interrupts in " |
369 | "_CRS descriptor; configuration can't " | 208 | "_CRS descriptor; configuration can't " |
370 | "be changed\n"); | 209 | "be changed\n"); |
371 | dev->capabilities &= ~PNP_WRITE; | 210 | dev->capabilities &= ~PNP_WRITE; |
372 | } | 211 | } |
373 | } | 212 | } |
374 | break; | 213 | return AE_OK; |
214 | } else if (r.flags & IORESOURCE_DISABLED) { | ||
215 | pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED); | ||
216 | return AE_OK; | ||
217 | } | ||
375 | 218 | ||
219 | switch (res->type) { | ||
376 | case ACPI_RESOURCE_TYPE_DMA: | 220 | case ACPI_RESOURCE_TYPE_DMA: |
377 | dma = &res->data.dma; | 221 | dma = &res->data.dma; |
378 | if (dma->channel_count > 0 && dma->channels[0] != (u8) -1) | 222 | if (dma->channel_count > 0 && dma->channels[0] != (u8) -1) |
@@ -383,26 +227,10 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | |||
383 | pnp_add_dma_resource(dev, dma->channels[0], flags); | 227 | pnp_add_dma_resource(dev, dma->channels[0], flags); |
384 | break; | 228 | break; |
385 | 229 | ||
386 | case ACPI_RESOURCE_TYPE_IO: | ||
387 | io = &res->data.io; | ||
388 | pnpacpi_parse_allocated_ioresource(dev, | ||
389 | io->minimum, | ||
390 | io->address_length, | ||
391 | io->io_decode, 0); | ||
392 | break; | ||
393 | |||
394 | case ACPI_RESOURCE_TYPE_START_DEPENDENT: | 230 | case ACPI_RESOURCE_TYPE_START_DEPENDENT: |
395 | case ACPI_RESOURCE_TYPE_END_DEPENDENT: | 231 | case ACPI_RESOURCE_TYPE_END_DEPENDENT: |
396 | break; | 232 | break; |
397 | 233 | ||
398 | case ACPI_RESOURCE_TYPE_FIXED_IO: | ||
399 | fixed_io = &res->data.fixed_io; | ||
400 | pnpacpi_parse_allocated_ioresource(dev, | ||
401 | fixed_io->address, | ||
402 | fixed_io->address_length, | ||
403 | ACPI_DECODE_10, 0); | ||
404 | break; | ||
405 | |||
406 | case ACPI_RESOURCE_TYPE_VENDOR: | 234 | case ACPI_RESOURCE_TYPE_VENDOR: |
407 | vendor_typed = &res->data.vendor_typed; | 235 | vendor_typed = &res->data.vendor_typed; |
408 | pnpacpi_parse_allocated_vendor(dev, vendor_typed); | 236 | pnpacpi_parse_allocated_vendor(dev, vendor_typed); |
@@ -411,66 +239,6 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | |||
411 | case ACPI_RESOURCE_TYPE_END_TAG: | 239 | case ACPI_RESOURCE_TYPE_END_TAG: |
412 | break; | 240 | break; |
413 | 241 | ||
414 | case ACPI_RESOURCE_TYPE_MEMORY24: | ||
415 | memory24 = &res->data.memory24; | ||
416 | pnpacpi_parse_allocated_memresource(dev, | ||
417 | memory24->minimum, | ||
418 | memory24->address_length, | ||
419 | memory24->write_protect, 0); | ||
420 | break; | ||
421 | case ACPI_RESOURCE_TYPE_MEMORY32: | ||
422 | memory32 = &res->data.memory32; | ||
423 | pnpacpi_parse_allocated_memresource(dev, | ||
424 | memory32->minimum, | ||
425 | memory32->address_length, | ||
426 | memory32->write_protect, 0); | ||
427 | break; | ||
428 | case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: | ||
429 | fixed_memory32 = &res->data.fixed_memory32; | ||
430 | pnpacpi_parse_allocated_memresource(dev, | ||
431 | fixed_memory32->address, | ||
432 | fixed_memory32->address_length, | ||
433 | fixed_memory32->write_protect, 0); | ||
434 | break; | ||
435 | case ACPI_RESOURCE_TYPE_ADDRESS16: | ||
436 | case ACPI_RESOURCE_TYPE_ADDRESS32: | ||
437 | case ACPI_RESOURCE_TYPE_ADDRESS64: | ||
438 | pnpacpi_parse_allocated_address_space(dev, res); | ||
439 | break; | ||
440 | |||
441 | case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: | ||
442 | pnpacpi_parse_allocated_ext_address_space(dev, res); | ||
443 | break; | ||
444 | |||
445 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: | ||
446 | extended_irq = &res->data.extended_irq; | ||
447 | |||
448 | if (extended_irq->interrupt_count == 0) | ||
449 | pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED); | ||
450 | else { | ||
451 | for (i = 0; i < extended_irq->interrupt_count; i++) { | ||
452 | pnpacpi_parse_allocated_irqresource(dev, | ||
453 | extended_irq->interrupts[i], | ||
454 | extended_irq->triggering, | ||
455 | extended_irq->polarity, | ||
456 | extended_irq->sharable); | ||
457 | } | ||
458 | |||
459 | /* | ||
460 | * The IRQ encoder puts a single interrupt in each | ||
461 | * descriptor, so if a _CRS descriptor has more than | ||
462 | * one interrupt, we won't be able to re-encode it. | ||
463 | */ | ||
464 | if (pnp_can_write(dev) && | ||
465 | extended_irq->interrupt_count > 1) { | ||
466 | dev_warn(&dev->dev, "multiple interrupts in " | ||
467 | "_CRS descriptor; configuration can't " | ||
468 | "be changed\n"); | ||
469 | dev->capabilities &= ~PNP_WRITE; | ||
470 | } | ||
471 | } | ||
472 | break; | ||
473 | |||
474 | case ACPI_RESOURCE_TYPE_GENERIC_REGISTER: | 242 | case ACPI_RESOURCE_TYPE_GENERIC_REGISTER: |
475 | break; | 243 | break; |
476 | 244 | ||
@@ -531,7 +299,7 @@ static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev, | |||
531 | if (p->interrupts[i]) | 299 | if (p->interrupts[i]) |
532 | __set_bit(p->interrupts[i], map.bits); | 300 | __set_bit(p->interrupts[i], map.bits); |
533 | 301 | ||
534 | flags = irq_flags(p->triggering, p->polarity, p->sharable); | 302 | flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->sharable); |
535 | pnp_register_irq_resource(dev, option_flags, &map, flags); | 303 | pnp_register_irq_resource(dev, option_flags, &map, flags); |
536 | } | 304 | } |
537 | 305 | ||
@@ -555,7 +323,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev, | |||
555 | } | 323 | } |
556 | } | 324 | } |
557 | 325 | ||
558 | flags = irq_flags(p->triggering, p->polarity, p->sharable); | 326 | flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->sharable); |
559 | pnp_register_irq_resource(dev, option_flags, &map, flags); | 327 | pnp_register_irq_resource(dev, option_flags, &map, flags); |
560 | } | 328 | } |
561 | 329 | ||
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c index b0ecacbe53b1..3e6db1c1dc29 100644 --- a/drivers/pnp/resource.c +++ b/drivers/pnp/resource.c | |||
@@ -503,6 +503,22 @@ static struct pnp_resource *pnp_new_resource(struct pnp_dev *dev) | |||
503 | return pnp_res; | 503 | return pnp_res; |
504 | } | 504 | } |
505 | 505 | ||
506 | struct pnp_resource *pnp_add_resource(struct pnp_dev *dev, | ||
507 | struct resource *res) | ||
508 | { | ||
509 | struct pnp_resource *pnp_res; | ||
510 | |||
511 | pnp_res = pnp_new_resource(dev); | ||
512 | if (!pnp_res) { | ||
513 | dev_err(&dev->dev, "can't add resource %pR\n", res); | ||
514 | return NULL; | ||
515 | } | ||
516 | |||
517 | pnp_res->res = *res; | ||
518 | dev_dbg(&dev->dev, "%pR\n", res); | ||
519 | return pnp_res; | ||
520 | } | ||
521 | |||
506 | struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq, | 522 | struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq, |
507 | int flags) | 523 | int flags) |
508 | { | 524 | { |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index d3e64080c409..718cc1f49230 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -35,6 +35,8 @@ | |||
35 | #include <linux/sched.h> | 35 | #include <linux/sched.h> |
36 | #include <linux/delay.h> | 36 | #include <linux/delay.h> |
37 | #include <linux/kthread.h> | 37 | #include <linux/kthread.h> |
38 | #include <linux/ioport.h> | ||
39 | #include <linux/acpi.h> | ||
38 | 40 | ||
39 | static void spidev_release(struct device *dev) | 41 | static void spidev_release(struct device *dev) |
40 | { | 42 | { |
@@ -93,6 +95,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv) | |||
93 | if (of_driver_match_device(dev, drv)) | 95 | if (of_driver_match_device(dev, drv)) |
94 | return 1; | 96 | return 1; |
95 | 97 | ||
98 | /* Then try ACPI */ | ||
99 | if (acpi_driver_match_device(dev, drv)) | ||
100 | return 1; | ||
101 | |||
96 | if (sdrv->id_table) | 102 | if (sdrv->id_table) |
97 | return !!spi_match_id(sdrv->id_table, spi); | 103 | return !!spi_match_id(sdrv->id_table, spi); |
98 | 104 | ||
@@ -888,6 +894,100 @@ static void of_register_spi_devices(struct spi_master *master) | |||
888 | static void of_register_spi_devices(struct spi_master *master) { } | 894 | static void of_register_spi_devices(struct spi_master *master) { } |
889 | #endif | 895 | #endif |
890 | 896 | ||
897 | #ifdef CONFIG_ACPI | ||
898 | static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) | ||
899 | { | ||
900 | struct spi_device *spi = data; | ||
901 | |||
902 | if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { | ||
903 | struct acpi_resource_spi_serialbus *sb; | ||
904 | |||
905 | sb = &ares->data.spi_serial_bus; | ||
906 | if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { | ||
907 | spi->chip_select = sb->device_selection; | ||
908 | spi->max_speed_hz = sb->connection_speed; | ||
909 | |||
910 | if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) | ||
911 | spi->mode |= SPI_CPHA; | ||
912 | if (sb->clock_polarity == ACPI_SPI_START_HIGH) | ||
913 | spi->mode |= SPI_CPOL; | ||
914 | if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) | ||
915 | spi->mode |= SPI_CS_HIGH; | ||
916 | } | ||
917 | } else if (spi->irq < 0) { | ||
918 | struct resource r; | ||
919 | |||
920 | if (acpi_dev_resource_interrupt(ares, 0, &r)) | ||
921 | spi->irq = r.start; | ||
922 | } | ||
923 | |||
924 | /* Always tell the ACPI core to skip this resource */ | ||
925 | return 1; | ||
926 | } | ||
927 | |||
928 | static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, | ||
929 | void *data, void **return_value) | ||
930 | { | ||
931 | struct spi_master *master = data; | ||
932 | struct list_head resource_list; | ||
933 | struct acpi_device *adev; | ||
934 | struct spi_device *spi; | ||
935 | int ret; | ||
936 | |||
937 | if (acpi_bus_get_device(handle, &adev)) | ||
938 | return AE_OK; | ||
939 | if (acpi_bus_get_status(adev) || !adev->status.present) | ||
940 | return AE_OK; | ||
941 | |||
942 | spi = spi_alloc_device(master); | ||
943 | if (!spi) { | ||
944 | dev_err(&master->dev, "failed to allocate SPI device for %s\n", | ||
945 | dev_name(&adev->dev)); | ||
946 | return AE_NO_MEMORY; | ||
947 | } | ||
948 | |||
949 | ACPI_HANDLE_SET(&spi->dev, handle); | ||
950 | spi->irq = -1; | ||
951 | |||
952 | INIT_LIST_HEAD(&resource_list); | ||
953 | ret = acpi_dev_get_resources(adev, &resource_list, | ||
954 | acpi_spi_add_resource, spi); | ||
955 | acpi_dev_free_resource_list(&resource_list); | ||
956 | |||
957 | if (ret < 0 || !spi->max_speed_hz) { | ||
958 | spi_dev_put(spi); | ||
959 | return AE_OK; | ||
960 | } | ||
961 | |||
962 | strlcpy(spi->modalias, dev_name(&adev->dev), sizeof(spi->modalias)); | ||
963 | if (spi_add_device(spi)) { | ||
964 | dev_err(&master->dev, "failed to add SPI device %s from ACPI\n", | ||
965 | dev_name(&adev->dev)); | ||
966 | spi_dev_put(spi); | ||
967 | } | ||
968 | |||
969 | return AE_OK; | ||
970 | } | ||
971 | |||
972 | static void acpi_register_spi_devices(struct spi_master *master) | ||
973 | { | ||
974 | acpi_status status; | ||
975 | acpi_handle handle; | ||
976 | |||
977 | handle = ACPI_HANDLE(&master->dev); | ||
978 | if (!handle) | ||
979 | return; | ||
980 | |||
981 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, | ||
982 | acpi_spi_add_device, NULL, | ||
983 | master, NULL); | ||
984 | if (ACPI_FAILURE(status)) | ||
985 | dev_warn(&master->dev, "failed to enumerate SPI slaves\n"); | ||
986 | } | ||
987 | #else | ||
988 | static inline void acpi_register_spi_devices(struct spi_master *master) {} | ||
989 | #endif /* CONFIG_ACPI */ | ||
990 | |||
891 | static void spi_master_release(struct device *dev) | 991 | static void spi_master_release(struct device *dev) |
892 | { | 992 | { |
893 | struct spi_master *master; | 993 | struct spi_master *master; |
@@ -1023,8 +1123,9 @@ int spi_register_master(struct spi_master *master) | |||
1023 | spi_match_master_to_boardinfo(master, &bi->board_info); | 1123 | spi_match_master_to_boardinfo(master, &bi->board_info); |
1024 | mutex_unlock(&board_lock); | 1124 | mutex_unlock(&board_lock); |
1025 | 1125 | ||
1026 | /* Register devices from the device tree */ | 1126 | /* Register devices from the device tree and ACPI */ |
1027 | of_register_spi_devices(master); | 1127 | of_register_spi_devices(master); |
1128 | acpi_register_spi_devices(master); | ||
1028 | done: | 1129 | done: |
1029 | return status; | 1130 | return status; |
1030 | } | 1131 | } |
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h index 03f14856bd09..0943457e0fa5 100644 --- a/include/acpi/acconfig.h +++ b/include/acpi/acconfig.h | |||
@@ -241,6 +241,7 @@ | |||
241 | *****************************************************************************/ | 241 | *****************************************************************************/ |
242 | 242 | ||
243 | #define ACPI_DEBUGGER_MAX_ARGS 8 /* Must be max method args + 1 */ | 243 | #define ACPI_DEBUGGER_MAX_ARGS 8 /* Must be max method args + 1 */ |
244 | #define ACPI_DB_LINE_BUFFER_SIZE 512 | ||
244 | 245 | ||
245 | #define ACPI_DEBUGGER_COMMAND_PROMPT '-' | 246 | #define ACPI_DEBUGGER_COMMAND_PROMPT '-' |
246 | #define ACPI_DEBUGGER_EXECUTE_PROMPT '%' | 247 | #define ACPI_DEBUGGER_EXECUTE_PROMPT '%' |
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h index 19503449814f..6c3890e02140 100644 --- a/include/acpi/acexcep.h +++ b/include/acpi/acexcep.h | |||
@@ -122,7 +122,7 @@ | |||
122 | #define AE_CODE_TBL_MAX 0x0005 | 122 | #define AE_CODE_TBL_MAX 0x0005 |
123 | 123 | ||
124 | /* | 124 | /* |
125 | * AML exceptions. These are caused by problems with | 125 | * AML exceptions. These are caused by problems with |
126 | * the actual AML byte stream | 126 | * the actual AML byte stream |
127 | */ | 127 | */ |
128 | #define AE_AML_BAD_OPCODE (acpi_status) (0x0001 | AE_CODE_AML) | 128 | #define AE_AML_BAD_OPCODE (acpi_status) (0x0001 | AE_CODE_AML) |
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h index 745dd24e3cb5..7665df663284 100644 --- a/include/acpi/acnames.h +++ b/include/acpi/acnames.h | |||
@@ -50,6 +50,7 @@ | |||
50 | #define METHOD_NAME__HID "_HID" | 50 | #define METHOD_NAME__HID "_HID" |
51 | #define METHOD_NAME__CID "_CID" | 51 | #define METHOD_NAME__CID "_CID" |
52 | #define METHOD_NAME__UID "_UID" | 52 | #define METHOD_NAME__UID "_UID" |
53 | #define METHOD_NAME__SUB "_SUB" | ||
53 | #define METHOD_NAME__ADR "_ADR" | 54 | #define METHOD_NAME__ADR "_ADR" |
54 | #define METHOD_NAME__INI "_INI" | 55 | #define METHOD_NAME__INI "_INI" |
55 | #define METHOD_NAME__STA "_STA" | 56 | #define METHOD_NAME__STA "_STA" |
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 0daa0fbd8654..7ced5dc20dd3 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h | |||
@@ -144,12 +144,11 @@ struct acpi_device_flags { | |||
144 | u32 bus_address:1; | 144 | u32 bus_address:1; |
145 | u32 removable:1; | 145 | u32 removable:1; |
146 | u32 ejectable:1; | 146 | u32 ejectable:1; |
147 | u32 lockable:1; | ||
148 | u32 suprise_removal_ok:1; | 147 | u32 suprise_removal_ok:1; |
149 | u32 power_manageable:1; | 148 | u32 power_manageable:1; |
150 | u32 performance_manageable:1; | 149 | u32 performance_manageable:1; |
151 | u32 eject_pending:1; | 150 | u32 eject_pending:1; |
152 | u32 reserved:23; | 151 | u32 reserved:24; |
153 | }; | 152 | }; |
154 | 153 | ||
155 | /* File System */ | 154 | /* File System */ |
@@ -180,6 +179,7 @@ struct acpi_device_pnp { | |||
180 | acpi_device_name device_name; /* Driver-determined */ | 179 | acpi_device_name device_name; /* Driver-determined */ |
181 | acpi_device_class device_class; /* " */ | 180 | acpi_device_class device_class; /* " */ |
182 | union acpi_object *str_obj; /* unicode string for _STR method */ | 181 | union acpi_object *str_obj; /* unicode string for _STR method */ |
182 | unsigned long sun; /* _SUN */ | ||
183 | }; | 183 | }; |
184 | 184 | ||
185 | #define acpi_device_bid(d) ((d)->pnp.bus_id) | 185 | #define acpi_device_bid(d) ((d)->pnp.bus_id) |
@@ -201,6 +201,7 @@ struct acpi_device_power_flags { | |||
201 | struct acpi_device_power_state { | 201 | struct acpi_device_power_state { |
202 | struct { | 202 | struct { |
203 | u8 valid:1; | 203 | u8 valid:1; |
204 | u8 os_accessible:1; | ||
204 | u8 explicit_set:1; /* _PSx present? */ | 205 | u8 explicit_set:1; /* _PSx present? */ |
205 | u8 reserved:6; | 206 | u8 reserved:6; |
206 | } flags; | 207 | } flags; |
@@ -339,6 +340,7 @@ acpi_status acpi_bus_get_status_handle(acpi_handle handle, | |||
339 | unsigned long long *sta); | 340 | unsigned long long *sta); |
340 | int acpi_bus_get_status(struct acpi_device *device); | 341 | int acpi_bus_get_status(struct acpi_device *device); |
341 | int acpi_bus_set_power(acpi_handle handle, int state); | 342 | int acpi_bus_set_power(acpi_handle handle, int state); |
343 | int acpi_device_set_power(struct acpi_device *device, int state); | ||
342 | int acpi_bus_update_power(acpi_handle handle, int *state_p); | 344 | int acpi_bus_update_power(acpi_handle handle, int *state_p); |
343 | bool acpi_bus_power_manageable(acpi_handle handle); | 345 | bool acpi_bus_power_manageable(acpi_handle handle); |
344 | bool acpi_bus_can_wakeup(acpi_handle handle); | 346 | bool acpi_bus_can_wakeup(acpi_handle handle); |
@@ -410,36 +412,100 @@ acpi_handle acpi_get_child(acpi_handle, u64); | |||
410 | int acpi_is_root_bridge(acpi_handle); | 412 | int acpi_is_root_bridge(acpi_handle); |
411 | acpi_handle acpi_get_pci_rootbridge_handle(unsigned int, unsigned int); | 413 | acpi_handle acpi_get_pci_rootbridge_handle(unsigned int, unsigned int); |
412 | struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); | 414 | struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); |
413 | #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)((dev)->archdata.acpi_handle)) | 415 | #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev)) |
414 | 416 | ||
415 | int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state); | 417 | int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state); |
416 | int acpi_disable_wakeup_device_power(struct acpi_device *dev); | 418 | int acpi_disable_wakeup_device_power(struct acpi_device *dev); |
417 | 419 | ||
418 | #ifdef CONFIG_PM | 420 | #ifdef CONFIG_PM |
421 | acpi_status acpi_add_pm_notifier(struct acpi_device *adev, | ||
422 | acpi_notify_handler handler, void *context); | ||
423 | acpi_status acpi_remove_pm_notifier(struct acpi_device *adev, | ||
424 | acpi_notify_handler handler); | ||
425 | int acpi_device_power_state(struct device *dev, struct acpi_device *adev, | ||
426 | u32 target_state, int d_max_in, int *d_min_p); | ||
419 | int acpi_pm_device_sleep_state(struct device *, int *, int); | 427 | int acpi_pm_device_sleep_state(struct device *, int *, int); |
420 | #else | 428 | #else |
421 | static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m) | 429 | static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev, |
430 | acpi_notify_handler handler, | ||
431 | void *context) | ||
432 | { | ||
433 | return AE_SUPPORT; | ||
434 | } | ||
435 | static inline acpi_status acpi_remove_pm_notifier(struct acpi_device *adev, | ||
436 | acpi_notify_handler handler) | ||
437 | { | ||
438 | return AE_SUPPORT; | ||
439 | } | ||
440 | static inline int __acpi_device_power_state(int m, int *p) | ||
422 | { | 441 | { |
423 | if (p) | 442 | if (p) |
424 | *p = ACPI_STATE_D0; | 443 | *p = ACPI_STATE_D0; |
425 | return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3) ? m : ACPI_STATE_D0; | 444 | return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3) ? m : ACPI_STATE_D0; |
426 | } | 445 | } |
446 | static inline int acpi_device_power_state(struct device *dev, | ||
447 | struct acpi_device *adev, | ||
448 | u32 target_state, int d_max_in, | ||
449 | int *d_min_p) | ||
450 | { | ||
451 | return __acpi_device_power_state(d_max_in, d_min_p); | ||
452 | } | ||
453 | static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m) | ||
454 | { | ||
455 | return __acpi_device_power_state(m, p); | ||
456 | } | ||
427 | #endif | 457 | #endif |
428 | 458 | ||
429 | #ifdef CONFIG_PM_SLEEP | 459 | #ifdef CONFIG_PM_RUNTIME |
460 | int __acpi_device_run_wake(struct acpi_device *, bool); | ||
430 | int acpi_pm_device_run_wake(struct device *, bool); | 461 | int acpi_pm_device_run_wake(struct device *, bool); |
431 | int acpi_pm_device_sleep_wake(struct device *, bool); | ||
432 | #else | 462 | #else |
463 | static inline int __acpi_device_run_wake(struct acpi_device *adev, bool en) | ||
464 | { | ||
465 | return -ENODEV; | ||
466 | } | ||
433 | static inline int acpi_pm_device_run_wake(struct device *dev, bool enable) | 467 | static inline int acpi_pm_device_run_wake(struct device *dev, bool enable) |
434 | { | 468 | { |
435 | return -ENODEV; | 469 | return -ENODEV; |
436 | } | 470 | } |
471 | #endif | ||
472 | |||
473 | #ifdef CONFIG_PM_SLEEP | ||
474 | int __acpi_device_sleep_wake(struct acpi_device *, u32, bool); | ||
475 | int acpi_pm_device_sleep_wake(struct device *, bool); | ||
476 | #else | ||
477 | static inline int __acpi_device_sleep_wake(struct acpi_device *adev, | ||
478 | u32 target_state, bool enable) | ||
479 | { | ||
480 | return -ENODEV; | ||
481 | } | ||
437 | static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable) | 482 | static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable) |
438 | { | 483 | { |
439 | return -ENODEV; | 484 | return -ENODEV; |
440 | } | 485 | } |
441 | #endif | 486 | #endif |
442 | 487 | ||
488 | #ifdef CONFIG_ACPI_SLEEP | ||
489 | u32 acpi_target_system_state(void); | ||
490 | #else | ||
491 | static inline u32 acpi_target_system_state(void) { return ACPI_STATE_S0; } | ||
492 | #endif | ||
493 | |||
494 | static inline bool acpi_device_power_manageable(struct acpi_device *adev) | ||
495 | { | ||
496 | return adev->flags.power_manageable; | ||
497 | } | ||
498 | |||
499 | static inline bool acpi_device_can_wakeup(struct acpi_device *adev) | ||
500 | { | ||
501 | return adev->wakeup.flags.valid; | ||
502 | } | ||
503 | |||
504 | static inline bool acpi_device_can_poweroff(struct acpi_device *adev) | ||
505 | { | ||
506 | return adev->power.states[ACPI_STATE_D3_COLD].flags.os_accessible; | ||
507 | } | ||
508 | |||
443 | #else /* CONFIG_ACPI */ | 509 | #else /* CONFIG_ACPI */ |
444 | 510 | ||
445 | static inline int register_acpi_bus_type(void *bus) { return 0; } | 511 | static inline int register_acpi_bus_type(void *bus) { return 0; } |
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h index 1222ba93d80a..43152742b46f 100644 --- a/include/acpi/acpiosxf.h +++ b/include/acpi/acpiosxf.h | |||
@@ -1,7 +1,6 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Name: acpiosxf.h - All interfaces to the OS Services Layer (OSL). These | 3 | * Name: acpiosxf.h - All interfaces to the OS Services Layer (OSL). These |
5 | * interfaces must be implemented by OSL to interface the | 4 | * interfaces must be implemented by OSL to interface the |
6 | * ACPI components to the host operating system. | 5 | * ACPI components to the host operating system. |
7 | * | 6 | * |
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 8b891dbead66..3d88395d4d6f 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Name: acpixf.h - External interfaces to the ACPI subsystem | 3 | * Name: acpixf.h - External interfaces to the ACPI subsystem |
@@ -47,7 +46,7 @@ | |||
47 | 46 | ||
48 | /* Current ACPICA subsystem version in YYYYMMDD format */ | 47 | /* Current ACPICA subsystem version in YYYYMMDD format */ |
49 | 48 | ||
50 | #define ACPI_CA_VERSION 0x20120913 | 49 | #define ACPI_CA_VERSION 0x20121018 |
51 | 50 | ||
52 | #include <acpi/acconfig.h> | 51 | #include <acpi/acconfig.h> |
53 | #include <acpi/actypes.h> | 52 | #include <acpi/actypes.h> |
@@ -178,8 +177,7 @@ acpi_status acpi_unload_table_id(acpi_owner_id id); | |||
178 | 177 | ||
179 | acpi_status | 178 | acpi_status |
180 | acpi_get_table_header(acpi_string signature, | 179 | acpi_get_table_header(acpi_string signature, |
181 | u32 instance, | 180 | u32 instance, struct acpi_table_header *out_table_header); |
182 | struct acpi_table_header *out_table_header); | ||
183 | 181 | ||
184 | acpi_status | 182 | acpi_status |
185 | acpi_get_table_with_size(acpi_string signature, | 183 | acpi_get_table_with_size(acpi_string signature, |
@@ -190,8 +188,7 @@ acpi_get_table(acpi_string signature, | |||
190 | u32 instance, struct acpi_table_header **out_table); | 188 | u32 instance, struct acpi_table_header **out_table); |
191 | 189 | ||
192 | acpi_status | 190 | acpi_status |
193 | acpi_get_table_by_index(u32 table_index, | 191 | acpi_get_table_by_index(u32 table_index, struct acpi_table_header **out_table); |
194 | struct acpi_table_header **out_table); | ||
195 | 192 | ||
196 | acpi_status | 193 | acpi_status |
197 | acpi_install_table_handler(acpi_tbl_handler handler, void *context); | 194 | acpi_install_table_handler(acpi_tbl_handler handler, void *context); |
@@ -274,7 +271,7 @@ acpi_install_initialization_handler(acpi_init_handler handler, u32 function); | |||
274 | 271 | ||
275 | ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status | 272 | ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status |
276 | acpi_install_global_event_handler | 273 | acpi_install_global_event_handler |
277 | (ACPI_GBL_EVENT_HANDLER handler, void *context)) | 274 | (acpi_gbl_event_handler handler, void *context)) |
278 | 275 | ||
279 | ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status | 276 | ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status |
280 | acpi_install_fixed_event_handler(u32 | 277 | acpi_install_fixed_event_handler(u32 |
@@ -300,10 +297,9 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status | |||
300 | u32 gpe_number, | 297 | u32 gpe_number, |
301 | acpi_gpe_handler | 298 | acpi_gpe_handler |
302 | address)) | 299 | address)) |
303 | acpi_status | 300 | acpi_status acpi_install_notify_handler(acpi_handle device, u32 handler_type, |
304 | acpi_install_notify_handler(acpi_handle device, | 301 | acpi_notify_handler handler, |
305 | u32 handler_type, | 302 | void *context); |
306 | acpi_notify_handler handler, void *context); | ||
307 | 303 | ||
308 | acpi_status | 304 | acpi_status |
309 | acpi_remove_notify_handler(acpi_handle device, | 305 | acpi_remove_notify_handler(acpi_handle device, |
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h index 8c61b5fe42a4..6585141e4b97 100644 --- a/include/acpi/actbl3.h +++ b/include/acpi/actbl3.h | |||
@@ -277,10 +277,10 @@ struct acpi_table_gtdt { | |||
277 | ******************************************************************************/ | 277 | ******************************************************************************/ |
278 | 278 | ||
279 | #define ACPI_MPST_CHANNEL_INFO \ | 279 | #define ACPI_MPST_CHANNEL_INFO \ |
280 | u16 reserved1; \ | ||
281 | u8 channel_id; \ | 280 | u8 channel_id; \ |
282 | u8 reserved2; \ | 281 | u8 reserved1[3]; \ |
283 | u16 power_node_count; | 282 | u16 power_node_count; \ |
283 | u16 reserved2; | ||
284 | 284 | ||
285 | /* Main table */ | 285 | /* Main table */ |
286 | 286 | ||
@@ -304,9 +304,8 @@ struct acpi_mpst_power_node { | |||
304 | u32 length; | 304 | u32 length; |
305 | u64 range_address; | 305 | u64 range_address; |
306 | u64 range_length; | 306 | u64 range_length; |
307 | u8 num_power_states; | 307 | u32 num_power_states; |
308 | u8 num_physical_components; | 308 | u32 num_physical_components; |
309 | u16 reserved2; | ||
310 | }; | 309 | }; |
311 | 310 | ||
312 | /* Values for Flags field above */ | 311 | /* Values for Flags field above */ |
@@ -332,10 +331,11 @@ struct acpi_mpst_component { | |||
332 | 331 | ||
333 | struct acpi_mpst_data_hdr { | 332 | struct acpi_mpst_data_hdr { |
334 | u16 characteristics_count; | 333 | u16 characteristics_count; |
334 | u16 reserved; | ||
335 | }; | 335 | }; |
336 | 336 | ||
337 | struct acpi_mpst_power_data { | 337 | struct acpi_mpst_power_data { |
338 | u8 revision; | 338 | u8 structure_id; |
339 | u8 flags; | 339 | u8 flags; |
340 | u16 reserved1; | 340 | u16 reserved1; |
341 | u32 average_power; | 341 | u32 average_power; |
@@ -356,10 +356,10 @@ struct acpi_mpst_shared { | |||
356 | u32 signature; | 356 | u32 signature; |
357 | u16 pcc_command; | 357 | u16 pcc_command; |
358 | u16 pcc_status; | 358 | u16 pcc_status; |
359 | u16 command_register; | 359 | u32 command_register; |
360 | u16 status_register; | 360 | u32 status_register; |
361 | u16 power_state_id; | 361 | u32 power_state_id; |
362 | u16 power_node_id; | 362 | u32 power_node_id; |
363 | u64 energy_consumed; | 363 | u64 energy_consumed; |
364 | u64 average_power; | 364 | u64 average_power; |
365 | }; | 365 | }; |
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index a85bae968262..4f43f1fba132 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h | |||
@@ -453,10 +453,14 @@ typedef u64 acpi_integer; | |||
453 | #define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i) | 453 | #define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i) |
454 | #define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i) | 454 | #define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i) |
455 | 455 | ||
456 | /* Optimizations for 4-character (32-bit) acpi_name manipulation */ | ||
457 | |||
456 | #ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED | 458 | #ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED |
457 | #define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b))) | 459 | #define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b))) |
460 | #define ACPI_MOVE_NAME(dest,src) (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src))) | ||
458 | #else | 461 | #else |
459 | #define ACPI_COMPARE_NAME(a,b) (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE)) | 462 | #define ACPI_COMPARE_NAME(a,b) (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE)) |
463 | #define ACPI_MOVE_NAME(dest,src) (ACPI_STRNCPY (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAME_SIZE)) | ||
460 | #endif | 464 | #endif |
461 | 465 | ||
462 | /******************************************************************************* | 466 | /******************************************************************************* |
@@ -796,11 +800,11 @@ typedef u8 acpi_adr_space_type; | |||
796 | 800 | ||
797 | /* Sleep function dispatch */ | 801 | /* Sleep function dispatch */ |
798 | 802 | ||
799 | typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state); | 803 | typedef acpi_status(*acpi_sleep_function) (u8 sleep_state); |
800 | 804 | ||
801 | struct acpi_sleep_functions { | 805 | struct acpi_sleep_functions { |
802 | ACPI_SLEEP_FUNCTION legacy_function; | 806 | acpi_sleep_function legacy_function; |
803 | ACPI_SLEEP_FUNCTION extended_function; | 807 | acpi_sleep_function extended_function; |
804 | }; | 808 | }; |
805 | 809 | ||
806 | /* | 810 | /* |
@@ -922,7 +926,8 @@ struct acpi_system_info { | |||
922 | /* | 926 | /* |
923 | * Types specific to the OS service interfaces | 927 | * Types specific to the OS service interfaces |
924 | */ | 928 | */ |
925 | typedef u32(ACPI_SYSTEM_XFACE * acpi_osd_handler) (void *context); | 929 | typedef u32 |
930 | (ACPI_SYSTEM_XFACE * acpi_osd_handler) (void *context); | ||
926 | 931 | ||
927 | typedef void | 932 | typedef void |
928 | (ACPI_SYSTEM_XFACE * acpi_osd_exec_callback) (void *context); | 933 | (ACPI_SYSTEM_XFACE * acpi_osd_exec_callback) (void *context); |
@@ -931,14 +936,15 @@ typedef void | |||
931 | * Various handlers and callback procedures | 936 | * Various handlers and callback procedures |
932 | */ | 937 | */ |
933 | typedef | 938 | typedef |
934 | void (*ACPI_GBL_EVENT_HANDLER) (u32 event_type, | 939 | void (*acpi_gbl_event_handler) (u32 event_type, |
935 | acpi_handle device, | 940 | acpi_handle device, |
936 | u32 event_number, void *context); | 941 | u32 event_number, void *context); |
937 | 942 | ||
938 | #define ACPI_EVENT_TYPE_GPE 0 | 943 | #define ACPI_EVENT_TYPE_GPE 0 |
939 | #define ACPI_EVENT_TYPE_FIXED 1 | 944 | #define ACPI_EVENT_TYPE_FIXED 1 |
940 | 945 | ||
941 | typedef u32(*acpi_event_handler) (void *context); | 946 | typedef |
947 | u32(*acpi_event_handler) (void *context); | ||
942 | 948 | ||
943 | typedef | 949 | typedef |
944 | u32 (*acpi_gpe_handler) (acpi_handle gpe_device, u32 gpe_number, void *context); | 950 | u32 (*acpi_gpe_handler) (acpi_handle gpe_device, u32 gpe_number, void *context); |
@@ -1018,17 +1024,17 @@ u32 (*acpi_interface_handler) (acpi_string interface_name, u32 supported); | |||
1018 | 1024 | ||
1019 | #define ACPI_UUID_LENGTH 16 | 1025 | #define ACPI_UUID_LENGTH 16 |
1020 | 1026 | ||
1021 | /* Structures used for device/processor HID, UID, CID */ | 1027 | /* Structures used for device/processor HID, UID, CID, and SUB */ |
1022 | 1028 | ||
1023 | struct acpica_device_id { | 1029 | struct acpi_pnp_device_id { |
1024 | u32 length; /* Length of string + null */ | 1030 | u32 length; /* Length of string + null */ |
1025 | char *string; | 1031 | char *string; |
1026 | }; | 1032 | }; |
1027 | 1033 | ||
1028 | struct acpica_device_id_list { | 1034 | struct acpi_pnp_device_id_list { |
1029 | u32 count; /* Number of IDs in Ids array */ | 1035 | u32 count; /* Number of IDs in Ids array */ |
1030 | u32 list_size; /* Size of list, including ID strings */ | 1036 | u32 list_size; /* Size of list, including ID strings */ |
1031 | struct acpica_device_id ids[1]; /* ID array */ | 1037 | struct acpi_pnp_device_id ids[1]; /* ID array */ |
1032 | }; | 1038 | }; |
1033 | 1039 | ||
1034 | /* | 1040 | /* |
@@ -1046,9 +1052,10 @@ struct acpi_device_info { | |||
1046 | u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */ | 1052 | u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */ |
1047 | u32 current_status; /* _STA value */ | 1053 | u32 current_status; /* _STA value */ |
1048 | u64 address; /* _ADR value */ | 1054 | u64 address; /* _ADR value */ |
1049 | struct acpica_device_id hardware_id; /* _HID value */ | 1055 | struct acpi_pnp_device_id hardware_id; /* _HID value */ |
1050 | struct acpica_device_id unique_id; /* _UID value */ | 1056 | struct acpi_pnp_device_id unique_id; /* _UID value */ |
1051 | struct acpica_device_id_list compatible_id_list; /* _CID list <must be last> */ | 1057 | struct acpi_pnp_device_id subsystem_id; /* _SUB value */ |
1058 | struct acpi_pnp_device_id_list compatible_id_list; /* _CID list <must be last> */ | ||
1052 | }; | 1059 | }; |
1053 | 1060 | ||
1054 | /* Values for Flags field above (acpi_get_object_info) */ | 1061 | /* Values for Flags field above (acpi_get_object_info) */ |
@@ -1061,11 +1068,12 @@ struct acpi_device_info { | |||
1061 | #define ACPI_VALID_ADR 0x02 | 1068 | #define ACPI_VALID_ADR 0x02 |
1062 | #define ACPI_VALID_HID 0x04 | 1069 | #define ACPI_VALID_HID 0x04 |
1063 | #define ACPI_VALID_UID 0x08 | 1070 | #define ACPI_VALID_UID 0x08 |
1064 | #define ACPI_VALID_CID 0x10 | 1071 | #define ACPI_VALID_SUB 0x10 |
1065 | #define ACPI_VALID_SXDS 0x20 | 1072 | #define ACPI_VALID_CID 0x20 |
1066 | #define ACPI_VALID_SXWS 0x40 | 1073 | #define ACPI_VALID_SXDS 0x40 |
1074 | #define ACPI_VALID_SXWS 0x80 | ||
1067 | 1075 | ||
1068 | /* Flags for _STA method */ | 1076 | /* Flags for _STA return value (current_status above) */ |
1069 | 1077 | ||
1070 | #define ACPI_STA_DEVICE_PRESENT 0x01 | 1078 | #define ACPI_STA_DEVICE_PRESENT 0x01 |
1071 | #define ACPI_STA_DEVICE_ENABLED 0x02 | 1079 | #define ACPI_STA_DEVICE_ENABLED 0x02 |
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 90be98981102..c33fa3ce9b7c 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -25,7 +25,9 @@ | |||
25 | #ifndef _LINUX_ACPI_H | 25 | #ifndef _LINUX_ACPI_H |
26 | #define _LINUX_ACPI_H | 26 | #define _LINUX_ACPI_H |
27 | 27 | ||
28 | #include <linux/errno.h> | ||
28 | #include <linux/ioport.h> /* for struct resource */ | 29 | #include <linux/ioport.h> /* for struct resource */ |
30 | #include <linux/device.h> | ||
29 | 31 | ||
30 | #ifdef CONFIG_ACPI | 32 | #ifdef CONFIG_ACPI |
31 | 33 | ||
@@ -250,6 +252,26 @@ extern int pnpacpi_disabled; | |||
250 | 252 | ||
251 | #define PXM_INVAL (-1) | 253 | #define PXM_INVAL (-1) |
252 | 254 | ||
255 | bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res); | ||
256 | bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res); | ||
257 | bool acpi_dev_resource_address_space(struct acpi_resource *ares, | ||
258 | struct resource *res); | ||
259 | bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, | ||
260 | struct resource *res); | ||
261 | unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable); | ||
262 | bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, | ||
263 | struct resource *res); | ||
264 | |||
265 | struct resource_list_entry { | ||
266 | struct list_head node; | ||
267 | struct resource res; | ||
268 | }; | ||
269 | |||
270 | void acpi_dev_free_resource_list(struct list_head *list); | ||
271 | int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, | ||
272 | int (*preproc)(struct acpi_resource *, void *), | ||
273 | void *preproc_data); | ||
274 | |||
253 | int acpi_check_resource_conflict(const struct resource *res); | 275 | int acpi_check_resource_conflict(const struct resource *res); |
254 | 276 | ||
255 | int acpi_check_region(resource_size_t start, resource_size_t n, | 277 | int acpi_check_region(resource_size_t start, resource_size_t n, |
@@ -257,10 +279,14 @@ int acpi_check_region(resource_size_t start, resource_size_t n, | |||
257 | 279 | ||
258 | int acpi_resources_are_enforced(void); | 280 | int acpi_resources_are_enforced(void); |
259 | 281 | ||
260 | #ifdef CONFIG_PM_SLEEP | 282 | #ifdef CONFIG_HIBERNATION |
261 | void __init acpi_no_s4_hw_signature(void); | 283 | void __init acpi_no_s4_hw_signature(void); |
284 | #endif | ||
285 | |||
286 | #ifdef CONFIG_PM_SLEEP | ||
262 | void __init acpi_old_suspend_ordering(void); | 287 | void __init acpi_old_suspend_ordering(void); |
263 | void __init acpi_nvs_nosave(void); | 288 | void __init acpi_nvs_nosave(void); |
289 | void __init acpi_nvs_nosave_s3(void); | ||
264 | #endif /* CONFIG_PM_SLEEP */ | 290 | #endif /* CONFIG_PM_SLEEP */ |
265 | 291 | ||
266 | struct acpi_osc_context { | 292 | struct acpi_osc_context { |
@@ -364,6 +390,17 @@ extern int acpi_nvs_register(__u64 start, __u64 size); | |||
364 | extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), | 390 | extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), |
365 | void *data); | 391 | void *data); |
366 | 392 | ||
393 | const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, | ||
394 | const struct device *dev); | ||
395 | |||
396 | static inline bool acpi_driver_match_device(struct device *dev, | ||
397 | const struct device_driver *drv) | ||
398 | { | ||
399 | return !!acpi_match_device(drv->acpi_match_table, dev); | ||
400 | } | ||
401 | |||
402 | #define ACPI_PTR(_ptr) (_ptr) | ||
403 | |||
367 | #else /* !CONFIG_ACPI */ | 404 | #else /* !CONFIG_ACPI */ |
368 | 405 | ||
369 | #define acpi_disabled 1 | 406 | #define acpi_disabled 1 |
@@ -418,6 +455,22 @@ static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), | |||
418 | return 0; | 455 | return 0; |
419 | } | 456 | } |
420 | 457 | ||
458 | struct acpi_device_id; | ||
459 | |||
460 | static inline const struct acpi_device_id *acpi_match_device( | ||
461 | const struct acpi_device_id *ids, const struct device *dev) | ||
462 | { | ||
463 | return NULL; | ||
464 | } | ||
465 | |||
466 | static inline bool acpi_driver_match_device(struct device *dev, | ||
467 | const struct device_driver *drv) | ||
468 | { | ||
469 | return false; | ||
470 | } | ||
471 | |||
472 | #define ACPI_PTR(_ptr) (NULL) | ||
473 | |||
421 | #endif /* !CONFIG_ACPI */ | 474 | #endif /* !CONFIG_ACPI */ |
422 | 475 | ||
423 | #ifdef CONFIG_ACPI | 476 | #ifdef CONFIG_ACPI |
@@ -430,4 +483,84 @@ acpi_status acpi_os_prepare_sleep(u8 sleep_state, | |||
430 | #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) | 483 | #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) |
431 | #endif | 484 | #endif |
432 | 485 | ||
486 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM_RUNTIME) | ||
487 | int acpi_dev_runtime_suspend(struct device *dev); | ||
488 | int acpi_dev_runtime_resume(struct device *dev); | ||
489 | int acpi_subsys_runtime_suspend(struct device *dev); | ||
490 | int acpi_subsys_runtime_resume(struct device *dev); | ||
491 | #else | ||
492 | static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; } | ||
493 | static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; } | ||
494 | static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } | ||
495 | static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } | ||
496 | #endif | ||
497 | |||
498 | #ifdef CONFIG_ACPI_SLEEP | ||
499 | int acpi_dev_suspend_late(struct device *dev); | ||
500 | int acpi_dev_resume_early(struct device *dev); | ||
501 | int acpi_subsys_prepare(struct device *dev); | ||
502 | int acpi_subsys_suspend_late(struct device *dev); | ||
503 | int acpi_subsys_resume_early(struct device *dev); | ||
504 | #else | ||
505 | static inline int acpi_dev_suspend_late(struct device *dev) { return 0; } | ||
506 | static inline int acpi_dev_resume_early(struct device *dev) { return 0; } | ||
507 | static inline int acpi_subsys_prepare(struct device *dev) { return 0; } | ||
508 | static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; } | ||
509 | static inline int acpi_subsys_resume_early(struct device *dev) { return 0; } | ||
510 | #endif | ||
511 | |||
512 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM) | ||
513 | int acpi_dev_pm_attach(struct device *dev, bool power_on); | ||
514 | void acpi_dev_pm_detach(struct device *dev, bool power_off); | ||
515 | #else | ||
516 | static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) | ||
517 | { | ||
518 | return -ENODEV; | ||
519 | } | ||
520 | static inline void acpi_dev_pm_detach(struct device *dev, bool power_off) {} | ||
521 | #endif | ||
522 | |||
523 | #ifdef CONFIG_ACPI | ||
524 | __printf(3, 4) | ||
525 | void acpi_handle_printk(const char *level, acpi_handle handle, | ||
526 | const char *fmt, ...); | ||
527 | #else /* !CONFIG_ACPI */ | ||
528 | static inline __printf(3, 4) void | ||
529 | acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {} | ||
530 | #endif /* !CONFIG_ACPI */ | ||
531 | |||
532 | /* | ||
533 | * acpi_handle_<level>: Print message with ACPI prefix and object path | ||
534 | * | ||
535 | * These interfaces acquire the global namespace mutex to obtain an object | ||
536 | * path. In interrupt context, it shows the object path as <n/a>. | ||
537 | */ | ||
538 | #define acpi_handle_emerg(handle, fmt, ...) \ | ||
539 | acpi_handle_printk(KERN_EMERG, handle, fmt, ##__VA_ARGS__) | ||
540 | #define acpi_handle_alert(handle, fmt, ...) \ | ||
541 | acpi_handle_printk(KERN_ALERT, handle, fmt, ##__VA_ARGS__) | ||
542 | #define acpi_handle_crit(handle, fmt, ...) \ | ||
543 | acpi_handle_printk(KERN_CRIT, handle, fmt, ##__VA_ARGS__) | ||
544 | #define acpi_handle_err(handle, fmt, ...) \ | ||
545 | acpi_handle_printk(KERN_ERR, handle, fmt, ##__VA_ARGS__) | ||
546 | #define acpi_handle_warn(handle, fmt, ...) \ | ||
547 | acpi_handle_printk(KERN_WARNING, handle, fmt, ##__VA_ARGS__) | ||
548 | #define acpi_handle_notice(handle, fmt, ...) \ | ||
549 | acpi_handle_printk(KERN_NOTICE, handle, fmt, ##__VA_ARGS__) | ||
550 | #define acpi_handle_info(handle, fmt, ...) \ | ||
551 | acpi_handle_printk(KERN_INFO, handle, fmt, ##__VA_ARGS__) | ||
552 | |||
553 | /* REVISIT: Support CONFIG_DYNAMIC_DEBUG when necessary */ | ||
554 | #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) | ||
555 | #define acpi_handle_debug(handle, fmt, ...) \ | ||
556 | acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__) | ||
557 | #else | ||
558 | #define acpi_handle_debug(handle, fmt, ...) \ | ||
559 | ({ \ | ||
560 | if (0) \ | ||
561 | acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__); \ | ||
562 | 0; \ | ||
563 | }) | ||
564 | #endif | ||
565 | |||
433 | #endif /*_LINUX_ACPI_H*/ | 566 | #endif /*_LINUX_ACPI_H*/ |
diff --git a/include/linux/acpi_gpio.h b/include/linux/acpi_gpio.h new file mode 100644 index 000000000000..91615a389b65 --- /dev/null +++ b/include/linux/acpi_gpio.h | |||
@@ -0,0 +1,19 @@ | |||
1 | #ifndef _LINUX_ACPI_GPIO_H_ | ||
2 | #define _LINUX_ACPI_GPIO_H_ | ||
3 | |||
4 | #include <linux/errno.h> | ||
5 | |||
6 | #ifdef CONFIG_GPIO_ACPI | ||
7 | |||
8 | int acpi_get_gpio(char *path, int pin); | ||
9 | |||
10 | #else /* CONFIG_GPIO_ACPI */ | ||
11 | |||
12 | static inline int acpi_get_gpio(char *path, int pin) | ||
13 | { | ||
14 | return -ENODEV; | ||
15 | } | ||
16 | |||
17 | #endif /* CONFIG_GPIO_ACPI */ | ||
18 | |||
19 | #endif /* _LINUX_ACPI_GPIO_H_ */ | ||
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index b60f6ba01d0c..a55b88eaf96a 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #ifndef _LINUX_CPUFREQ_H | 11 | #ifndef _LINUX_CPUFREQ_H |
12 | #define _LINUX_CPUFREQ_H | 12 | #define _LINUX_CPUFREQ_H |
13 | 13 | ||
14 | #include <asm/cputime.h> | ||
14 | #include <linux/mutex.h> | 15 | #include <linux/mutex.h> |
15 | #include <linux/notifier.h> | 16 | #include <linux/notifier.h> |
16 | #include <linux/threads.h> | 17 | #include <linux/threads.h> |
@@ -22,6 +23,8 @@ | |||
22 | #include <asm/div64.h> | 23 | #include <asm/div64.h> |
23 | 24 | ||
24 | #define CPUFREQ_NAME_LEN 16 | 25 | #define CPUFREQ_NAME_LEN 16 |
26 | /* Print length for names. Extra 1 space for accomodating '\n' in prints */ | ||
27 | #define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1) | ||
25 | 28 | ||
26 | 29 | ||
27 | /********************************************************************* | 30 | /********************************************************************* |
@@ -404,6 +407,4 @@ void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, | |||
404 | unsigned int cpu); | 407 | unsigned int cpu); |
405 | 408 | ||
406 | void cpufreq_frequency_table_put_attr(unsigned int cpu); | 409 | void cpufreq_frequency_table_put_attr(unsigned int cpu); |
407 | |||
408 | |||
409 | #endif /* _LINUX_CPUFREQ_H */ | 410 | #endif /* _LINUX_CPUFREQ_H */ |
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 279b1eaa8b73..3711b34dc4f9 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
@@ -82,13 +82,6 @@ cpuidle_set_statedata(struct cpuidle_state_usage *st_usage, void *data) | |||
82 | st_usage->driver_data = data; | 82 | st_usage->driver_data = data; |
83 | } | 83 | } |
84 | 84 | ||
85 | struct cpuidle_state_kobj { | ||
86 | struct cpuidle_state *state; | ||
87 | struct cpuidle_state_usage *state_usage; | ||
88 | struct completion kobj_unregister; | ||
89 | struct kobject kobj; | ||
90 | }; | ||
91 | |||
92 | struct cpuidle_device { | 85 | struct cpuidle_device { |
93 | unsigned int registered:1; | 86 | unsigned int registered:1; |
94 | unsigned int enabled:1; | 87 | unsigned int enabled:1; |
@@ -98,7 +91,7 @@ struct cpuidle_device { | |||
98 | int state_count; | 91 | int state_count; |
99 | struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; | 92 | struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; |
100 | struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; | 93 | struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; |
101 | 94 | struct cpuidle_driver_kobj *kobj_driver; | |
102 | struct list_head device_list; | 95 | struct list_head device_list; |
103 | struct kobject kobj; | 96 | struct kobject kobj; |
104 | struct completion kobj_unregister; | 97 | struct completion kobj_unregister; |
@@ -131,6 +124,7 @@ static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) | |||
131 | struct cpuidle_driver { | 124 | struct cpuidle_driver { |
132 | const char *name; | 125 | const char *name; |
133 | struct module *owner; | 126 | struct module *owner; |
127 | int refcnt; | ||
134 | 128 | ||
135 | unsigned int power_specified:1; | 129 | unsigned int power_specified:1; |
136 | /* set to 1 to use the core cpuidle time keeping (for all states). */ | 130 | /* set to 1 to use the core cpuidle time keeping (for all states). */ |
@@ -163,6 +157,10 @@ extern int cpuidle_wrap_enter(struct cpuidle_device *dev, | |||
163 | struct cpuidle_driver *drv, int index)); | 157 | struct cpuidle_driver *drv, int index)); |
164 | extern int cpuidle_play_dead(void); | 158 | extern int cpuidle_play_dead(void); |
165 | 159 | ||
160 | extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); | ||
161 | extern int cpuidle_register_cpu_driver(struct cpuidle_driver *drv, int cpu); | ||
162 | extern void cpuidle_unregister_cpu_driver(struct cpuidle_driver *drv, int cpu); | ||
163 | |||
166 | #else | 164 | #else |
167 | static inline void disable_cpuidle(void) { } | 165 | static inline void disable_cpuidle(void) { } |
168 | static inline int cpuidle_idle_call(void) { return -ENODEV; } | 166 | static inline int cpuidle_idle_call(void) { return -ENODEV; } |
@@ -189,7 +187,6 @@ static inline int cpuidle_wrap_enter(struct cpuidle_device *dev, | |||
189 | struct cpuidle_driver *drv, int index)) | 187 | struct cpuidle_driver *drv, int index)) |
190 | { return -ENODEV; } | 188 | { return -ENODEV; } |
191 | static inline int cpuidle_play_dead(void) {return -ENODEV; } | 189 | static inline int cpuidle_play_dead(void) {return -ENODEV; } |
192 | |||
193 | #endif | 190 | #endif |
194 | 191 | ||
195 | #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED | 192 | #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED |
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 281c72a3b9d5..e83ef39b3bea 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h | |||
@@ -25,12 +25,12 @@ struct devfreq; | |||
25 | * struct devfreq_dev_status - Data given from devfreq user device to | 25 | * struct devfreq_dev_status - Data given from devfreq user device to |
26 | * governors. Represents the performance | 26 | * governors. Represents the performance |
27 | * statistics. | 27 | * statistics. |
28 | * @total_time The total time represented by this instance of | 28 | * @total_time: The total time represented by this instance of |
29 | * devfreq_dev_status | 29 | * devfreq_dev_status |
30 | * @busy_time The time that the device was working among the | 30 | * @busy_time: The time that the device was working among the |
31 | * total_time. | 31 | * total_time. |
32 | * @current_frequency The operating frequency. | 32 | * @current_frequency: The operating frequency. |
33 | * @private_data An entry not specified by the devfreq framework. | 33 | * @private_data: An entry not specified by the devfreq framework. |
34 | * A device and a specific governor may have their | 34 | * A device and a specific governor may have their |
35 | * own protocol with private_data. However, because | 35 | * own protocol with private_data. However, because |
36 | * this is governor-specific, a governor using this | 36 | * this is governor-specific, a governor using this |
@@ -54,23 +54,27 @@ struct devfreq_dev_status { | |||
54 | 54 | ||
55 | /** | 55 | /** |
56 | * struct devfreq_dev_profile - Devfreq's user device profile | 56 | * struct devfreq_dev_profile - Devfreq's user device profile |
57 | * @initial_freq The operating frequency when devfreq_add_device() is | 57 | * @initial_freq: The operating frequency when devfreq_add_device() is |
58 | * called. | 58 | * called. |
59 | * @polling_ms The polling interval in ms. 0 disables polling. | 59 | * @polling_ms: The polling interval in ms. 0 disables polling. |
60 | * @target The device should set its operating frequency at | 60 | * @target: The device should set its operating frequency at |
61 | * freq or lowest-upper-than-freq value. If freq is | 61 | * freq or lowest-upper-than-freq value. If freq is |
62 | * higher than any operable frequency, set maximum. | 62 | * higher than any operable frequency, set maximum. |
63 | * Before returning, target function should set | 63 | * Before returning, target function should set |
64 | * freq at the current frequency. | 64 | * freq at the current frequency. |
65 | * The "flags" parameter's possible values are | 65 | * The "flags" parameter's possible values are |
66 | * explained above with "DEVFREQ_FLAG_*" macros. | 66 | * explained above with "DEVFREQ_FLAG_*" macros. |
67 | * @get_dev_status The device should provide the current performance | 67 | * @get_dev_status: The device should provide the current performance |
68 | * status to devfreq, which is used by governors. | 68 | * status to devfreq, which is used by governors. |
69 | * @exit An optional callback that is called when devfreq | 69 | * @get_cur_freq: The device should provide the current frequency |
70 | * at which it is operating. | ||
71 | * @exit: An optional callback that is called when devfreq | ||
70 | * is removing the devfreq object due to error or | 72 | * is removing the devfreq object due to error or |
71 | * from devfreq_remove_device() call. If the user | 73 | * from devfreq_remove_device() call. If the user |
72 | * has registered devfreq->nb at a notifier-head, | 74 | * has registered devfreq->nb at a notifier-head, |
73 | * this is the time to unregister it. | 75 | * this is the time to unregister it. |
76 | * @freq_table: Optional list of frequencies to support statistics. | ||
77 | * @max_state: The size of freq_table. | ||
74 | */ | 78 | */ |
75 | struct devfreq_dev_profile { | 79 | struct devfreq_dev_profile { |
76 | unsigned long initial_freq; | 80 | unsigned long initial_freq; |
@@ -79,63 +83,63 @@ struct devfreq_dev_profile { | |||
79 | int (*target)(struct device *dev, unsigned long *freq, u32 flags); | 83 | int (*target)(struct device *dev, unsigned long *freq, u32 flags); |
80 | int (*get_dev_status)(struct device *dev, | 84 | int (*get_dev_status)(struct device *dev, |
81 | struct devfreq_dev_status *stat); | 85 | struct devfreq_dev_status *stat); |
86 | int (*get_cur_freq)(struct device *dev, unsigned long *freq); | ||
82 | void (*exit)(struct device *dev); | 87 | void (*exit)(struct device *dev); |
88 | |||
89 | unsigned int *freq_table; | ||
90 | unsigned int max_state; | ||
83 | }; | 91 | }; |
84 | 92 | ||
85 | /** | 93 | /** |
86 | * struct devfreq_governor - Devfreq policy governor | 94 | * struct devfreq_governor - Devfreq policy governor |
87 | * @name Governor's name | 95 | * @node: list node - contains registered devfreq governors |
88 | * @get_target_freq Returns desired operating frequency for the device. | 96 | * @name: Governor's name |
97 | * @get_target_freq: Returns desired operating frequency for the device. | ||
89 | * Basically, get_target_freq will run | 98 | * Basically, get_target_freq will run |
90 | * devfreq_dev_profile.get_dev_status() to get the | 99 | * devfreq_dev_profile.get_dev_status() to get the |
91 | * status of the device (load = busy_time / total_time). | 100 | * status of the device (load = busy_time / total_time). |
92 | * If no_central_polling is set, this callback is called | 101 | * If no_central_polling is set, this callback is called |
93 | * only with update_devfreq() notified by OPP. | 102 | * only with update_devfreq() notified by OPP. |
94 | * @init Called when the devfreq is being attached to a device | 103 | * @event_handler: Callback for devfreq core framework to notify events |
95 | * @exit Called when the devfreq is being removed from a | 104 | * to governors. Events include per device governor |
96 | * device. Governor should stop any internal routines | 105 | * init and exit, opp changes out of devfreq, suspend |
97 | * before return because related data may be | 106 | * and resume of per device devfreq during device idle. |
98 | * freed after exit(). | ||
99 | * @no_central_polling Do not use devfreq's central polling mechanism. | ||
100 | * When this is set, devfreq will not call | ||
101 | * get_target_freq with devfreq_monitor(). However, | ||
102 | * devfreq will call get_target_freq with | ||
103 | * devfreq_update() notified by OPP framework. | ||
104 | * | 107 | * |
105 | * Note that the callbacks are called with devfreq->lock locked by devfreq. | 108 | * Note that the callbacks are called with devfreq->lock locked by devfreq. |
106 | */ | 109 | */ |
107 | struct devfreq_governor { | 110 | struct devfreq_governor { |
111 | struct list_head node; | ||
112 | |||
108 | const char name[DEVFREQ_NAME_LEN]; | 113 | const char name[DEVFREQ_NAME_LEN]; |
109 | int (*get_target_freq)(struct devfreq *this, unsigned long *freq); | 114 | int (*get_target_freq)(struct devfreq *this, unsigned long *freq); |
110 | int (*init)(struct devfreq *this); | 115 | int (*event_handler)(struct devfreq *devfreq, |
111 | void (*exit)(struct devfreq *this); | 116 | unsigned int event, void *data); |
112 | const bool no_central_polling; | ||
113 | }; | 117 | }; |
114 | 118 | ||
115 | /** | 119 | /** |
116 | * struct devfreq - Device devfreq structure | 120 | * struct devfreq - Device devfreq structure |
117 | * @node list node - contains the devices with devfreq that have been | 121 | * @node: list node - contains the devices with devfreq that have been |
118 | * registered. | 122 | * registered. |
119 | * @lock a mutex to protect accessing devfreq. | 123 | * @lock: a mutex to protect accessing devfreq. |
120 | * @dev device registered by devfreq class. dev.parent is the device | 124 | * @dev: device registered by devfreq class. dev.parent is the device |
121 | * using devfreq. | 125 | * using devfreq. |
122 | * @profile device-specific devfreq profile | 126 | * @profile: device-specific devfreq profile |
123 | * @governor method how to choose frequency based on the usage. | 127 | * @governor: method how to choose frequency based on the usage. |
124 | * @nb notifier block used to notify devfreq object that it should | 128 | * @governor_name: devfreq governor name for use with this devfreq |
129 | * @nb: notifier block used to notify devfreq object that it should | ||
125 | * reevaluate operable frequencies. Devfreq users may use | 130 | * reevaluate operable frequencies. Devfreq users may use |
126 | * devfreq.nb to the corresponding register notifier call chain. | 131 | * devfreq.nb to the corresponding register notifier call chain. |
127 | * @polling_jiffies interval in jiffies. | 132 | * @work: delayed work for load monitoring. |
128 | * @previous_freq previously configured frequency value. | 133 | * @previous_freq: previously configured frequency value. |
129 | * @next_polling the number of remaining jiffies to poll with | 134 | * @data: Private data of the governor. The devfreq framework does not |
130 | * "devfreq_monitor" executions to reevaluate | ||
131 | * frequency/voltage of the device. Set by | ||
132 | * profile's polling_ms interval. | ||
133 | * @data Private data of the governor. The devfreq framework does not | ||
134 | * touch this. | 135 | * touch this. |
135 | * @being_removed a flag to mark that this object is being removed in | 136 | * @min_freq: Limit minimum frequency requested by user (0: none) |
136 | * order to prevent trying to remove the object multiple times. | 137 | * @max_freq: Limit maximum frequency requested by user (0: none) |
137 | * @min_freq Limit minimum frequency requested by user (0: none) | 138 | * @stop_polling: devfreq polling status of a device. |
138 | * @max_freq Limit maximum frequency requested by user (0: none) | 139 | * @total_trans: Number of devfreq transitions |
140 | * @trans_table: Statistics of devfreq transitions | ||
141 | * @time_in_state: Statistics of devfreq states | ||
142 | * @last_stat_updated: The last time stat updated | ||
139 | * | 143 | * |
140 | * This structure stores the devfreq information for a give device. | 144 | * This structure stores the devfreq information for a give device. |
141 | * | 145 | * |
@@ -152,26 +156,33 @@ struct devfreq { | |||
152 | struct device dev; | 156 | struct device dev; |
153 | struct devfreq_dev_profile *profile; | 157 | struct devfreq_dev_profile *profile; |
154 | const struct devfreq_governor *governor; | 158 | const struct devfreq_governor *governor; |
159 | char governor_name[DEVFREQ_NAME_LEN]; | ||
155 | struct notifier_block nb; | 160 | struct notifier_block nb; |
161 | struct delayed_work work; | ||
156 | 162 | ||
157 | unsigned long polling_jiffies; | ||
158 | unsigned long previous_freq; | 163 | unsigned long previous_freq; |
159 | unsigned int next_polling; | ||
160 | 164 | ||
161 | void *data; /* private data for governors */ | 165 | void *data; /* private data for governors */ |
162 | 166 | ||
163 | bool being_removed; | ||
164 | |||
165 | unsigned long min_freq; | 167 | unsigned long min_freq; |
166 | unsigned long max_freq; | 168 | unsigned long max_freq; |
169 | bool stop_polling; | ||
170 | |||
171 | /* information for device freqeuncy transition */ | ||
172 | unsigned int total_trans; | ||
173 | unsigned int *trans_table; | ||
174 | unsigned long *time_in_state; | ||
175 | unsigned long last_stat_updated; | ||
167 | }; | 176 | }; |
168 | 177 | ||
169 | #if defined(CONFIG_PM_DEVFREQ) | 178 | #if defined(CONFIG_PM_DEVFREQ) |
170 | extern struct devfreq *devfreq_add_device(struct device *dev, | 179 | extern struct devfreq *devfreq_add_device(struct device *dev, |
171 | struct devfreq_dev_profile *profile, | 180 | struct devfreq_dev_profile *profile, |
172 | const struct devfreq_governor *governor, | 181 | const char *governor_name, |
173 | void *data); | 182 | void *data); |
174 | extern int devfreq_remove_device(struct devfreq *devfreq); | 183 | extern int devfreq_remove_device(struct devfreq *devfreq); |
184 | extern int devfreq_suspend_device(struct devfreq *devfreq); | ||
185 | extern int devfreq_resume_device(struct devfreq *devfreq); | ||
175 | 186 | ||
176 | /* Helper functions for devfreq user device driver with OPP. */ | 187 | /* Helper functions for devfreq user device driver with OPP. */ |
177 | extern struct opp *devfreq_recommended_opp(struct device *dev, | 188 | extern struct opp *devfreq_recommended_opp(struct device *dev, |
@@ -181,23 +192,13 @@ extern int devfreq_register_opp_notifier(struct device *dev, | |||
181 | extern int devfreq_unregister_opp_notifier(struct device *dev, | 192 | extern int devfreq_unregister_opp_notifier(struct device *dev, |
182 | struct devfreq *devfreq); | 193 | struct devfreq *devfreq); |
183 | 194 | ||
184 | #ifdef CONFIG_DEVFREQ_GOV_POWERSAVE | 195 | #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) |
185 | extern const struct devfreq_governor devfreq_powersave; | ||
186 | #endif | ||
187 | #ifdef CONFIG_DEVFREQ_GOV_PERFORMANCE | ||
188 | extern const struct devfreq_governor devfreq_performance; | ||
189 | #endif | ||
190 | #ifdef CONFIG_DEVFREQ_GOV_USERSPACE | ||
191 | extern const struct devfreq_governor devfreq_userspace; | ||
192 | #endif | ||
193 | #ifdef CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND | ||
194 | extern const struct devfreq_governor devfreq_simple_ondemand; | ||
195 | /** | 196 | /** |
196 | * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq | 197 | * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq |
197 | * and devfreq_add_device | 198 | * and devfreq_add_device |
198 | * @ upthreshold If the load is over this value, the frequency jumps. | 199 | * @upthreshold: If the load is over this value, the frequency jumps. |
199 | * Specify 0 to use the default. Valid value = 0 to 100. | 200 | * Specify 0 to use the default. Valid value = 0 to 100. |
200 | * @ downdifferential If the load is under upthreshold - downdifferential, | 201 | * @downdifferential: If the load is under upthreshold - downdifferential, |
201 | * the governor may consider slowing the frequency down. | 202 | * the governor may consider slowing the frequency down. |
202 | * Specify 0 to use the default. Valid value = 0 to 100. | 203 | * Specify 0 to use the default. Valid value = 0 to 100. |
203 | * downdifferential < upthreshold must hold. | 204 | * downdifferential < upthreshold must hold. |
@@ -214,7 +215,7 @@ struct devfreq_simple_ondemand_data { | |||
214 | #else /* !CONFIG_PM_DEVFREQ */ | 215 | #else /* !CONFIG_PM_DEVFREQ */ |
215 | static struct devfreq *devfreq_add_device(struct device *dev, | 216 | static struct devfreq *devfreq_add_device(struct device *dev, |
216 | struct devfreq_dev_profile *profile, | 217 | struct devfreq_dev_profile *profile, |
217 | struct devfreq_governor *governor, | 218 | const char *governor_name, |
218 | void *data) | 219 | void *data) |
219 | { | 220 | { |
220 | return NULL; | 221 | return NULL; |
@@ -225,6 +226,16 @@ static int devfreq_remove_device(struct devfreq *devfreq) | |||
225 | return 0; | 226 | return 0; |
226 | } | 227 | } |
227 | 228 | ||
229 | static int devfreq_suspend_device(struct devfreq *devfreq) | ||
230 | { | ||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | static int devfreq_resume_device(struct devfreq *devfreq) | ||
235 | { | ||
236 | return 0; | ||
237 | } | ||
238 | |||
228 | static struct opp *devfreq_recommended_opp(struct device *dev, | 239 | static struct opp *devfreq_recommended_opp(struct device *dev, |
229 | unsigned long *freq, u32 flags) | 240 | unsigned long *freq, u32 flags) |
230 | { | 241 | { |
@@ -243,11 +254,6 @@ static int devfreq_unregister_opp_notifier(struct device *dev, | |||
243 | return -EINVAL; | 254 | return -EINVAL; |
244 | } | 255 | } |
245 | 256 | ||
246 | #define devfreq_powersave NULL | ||
247 | #define devfreq_performance NULL | ||
248 | #define devfreq_userspace NULL | ||
249 | #define devfreq_simple_ondemand NULL | ||
250 | |||
251 | #endif /* CONFIG_PM_DEVFREQ */ | 257 | #endif /* CONFIG_PM_DEVFREQ */ |
252 | 258 | ||
253 | #endif /* __LINUX_DEVFREQ_H__ */ | 259 | #endif /* __LINUX_DEVFREQ_H__ */ |
diff --git a/include/linux/device.h b/include/linux/device.h index 86ef6ab553b1..05292e488346 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -190,6 +190,7 @@ extern struct klist *bus_get_device_klist(struct bus_type *bus); | |||
190 | * @mod_name: Used for built-in modules. | 190 | * @mod_name: Used for built-in modules. |
191 | * @suppress_bind_attrs: Disables bind/unbind via sysfs. | 191 | * @suppress_bind_attrs: Disables bind/unbind via sysfs. |
192 | * @of_match_table: The open firmware table. | 192 | * @of_match_table: The open firmware table. |
193 | * @acpi_match_table: The ACPI match table. | ||
193 | * @probe: Called to query the existence of a specific device, | 194 | * @probe: Called to query the existence of a specific device, |
194 | * whether this driver can work with it, and bind the driver | 195 | * whether this driver can work with it, and bind the driver |
195 | * to a specific device. | 196 | * to a specific device. |
@@ -223,6 +224,7 @@ struct device_driver { | |||
223 | bool suppress_bind_attrs; /* disables bind/unbind via sysfs */ | 224 | bool suppress_bind_attrs; /* disables bind/unbind via sysfs */ |
224 | 225 | ||
225 | const struct of_device_id *of_match_table; | 226 | const struct of_device_id *of_match_table; |
227 | const struct acpi_device_id *acpi_match_table; | ||
226 | 228 | ||
227 | int (*probe) (struct device *dev); | 229 | int (*probe) (struct device *dev); |
228 | int (*remove) (struct device *dev); | 230 | int (*remove) (struct device *dev); |
@@ -576,6 +578,12 @@ struct device_dma_parameters { | |||
576 | unsigned long segment_boundary_mask; | 578 | unsigned long segment_boundary_mask; |
577 | }; | 579 | }; |
578 | 580 | ||
581 | struct acpi_dev_node { | ||
582 | #ifdef CONFIG_ACPI | ||
583 | void *handle; | ||
584 | #endif | ||
585 | }; | ||
586 | |||
579 | /** | 587 | /** |
580 | * struct device - The basic device structure | 588 | * struct device - The basic device structure |
581 | * @parent: The device's "parent" device, the device to which it is attached. | 589 | * @parent: The device's "parent" device, the device to which it is attached. |
@@ -616,6 +624,7 @@ struct device_dma_parameters { | |||
616 | * @dma_mem: Internal for coherent mem override. | 624 | * @dma_mem: Internal for coherent mem override. |
617 | * @archdata: For arch-specific additions. | 625 | * @archdata: For arch-specific additions. |
618 | * @of_node: Associated device tree node. | 626 | * @of_node: Associated device tree node. |
627 | * @acpi_node: Associated ACPI device node. | ||
619 | * @devt: For creating the sysfs "dev". | 628 | * @devt: For creating the sysfs "dev". |
620 | * @id: device instance | 629 | * @id: device instance |
621 | * @devres_lock: Spinlock to protect the resource of the device. | 630 | * @devres_lock: Spinlock to protect the resource of the device. |
@@ -680,6 +689,7 @@ struct device { | |||
680 | struct dev_archdata archdata; | 689 | struct dev_archdata archdata; |
681 | 690 | ||
682 | struct device_node *of_node; /* associated device tree node */ | 691 | struct device_node *of_node; /* associated device tree node */ |
692 | struct acpi_dev_node acpi_node; /* associated ACPI device node */ | ||
683 | 693 | ||
684 | dev_t devt; /* dev_t, creates the sysfs "dev" */ | 694 | dev_t devt; /* dev_t, creates the sysfs "dev" */ |
685 | u32 id; /* device instance */ | 695 | u32 id; /* device instance */ |
@@ -700,6 +710,14 @@ static inline struct device *kobj_to_dev(struct kobject *kobj) | |||
700 | return container_of(kobj, struct device, kobj); | 710 | return container_of(kobj, struct device, kobj); |
701 | } | 711 | } |
702 | 712 | ||
713 | #ifdef CONFIG_ACPI | ||
714 | #define ACPI_HANDLE(dev) ((dev)->acpi_node.handle) | ||
715 | #define ACPI_HANDLE_SET(dev, _handle_) (dev)->acpi_node.handle = (_handle_) | ||
716 | #else | ||
717 | #define ACPI_HANDLE(dev) (NULL) | ||
718 | #define ACPI_HANDLE_SET(dev, _handle_) do { } while (0) | ||
719 | #endif | ||
720 | |||
703 | /* Get the wakeup routines, which depend on struct device */ | 721 | /* Get the wakeup routines, which depend on struct device */ |
704 | #include <linux/pm_wakeup.h> | 722 | #include <linux/pm_wakeup.h> |
705 | 723 | ||
diff --git a/include/linux/freezer.h b/include/linux/freezer.h index d09af4b67cf1..b90091af5798 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h | |||
@@ -177,6 +177,7 @@ static inline int freeze_kernel_threads(void) { return -ENOSYS; } | |||
177 | static inline void thaw_processes(void) {} | 177 | static inline void thaw_processes(void) {} |
178 | static inline void thaw_kernel_threads(void) {} | 178 | static inline void thaw_kernel_threads(void) {} |
179 | 179 | ||
180 | static inline bool try_to_freeze_nowarn(void) { return false; } | ||
180 | static inline bool try_to_freeze(void) { return false; } | 181 | static inline bool try_to_freeze(void) { return false; } |
181 | 182 | ||
182 | static inline void freezer_do_not_count(void) {} | 183 | static inline void freezer_do_not_count(void) {} |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 800de224336b..d0c4db7b4872 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
@@ -259,6 +259,7 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data) | |||
259 | * @platform_data: stored in i2c_client.dev.platform_data | 259 | * @platform_data: stored in i2c_client.dev.platform_data |
260 | * @archdata: copied into i2c_client.dev.archdata | 260 | * @archdata: copied into i2c_client.dev.archdata |
261 | * @of_node: pointer to OpenFirmware device node | 261 | * @of_node: pointer to OpenFirmware device node |
262 | * @acpi_node: ACPI device node | ||
262 | * @irq: stored in i2c_client.irq | 263 | * @irq: stored in i2c_client.irq |
263 | * | 264 | * |
264 | * I2C doesn't actually support hardware probing, although controllers and | 265 | * I2C doesn't actually support hardware probing, although controllers and |
@@ -279,6 +280,7 @@ struct i2c_board_info { | |||
279 | void *platform_data; | 280 | void *platform_data; |
280 | struct dev_archdata *archdata; | 281 | struct dev_archdata *archdata; |
281 | struct device_node *of_node; | 282 | struct device_node *of_node; |
283 | struct acpi_dev_node acpi_node; | ||
282 | int irq; | 284 | int irq; |
283 | }; | 285 | }; |
284 | 286 | ||
@@ -501,4 +503,11 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap) | |||
501 | i2c_del_driver) | 503 | i2c_del_driver) |
502 | 504 | ||
503 | #endif /* I2C */ | 505 | #endif /* I2C */ |
506 | |||
507 | #if IS_ENABLED(CONFIG_ACPI_I2C) | ||
508 | extern void acpi_i2c_register_devices(struct i2c_adapter *adap); | ||
509 | #else | ||
510 | static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) {} | ||
511 | #endif | ||
512 | |||
504 | #endif /* _LINUX_I2C_H */ | 513 | #endif /* _LINUX_I2C_H */ |
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 5711e9525a2a..a9ded9a3c175 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
@@ -55,6 +55,7 @@ extern int platform_add_devices(struct platform_device **, int); | |||
55 | 55 | ||
56 | struct platform_device_info { | 56 | struct platform_device_info { |
57 | struct device *parent; | 57 | struct device *parent; |
58 | struct acpi_dev_node acpi_node; | ||
58 | 59 | ||
59 | const char *name; | 60 | const char *name; |
60 | int id; | 61 | int id; |
diff --git a/include/linux/pm.h b/include/linux/pm.h index 007e687c4f69..03d7bb145311 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -546,10 +546,9 @@ struct dev_pm_info { | |||
546 | unsigned long active_jiffies; | 546 | unsigned long active_jiffies; |
547 | unsigned long suspended_jiffies; | 547 | unsigned long suspended_jiffies; |
548 | unsigned long accounting_timestamp; | 548 | unsigned long accounting_timestamp; |
549 | struct dev_pm_qos_request *pq_req; | ||
550 | #endif | 549 | #endif |
551 | struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ | 550 | struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ |
552 | struct pm_qos_constraints *constraints; | 551 | struct dev_pm_qos *qos; |
553 | }; | 552 | }; |
554 | 553 | ||
555 | extern void update_pm_runtime_accounting(struct device *dev); | 554 | extern void update_pm_runtime_accounting(struct device *dev); |
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 9924ea1f22e0..5a95013905c8 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h | |||
@@ -20,6 +20,13 @@ enum { | |||
20 | PM_QOS_NUM_CLASSES, | 20 | PM_QOS_NUM_CLASSES, |
21 | }; | 21 | }; |
22 | 22 | ||
23 | enum pm_qos_flags_status { | ||
24 | PM_QOS_FLAGS_UNDEFINED = -1, | ||
25 | PM_QOS_FLAGS_NONE, | ||
26 | PM_QOS_FLAGS_SOME, | ||
27 | PM_QOS_FLAGS_ALL, | ||
28 | }; | ||
29 | |||
23 | #define PM_QOS_DEFAULT_VALUE -1 | 30 | #define PM_QOS_DEFAULT_VALUE -1 |
24 | 31 | ||
25 | #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) | 32 | #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) |
@@ -27,14 +34,31 @@ enum { | |||
27 | #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 | 34 | #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 |
28 | #define PM_QOS_DEV_LAT_DEFAULT_VALUE 0 | 35 | #define PM_QOS_DEV_LAT_DEFAULT_VALUE 0 |
29 | 36 | ||
37 | #define PM_QOS_FLAG_NO_POWER_OFF (1 << 0) | ||
38 | #define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1) | ||
39 | |||
30 | struct pm_qos_request { | 40 | struct pm_qos_request { |
31 | struct plist_node node; | 41 | struct plist_node node; |
32 | int pm_qos_class; | 42 | int pm_qos_class; |
33 | struct delayed_work work; /* for pm_qos_update_request_timeout */ | 43 | struct delayed_work work; /* for pm_qos_update_request_timeout */ |
34 | }; | 44 | }; |
35 | 45 | ||
46 | struct pm_qos_flags_request { | ||
47 | struct list_head node; | ||
48 | s32 flags; /* Do not change to 64 bit */ | ||
49 | }; | ||
50 | |||
51 | enum dev_pm_qos_req_type { | ||
52 | DEV_PM_QOS_LATENCY = 1, | ||
53 | DEV_PM_QOS_FLAGS, | ||
54 | }; | ||
55 | |||
36 | struct dev_pm_qos_request { | 56 | struct dev_pm_qos_request { |
37 | struct plist_node node; | 57 | enum dev_pm_qos_req_type type; |
58 | union { | ||
59 | struct plist_node pnode; | ||
60 | struct pm_qos_flags_request flr; | ||
61 | } data; | ||
38 | struct device *dev; | 62 | struct device *dev; |
39 | }; | 63 | }; |
40 | 64 | ||
@@ -45,8 +69,8 @@ enum pm_qos_type { | |||
45 | }; | 69 | }; |
46 | 70 | ||
47 | /* | 71 | /* |
48 | * Note: The lockless read path depends on the CPU accessing | 72 | * Note: The lockless read path depends on the CPU accessing target_value |
49 | * target_value atomically. Atomic access is only guaranteed on all CPU | 73 | * or effective_flags atomically. Atomic access is only guaranteed on all CPU |
50 | * types linux supports for 32 bit quantites | 74 | * types linux supports for 32 bit quantites |
51 | */ | 75 | */ |
52 | struct pm_qos_constraints { | 76 | struct pm_qos_constraints { |
@@ -57,6 +81,18 @@ struct pm_qos_constraints { | |||
57 | struct blocking_notifier_head *notifiers; | 81 | struct blocking_notifier_head *notifiers; |
58 | }; | 82 | }; |
59 | 83 | ||
84 | struct pm_qos_flags { | ||
85 | struct list_head list; | ||
86 | s32 effective_flags; /* Do not change to 64 bit */ | ||
87 | }; | ||
88 | |||
89 | struct dev_pm_qos { | ||
90 | struct pm_qos_constraints latency; | ||
91 | struct pm_qos_flags flags; | ||
92 | struct dev_pm_qos_request *latency_req; | ||
93 | struct dev_pm_qos_request *flags_req; | ||
94 | }; | ||
95 | |||
60 | /* Action requested to pm_qos_update_target */ | 96 | /* Action requested to pm_qos_update_target */ |
61 | enum pm_qos_req_action { | 97 | enum pm_qos_req_action { |
62 | PM_QOS_ADD_REQ, /* Add a new request */ | 98 | PM_QOS_ADD_REQ, /* Add a new request */ |
@@ -71,6 +107,9 @@ static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req) | |||
71 | 107 | ||
72 | int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, | 108 | int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, |
73 | enum pm_qos_req_action action, int value); | 109 | enum pm_qos_req_action action, int value); |
110 | bool pm_qos_update_flags(struct pm_qos_flags *pqf, | ||
111 | struct pm_qos_flags_request *req, | ||
112 | enum pm_qos_req_action action, s32 val); | ||
74 | void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class, | 113 | void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class, |
75 | s32 value); | 114 | s32 value); |
76 | void pm_qos_update_request(struct pm_qos_request *req, | 115 | void pm_qos_update_request(struct pm_qos_request *req, |
@@ -86,10 +125,12 @@ int pm_qos_request_active(struct pm_qos_request *req); | |||
86 | s32 pm_qos_read_value(struct pm_qos_constraints *c); | 125 | s32 pm_qos_read_value(struct pm_qos_constraints *c); |
87 | 126 | ||
88 | #ifdef CONFIG_PM | 127 | #ifdef CONFIG_PM |
128 | enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask); | ||
129 | enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask); | ||
89 | s32 __dev_pm_qos_read_value(struct device *dev); | 130 | s32 __dev_pm_qos_read_value(struct device *dev); |
90 | s32 dev_pm_qos_read_value(struct device *dev); | 131 | s32 dev_pm_qos_read_value(struct device *dev); |
91 | int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, | 132 | int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, |
92 | s32 value); | 133 | enum dev_pm_qos_req_type type, s32 value); |
93 | int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value); | 134 | int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value); |
94 | int dev_pm_qos_remove_request(struct dev_pm_qos_request *req); | 135 | int dev_pm_qos_remove_request(struct dev_pm_qos_request *req); |
95 | int dev_pm_qos_add_notifier(struct device *dev, | 136 | int dev_pm_qos_add_notifier(struct device *dev, |
@@ -103,12 +144,19 @@ void dev_pm_qos_constraints_destroy(struct device *dev); | |||
103 | int dev_pm_qos_add_ancestor_request(struct device *dev, | 144 | int dev_pm_qos_add_ancestor_request(struct device *dev, |
104 | struct dev_pm_qos_request *req, s32 value); | 145 | struct dev_pm_qos_request *req, s32 value); |
105 | #else | 146 | #else |
147 | static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, | ||
148 | s32 mask) | ||
149 | { return PM_QOS_FLAGS_UNDEFINED; } | ||
150 | static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, | ||
151 | s32 mask) | ||
152 | { return PM_QOS_FLAGS_UNDEFINED; } | ||
106 | static inline s32 __dev_pm_qos_read_value(struct device *dev) | 153 | static inline s32 __dev_pm_qos_read_value(struct device *dev) |
107 | { return 0; } | 154 | { return 0; } |
108 | static inline s32 dev_pm_qos_read_value(struct device *dev) | 155 | static inline s32 dev_pm_qos_read_value(struct device *dev) |
109 | { return 0; } | 156 | { return 0; } |
110 | static inline int dev_pm_qos_add_request(struct device *dev, | 157 | static inline int dev_pm_qos_add_request(struct device *dev, |
111 | struct dev_pm_qos_request *req, | 158 | struct dev_pm_qos_request *req, |
159 | enum dev_pm_qos_req_type type, | ||
112 | s32 value) | 160 | s32 value) |
113 | { return 0; } | 161 | { return 0; } |
114 | static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req, | 162 | static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req, |
@@ -144,10 +192,31 @@ static inline int dev_pm_qos_add_ancestor_request(struct device *dev, | |||
144 | #ifdef CONFIG_PM_RUNTIME | 192 | #ifdef CONFIG_PM_RUNTIME |
145 | int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); | 193 | int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); |
146 | void dev_pm_qos_hide_latency_limit(struct device *dev); | 194 | void dev_pm_qos_hide_latency_limit(struct device *dev); |
195 | int dev_pm_qos_expose_flags(struct device *dev, s32 value); | ||
196 | void dev_pm_qos_hide_flags(struct device *dev); | ||
197 | int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); | ||
198 | |||
199 | static inline s32 dev_pm_qos_requested_latency(struct device *dev) | ||
200 | { | ||
201 | return dev->power.qos->latency_req->data.pnode.prio; | ||
202 | } | ||
203 | |||
204 | static inline s32 dev_pm_qos_requested_flags(struct device *dev) | ||
205 | { | ||
206 | return dev->power.qos->flags_req->data.flr.flags; | ||
207 | } | ||
147 | #else | 208 | #else |
148 | static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) | 209 | static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) |
149 | { return 0; } | 210 | { return 0; } |
150 | static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} | 211 | static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} |
212 | static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value) | ||
213 | { return 0; } | ||
214 | static inline void dev_pm_qos_hide_flags(struct device *dev) {} | ||
215 | static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set) | ||
216 | { return 0; } | ||
217 | |||
218 | static inline s32 dev_pm_qos_requested_latency(struct device *dev) { return 0; } | ||
219 | static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } | ||
151 | #endif | 220 | #endif |
152 | 221 | ||
153 | #endif | 222 | #endif |
diff --git a/include/linux/tick.h b/include/linux/tick.h index f37fceb69b73..1a6567b48492 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h | |||
@@ -142,4 +142,10 @@ static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } | |||
142 | static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } | 142 | static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } |
143 | # endif /* !NO_HZ */ | 143 | # endif /* !NO_HZ */ |
144 | 144 | ||
145 | # ifdef CONFIG_CPU_IDLE_GOV_MENU | ||
146 | extern void menu_hrtimer_cancel(void); | ||
147 | # else | ||
148 | static inline void menu_hrtimer_cancel(void) {} | ||
149 | # endif /* CONFIG_CPU_IDLE_GOV_MENU */ | ||
150 | |||
145 | #endif | 151 | #endif |
diff --git a/kernel/cpu.c b/kernel/cpu.c index 42bd331ee0ab..f45657f1eb8e 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -348,11 +348,13 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) | |||
348 | unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; | 348 | unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; |
349 | struct task_struct *idle; | 349 | struct task_struct *idle; |
350 | 350 | ||
351 | if (cpu_online(cpu) || !cpu_present(cpu)) | ||
352 | return -EINVAL; | ||
353 | |||
354 | cpu_hotplug_begin(); | 351 | cpu_hotplug_begin(); |
355 | 352 | ||
353 | if (cpu_online(cpu) || !cpu_present(cpu)) { | ||
354 | ret = -EINVAL; | ||
355 | goto out; | ||
356 | } | ||
357 | |||
356 | idle = idle_thread_get(cpu); | 358 | idle = idle_thread_get(cpu); |
357 | if (IS_ERR(idle)) { | 359 | if (IS_ERR(idle)) { |
358 | ret = PTR_ERR(idle); | 360 | ret = PTR_ERR(idle); |
diff --git a/kernel/power/main.c b/kernel/power/main.c index f458238109cc..1c16f9167de1 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
@@ -59,7 +59,7 @@ static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr, | |||
59 | { | 59 | { |
60 | unsigned long val; | 60 | unsigned long val; |
61 | 61 | ||
62 | if (strict_strtoul(buf, 10, &val)) | 62 | if (kstrtoul(buf, 10, &val)) |
63 | return -EINVAL; | 63 | return -EINVAL; |
64 | 64 | ||
65 | if (val > 1) | 65 | if (val > 1) |
diff --git a/kernel/power/qos.c b/kernel/power/qos.c index 846bd42c7ed1..9322ff7eaad6 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c | |||
@@ -213,6 +213,69 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, | |||
213 | } | 213 | } |
214 | 214 | ||
215 | /** | 215 | /** |
216 | * pm_qos_flags_remove_req - Remove device PM QoS flags request. | ||
217 | * @pqf: Device PM QoS flags set to remove the request from. | ||
218 | * @req: Request to remove from the set. | ||
219 | */ | ||
220 | static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf, | ||
221 | struct pm_qos_flags_request *req) | ||
222 | { | ||
223 | s32 val = 0; | ||
224 | |||
225 | list_del(&req->node); | ||
226 | list_for_each_entry(req, &pqf->list, node) | ||
227 | val |= req->flags; | ||
228 | |||
229 | pqf->effective_flags = val; | ||
230 | } | ||
231 | |||
232 | /** | ||
233 | * pm_qos_update_flags - Update a set of PM QoS flags. | ||
234 | * @pqf: Set of flags to update. | ||
235 | * @req: Request to add to the set, to modify, or to remove from the set. | ||
236 | * @action: Action to take on the set. | ||
237 | * @val: Value of the request to add or modify. | ||
238 | * | ||
239 | * Update the given set of PM QoS flags and call notifiers if the aggregate | ||
240 | * value has changed. Returns 1 if the aggregate constraint value has changed, | ||
241 | * 0 otherwise. | ||
242 | */ | ||
243 | bool pm_qos_update_flags(struct pm_qos_flags *pqf, | ||
244 | struct pm_qos_flags_request *req, | ||
245 | enum pm_qos_req_action action, s32 val) | ||
246 | { | ||
247 | unsigned long irqflags; | ||
248 | s32 prev_value, curr_value; | ||
249 | |||
250 | spin_lock_irqsave(&pm_qos_lock, irqflags); | ||
251 | |||
252 | prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags; | ||
253 | |||
254 | switch (action) { | ||
255 | case PM_QOS_REMOVE_REQ: | ||
256 | pm_qos_flags_remove_req(pqf, req); | ||
257 | break; | ||
258 | case PM_QOS_UPDATE_REQ: | ||
259 | pm_qos_flags_remove_req(pqf, req); | ||
260 | case PM_QOS_ADD_REQ: | ||
261 | req->flags = val; | ||
262 | INIT_LIST_HEAD(&req->node); | ||
263 | list_add_tail(&req->node, &pqf->list); | ||
264 | pqf->effective_flags |= val; | ||
265 | break; | ||
266 | default: | ||
267 | /* no action */ | ||
268 | ; | ||
269 | } | ||
270 | |||
271 | curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags; | ||
272 | |||
273 | spin_unlock_irqrestore(&pm_qos_lock, irqflags); | ||
274 | |||
275 | return prev_value != curr_value; | ||
276 | } | ||
277 | |||
278 | /** | ||
216 | * pm_qos_request - returns current system wide qos expectation | 279 | * pm_qos_request - returns current system wide qos expectation |
217 | * @pm_qos_class: identification of which qos value is requested | 280 | * @pm_qos_class: identification of which qos value is requested |
218 | * | 281 | * |
@@ -500,7 +563,7 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, | |||
500 | } else { | 563 | } else { |
501 | ascii_value[count] = '\0'; | 564 | ascii_value[count] = '\0'; |
502 | } | 565 | } |
503 | ret = strict_strtoul(ascii_value, 16, &ulval); | 566 | ret = kstrtoul(ascii_value, 16, &ulval); |
504 | if (ret) { | 567 | if (ret) { |
505 | pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret); | 568 | pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret); |
506 | return -EINVAL; | 569 | return -EINVAL; |
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 3c9d764eb0d8..7c33ed200410 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
@@ -126,7 +126,7 @@ static int swsusp_extents_insert(unsigned long swap_offset) | |||
126 | 126 | ||
127 | /* Figure out where to put the new node */ | 127 | /* Figure out where to put the new node */ |
128 | while (*new) { | 128 | while (*new) { |
129 | ext = container_of(*new, struct swsusp_extent, node); | 129 | ext = rb_entry(*new, struct swsusp_extent, node); |
130 | parent = *new; | 130 | parent = *new; |
131 | if (swap_offset < ext->start) { | 131 | if (swap_offset < ext->start) { |
132 | /* Try to merge */ | 132 | /* Try to merge */ |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index a40260885265..6f337068dc4c 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -526,6 +526,8 @@ void tick_nohz_irq_exit(void) | |||
526 | if (!ts->inidle) | 526 | if (!ts->inidle) |
527 | return; | 527 | return; |
528 | 528 | ||
529 | /* Cancel the timer because CPU already waken up from the C-states*/ | ||
530 | menu_hrtimer_cancel(); | ||
529 | __tick_nohz_idle_enter(ts); | 531 | __tick_nohz_idle_enter(ts); |
530 | } | 532 | } |
531 | 533 | ||
@@ -621,6 +623,8 @@ void tick_nohz_idle_exit(void) | |||
621 | 623 | ||
622 | ts->inidle = 0; | 624 | ts->inidle = 0; |
623 | 625 | ||
626 | /* Cancel the timer because CPU already waken up from the C-states*/ | ||
627 | menu_hrtimer_cancel(); | ||
624 | if (ts->idle_active || ts->tick_stopped) | 628 | if (ts->idle_active || ts->tick_stopped) |
625 | now = ktime_get(); | 629 | now = ktime_get(); |
626 | 630 | ||
diff --git a/tools/power/cpupower/.gitignore b/tools/power/cpupower/.gitignore index 8a83dd2ffc11..d42073f12609 100644 --- a/tools/power/cpupower/.gitignore +++ b/tools/power/cpupower/.gitignore | |||
@@ -20,3 +20,10 @@ utils/cpufreq-set.o | |||
20 | utils/cpufreq-aperf.o | 20 | utils/cpufreq-aperf.o |
21 | cpupower | 21 | cpupower |
22 | bench/cpufreq-bench | 22 | bench/cpufreq-bench |
23 | debug/kernel/Module.symvers | ||
24 | debug/i386/centrino-decode | ||
25 | debug/i386/dump_psb | ||
26 | debug/i386/intel_gsic | ||
27 | debug/i386/powernow-k8-decode | ||
28 | debug/x86_64/centrino-decode | ||
29 | debug/x86_64/powernow-k8-decode | ||
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile index cf397bd26d0c..d875a74a3bdf 100644 --- a/tools/power/cpupower/Makefile +++ b/tools/power/cpupower/Makefile | |||
@@ -253,7 +253,8 @@ clean: | |||
253 | | xargs rm -f | 253 | | xargs rm -f |
254 | -rm -f $(OUTPUT)cpupower | 254 | -rm -f $(OUTPUT)cpupower |
255 | -rm -f $(OUTPUT)libcpupower.so* | 255 | -rm -f $(OUTPUT)libcpupower.so* |
256 | -rm -rf $(OUTPUT)po/*.{gmo,pot} | 256 | -rm -rf $(OUTPUT)po/*.gmo |
257 | -rm -rf $(OUTPUT)po/*.pot | ||
257 | $(MAKE) -C bench O=$(OUTPUT) clean | 258 | $(MAKE) -C bench O=$(OUTPUT) clean |
258 | 259 | ||
259 | 260 | ||
diff --git a/tools/power/cpupower/debug/i386/Makefile b/tools/power/cpupower/debug/i386/Makefile index 3ba158f0e287..c05cc0ac80c7 100644 --- a/tools/power/cpupower/debug/i386/Makefile +++ b/tools/power/cpupower/debug/i386/Makefile | |||
@@ -26,7 +26,10 @@ $(OUTPUT)powernow-k8-decode: powernow-k8-decode.c | |||
26 | all: $(OUTPUT)centrino-decode $(OUTPUT)dump_psb $(OUTPUT)intel_gsic $(OUTPUT)powernow-k8-decode | 26 | all: $(OUTPUT)centrino-decode $(OUTPUT)dump_psb $(OUTPUT)intel_gsic $(OUTPUT)powernow-k8-decode |
27 | 27 | ||
28 | clean: | 28 | clean: |
29 | rm -rf $(OUTPUT){centrino-decode,dump_psb,intel_gsic,powernow-k8-decode} | 29 | rm -rf $(OUTPUT)centrino-decode |
30 | rm -rf $(OUTPUT)dump_psb | ||
31 | rm -rf $(OUTPUT)intel_gsic | ||
32 | rm -rf $(OUTPUT)powernow-k8-decode | ||
30 | 33 | ||
31 | install: | 34 | install: |
32 | $(INSTALL) -d $(DESTDIR)${bindir} | 35 | $(INSTALL) -d $(DESTDIR)${bindir} |
diff --git a/tools/power/cpupower/man/cpupower-monitor.1 b/tools/power/cpupower/man/cpupower-monitor.1 index 1141c2073719..e01c35d13b6e 100644 --- a/tools/power/cpupower/man/cpupower-monitor.1 +++ b/tools/power/cpupower/man/cpupower-monitor.1 | |||
@@ -7,11 +7,11 @@ cpupower\-monitor \- Report processor frequency and idle statistics | |||
7 | .RB "\-l" | 7 | .RB "\-l" |
8 | 8 | ||
9 | .B cpupower monitor | 9 | .B cpupower monitor |
10 | .RB [ "\-m <mon1>," [ "<mon2>,..." ] ] | 10 | .RB [ -c ] [ "\-m <mon1>," [ "<mon2>,..." ] ] |
11 | .RB [ "\-i seconds" ] | 11 | .RB [ "\-i seconds" ] |
12 | .br | 12 | .br |
13 | .B cpupower monitor | 13 | .B cpupower monitor |
14 | .RB [ "\-m <mon1>," [ "<mon2>,..." ] ] | 14 | .RB [ -c ][ "\-m <mon1>," [ "<mon2>,..." ] ] |
15 | .RB command | 15 | .RB command |
16 | .br | 16 | .br |
17 | .SH DESCRIPTION | 17 | .SH DESCRIPTION |
@@ -64,6 +64,17 @@ Only display specific monitors. Use the monitor string(s) provided by \-l option | |||
64 | Measure intervall. | 64 | Measure intervall. |
65 | .RE | 65 | .RE |
66 | .PP | 66 | .PP |
67 | \-c | ||
68 | .RS 4 | ||
69 | Schedule the process on every core before starting and ending measuring. | ||
70 | This could be needed for the Idle_Stats monitor when no other MSR based | ||
71 | monitor (has to be run on the core that is measured) is run in parallel. | ||
72 | This is to wake up the processors from deeper sleep states and let the | ||
73 | kernel re | ||
74 | -account its cpuidle (C-state) information before reading the | ||
75 | cpuidle timings from sysfs. | ||
76 | .RE | ||
77 | .PP | ||
67 | command | 78 | command |
68 | .RS 4 | 79 | .RS 4 |
69 | Measure idle and frequency characteristics of an arbitrary command/workload. | 80 | Measure idle and frequency characteristics of an arbitrary command/workload. |
diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c index 906895d21cce..93b0aa74ca03 100644 --- a/tools/power/cpupower/utils/helpers/cpuid.c +++ b/tools/power/cpupower/utils/helpers/cpuid.c | |||
@@ -158,6 +158,8 @@ out: | |||
158 | cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO; | 158 | cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO; |
159 | case 0x2A: /* SNB */ | 159 | case 0x2A: /* SNB */ |
160 | case 0x2D: /* SNB Xeon */ | 160 | case 0x2D: /* SNB Xeon */ |
161 | case 0x3A: /* IVB */ | ||
162 | case 0x3E: /* IVB Xeon */ | ||
161 | cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO; | 163 | cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO; |
162 | cpu_info->caps |= CPUPOWER_CAP_IS_SNB; | 164 | cpu_info->caps |= CPUPOWER_CAP_IS_SNB; |
163 | break; | 165 | break; |
diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h index 2eb584cf2f55..aa9e95486a2d 100644 --- a/tools/power/cpupower/utils/helpers/helpers.h +++ b/tools/power/cpupower/utils/helpers/helpers.h | |||
@@ -92,6 +92,14 @@ extern int get_cpu_info(unsigned int cpu, struct cpupower_cpu_info *cpu_info); | |||
92 | extern struct cpupower_cpu_info cpupower_cpu_info; | 92 | extern struct cpupower_cpu_info cpupower_cpu_info; |
93 | /* cpuid and cpuinfo helpers **************************/ | 93 | /* cpuid and cpuinfo helpers **************************/ |
94 | 94 | ||
95 | struct cpuid_core_info { | ||
96 | int pkg; | ||
97 | int core; | ||
98 | int cpu; | ||
99 | |||
100 | /* flags */ | ||
101 | unsigned int is_online:1; | ||
102 | }; | ||
95 | 103 | ||
96 | /* CPU topology/hierarchy parsing ******************/ | 104 | /* CPU topology/hierarchy parsing ******************/ |
97 | struct cpupower_topology { | 105 | struct cpupower_topology { |
@@ -101,18 +109,12 @@ struct cpupower_topology { | |||
101 | unsigned int threads; /* per core */ | 109 | unsigned int threads; /* per core */ |
102 | 110 | ||
103 | /* Array gets mallocated with cores entries, holding per core info */ | 111 | /* Array gets mallocated with cores entries, holding per core info */ |
104 | struct { | 112 | struct cpuid_core_info *core_info; |
105 | int pkg; | ||
106 | int core; | ||
107 | int cpu; | ||
108 | |||
109 | /* flags */ | ||
110 | unsigned int is_online:1; | ||
111 | } *core_info; | ||
112 | }; | 113 | }; |
113 | 114 | ||
114 | extern int get_cpu_topology(struct cpupower_topology *cpu_top); | 115 | extern int get_cpu_topology(struct cpupower_topology *cpu_top); |
115 | extern void cpu_topology_release(struct cpupower_topology cpu_top); | 116 | extern void cpu_topology_release(struct cpupower_topology cpu_top); |
117 | |||
116 | /* CPU topology/hierarchy parsing ******************/ | 118 | /* CPU topology/hierarchy parsing ******************/ |
117 | 119 | ||
118 | /* X86 ONLY ****************************************/ | 120 | /* X86 ONLY ****************************************/ |
diff --git a/tools/power/cpupower/utils/helpers/sysfs.c b/tools/power/cpupower/utils/helpers/sysfs.c index 96e28c124b5c..38ab91629463 100644 --- a/tools/power/cpupower/utils/helpers/sysfs.c +++ b/tools/power/cpupower/utils/helpers/sysfs.c | |||
@@ -37,25 +37,6 @@ unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen) | |||
37 | return (unsigned int) numread; | 37 | return (unsigned int) numread; |
38 | } | 38 | } |
39 | 39 | ||
40 | static unsigned int sysfs_write_file(const char *path, | ||
41 | const char *value, size_t len) | ||
42 | { | ||
43 | int fd; | ||
44 | ssize_t numwrite; | ||
45 | |||
46 | fd = open(path, O_WRONLY); | ||
47 | if (fd == -1) | ||
48 | return 0; | ||
49 | |||
50 | numwrite = write(fd, value, len); | ||
51 | if (numwrite < 1) { | ||
52 | close(fd); | ||
53 | return 0; | ||
54 | } | ||
55 | close(fd); | ||
56 | return (unsigned int) numwrite; | ||
57 | } | ||
58 | |||
59 | /* | 40 | /* |
60 | * Detect whether a CPU is online | 41 | * Detect whether a CPU is online |
61 | * | 42 | * |
diff --git a/tools/power/cpupower/utils/helpers/topology.c b/tools/power/cpupower/utils/helpers/topology.c index 4eae2c47ba48..c13120af519b 100644 --- a/tools/power/cpupower/utils/helpers/topology.c +++ b/tools/power/cpupower/utils/helpers/topology.c | |||
@@ -20,9 +20,8 @@ | |||
20 | #include <helpers/sysfs.h> | 20 | #include <helpers/sysfs.h> |
21 | 21 | ||
22 | /* returns -1 on failure, 0 on success */ | 22 | /* returns -1 on failure, 0 on success */ |
23 | int sysfs_topology_read_file(unsigned int cpu, const char *fname) | 23 | static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *result) |
24 | { | 24 | { |
25 | unsigned long value; | ||
26 | char linebuf[MAX_LINE_LEN]; | 25 | char linebuf[MAX_LINE_LEN]; |
27 | char *endp; | 26 | char *endp; |
28 | char path[SYSFS_PATH_MAX]; | 27 | char path[SYSFS_PATH_MAX]; |
@@ -31,20 +30,12 @@ int sysfs_topology_read_file(unsigned int cpu, const char *fname) | |||
31 | cpu, fname); | 30 | cpu, fname); |
32 | if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0) | 31 | if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0) |
33 | return -1; | 32 | return -1; |
34 | value = strtoul(linebuf, &endp, 0); | 33 | *result = strtol(linebuf, &endp, 0); |
35 | if (endp == linebuf || errno == ERANGE) | 34 | if (endp == linebuf || errno == ERANGE) |
36 | return -1; | 35 | return -1; |
37 | return value; | 36 | return 0; |
38 | } | 37 | } |
39 | 38 | ||
40 | struct cpuid_core_info { | ||
41 | unsigned int pkg; | ||
42 | unsigned int thread; | ||
43 | unsigned int cpu; | ||
44 | /* flags */ | ||
45 | unsigned int is_online:1; | ||
46 | }; | ||
47 | |||
48 | static int __compare(const void *t1, const void *t2) | 39 | static int __compare(const void *t1, const void *t2) |
49 | { | 40 | { |
50 | struct cpuid_core_info *top1 = (struct cpuid_core_info *)t1; | 41 | struct cpuid_core_info *top1 = (struct cpuid_core_info *)t1; |
@@ -53,9 +44,9 @@ static int __compare(const void *t1, const void *t2) | |||
53 | return -1; | 44 | return -1; |
54 | else if (top1->pkg > top2->pkg) | 45 | else if (top1->pkg > top2->pkg) |
55 | return 1; | 46 | return 1; |
56 | else if (top1->thread < top2->thread) | 47 | else if (top1->core < top2->core) |
57 | return -1; | 48 | return -1; |
58 | else if (top1->thread > top2->thread) | 49 | else if (top1->core > top2->core) |
59 | return 1; | 50 | return 1; |
60 | else if (top1->cpu < top2->cpu) | 51 | else if (top1->cpu < top2->cpu) |
61 | return -1; | 52 | return -1; |
@@ -73,28 +64,42 @@ static int __compare(const void *t1, const void *t2) | |||
73 | */ | 64 | */ |
74 | int get_cpu_topology(struct cpupower_topology *cpu_top) | 65 | int get_cpu_topology(struct cpupower_topology *cpu_top) |
75 | { | 66 | { |
76 | int cpu, cpus = sysconf(_SC_NPROCESSORS_CONF); | 67 | int cpu, last_pkg, cpus = sysconf(_SC_NPROCESSORS_CONF); |
77 | 68 | ||
78 | cpu_top->core_info = malloc(sizeof(struct cpupower_topology) * cpus); | 69 | cpu_top->core_info = malloc(sizeof(struct cpuid_core_info) * cpus); |
79 | if (cpu_top->core_info == NULL) | 70 | if (cpu_top->core_info == NULL) |
80 | return -ENOMEM; | 71 | return -ENOMEM; |
81 | cpu_top->pkgs = cpu_top->cores = 0; | 72 | cpu_top->pkgs = cpu_top->cores = 0; |
82 | for (cpu = 0; cpu < cpus; cpu++) { | 73 | for (cpu = 0; cpu < cpus; cpu++) { |
83 | cpu_top->core_info[cpu].cpu = cpu; | 74 | cpu_top->core_info[cpu].cpu = cpu; |
84 | cpu_top->core_info[cpu].is_online = sysfs_is_cpu_online(cpu); | 75 | cpu_top->core_info[cpu].is_online = sysfs_is_cpu_online(cpu); |
85 | cpu_top->core_info[cpu].pkg = | 76 | if(sysfs_topology_read_file( |
86 | sysfs_topology_read_file(cpu, "physical_package_id"); | 77 | cpu, |
87 | if ((int)cpu_top->core_info[cpu].pkg != -1 && | 78 | "physical_package_id", |
88 | cpu_top->core_info[cpu].pkg > cpu_top->pkgs) | 79 | &(cpu_top->core_info[cpu].pkg)) < 0) |
89 | cpu_top->pkgs = cpu_top->core_info[cpu].pkg; | 80 | return -1; |
90 | cpu_top->core_info[cpu].core = | 81 | if(sysfs_topology_read_file( |
91 | sysfs_topology_read_file(cpu, "core_id"); | 82 | cpu, |
83 | "core_id", | ||
84 | &(cpu_top->core_info[cpu].core)) < 0) | ||
85 | return -1; | ||
92 | } | 86 | } |
93 | cpu_top->pkgs++; | ||
94 | 87 | ||
95 | qsort(cpu_top->core_info, cpus, sizeof(struct cpuid_core_info), | 88 | qsort(cpu_top->core_info, cpus, sizeof(struct cpuid_core_info), |
96 | __compare); | 89 | __compare); |
97 | 90 | ||
91 | /* Count the number of distinct pkgs values. This works | ||
92 | because the primary sort of the core_info struct was just | ||
93 | done by pkg value. */ | ||
94 | last_pkg = cpu_top->core_info[0].pkg; | ||
95 | for(cpu = 1; cpu < cpus; cpu++) { | ||
96 | if(cpu_top->core_info[cpu].pkg != last_pkg) { | ||
97 | last_pkg = cpu_top->core_info[cpu].pkg; | ||
98 | cpu_top->pkgs++; | ||
99 | } | ||
100 | } | ||
101 | cpu_top->pkgs++; | ||
102 | |||
98 | /* Intel's cores count is not consecutively numbered, there may | 103 | /* Intel's cores count is not consecutively numbered, there may |
99 | * be a core_id of 3, but none of 2. Assume there always is 0 | 104 | * be a core_id of 3, but none of 2. Assume there always is 0 |
100 | * Get amount of cores by counting duplicates in a package | 105 | * Get amount of cores by counting duplicates in a package |
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c index 0d6571e418db..c4bae9203a69 100644 --- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c +++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c | |||
@@ -39,6 +39,7 @@ static int mode; | |||
39 | static int interval = 1; | 39 | static int interval = 1; |
40 | static char *show_monitors_param; | 40 | static char *show_monitors_param; |
41 | static struct cpupower_topology cpu_top; | 41 | static struct cpupower_topology cpu_top; |
42 | static unsigned int wake_cpus; | ||
42 | 43 | ||
43 | /* ToDo: Document this in the manpage */ | 44 | /* ToDo: Document this in the manpage */ |
44 | static char range_abbr[RANGE_MAX] = { 'T', 'C', 'P', 'M', }; | 45 | static char range_abbr[RANGE_MAX] = { 'T', 'C', 'P', 'M', }; |
@@ -84,7 +85,7 @@ int fill_string_with_spaces(char *s, int n) | |||
84 | void print_header(int topology_depth) | 85 | void print_header(int topology_depth) |
85 | { | 86 | { |
86 | int unsigned mon; | 87 | int unsigned mon; |
87 | int state, need_len, pr_mon_len; | 88 | int state, need_len; |
88 | cstate_t s; | 89 | cstate_t s; |
89 | char buf[128] = ""; | 90 | char buf[128] = ""; |
90 | int percent_width = 4; | 91 | int percent_width = 4; |
@@ -93,7 +94,6 @@ void print_header(int topology_depth) | |||
93 | printf("%s|", buf); | 94 | printf("%s|", buf); |
94 | 95 | ||
95 | for (mon = 0; mon < avail_monitors; mon++) { | 96 | for (mon = 0; mon < avail_monitors; mon++) { |
96 | pr_mon_len = 0; | ||
97 | need_len = monitors[mon]->hw_states_num * (percent_width + 3) | 97 | need_len = monitors[mon]->hw_states_num * (percent_width + 3) |
98 | - 1; | 98 | - 1; |
99 | if (mon != 0) { | 99 | if (mon != 0) { |
@@ -315,16 +315,28 @@ int fork_it(char **argv) | |||
315 | int do_interval_measure(int i) | 315 | int do_interval_measure(int i) |
316 | { | 316 | { |
317 | unsigned int num; | 317 | unsigned int num; |
318 | int cpu; | ||
319 | |||
320 | if (wake_cpus) | ||
321 | for (cpu = 0; cpu < cpu_count; cpu++) | ||
322 | bind_cpu(cpu); | ||
318 | 323 | ||
319 | for (num = 0; num < avail_monitors; num++) { | 324 | for (num = 0; num < avail_monitors; num++) { |
320 | dprint("HW C-state residency monitor: %s - States: %d\n", | 325 | dprint("HW C-state residency monitor: %s - States: %d\n", |
321 | monitors[num]->name, monitors[num]->hw_states_num); | 326 | monitors[num]->name, monitors[num]->hw_states_num); |
322 | monitors[num]->start(); | 327 | monitors[num]->start(); |
323 | } | 328 | } |
329 | |||
324 | sleep(i); | 330 | sleep(i); |
331 | |||
332 | if (wake_cpus) | ||
333 | for (cpu = 0; cpu < cpu_count; cpu++) | ||
334 | bind_cpu(cpu); | ||
335 | |||
325 | for (num = 0; num < avail_monitors; num++) | 336 | for (num = 0; num < avail_monitors; num++) |
326 | monitors[num]->stop(); | 337 | monitors[num]->stop(); |
327 | 338 | ||
339 | |||
328 | return 0; | 340 | return 0; |
329 | } | 341 | } |
330 | 342 | ||
@@ -333,7 +345,7 @@ static void cmdline(int argc, char *argv[]) | |||
333 | int opt; | 345 | int opt; |
334 | progname = basename(argv[0]); | 346 | progname = basename(argv[0]); |
335 | 347 | ||
336 | while ((opt = getopt(argc, argv, "+li:m:")) != -1) { | 348 | while ((opt = getopt(argc, argv, "+lci:m:")) != -1) { |
337 | switch (opt) { | 349 | switch (opt) { |
338 | case 'l': | 350 | case 'l': |
339 | if (mode) | 351 | if (mode) |
@@ -352,6 +364,9 @@ static void cmdline(int argc, char *argv[]) | |||
352 | mode = show; | 364 | mode = show; |
353 | show_monitors_param = optarg; | 365 | show_monitors_param = optarg; |
354 | break; | 366 | break; |
367 | case 'c': | ||
368 | wake_cpus = 1; | ||
369 | break; | ||
355 | default: | 370 | default: |
356 | print_wrong_arg_exit(); | 371 | print_wrong_arg_exit(); |
357 | } | 372 | } |
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h index 9312ee1f2dbc..9e43f3371fbc 100644 --- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h +++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h | |||
@@ -65,4 +65,21 @@ extern long long timespec_diff_us(struct timespec start, struct timespec end); | |||
65 | "could be inaccurate\n"), mes, ov); \ | 65 | "could be inaccurate\n"), mes, ov); \ |
66 | } | 66 | } |
67 | 67 | ||
68 | |||
69 | /* Taken over from x86info project sources -> return 0 on success */ | ||
70 | #include <sched.h> | ||
71 | #include <sys/types.h> | ||
72 | #include <unistd.h> | ||
73 | static inline int bind_cpu(int cpu) | ||
74 | { | ||
75 | cpu_set_t set; | ||
76 | |||
77 | if (sched_getaffinity(getpid(), sizeof(set), &set) == 0) { | ||
78 | CPU_ZERO(&set); | ||
79 | CPU_SET(cpu, &set); | ||
80 | return sched_setaffinity(getpid(), sizeof(set), &set); | ||
81 | } | ||
82 | return 1; | ||
83 | } | ||
84 | |||
68 | #endif /* __CPUIDLE_INFO_HW__ */ | 85 | #endif /* __CPUIDLE_INFO_HW__ */ |
diff --git a/tools/power/cpupower/utils/idle_monitor/snb_idle.c b/tools/power/cpupower/utils/idle_monitor/snb_idle.c index a1bc07cd53e1..a99b43b97d6d 100644 --- a/tools/power/cpupower/utils/idle_monitor/snb_idle.c +++ b/tools/power/cpupower/utils/idle_monitor/snb_idle.c | |||
@@ -150,9 +150,15 @@ static struct cpuidle_monitor *snb_register(void) | |||
150 | || cpupower_cpu_info.family != 6) | 150 | || cpupower_cpu_info.family != 6) |
151 | return NULL; | 151 | return NULL; |
152 | 152 | ||
153 | if (cpupower_cpu_info.model != 0x2A | 153 | switch (cpupower_cpu_info.model) { |
154 | && cpupower_cpu_info.model != 0x2D) | 154 | case 0x2A: /* SNB */ |
155 | case 0x2D: /* SNB Xeon */ | ||
156 | case 0x3A: /* IVB */ | ||
157 | case 0x3E: /* IVB Xeon */ | ||
158 | break; | ||
159 | default: | ||
155 | return NULL; | 160 | return NULL; |
161 | } | ||
156 | 162 | ||
157 | is_valid = calloc(cpu_count, sizeof(int)); | 163 | is_valid = calloc(cpu_count, sizeof(int)); |
158 | for (num = 0; num < SNB_CSTATE_COUNT; num++) { | 164 | for (num = 0; num < SNB_CSTATE_COUNT; num++) { |