547 files changed, 12617 insertions, 4445 deletions
@@ -1823,6 +1823,11 @@ S: Kattreinstr 38 | |||
1823 | S: D-64295 | 1823 | S: D-64295 |
1824 | S: Germany | 1824 | S: Germany |
1825 | 1825 | ||
1826 | N: Avi Kivity | ||
1827 | E: avi.kivity@gmail.com | ||
1828 | D: Kernel-based Virtual Machine (KVM) | ||
1829 | S: Ra'annana, Israel | ||
1830 | |||
1826 | N: Andi Kleen | 1831 | N: Andi Kleen |
1827 | E: andi@firstfloor.org | 1832 | E: andi@firstfloor.org |
1828 | U: http://www.halobates.de | 1833 | U: http://www.halobates.de |
diff --git a/Documentation/ABI/testing/sysfs-class-devfreq b/Documentation/ABI/testing/sysfs-class-devfreq index 23d78b5aab11..0ba6ea2f89d9 100644 --- a/Documentation/ABI/testing/sysfs-class-devfreq +++ b/Documentation/ABI/testing/sysfs-class-devfreq | |||
@@ -11,7 +11,7 @@ What: /sys/class/devfreq/.../governor | |||
11 | Date: September 2011 | 11 | Date: September 2011 |
12 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> | 12 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> |
13 | Description: | 13 | Description: |
14 | The /sys/class/devfreq/.../governor shows the name of the | 14 | The /sys/class/devfreq/.../governor shows or sets the name of the |
15 | governor used by the corresponding devfreq object. | 15 | governor used by the corresponding devfreq object. |
16 | 16 | ||
17 | What: /sys/class/devfreq/.../cur_freq | 17 | What: /sys/class/devfreq/.../cur_freq |
@@ -19,15 +19,16 @@ Date: September 2011 | |||
19 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> | 19 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> |
20 | Description: | 20 | Description: |
21 | The /sys/class/devfreq/.../cur_freq shows the current | 21 | The /sys/class/devfreq/.../cur_freq shows the current |
22 | frequency of the corresponding devfreq object. | 22 | frequency of the corresponding devfreq object. Same as |
23 | target_freq when get_cur_freq() is not implemented by | ||
24 | the devfreq driver. | ||
23 | 25 | ||
24 | What: /sys/class/devfreq/.../central_polling | 26 | What: /sys/class/devfreq/.../target_freq |
25 | Date: September 2011 | 27 | Date: September 2012 |
26 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> | 28 | Contact: Rajagopal Venkat <rajagopal.venkat@linaro.org> |
27 | Description: | 29 | Description: |
28 | The /sys/class/devfreq/.../central_polling shows whether | 30 | The /sys/class/devfreq/.../target_freq shows the next governor |
29 | the devfreq ojbect is using devfreq-provided central | 31 | predicted target frequency of the corresponding devfreq object. |
30 | polling mechanism or not. | ||
31 | 32 | ||
32 | What: /sys/class/devfreq/.../polling_interval | 33 | What: /sys/class/devfreq/.../polling_interval |
33 | Date: September 2011 | 34 | Date: September 2011 |
@@ -43,6 +44,17 @@ Description: | |||
43 | (/sys/class/devfreq/.../central_polling is 0), this value | 44 | (/sys/class/devfreq/.../central_polling is 0), this value |
44 | may be useless. | 45 | may be useless. |
45 | 46 | ||
47 | What: /sys/class/devfreq/.../trans_stat | ||
48 | Date: October 2012 | ||
49 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> | ||
50 | Description: | ||
51 | This ABI shows the statistics of devfreq behavior on a | ||
52 | specific device. It shows the time spent in each state and | ||
53 | the number of transitions between states. | ||
54 | In order to activate this ABI, the devfreq target device | ||
55 | driver should provide the list of available frequencies | ||
56 | with its profile. | ||
57 | |||
46 | What: /sys/class/devfreq/.../userspace/set_freq | 58 | What: /sys/class/devfreq/.../userspace/set_freq |
47 | Date: September 2011 | 59 | Date: September 2011 |
48 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> | 60 | Contact: MyungJoo Ham <myungjoo.ham@samsung.com> |
@@ -50,3 +62,19 @@ Description: | |||
50 | The /sys/class/devfreq/.../userspace/set_freq shows and | 62 | The /sys/class/devfreq/.../userspace/set_freq shows and |
51 | sets the requested frequency for the devfreq object if | 63 | sets the requested frequency for the devfreq object if |
52 | userspace governor is in effect. | 64 | userspace governor is in effect. |
65 | |||
66 | What: /sys/class/devfreq/.../available_frequencies | ||
67 | Date: October 2012 | ||
68 | Contact: Nishanth Menon <nm@ti.com> | ||
69 | Description: | ||
70 | The /sys/class/devfreq/.../available_frequencies shows | ||
71 | the available frequencies of the corresponding devfreq object. | ||
72 | This is a snapshot of available frequencies and not limited | ||
73 | by the min/max frequency restrictions. | ||
74 | |||
75 | What: /sys/class/devfreq/.../available_governors | ||
76 | Date: October 2012 | ||
77 | Contact: Nishanth Menon <nm@ti.com> | ||
78 | Description: | ||
79 | The /sys/class/devfreq/.../available_governors shows | ||
80 | currently available governors in the system. | ||
diff --git a/Documentation/ABI/testing/sysfs-devices-power b/Documentation/ABI/testing/sysfs-devices-power index 45000f0db4d4..7fc2997b23a6 100644 --- a/Documentation/ABI/testing/sysfs-devices-power +++ b/Documentation/ABI/testing/sysfs-devices-power | |||
@@ -204,3 +204,34 @@ Description: | |||
204 | 204 | ||
205 | This attribute has no effect on system-wide suspend/resume and | 205 | This attribute has no effect on system-wide suspend/resume and |
206 | hibernation. | 206 | hibernation. |
207 | |||
208 | What: /sys/devices/.../power/pm_qos_no_power_off | ||
209 | Date: September 2012 | ||
210 | Contact: Rafael J. Wysocki <rjw@sisk.pl> | ||
211 | Description: | ||
212 | The /sys/devices/.../power/pm_qos_no_power_off attribute | ||
213 | is used for manipulating the PM QoS "no power off" flag. If | ||
214 | set, this flag indicates to the kernel that power should not | ||
215 | be removed entirely from the device. | ||
216 | |||
217 | Not all drivers support this attribute. If it isn't supported, | ||
218 | it is not present. | ||
219 | |||
220 | This attribute has no effect on system-wide suspend/resume and | ||
221 | hibernation. | ||
222 | |||
223 | What: /sys/devices/.../power/pm_qos_remote_wakeup | ||
224 | Date: September 2012 | ||
225 | Contact: Rafael J. Wysocki <rjw@sisk.pl> | ||
226 | Description: | ||
227 | The /sys/devices/.../power/pm_qos_remote_wakeup attribute | ||
228 | is used for manipulating the PM QoS "remote wakeup required" | ||
229 | flag. If set, this flag indicates to the kernel that the | ||
230 | device is a source of user events that have to be signaled from | ||
231 | its low-power states. | ||
232 | |||
233 | Not all drivers support this attribute. If it isn't supported, | ||
234 | it is not present. | ||
235 | |||
236 | This attribute has no effect on system-wide suspend/resume and | ||
237 | hibernation. | ||
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt new file mode 100644 index 000000000000..4f27785ca0c8 --- /dev/null +++ b/Documentation/acpi/enumeration.txt | |||
@@ -0,0 +1,227 @@ | |||
1 | ACPI based device enumeration | ||
2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
3 | ACPI 5 introduced a set of new resources (UartSerialBus, I2cSerialBus, | ||
4 | SpiSerialBus, GpioIo and GpioInt) which can be used in enumerating slave | ||
5 | devices behind serial bus controllers. | ||
6 | |||
7 | In addition we are starting to see peripherals integrated into the | ||
8 | SoC/Chipset that appear only in the ACPI namespace. These are typically devices | ||
9 | that are accessed through memory-mapped registers. | ||
10 | |||
11 | In order to support this and re-use the existing drivers as much as | ||
12 | possible we decided to do the following: | ||
13 | |||
14 | o Devices that have no bus connector resource are represented as | ||
15 | platform devices. | ||
16 | |||
17 | o Devices behind real busses where there is a connector resource | ||
18 | are represented as struct spi_device or struct i2c_client | ||
19 | (standard UARTs are not busses so there is no struct uart_device). | ||
20 | |||
21 | As both ACPI and Device Tree represent a tree of devices (and their | ||
22 | resources) this implementation follows the Device Tree way as much as | ||
23 | possible. | ||
24 | |||
25 | The ACPI implementation enumerates devices behind busses (platform, SPI and | ||
26 | I2C), creates the physical devices and binds them to their ACPI handle in | ||
27 | the ACPI namespace. | ||
28 | |||
29 | This means that when ACPI_HANDLE(dev) returns non-NULL the device was | ||
30 | enumerated from ACPI namespace. This handle can be used to extract other | ||
31 | device-specific configuration. There is an example of this below. | ||
32 | |||
33 | Platform bus support | ||
34 | ~~~~~~~~~~~~~~~~~~~~ | ||
35 | Since we are using platform devices to represent devices that are not | ||
36 | connected to any physical bus we only need to implement a platform driver | ||
37 | for the device and add supported ACPI IDs. If this same IP-block is used on | ||
38 | some other non-ACPI platform, the driver might work out of the box or need | ||
39 | some minor changes. | ||
40 | |||
41 | Adding ACPI support for an existing driver should be pretty | ||
42 | straightforward. Here is the simplest example: | ||
43 | |||
44 | #ifdef CONFIG_ACPI | ||
45 | static struct acpi_device_id mydrv_acpi_match[] = { | ||
46 | /* ACPI IDs here */ | ||
47 | { } | ||
48 | }; | ||
49 | MODULE_DEVICE_TABLE(acpi, mydrv_acpi_match); | ||
50 | #endif | ||
51 | |||
52 | static struct platform_driver my_driver = { | ||
53 | ... | ||
54 | .driver = { | ||
55 | .acpi_match_table = ACPI_PTR(mydrv_acpi_match), | ||
56 | }, | ||
57 | }; | ||
58 | |||
59 | If the driver needs to perform more complex initialization like getting and | ||
60 | configuring GPIOs it can get its ACPI handle and extract this information | ||
61 | from ACPI tables. | ||
62 | |||
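As a rough, hedged sketch of that pattern (the mydrv_* names below are made up for illustration and are not part of the patch), a platform driver probe routine could test the handle before reaching for any ACPI-specific data:

	#include <linux/acpi.h>
	#include <linux/platform_device.h>

	static int mydrv_probe(struct platform_device *pdev)
	{
		/* Non-NULL only when the device was enumerated from ACPI */
		acpi_handle handle = ACPI_HANDLE(&pdev->dev);

		if (!handle)
			return -ENODEV;

		/*
		 * The handle can now be used to evaluate objects such as
		 * _DSM to extract device-specific configuration.
		 */
		return 0;
	}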
63 | Currently the kernel is not able to automatically determine from which ACPI | ||
64 | device it should make the corresponding platform device so we need to add | ||
65 | the ACPI device explicitly to the acpi_platform_device_ids list defined in | ||
66 | drivers/acpi/scan.c. This limitation is only for the platform devices; SPI | ||
67 | and I2C devices are created automatically as described below. | ||
68 | |||
69 | SPI serial bus support | ||
70 | ~~~~~~~~~~~~~~~~~~~~~~ | ||
71 | Slave devices behind an SPI bus have a SpiSerialBus resource attached to them. | ||
72 | This is extracted automatically by the SPI core and the slave devices are | ||
73 | enumerated once spi_register_master() is called by the bus driver. | ||
74 | |||
75 | Here is what the ACPI namespace for a SPI slave might look like: | ||
76 | |||
77 | Device (EEP0) | ||
78 | { | ||
79 | Name (_ADR, 1) | ||
80 | Name (_CID, Package() { | ||
81 | "ATML0025", | ||
82 | "AT25", | ||
83 | }) | ||
84 | ... | ||
85 | Method (_CRS, 0, NotSerialized) | ||
86 | { | ||
87 | SPISerialBus(1, PolarityLow, FourWireMode, 8, | ||
88 | ControllerInitiated, 1000000, ClockPolarityLow, | ||
89 | ClockPhaseFirst, "\\_SB.PCI0.SPI1",) | ||
90 | } | ||
91 | ... | ||
92 | |||
93 | The SPI device drivers only need to add ACPI IDs in a similar way as with | ||
94 | the platform device drivers. Below is an example where we add ACPI support | ||
95 | to the at25 SPI eeprom driver (this is meant for the above ACPI snippet): | ||
96 | |||
97 | #ifdef CONFIG_ACPI | ||
98 | static struct acpi_device_id at25_acpi_match[] = { | ||
99 | { "AT25", 0 }, | ||
100 | { }, | ||
101 | }; | ||
102 | MODULE_DEVICE_TABLE(acpi, at25_acpi_match); | ||
103 | #endif | ||
104 | |||
105 | static struct spi_driver at25_driver = { | ||
106 | .driver = { | ||
107 | ... | ||
108 | .acpi_match_table = ACPI_PTR(at25_acpi_match), | ||
109 | }, | ||
110 | }; | ||
111 | |||
112 | Note that this driver actually needs more information like page size of the | ||
113 | eeprom etc. but at the time of writing there is no standard way of | ||
114 | passing those. One idea is to return this from a _DSM method like: | ||
115 | |||
116 | Device (EEP0) | ||
117 | { | ||
118 | ... | ||
119 | Method (_DSM, 4, NotSerialized) | ||
120 | { | ||
121 | Store (Package (6) | ||
122 | { | ||
123 | "byte-len", 1024, | ||
124 | "addr-mode", 2, | ||
125 | "page-size, 32 | ||
126 | }, Local0) | ||
127 | |||
128 | // Check UUIDs etc. | ||
129 | |||
130 | Return (Local0) | ||
131 | } | ||
132 | |||
133 | Then the at25 SPI driver can get this configuration by calling _DSM on its | ||
134 | ACPI handle like: | ||
135 | |||
136 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
137 | struct acpi_object_list input; | ||
138 | acpi_status status; | ||
139 | |||
140 | /* Fill in the input buffer */ | ||
141 | |||
142 | status = acpi_evaluate_object(ACPI_HANDLE(&spi->dev), "_DSM", | ||
143 | &input, &output); | ||
144 | if (ACPI_FAILURE(status)) | ||
145 | /* Handle the error */ | ||
146 | |||
147 | /* Extract the data here */ | ||
148 | |||
149 | kfree(output.pointer); | ||
150 | |||
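The "/* Fill in the input buffer */" step in the snippet above is left open. As a sketch only, assuming the usual four _DSM arguments (UUID, revision, function index and an argument package) and a device-specific UUID whose bytes are omitted here, the input list could be populated with the ACPICA object types like this:

	union acpi_object args[4];
	u8 uuid[16] = { 0 };	/* device specific _DSM UUID bytes */

	args[0].type = ACPI_TYPE_BUFFER;	/* UUID */
	args[0].buffer.length = sizeof(uuid);
	args[0].buffer.pointer = uuid;

	args[1].type = ACPI_TYPE_INTEGER;	/* revision ID */
	args[1].integer.value = 1;

	args[2].type = ACPI_TYPE_INTEGER;	/* function index */
	args[2].integer.value = 0;

	args[3].type = ACPI_TYPE_PACKAGE;	/* arguments, none here */
	args[3].package.count = 0;
	args[3].package.elements = NULL;

	input.count = ARRAY_SIZE(args);
	input.pointer = args;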
151 | I2C serial bus support | ||
152 | ~~~~~~~~~~~~~~~~~~~~~~ | ||
153 | The slaves behind an I2C bus controller only need to add the ACPI IDs, as | ||
154 | with the platform and SPI drivers. However, the I2C bus controller driver | ||
155 | needs to call acpi_i2c_register_devices() after it has added the adapter. | ||
156 | |||
157 | An I2C bus (controller) driver does: | ||
158 | |||
159 | ... | ||
160 | ret = i2c_add_numbered_adapter(adapter); | ||
161 | if (ret) | ||
162 | /* handle error */ | ||
163 | |||
164 | of_i2c_register_devices(adapter); | ||
165 | /* Enumerate the slave devices behind this bus via ACPI */ | ||
166 | acpi_i2c_register_devices(adapter); | ||
167 | |||
168 | Below is an example of how to add ACPI support to the existing mpu3050 | ||
169 | input driver: | ||
170 | |||
171 | #ifdef CONFIG_ACPI | ||
172 | static struct acpi_device_id mpu3050_acpi_match[] = { | ||
173 | { "MPU3050", 0 }, | ||
174 | { }, | ||
175 | }; | ||
176 | MODULE_DEVICE_TABLE(acpi, mpu3050_acpi_match); | ||
177 | #endif | ||
178 | |||
179 | static struct i2c_driver mpu3050_i2c_driver = { | ||
180 | .driver = { | ||
181 | .name = "mpu3050", | ||
182 | .owner = THIS_MODULE, | ||
183 | .pm = &mpu3050_pm, | ||
184 | .of_match_table = mpu3050_of_match, | ||
185 | .acpi_match_table = ACPI_PTR(mpu3050_acpi_match), | ||
186 | }, | ||
187 | .probe = mpu3050_probe, | ||
188 | .remove = __devexit_p(mpu3050_remove), | ||
189 | .id_table = mpu3050_ids, | ||
190 | }; | ||
191 | |||
192 | GPIO support | ||
193 | ~~~~~~~~~~~~ | ||
194 | ACPI 5 introduced two new resources to describe GPIO connections: GpioIo | ||
195 | and GpioInt. These resources can be used to pass GPIO numbers used by | ||
196 | the device to the driver. For example: | ||
197 | |||
198 | Method (_CRS, 0, NotSerialized) | ||
199 | { | ||
200 | Name (SBUF, ResourceTemplate() | ||
201 | { | ||
202 | GpioIo (Exclusive, PullDefault, 0x0000, 0x0000, | ||
203 | IoRestrictionOutputOnly, "\\_SB.PCI0.GPI0", | ||
204 | 0x00, ResourceConsumer,,) | ||
205 | { | ||
206 | // Pin List | ||
207 | 0x0055 | ||
208 | } | ||
209 | ... | ||
210 | |||
211 | Return (SBUF) | ||
212 | } | ||
213 | } | ||
214 | |||
215 | These GPIO numbers are controller relative and the path "\\_SB.PCI0.GPI0" | ||
216 | specifies the path to the controller. In order to use these GPIOs in Linux | ||
217 | we need to translate them to the Linux GPIO numbers. | ||
218 | |||
219 | The driver can do this by including <linux/acpi_gpio.h> and then calling | ||
220 | acpi_get_gpio(path, gpio). This will return the Linux GPIO number or | ||
221 | a negative errno if no translation was found. | ||
222 | |||
223 | Other GpioIo parameters must first be converted by the driver into a form | ||
224 | suitable for gpiolib before being passed on. | ||
225 | |||
226 | In the case of a GpioInt resource an additional call to gpio_to_irq() must be | ||
227 | made before calling request_irq(). | ||
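Putting the above together, a hedged sketch of a driver fragment that translates and claims the pin from the example _CRS could look as follows; the mydev label and the output flag are illustrative only:

	#include <linux/acpi_gpio.h>
	#include <linux/gpio.h>

	static int mydev_claim_gpio(void)
	{
		int gpio, ret;

		/* Translate pin 0x0055 of the controller named in _CRS above */
		gpio = acpi_get_gpio("\\_SB.PCI0.GPI0", 0x0055);
		if (gpio < 0)
			return gpio;	/* no translation found */

		ret = gpio_request_one(gpio, GPIOF_OUT_INIT_LOW, "mydev");
		if (ret)
			return ret;

		/*
		 * Had this been a GpioInt resource, the translated GPIO
		 * would additionally be mapped with gpio_to_irq() before
		 * calling request_irq().
		 */
		return gpio;
	}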
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt index c07f7b4fb88d..71c4da413444 100644 --- a/Documentation/cgroups/memory.txt +++ b/Documentation/cgroups/memory.txt | |||
@@ -466,6 +466,10 @@ Note: | |||
466 | 5.3 swappiness | 466 | 5.3 swappiness |
467 | 467 | ||
468 | Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only. | 468 | Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only. |
469 | Please note that, unlike the global swappiness, setting the memcg knob | ||
470 | to 0 really prevents any swapping even if swap storage is | ||
471 | available. This might invoke the memcg OOM killer if there are no file | ||
472 | pages to reclaim. | ||
469 | 473 | ||
470 | Following cgroups' swappiness can't be changed. | 474 | Following cgroups' swappiness can't be changed. |
471 | - root cgroup (uses /proc/sys/vm/swappiness). | 475 | - root cgroup (uses /proc/sys/vm/swappiness). |
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-spear.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-spear.txt new file mode 100644 index 000000000000..f3d44984d91c --- /dev/null +++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-spear.txt | |||
@@ -0,0 +1,42 @@ | |||
1 | SPEAr cpufreq driver | ||
2 | ------------------- | ||
3 | |||
4 | SPEAr SoC cpufreq driver for CPU frequency scaling. | ||
5 | It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) systems | ||
6 | which share a clock across all CPUs. | ||
7 | |||
8 | Required properties: | ||
9 | - cpufreq_tbl: Table of frequencies the CPU can be transitioned to, in | ||
10 | increasing order. | ||
11 | |||
12 | Optional properties: | ||
13 | - clock-latency: Specifies the maximum possible transition latency of the | ||
14 | clock, in nanoseconds. | ||
15 | |||
16 | Both required and optional properties listed above must be defined under node | ||
17 | /cpus/cpu@0. | ||
18 | |||
19 | Examples: | ||
20 | -------- | ||
21 | cpus { | ||
22 | |||
23 | <...> | ||
24 | |||
25 | cpu@0 { | ||
26 | compatible = "arm,cortex-a9"; | ||
27 | reg = <0>; | ||
28 | |||
29 | <...> | ||
30 | |||
31 | cpufreq_tbl = < 166000 | ||
32 | 200000 | ||
33 | 250000 | ||
34 | 300000 | ||
35 | 400000 | ||
36 | 500000 | ||
37 | 600000 >; | ||
38 | }; | ||
39 | |||
40 | <...> | ||
41 | |||
42 | }; | ||
diff --git a/Documentation/devicetree/bindings/net/mdio-gpio.txt b/Documentation/devicetree/bindings/net/mdio-gpio.txt index bc9549529014..c79bab025369 100644 --- a/Documentation/devicetree/bindings/net/mdio-gpio.txt +++ b/Documentation/devicetree/bindings/net/mdio-gpio.txt | |||
@@ -8,9 +8,16 @@ gpios property as described in section VIII.1 in the following order: | |||
8 | 8 | ||
9 | MDC, MDIO. | 9 | MDC, MDIO. |
10 | 10 | ||
11 | Note: Each gpio-mdio bus should have a correctly numbered alias in the | ||
12 | "aliases" node. | ||
13 | |||
11 | Example: | 14 | Example: |
12 | 15 | ||
13 | mdio { | 16 | aliases { |
17 | mdio-gpio0 = <&mdio0>; | ||
18 | }; | ||
19 | |||
20 | mdio0: mdio { | ||
14 | compatible = "virtual,mdio-gpio"; | 21 | compatible = "virtual,mdio-gpio"; |
15 | #address-cells = <1>; | 22 | #address-cells = <1>; |
16 | #size-cells = <0>; | 23 | #size-cells = <0>; |
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index a1793d670cd0..3844d21d6ca3 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt | |||
@@ -33,7 +33,7 @@ Table of Contents | |||
33 | 2 Modifying System Parameters | 33 | 2 Modifying System Parameters |
34 | 34 | ||
35 | 3 Per-Process Parameters | 35 | 3 Per-Process Parameters |
36 | 3.1 /proc/<pid>/oom_score_adj - Adjust the oom-killer | 36 | 3.1 /proc/<pid>/oom_adj & /proc/<pid>/oom_score_adj - Adjust the oom-killer |
37 | score | 37 | score |
38 | 3.2 /proc/<pid>/oom_score - Display current oom-killer score | 38 | 3.2 /proc/<pid>/oom_score - Display current oom-killer score |
39 | 3.3 /proc/<pid>/io - Display the IO accounting fields | 39 | 3.3 /proc/<pid>/io - Display the IO accounting fields |
@@ -1320,10 +1320,10 @@ of the kernel. | |||
1320 | CHAPTER 3: PER-PROCESS PARAMETERS | 1320 | CHAPTER 3: PER-PROCESS PARAMETERS |
1321 | ------------------------------------------------------------------------------ | 1321 | ------------------------------------------------------------------------------ |
1322 | 1322 | ||
1323 | 3.1 /proc/<pid>/oom_score_adj- Adjust the oom-killer score | 1323 | 3.1 /proc/<pid>/oom_adj & /proc/<pid>/oom_score_adj- Adjust the oom-killer score |
1324 | -------------------------------------------------------------------------------- | 1324 | -------------------------------------------------------------------------------- |
1325 | 1325 | ||
1326 | This file can be used to adjust the badness heuristic used to select which | 1326 | These files can be used to adjust the badness heuristic used to select which |
1327 | process gets killed in out of memory conditions. | 1327 | process gets killed in out of memory conditions. |
1328 | 1328 | ||
1329 | The badness heuristic assigns a value to each candidate task ranging from 0 | 1329 | The badness heuristic assigns a value to each candidate task ranging from 0 |
@@ -1361,6 +1361,12 @@ same system, cpuset, mempolicy, or memory controller resources to use at least | |||
1361 | equivalent to discounting 50% of the task's allowed memory from being considered | 1361 | equivalent to discounting 50% of the task's allowed memory from being considered |
1362 | as scoring against the task. | 1362 | as scoring against the task. |
1363 | 1363 | ||
1364 | For backwards compatibility with previous kernels, /proc/<pid>/oom_adj may also | ||
1365 | be used to tune the badness score. Its acceptable values range from -16 | ||
1366 | (OOM_ADJUST_MIN) to +15 (OOM_ADJUST_MAX), with the special value -17 | ||
1367 | (OOM_DISABLE) disabling oom killing entirely for that task. Its value is | ||
1368 | scaled linearly with /proc/<pid>/oom_score_adj. | ||
1369 | |||
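As an illustration only (the helper below is hypothetical; the exact constants and rounding should be taken from the kernel sources, and the usual -1000..1000 oom_score_adj range is assumed), the linear scaling amounts to roughly:

	/* Hypothetical sketch of the oom_adj -> oom_score_adj scaling */
	#define OOM_DISABLE		(-17)
	#define OOM_SCORE_ADJ_MAX	1000

	static int oom_adj_to_oom_score_adj(int oom_adj)
	{
		if (oom_adj == OOM_DISABLE)
			return -OOM_SCORE_ADJ_MAX;	/* oom killing disabled */

		/* e.g. an oom_adj of +15 lands close to the +1000 maximum */
		return oom_adj * OOM_SCORE_ADJ_MAX / -OOM_DISABLE;
	}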
1364 | The value of /proc/<pid>/oom_score_adj may be reduced no lower than the last | 1370 | The value of /proc/<pid>/oom_score_adj may be reduced no lower than the last |
1365 | value set by a CAP_SYS_RESOURCE process. To reduce the value any lower | 1371 | value set by a CAP_SYS_RESOURCE process. To reduce the value any lower |
1366 | requires CAP_SYS_RESOURCE. | 1372 | requires CAP_SYS_RESOURCE. |
@@ -1375,7 +1381,9 @@ minimal amount of work. | |||
1375 | ------------------------------------------------------------- | 1381 | ------------------------------------------------------------- |
1376 | 1382 | ||
1377 | This file can be used to check the current score used by the oom-killer for | 1383 | This file can be used to check the current score used by the oom-killer for |
1378 | any given <pid>. | 1384 | any given <pid>. Use it together with /proc/<pid>/oom_score_adj to tune which |
1385 | process should be killed in an out-of-memory situation. | ||
1386 | |||
1379 | 1387 | ||
1380 | 3.3 /proc/<pid>/io - Display the IO accounting fields | 1388 | 3.3 /proc/<pid>/io - Display the IO accounting fields |
1381 | ------------------------------------------------------- | 1389 | ------------------------------------------------------- |
diff --git a/Documentation/networking/netdev-features.txt b/Documentation/networking/netdev-features.txt index 4164f5c02e4b..f310edec8a77 100644 --- a/Documentation/networking/netdev-features.txt +++ b/Documentation/networking/netdev-features.txt | |||
@@ -164,4 +164,4 @@ read the CRC recorded by the NIC on receipt of the packet. | |||
164 | This requests that the NIC receive all possible frames, including errored | 164 | This requests that the NIC receive all possible frames, including errored |
165 | frames (such as bad FCS, etc). This can be helpful when sniffing a link with | 165 | frames (such as bad FCS, etc). This can be helpful when sniffing a link with |
166 | bad packets on it. Some NICs may receive more packets if also put into normal | 166 | bad packets on it. Some NICs may receive more packets if also put into normal |
167 | PROMISC mdoe. | 167 | PROMISC mode. |
diff --git a/Documentation/networking/vxlan.txt b/Documentation/networking/vxlan.txt index 5b34b762d7d5..6d993510f091 100644 --- a/Documentation/networking/vxlan.txt +++ b/Documentation/networking/vxlan.txt | |||
@@ -32,7 +32,7 @@ no entry is in the forwarding table. | |||
32 | # ip link delete vxlan0 | 32 | # ip link delete vxlan0 |
33 | 33 | ||
34 | 3. Show vxlan info | 34 | 3. Show vxlan info |
35 | # ip -d show vxlan0 | 35 | # ip -d link show vxlan0 |
36 | 36 | ||
37 | It is possible to create, destroy and display the vxlan | 37 | It is possible to create, destroy and display the vxlan |
38 | forwarding table using the new bridge command. | 38 | forwarding table using the new bridge command. |
@@ -41,7 +41,7 @@ forwarding table using the new bridge command. | |||
41 | # bridge fdb add to 00:17:42:8a:b4:05 dst 192.19.0.2 dev vxlan0 | 41 | # bridge fdb add to 00:17:42:8a:b4:05 dst 192.19.0.2 dev vxlan0 |
42 | 42 | ||
43 | 2. Delete forwarding table entry | 43 | 2. Delete forwarding table entry |
44 | # bridge fdb delete 00:17:42:8a:b4:05 | 44 | # bridge fdb delete 00:17:42:8a:b4:05 dev vxlan0 |
45 | 45 | ||
46 | 3. Show forwarding table | 46 | 3. Show forwarding table |
47 | # bridge fdb show dev vxlan0 | 47 | # bridge fdb show dev vxlan0 |
diff --git a/Documentation/power/pm_qos_interface.txt b/Documentation/power/pm_qos_interface.txt index 17e130a80347..79a2a58425ee 100644 --- a/Documentation/power/pm_qos_interface.txt +++ b/Documentation/power/pm_qos_interface.txt | |||
@@ -99,7 +99,7 @@ reading the aggregated value does not require any locking mechanism. | |||
99 | 99 | ||
100 | From kernel mode the use of this interface is the following: | 100 | From kernel mode the use of this interface is the following: |
101 | 101 | ||
102 | int dev_pm_qos_add_request(device, handle, value): | 102 | int dev_pm_qos_add_request(device, handle, type, value): |
103 | Will insert an element into the list for that identified device with the | 103 | Will insert an element into the list for that identified device with the |
104 | target value. Upon change to this list the new target is recomputed and any | 104 | target value. Upon change to this list the new target is recomputed and any |
105 | registered notifiers are called only if the target value is now different. | 105 | registered notifiers are called only if the target value is now different. |
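As a minimal, hedged sketch of the updated signature in use (the request type constant DEV_PM_QOS_LATENCY and the mydev_* names are assumptions made for illustration):

	#include <linux/device.h>
	#include <linux/pm_qos.h>

	static struct dev_pm_qos_request mydev_latency_req;

	static int mydev_add_latency_request(struct device *dev, s32 value)
	{
		/* "type" selects which per-device constraint list is used */
		return dev_pm_qos_add_request(dev, &mydev_latency_req,
					      DEV_PM_QOS_LATENCY, value);
	}

	static void mydev_drop_latency_request(void)
	{
		dev_pm_qos_remove_request(&mydev_latency_req);
	}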
diff --git a/MAINTAINERS b/MAINTAINERS index 59203e77ce9e..9386a63ea8f6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -526,17 +526,17 @@ F: drivers/video/geode/ | |||
526 | F: arch/x86/include/asm/geode.h | 526 | F: arch/x86/include/asm/geode.h |
527 | 527 | ||
528 | AMD IOMMU (AMD-VI) | 528 | AMD IOMMU (AMD-VI) |
529 | M: Joerg Roedel <joerg.roedel@amd.com> | 529 | M: Joerg Roedel <joro@8bytes.org> |
530 | L: iommu@lists.linux-foundation.org | 530 | L: iommu@lists.linux-foundation.org |
531 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git | 531 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git |
532 | S: Supported | 532 | S: Maintained |
533 | F: drivers/iommu/amd_iommu*.[ch] | 533 | F: drivers/iommu/amd_iommu*.[ch] |
534 | F: include/linux/amd-iommu.h | 534 | F: include/linux/amd-iommu.h |
535 | 535 | ||
536 | AMD MICROCODE UPDATE SUPPORT | 536 | AMD MICROCODE UPDATE SUPPORT |
537 | M: Andreas Herrmann <andreas.herrmann3@amd.com> | 537 | M: Andreas Herrmann <herrmann.der.user@googlemail.com> |
538 | L: amd64-microcode@amd64.org | 538 | L: amd64-microcode@amd64.org |
539 | S: Supported | 539 | S: Maintained |
540 | F: arch/x86/kernel/microcode_amd.c | 540 | F: arch/x86/kernel/microcode_amd.c |
541 | 541 | ||
542 | AMS (Apple Motion Sensor) DRIVER | 542 | AMS (Apple Motion Sensor) DRIVER |
@@ -841,6 +841,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git | |||
841 | F: arch/arm/mach-sa1100/jornada720.c | 841 | F: arch/arm/mach-sa1100/jornada720.c |
842 | F: arch/arm/mach-sa1100/include/mach/jornada720.h | 842 | F: arch/arm/mach-sa1100/include/mach/jornada720.h |
843 | 843 | ||
844 | ARM/IGEP MACHINE SUPPORT | ||
845 | M: Enric Balletbo i Serra <eballetbo@gmail.com> | ||
846 | M: Javier Martinez Canillas <javier@dowhile0.org> | ||
847 | L: linux-omap@vger.kernel.org | ||
848 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
849 | S: Maintained | ||
850 | F: arch/arm/mach-omap2/board-igep0020.c | ||
851 | |||
844 | ARM/INCOME PXA270 SUPPORT | 852 | ARM/INCOME PXA270 SUPPORT |
845 | M: Marek Vasut <marek.vasut@gmail.com> | 853 | M: Marek Vasut <marek.vasut@gmail.com> |
846 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 854 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
@@ -2708,10 +2716,10 @@ F: include/linux/edac.h | |||
2708 | 2716 | ||
2709 | EDAC-AMD64 | 2717 | EDAC-AMD64 |
2710 | M: Doug Thompson <dougthompson@xmission.com> | 2718 | M: Doug Thompson <dougthompson@xmission.com> |
2711 | M: Borislav Petkov <borislav.petkov@amd.com> | 2719 | M: Borislav Petkov <bp@alien8.de> |
2712 | L: linux-edac@vger.kernel.org | 2720 | L: linux-edac@vger.kernel.org |
2713 | W: bluesmoke.sourceforge.net | 2721 | W: bluesmoke.sourceforge.net |
2714 | S: Supported | 2722 | S: Maintained |
2715 | F: drivers/edac/amd64_edac* | 2723 | F: drivers/edac/amd64_edac* |
2716 | 2724 | ||
2717 | EDAC-E752X | 2725 | EDAC-E752X |
@@ -3598,6 +3606,49 @@ F: drivers/hid/hid-hyperv.c | |||
3598 | F: drivers/net/hyperv/ | 3606 | F: drivers/net/hyperv/ |
3599 | F: drivers/staging/hv/ | 3607 | F: drivers/staging/hv/ |
3600 | 3608 | ||
3609 | I2C OVER PARALLEL PORT | ||
3610 | M: Jean Delvare <khali@linux-fr.org> | ||
3611 | L: linux-i2c@vger.kernel.org | ||
3612 | S: Maintained | ||
3613 | F: Documentation/i2c/busses/i2c-parport | ||
3614 | F: Documentation/i2c/busses/i2c-parport-light | ||
3615 | F: drivers/i2c/busses/i2c-parport.c | ||
3616 | F: drivers/i2c/busses/i2c-parport-light.c | ||
3617 | |||
3618 | I2C/SMBUS CONTROLLER DRIVERS FOR PC | ||
3619 | M: Jean Delvare <khali@linux-fr.org> | ||
3620 | L: linux-i2c@vger.kernel.org | ||
3621 | S: Maintained | ||
3622 | F: Documentation/i2c/busses/i2c-ali1535 | ||
3623 | F: Documentation/i2c/busses/i2c-ali1563 | ||
3624 | F: Documentation/i2c/busses/i2c-ali15x3 | ||
3625 | F: Documentation/i2c/busses/i2c-amd756 | ||
3626 | F: Documentation/i2c/busses/i2c-amd8111 | ||
3627 | F: Documentation/i2c/busses/i2c-i801 | ||
3628 | F: Documentation/i2c/busses/i2c-nforce2 | ||
3629 | F: Documentation/i2c/busses/i2c-piix4 | ||
3630 | F: Documentation/i2c/busses/i2c-sis5595 | ||
3631 | F: Documentation/i2c/busses/i2c-sis630 | ||
3632 | F: Documentation/i2c/busses/i2c-sis96x | ||
3633 | F: Documentation/i2c/busses/i2c-via | ||
3634 | F: Documentation/i2c/busses/i2c-viapro | ||
3635 | F: drivers/i2c/busses/i2c-ali1535.c | ||
3636 | F: drivers/i2c/busses/i2c-ali1563.c | ||
3637 | F: drivers/i2c/busses/i2c-ali15x3.c | ||
3638 | F: drivers/i2c/busses/i2c-amd756.c | ||
3639 | F: drivers/i2c/busses/i2c-amd756-s4882.c | ||
3640 | F: drivers/i2c/busses/i2c-amd8111.c | ||
3641 | F: drivers/i2c/busses/i2c-i801.c | ||
3642 | F: drivers/i2c/busses/i2c-isch.c | ||
3643 | F: drivers/i2c/busses/i2c-nforce2.c | ||
3644 | F: drivers/i2c/busses/i2c-nforce2-s4985.c | ||
3645 | F: drivers/i2c/busses/i2c-piix4.c | ||
3646 | F: drivers/i2c/busses/i2c-sis5595.c | ||
3647 | F: drivers/i2c/busses/i2c-sis630.c | ||
3648 | F: drivers/i2c/busses/i2c-sis96x.c | ||
3649 | F: drivers/i2c/busses/i2c-via.c | ||
3650 | F: drivers/i2c/busses/i2c-viapro.c | ||
3651 | |||
3601 | I2C/SMBUS STUB DRIVER | 3652 | I2C/SMBUS STUB DRIVER |
3602 | M: "Mark M. Hoffman" <mhoffman@lightlink.com> | 3653 | M: "Mark M. Hoffman" <mhoffman@lightlink.com> |
3603 | L: linux-i2c@vger.kernel.org | 3654 | L: linux-i2c@vger.kernel.org |
@@ -3605,9 +3656,8 @@ S: Maintained | |||
3605 | F: drivers/i2c/busses/i2c-stub.c | 3656 | F: drivers/i2c/busses/i2c-stub.c |
3606 | 3657 | ||
3607 | I2C SUBSYSTEM | 3658 | I2C SUBSYSTEM |
3608 | M: "Jean Delvare (PC drivers, core)" <khali@linux-fr.org> | 3659 | M: Wolfram Sang <w.sang@pengutronix.de> |
3609 | M: "Ben Dooks (embedded platforms)" <ben-linux@fluff.org> | 3660 | M: "Ben Dooks (embedded platforms)" <ben-linux@fluff.org> |
3610 | M: "Wolfram Sang (embedded platforms)" <w.sang@pengutronix.de> | ||
3611 | L: linux-i2c@vger.kernel.org | 3661 | L: linux-i2c@vger.kernel.org |
3612 | W: http://i2c.wiki.kernel.org/ | 3662 | W: http://i2c.wiki.kernel.org/ |
3613 | T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/ | 3663 | T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/ |
@@ -3618,6 +3668,13 @@ F: drivers/i2c/ | |||
3618 | F: include/linux/i2c.h | 3668 | F: include/linux/i2c.h |
3619 | F: include/linux/i2c-*.h | 3669 | F: include/linux/i2c-*.h |
3620 | 3670 | ||
3671 | I2C-TAOS-EVM DRIVER | ||
3672 | M: Jean Delvare <khali@linux-fr.org> | ||
3673 | L: linux-i2c@vger.kernel.org | ||
3674 | S: Maintained | ||
3675 | F: Documentation/i2c/busses/i2c-taos-evm | ||
3676 | F: drivers/i2c/busses/i2c-taos-evm.c | ||
3677 | |||
3621 | I2C-TINY-USB DRIVER | 3678 | I2C-TINY-USB DRIVER |
3622 | M: Till Harbaum <till@harbaum.org> | 3679 | M: Till Harbaum <till@harbaum.org> |
3623 | L: linux-i2c@vger.kernel.org | 3680 | L: linux-i2c@vger.kernel.org |
@@ -3704,7 +3761,7 @@ S: Maintained | |||
3704 | F: drivers/platform/x86/ideapad-laptop.c | 3761 | F: drivers/platform/x86/ideapad-laptop.c |
3705 | 3762 | ||
3706 | IDE/ATAPI DRIVERS | 3763 | IDE/ATAPI DRIVERS |
3707 | M: Borislav Petkov <petkovbb@gmail.com> | 3764 | M: Borislav Petkov <bp@alien8.de> |
3708 | L: linux-ide@vger.kernel.org | 3765 | L: linux-ide@vger.kernel.org |
3709 | S: Maintained | 3766 | S: Maintained |
3710 | F: Documentation/cdrom/ide-cd | 3767 | F: Documentation/cdrom/ide-cd |
@@ -4231,8 +4288,8 @@ F: include/linux/lockd/ | |||
4231 | F: include/linux/sunrpc/ | 4288 | F: include/linux/sunrpc/ |
4232 | 4289 | ||
4233 | KERNEL VIRTUAL MACHINE (KVM) | 4290 | KERNEL VIRTUAL MACHINE (KVM) |
4234 | M: Avi Kivity <avi@redhat.com> | ||
4235 | M: Marcelo Tosatti <mtosatti@redhat.com> | 4291 | M: Marcelo Tosatti <mtosatti@redhat.com> |
4292 | M: Gleb Natapov <gleb@redhat.com> | ||
4236 | L: kvm@vger.kernel.org | 4293 | L: kvm@vger.kernel.org |
4237 | W: http://kvm.qumranet.com | 4294 | W: http://kvm.qumranet.com |
4238 | S: Supported | 4295 | S: Supported |
@@ -5364,7 +5421,7 @@ S: Maintained | |||
5364 | F: sound/drivers/opl4/ | 5421 | F: sound/drivers/opl4/ |
5365 | 5422 | ||
5366 | OPROFILE | 5423 | OPROFILE |
5367 | M: Robert Richter <robert.richter@amd.com> | 5424 | M: Robert Richter <rric@kernel.org> |
5368 | L: oprofile-list@lists.sf.net | 5425 | L: oprofile-list@lists.sf.net |
5369 | S: Maintained | 5426 | S: Maintained |
5370 | F: arch/*/include/asm/oprofile*.h | 5427 | F: arch/*/include/asm/oprofile*.h |
@@ -7210,6 +7267,14 @@ L: linux-xtensa@linux-xtensa.org | |||
7210 | S: Maintained | 7267 | S: Maintained |
7211 | F: arch/xtensa/ | 7268 | F: arch/xtensa/ |
7212 | 7269 | ||
7270 | THERMAL | ||
7271 | M: Zhang Rui <rui.zhang@intel.com> | ||
7272 | L: linux-pm@vger.kernel.org | ||
7273 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git | ||
7274 | S: Supported | ||
7275 | F: drivers/thermal/ | ||
7276 | F: include/linux/thermal.h | ||
7277 | |||
7213 | THINKPAD ACPI EXTRAS DRIVER | 7278 | THINKPAD ACPI EXTRAS DRIVER |
7214 | M: Henrique de Moraes Holschuh <ibm-acpi@hmh.eng.br> | 7279 | M: Henrique de Moraes Holschuh <ibm-acpi@hmh.eng.br> |
7215 | L: ibm-acpi-devel@lists.sourceforge.net | 7280 | L: ibm-acpi-devel@lists.sourceforge.net |
@@ -7887,13 +7952,6 @@ M: Roger Luethi <rl@hellgate.ch> | |||
7887 | S: Maintained | 7952 | S: Maintained |
7888 | F: drivers/net/ethernet/via/via-rhine.c | 7953 | F: drivers/net/ethernet/via/via-rhine.c |
7889 | 7954 | ||
7890 | VIAPRO SMBUS DRIVER | ||
7891 | M: Jean Delvare <khali@linux-fr.org> | ||
7892 | L: linux-i2c@vger.kernel.org | ||
7893 | S: Maintained | ||
7894 | F: Documentation/i2c/busses/i2c-viapro | ||
7895 | F: drivers/i2c/busses/i2c-viapro.c | ||
7896 | |||
7897 | VIA SD/MMC CARD CONTROLLER DRIVER | 7955 | VIA SD/MMC CARD CONTROLLER DRIVER |
7898 | M: Bruce Chang <brucechang@via.com.tw> | 7956 | M: Bruce Chang <brucechang@via.com.tw> |
7899 | M: Harald Welte <HaraldWelte@viatech.com> | 7957 | M: Harald Welte <HaraldWelte@viatech.com> |
@@ -8148,7 +8206,7 @@ F: drivers/platform/x86 | |||
8148 | 8206 | ||
8149 | X86 MCE INFRASTRUCTURE | 8207 | X86 MCE INFRASTRUCTURE |
8150 | M: Tony Luck <tony.luck@intel.com> | 8208 | M: Tony Luck <tony.luck@intel.com> |
8151 | M: Borislav Petkov <bp@amd64.org> | 8209 | M: Borislav Petkov <bp@alien8.de> |
8152 | L: linux-edac@vger.kernel.org | 8210 | L: linux-edac@vger.kernel.org |
8153 | S: Maintained | 8211 | S: Maintained |
8154 | F: arch/x86/kernel/cpu/mcheck/* | 8212 | F: arch/x86/kernel/cpu/mcheck/* |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 3 | 1 | VERSION = 3 |
2 | PATCHLEVEL = 7 | 2 | PATCHLEVEL = 7 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc5 | 4 | EXTRAVERSION = -rc7 |
5 | NAME = Terrified Chipmunk | 5 | NAME = Terrified Chipmunk |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 1e6956a90608..14db93e4c8a8 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c | |||
@@ -445,7 +445,7 @@ struct procfs_args { | |||
445 | * unhappy with OSF UFS. [CHECKME] | 445 | * unhappy with OSF UFS. [CHECKME] |
446 | */ | 446 | */ |
447 | static int | 447 | static int |
448 | osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags) | 448 | osf_ufs_mount(const char *dirname, struct ufs_args __user *args, int flags) |
449 | { | 449 | { |
450 | int retval; | 450 | int retval; |
451 | struct cdfs_args tmp; | 451 | struct cdfs_args tmp; |
@@ -465,7 +465,7 @@ osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags) | |||
465 | } | 465 | } |
466 | 466 | ||
467 | static int | 467 | static int |
468 | osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags) | 468 | osf_cdfs_mount(const char *dirname, struct cdfs_args __user *args, int flags) |
469 | { | 469 | { |
470 | int retval; | 470 | int retval; |
471 | struct cdfs_args tmp; | 471 | struct cdfs_args tmp; |
@@ -485,7 +485,7 @@ osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags) | |||
485 | } | 485 | } |
486 | 486 | ||
487 | static int | 487 | static int |
488 | osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags) | 488 | osf_procfs_mount(const char *dirname, struct procfs_args __user *args, int flags) |
489 | { | 489 | { |
490 | struct procfs_args tmp; | 490 | struct procfs_args tmp; |
491 | 491 | ||
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index ade7e924bef5..159e99737e31 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -904,6 +904,7 @@ config ARCH_NOMADIK | |||
904 | 904 | ||
905 | config PLAT_SPEAR | 905 | config PLAT_SPEAR |
906 | bool "ST SPEAr" | 906 | bool "ST SPEAr" |
907 | select ARCH_HAS_CPUFREQ | ||
907 | select ARCH_REQUIRE_GPIOLIB | 908 | select ARCH_REQUIRE_GPIOLIB |
908 | select ARM_AMBA | 909 | select ARM_AMBA |
909 | select CLKDEV_LOOKUP | 910 | select CLKDEV_LOOKUP |
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile index f2aa09eb658e..9137df539b61 100644 --- a/arch/arm/boot/Makefile +++ b/arch/arm/boot/Makefile | |||
@@ -33,7 +33,7 @@ ifeq ($(CONFIG_XIP_KERNEL),y) | |||
33 | 33 | ||
34 | $(obj)/xipImage: vmlinux FORCE | 34 | $(obj)/xipImage: vmlinux FORCE |
35 | $(call if_changed,objcopy) | 35 | $(call if_changed,objcopy) |
36 | $(kecho) ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))' | 36 | @$(kecho) ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))' |
37 | 37 | ||
38 | $(obj)/Image $(obj)/zImage: FORCE | 38 | $(obj)/Image $(obj)/zImage: FORCE |
39 | @echo 'Kernel configured for XIP (CONFIG_XIP_KERNEL=y)' | 39 | @echo 'Kernel configured for XIP (CONFIG_XIP_KERNEL=y)' |
@@ -48,14 +48,14 @@ $(obj)/xipImage: FORCE | |||
48 | 48 | ||
49 | $(obj)/Image: vmlinux FORCE | 49 | $(obj)/Image: vmlinux FORCE |
50 | $(call if_changed,objcopy) | 50 | $(call if_changed,objcopy) |
51 | $(kecho) ' Kernel: $@ is ready' | 51 | @$(kecho) ' Kernel: $@ is ready' |
52 | 52 | ||
53 | $(obj)/compressed/vmlinux: $(obj)/Image FORCE | 53 | $(obj)/compressed/vmlinux: $(obj)/Image FORCE |
54 | $(Q)$(MAKE) $(build)=$(obj)/compressed $@ | 54 | $(Q)$(MAKE) $(build)=$(obj)/compressed $@ |
55 | 55 | ||
56 | $(obj)/zImage: $(obj)/compressed/vmlinux FORCE | 56 | $(obj)/zImage: $(obj)/compressed/vmlinux FORCE |
57 | $(call if_changed,objcopy) | 57 | $(call if_changed,objcopy) |
58 | $(kecho) ' Kernel: $@ is ready' | 58 | @$(kecho) ' Kernel: $@ is ready' |
59 | 59 | ||
60 | endif | 60 | endif |
61 | 61 | ||
@@ -90,7 +90,7 @@ fi | |||
90 | $(obj)/uImage: $(obj)/zImage FORCE | 90 | $(obj)/uImage: $(obj)/zImage FORCE |
91 | @$(check_for_multiple_loadaddr) | 91 | @$(check_for_multiple_loadaddr) |
92 | $(call if_changed,uimage) | 92 | $(call if_changed,uimage) |
93 | $(kecho) ' Image $@ is ready' | 93 | @$(kecho) ' Image $@ is ready' |
94 | 94 | ||
95 | $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE | 95 | $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE |
96 | $(Q)$(MAKE) $(build)=$(obj)/bootp $@ | 96 | $(Q)$(MAKE) $(build)=$(obj)/bootp $@ |
@@ -98,7 +98,7 @@ $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE | |||
98 | 98 | ||
99 | $(obj)/bootpImage: $(obj)/bootp/bootp FORCE | 99 | $(obj)/bootpImage: $(obj)/bootp/bootp FORCE |
100 | $(call if_changed,objcopy) | 100 | $(call if_changed,objcopy) |
101 | $(kecho) ' Kernel: $@ is ready' | 101 | @$(kecho) ' Kernel: $@ is ready' |
102 | 102 | ||
103 | PHONY += initrd FORCE | 103 | PHONY += initrd FORCE |
104 | initrd: | 104 | initrd: |
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi index b1497c7d7d68..df7f2270fc91 100644 --- a/arch/arm/boot/dts/tegra30.dtsi +++ b/arch/arm/boot/dts/tegra30.dtsi | |||
@@ -73,8 +73,8 @@ | |||
73 | 73 | ||
74 | pinmux: pinmux { | 74 | pinmux: pinmux { |
75 | compatible = "nvidia,tegra30-pinmux"; | 75 | compatible = "nvidia,tegra30-pinmux"; |
76 | reg = <0x70000868 0xd0 /* Pad control registers */ | 76 | reg = <0x70000868 0xd4 /* Pad control registers */ |
77 | 0x70003000 0x3e0>; /* Mux registers */ | 77 | 0x70003000 0x3e4>; /* Mux registers */ |
78 | }; | 78 | }; |
79 | 79 | ||
80 | serial@70006000 { | 80 | serial@70006000 { |
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c index 1e122bcd7845..3cee0e6ea7c3 100644 --- a/arch/arm/mach-at91/at91rm9200_devices.c +++ b/arch/arm/mach-at91/at91rm9200_devices.c | |||
@@ -68,7 +68,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) | |||
68 | 68 | ||
69 | /* Enable overcurrent notification */ | 69 | /* Enable overcurrent notification */ |
70 | for (i = 0; i < data->ports; i++) { | 70 | for (i = 0; i < data->ports; i++) { |
71 | if (data->overcurrent_pin[i]) | 71 | if (gpio_is_valid(data->overcurrent_pin[i])) |
72 | at91_set_gpio_input(data->overcurrent_pin[i], 1); | 72 | at91_set_gpio_input(data->overcurrent_pin[i], 1); |
73 | } | 73 | } |
74 | 74 | ||
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c index aa1e58729885..414bd855fb0c 100644 --- a/arch/arm/mach-at91/at91sam9260_devices.c +++ b/arch/arm/mach-at91/at91sam9260_devices.c | |||
@@ -72,7 +72,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) | |||
72 | 72 | ||
73 | /* Enable overcurrent notification */ | 73 | /* Enable overcurrent notification */ |
74 | for (i = 0; i < data->ports; i++) { | 74 | for (i = 0; i < data->ports; i++) { |
75 | if (data->overcurrent_pin[i]) | 75 | if (gpio_is_valid(data->overcurrent_pin[i])) |
76 | at91_set_gpio_input(data->overcurrent_pin[i], 1); | 76 | at91_set_gpio_input(data->overcurrent_pin[i], 1); |
77 | } | 77 | } |
78 | 78 | ||
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c index b9487696b7be..cd604aad8e96 100644 --- a/arch/arm/mach-at91/at91sam9261_devices.c +++ b/arch/arm/mach-at91/at91sam9261_devices.c | |||
@@ -72,7 +72,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) | |||
72 | 72 | ||
73 | /* Enable overcurrent notification */ | 73 | /* Enable overcurrent notification */ |
74 | for (i = 0; i < data->ports; i++) { | 74 | for (i = 0; i < data->ports; i++) { |
75 | if (data->overcurrent_pin[i]) | 75 | if (gpio_is_valid(data->overcurrent_pin[i])) |
76 | at91_set_gpio_input(data->overcurrent_pin[i], 1); | 76 | at91_set_gpio_input(data->overcurrent_pin[i], 1); |
77 | } | 77 | } |
78 | 78 | ||
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c index cb85da2eccea..9c61e59a2104 100644 --- a/arch/arm/mach-at91/at91sam9263_devices.c +++ b/arch/arm/mach-at91/at91sam9263_devices.c | |||
@@ -78,7 +78,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) | |||
78 | 78 | ||
79 | /* Enable overcurrent notification */ | 79 | /* Enable overcurrent notification */ |
80 | for (i = 0; i < data->ports; i++) { | 80 | for (i = 0; i < data->ports; i++) { |
81 | if (data->overcurrent_pin[i]) | 81 | if (gpio_is_valid(data->overcurrent_pin[i])) |
82 | at91_set_gpio_input(data->overcurrent_pin[i], 1); | 82 | at91_set_gpio_input(data->overcurrent_pin[i], 1); |
83 | } | 83 | } |
84 | 84 | ||
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c index b1596072dcc2..fcd233cb33d2 100644 --- a/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/arch/arm/mach-at91/at91sam9g45_devices.c | |||
@@ -1841,8 +1841,8 @@ static struct resource sha_resources[] = { | |||
1841 | .flags = IORESOURCE_MEM, | 1841 | .flags = IORESOURCE_MEM, |
1842 | }, | 1842 | }, |
1843 | [1] = { | 1843 | [1] = { |
1844 | .start = AT91SAM9G45_ID_AESTDESSHA, | 1844 | .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, |
1845 | .end = AT91SAM9G45_ID_AESTDESSHA, | 1845 | .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, |
1846 | .flags = IORESOURCE_IRQ, | 1846 | .flags = IORESOURCE_IRQ, |
1847 | }, | 1847 | }, |
1848 | }; | 1848 | }; |
@@ -1874,8 +1874,8 @@ static struct resource tdes_resources[] = { | |||
1874 | .flags = IORESOURCE_MEM, | 1874 | .flags = IORESOURCE_MEM, |
1875 | }, | 1875 | }, |
1876 | [1] = { | 1876 | [1] = { |
1877 | .start = AT91SAM9G45_ID_AESTDESSHA, | 1877 | .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, |
1878 | .end = AT91SAM9G45_ID_AESTDESSHA, | 1878 | .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, |
1879 | .flags = IORESOURCE_IRQ, | 1879 | .flags = IORESOURCE_IRQ, |
1880 | }, | 1880 | }, |
1881 | }; | 1881 | }; |
@@ -1910,8 +1910,8 @@ static struct resource aes_resources[] = { | |||
1910 | .flags = IORESOURCE_MEM, | 1910 | .flags = IORESOURCE_MEM, |
1911 | }, | 1911 | }, |
1912 | [1] = { | 1912 | [1] = { |
1913 | .start = AT91SAM9G45_ID_AESTDESSHA, | 1913 | .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, |
1914 | .end = AT91SAM9G45_ID_AESTDESSHA, | 1914 | .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, |
1915 | .flags = IORESOURCE_IRQ, | 1915 | .flags = IORESOURCE_IRQ, |
1916 | }, | 1916 | }, |
1917 | }; | 1917 | }; |
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c index cd0c8b1e1ecf..14e9947bad6e 100644 --- a/arch/arm/mach-davinci/dm644x.c +++ b/arch/arm/mach-davinci/dm644x.c | |||
@@ -713,8 +713,7 @@ static int dm644x_venc_setup_clock(enum vpbe_enc_timings_type type, | |||
713 | break; | 713 | break; |
714 | case VPBE_ENC_CUSTOM_TIMINGS: | 714 | case VPBE_ENC_CUSTOM_TIMINGS: |
715 | if (pclock <= 27000000) { | 715 | if (pclock <= 27000000) { |
716 | v |= DM644X_VPSS_MUXSEL_PLL2_MODE | | 716 | v |= DM644X_VPSS_DACCLKEN; |
717 | DM644X_VPSS_DACCLKEN; | ||
718 | writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL)); | 717 | writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL)); |
719 | } else { | 718 | } else { |
720 | /* | 719 | /* |
diff --git a/arch/arm/mach-exynos/dma.c b/arch/arm/mach-exynos/dma.c index 21d568b3b149..87e07d6fc615 100644 --- a/arch/arm/mach-exynos/dma.c +++ b/arch/arm/mach-exynos/dma.c | |||
@@ -275,6 +275,9 @@ static int __init exynos_dma_init(void) | |||
275 | exynos_pdma1_pdata.nr_valid_peri = | 275 | exynos_pdma1_pdata.nr_valid_peri = |
276 | ARRAY_SIZE(exynos4210_pdma1_peri); | 276 | ARRAY_SIZE(exynos4210_pdma1_peri); |
277 | exynos_pdma1_pdata.peri_id = exynos4210_pdma1_peri; | 277 | exynos_pdma1_pdata.peri_id = exynos4210_pdma1_peri; |
278 | |||
279 | if (samsung_rev() == EXYNOS4210_REV_0) | ||
280 | exynos_mdma1_device.res.start = EXYNOS4_PA_S_MDMA1; | ||
278 | } else if (soc_is_exynos4212() || soc_is_exynos4412()) { | 281 | } else if (soc_is_exynos4212() || soc_is_exynos4412()) { |
279 | exynos_pdma0_pdata.nr_valid_peri = | 282 | exynos_pdma0_pdata.nr_valid_peri = |
280 | ARRAY_SIZE(exynos4212_pdma0_peri); | 283 | ARRAY_SIZE(exynos4212_pdma0_peri); |
diff --git a/arch/arm/mach-exynos/include/mach/map.h b/arch/arm/mach-exynos/include/mach/map.h index 8480849affb9..ed4da4544cd2 100644 --- a/arch/arm/mach-exynos/include/mach/map.h +++ b/arch/arm/mach-exynos/include/mach/map.h | |||
@@ -90,6 +90,7 @@ | |||
90 | 90 | ||
91 | #define EXYNOS4_PA_MDMA0 0x10810000 | 91 | #define EXYNOS4_PA_MDMA0 0x10810000 |
92 | #define EXYNOS4_PA_MDMA1 0x12850000 | 92 | #define EXYNOS4_PA_MDMA1 0x12850000 |
93 | #define EXYNOS4_PA_S_MDMA1 0x12840000 | ||
93 | #define EXYNOS4_PA_PDMA0 0x12680000 | 94 | #define EXYNOS4_PA_PDMA0 0x12680000 |
94 | #define EXYNOS4_PA_PDMA1 0x12690000 | 95 | #define EXYNOS4_PA_PDMA1 0x12690000 |
95 | #define EXYNOS5_PA_MDMA0 0x10800000 | 96 | #define EXYNOS5_PA_MDMA0 0x10800000 |
diff --git a/arch/arm/mach-highbank/system.c b/arch/arm/mach-highbank/system.c index 82c27230d4a9..86e37cd9376c 100644 --- a/arch/arm/mach-highbank/system.c +++ b/arch/arm/mach-highbank/system.c | |||
@@ -28,6 +28,7 @@ void highbank_restart(char mode, const char *cmd) | |||
28 | hignbank_set_pwr_soft_reset(); | 28 | hignbank_set_pwr_soft_reset(); |
29 | 29 | ||
30 | scu_power_mode(scu_base_addr, SCU_PM_POWEROFF); | 30 | scu_power_mode(scu_base_addr, SCU_PM_POWEROFF); |
31 | cpu_do_idle(); | 31 | while (1) |
32 | cpu_do_idle(); | ||
32 | } | 33 | } |
33 | 34 | ||
diff --git a/arch/arm/mach-imx/clk-gate2.c b/arch/arm/mach-imx/clk-gate2.c index 3c1b8ff9a0a6..cc49c7ae186e 100644 --- a/arch/arm/mach-imx/clk-gate2.c +++ b/arch/arm/mach-imx/clk-gate2.c | |||
@@ -112,7 +112,7 @@ struct clk *clk_register_gate2(struct device *dev, const char *name, | |||
112 | 112 | ||
113 | clk = clk_register(dev, &gate->hw); | 113 | clk = clk_register(dev, &gate->hw); |
114 | if (IS_ERR(clk)) | 114 | if (IS_ERR(clk)) |
115 | kfree(clk); | 115 | kfree(gate); |
116 | 116 | ||
117 | return clk; | 117 | return clk; |
118 | } | 118 | } |
diff --git a/arch/arm/mach-imx/ehci-imx25.c b/arch/arm/mach-imx/ehci-imx25.c index 412c583a24b0..576af7446952 100644 --- a/arch/arm/mach-imx/ehci-imx25.c +++ b/arch/arm/mach-imx/ehci-imx25.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #define MX25_H1_SIC_SHIFT 21 | 30 | #define MX25_H1_SIC_SHIFT 21 |
31 | #define MX25_H1_SIC_MASK (0x3 << MX25_H1_SIC_SHIFT) | 31 | #define MX25_H1_SIC_MASK (0x3 << MX25_H1_SIC_SHIFT) |
32 | #define MX25_H1_PP_BIT (1 << 18) | 32 | #define MX25_H1_PP_BIT (1 << 18) |
33 | #define MX25_H1_PM_BIT (1 << 8) | 33 | #define MX25_H1_PM_BIT (1 << 16) |
34 | #define MX25_H1_IPPUE_UP_BIT (1 << 7) | 34 | #define MX25_H1_IPPUE_UP_BIT (1 << 7) |
35 | #define MX25_H1_IPPUE_DOWN_BIT (1 << 6) | 35 | #define MX25_H1_IPPUE_DOWN_BIT (1 << 6) |
36 | #define MX25_H1_TLL_BIT (1 << 5) | 36 | #define MX25_H1_TLL_BIT (1 << 5) |
diff --git a/arch/arm/mach-imx/ehci-imx35.c b/arch/arm/mach-imx/ehci-imx35.c index 779e16eb65cb..293397852e4e 100644 --- a/arch/arm/mach-imx/ehci-imx35.c +++ b/arch/arm/mach-imx/ehci-imx35.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #define MX35_H1_SIC_SHIFT 21 | 30 | #define MX35_H1_SIC_SHIFT 21 |
31 | #define MX35_H1_SIC_MASK (0x3 << MX35_H1_SIC_SHIFT) | 31 | #define MX35_H1_SIC_MASK (0x3 << MX35_H1_SIC_SHIFT) |
32 | #define MX35_H1_PP_BIT (1 << 18) | 32 | #define MX35_H1_PP_BIT (1 << 18) |
33 | #define MX35_H1_PM_BIT (1 << 8) | 33 | #define MX35_H1_PM_BIT (1 << 16) |
34 | #define MX35_H1_IPPUE_UP_BIT (1 << 7) | 34 | #define MX35_H1_IPPUE_UP_BIT (1 << 7) |
35 | #define MX35_H1_IPPUE_DOWN_BIT (1 << 6) | 35 | #define MX35_H1_IPPUE_DOWN_BIT (1 << 6) |
36 | #define MX35_H1_TLL_BIT (1 << 5) | 36 | #define MX35_H1_TLL_BIT (1 << 5) |
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c index 48d5e41dfbfa..378590694447 100644 --- a/arch/arm/mach-omap2/board-igep0020.c +++ b/arch/arm/mach-omap2/board-igep0020.c | |||
@@ -580,6 +580,11 @@ static void __init igep_wlan_bt_init(void) | |||
580 | } else | 580 | } else |
581 | return; | 581 | return; |
582 | 582 | ||
583 | /* Make sure that the GPIO pins are muxed correctly */ | ||
584 | omap_mux_init_gpio(igep_wlan_bt_gpios[0].gpio, OMAP_PIN_OUTPUT); | ||
585 | omap_mux_init_gpio(igep_wlan_bt_gpios[1].gpio, OMAP_PIN_OUTPUT); | ||
586 | omap_mux_init_gpio(igep_wlan_bt_gpios[2].gpio, OMAP_PIN_OUTPUT); | ||
587 | |||
583 | err = gpio_request_array(igep_wlan_bt_gpios, | 588 | err = gpio_request_array(igep_wlan_bt_gpios, |
584 | ARRAY_SIZE(igep_wlan_bt_gpios)); | 589 | ARRAY_SIZE(igep_wlan_bt_gpios)); |
585 | if (err) { | 590 | if (err) { |
diff --git a/arch/arm/mach-omap2/clockdomains44xx_data.c b/arch/arm/mach-omap2/clockdomains44xx_data.c index b56d06b48782..95192a062d5d 100644 --- a/arch/arm/mach-omap2/clockdomains44xx_data.c +++ b/arch/arm/mach-omap2/clockdomains44xx_data.c | |||
@@ -359,7 +359,7 @@ static struct clockdomain iss_44xx_clkdm = { | |||
359 | .clkdm_offs = OMAP4430_CM2_CAM_CAM_CDOFFS, | 359 | .clkdm_offs = OMAP4430_CM2_CAM_CAM_CDOFFS, |
360 | .wkdep_srcs = iss_wkup_sleep_deps, | 360 | .wkdep_srcs = iss_wkup_sleep_deps, |
361 | .sleepdep_srcs = iss_wkup_sleep_deps, | 361 | .sleepdep_srcs = iss_wkup_sleep_deps, |
362 | .flags = CLKDM_CAN_HWSUP_SWSUP, | 362 | .flags = CLKDM_CAN_SWSUP, |
363 | }; | 363 | }; |
364 | 364 | ||
365 | static struct clockdomain l3_dss_44xx_clkdm = { | 365 | static struct clockdomain l3_dss_44xx_clkdm = { |
diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c index 48daac2581b4..84551f205e46 100644 --- a/arch/arm/mach-omap2/common-board-devices.c +++ b/arch/arm/mach-omap2/common-board-devices.c | |||
@@ -64,30 +64,36 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce, | |||
64 | struct spi_board_info *spi_bi = &ads7846_spi_board_info; | 64 | struct spi_board_info *spi_bi = &ads7846_spi_board_info; |
65 | int err; | 65 | int err; |
66 | 66 | ||
67 | err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown"); | 67 | /* |
68 | if (err) { | 68 | * If a board defines get_pendown_state() function, request the pendown |
69 | pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err); | 69 | * GPIO and set the GPIO debounce time. |
70 | return; | 70 | * If a board does not define the get_pendown_state() function, then |
71 | } | 71 | * the ads7846 driver will setup the pendown GPIO itself. |
72 | */ | ||
73 | if (board_pdata && board_pdata->get_pendown_state) { | ||
74 | err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown"); | ||
75 | if (err) { | ||
76 | pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err); | ||
77 | return; | ||
78 | } | ||
72 | 79 | ||
73 | if (gpio_debounce) | 80 | if (gpio_debounce) |
74 | gpio_set_debounce(gpio_pendown, gpio_debounce); | 81 | gpio_set_debounce(gpio_pendown, gpio_debounce); |
82 | |||
83 | gpio_export(gpio_pendown, 0); | ||
84 | } | ||
75 | 85 | ||
76 | spi_bi->bus_num = bus_num; | 86 | spi_bi->bus_num = bus_num; |
77 | spi_bi->irq = gpio_to_irq(gpio_pendown); | 87 | spi_bi->irq = gpio_to_irq(gpio_pendown); |
78 | 88 | ||
89 | ads7846_config.gpio_pendown = gpio_pendown; | ||
90 | |||
79 | if (board_pdata) { | 91 | if (board_pdata) { |
80 | board_pdata->gpio_pendown = gpio_pendown; | 92 | board_pdata->gpio_pendown = gpio_pendown; |
93 | board_pdata->gpio_pendown_debounce = gpio_debounce; | ||
81 | spi_bi->platform_data = board_pdata; | 94 | spi_bi->platform_data = board_pdata; |
82 | if (board_pdata->get_pendown_state) | ||
83 | gpio_export(gpio_pendown, 0); | ||
84 | } else { | ||
85 | ads7846_config.gpio_pendown = gpio_pendown; | ||
86 | } | 95 | } |
87 | 96 | ||
88 | if (!board_pdata || (board_pdata && !board_pdata->get_pendown_state)) | ||
89 | gpio_free(gpio_pendown); | ||
90 | |||
91 | spi_register_board_info(&ads7846_spi_board_info, 1); | 97 | spi_register_board_info(&ads7846_spi_board_info, 1); |
92 | } | 98 | } |
93 | #else | 99 | #else |
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index cba60e05e32e..c72b5a727720 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/of.h> | 19 | #include <linux/of.h> |
20 | #include <linux/pinctrl/machine.h> | 20 | #include <linux/pinctrl/machine.h> |
21 | #include <linux/platform_data/omap4-keypad.h> | 21 | #include <linux/platform_data/omap4-keypad.h> |
22 | #include <linux/platform_data/omap_ocp2scp.h> | ||
22 | 23 | ||
23 | #include <asm/mach-types.h> | 24 | #include <asm/mach-types.h> |
24 | #include <asm/mach/map.h> | 25 | #include <asm/mach/map.h> |
@@ -613,6 +614,83 @@ static void omap_init_vout(void) | |||
613 | static inline void omap_init_vout(void) {} | 614 | static inline void omap_init_vout(void) {} |
614 | #endif | 615 | #endif |
615 | 616 | ||
617 | #if defined(CONFIG_OMAP_OCP2SCP) || defined(CONFIG_OMAP_OCP2SCP_MODULE) | ||
618 | static int count_ocp2scp_devices(struct omap_ocp2scp_dev *ocp2scp_dev) | ||
619 | { | ||
620 | int cnt = 0; | ||
621 | |||
622 | while (ocp2scp_dev->drv_name != NULL) { | ||
623 | cnt++; | ||
624 | ocp2scp_dev++; | ||
625 | } | ||
626 | |||
627 | return cnt; | ||
628 | } | ||
629 | |||
630 | static void omap_init_ocp2scp(void) | ||
631 | { | ||
632 | struct omap_hwmod *oh; | ||
633 | struct platform_device *pdev; | ||
634 | int bus_id = -1, dev_cnt = 0, i; | ||
635 | struct omap_ocp2scp_dev *ocp2scp_dev; | ||
636 | const char *oh_name, *name; | ||
637 | struct omap_ocp2scp_platform_data *pdata; | ||
638 | |||
639 | if (!cpu_is_omap44xx()) | ||
640 | return; | ||
641 | |||
642 | oh_name = "ocp2scp_usb_phy"; | ||
643 | name = "omap-ocp2scp"; | ||
644 | |||
645 | oh = omap_hwmod_lookup(oh_name); | ||
646 | if (!oh) { | ||
647 | pr_err("%s: could not find omap_hwmod for %s\n", __func__, | ||
648 | oh_name); | ||
649 | return; | ||
650 | } | ||
651 | |||
652 | pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); | ||
653 | if (!pdata) { | ||
654 | pr_err("%s: No memory for ocp2scp pdata\n", __func__); | ||
655 | return; | ||
656 | } | ||
657 | |||
658 | ocp2scp_dev = oh->dev_attr; | ||
659 | dev_cnt = count_ocp2scp_devices(ocp2scp_dev); | ||
660 | |||
661 | if (!dev_cnt) { | ||
662 | pr_err("%s: No devices connected to ocp2scp\n", __func__); | ||
663 | kfree(pdata); | ||
664 | return; | ||
665 | } | ||
666 | |||
667 | pdata->devices = kzalloc(sizeof(struct omap_ocp2scp_dev *) | ||
668 | * dev_cnt, GFP_KERNEL); | ||
669 | if (!pdata->devices) { | ||
670 | pr_err("%s: No memory for ocp2scp pdata devices\n", __func__); | ||
671 | kfree(pdata); | ||
672 | return; | ||
673 | } | ||
674 | |||
675 | for (i = 0; i < dev_cnt; i++, ocp2scp_dev++) | ||
676 | pdata->devices[i] = ocp2scp_dev; | ||
677 | |||
678 | pdata->dev_cnt = dev_cnt; | ||
679 | |||
680 | pdev = omap_device_build(name, bus_id, oh, pdata, sizeof(*pdata), NULL, | ||
681 | 0, false); | ||
682 | if (IS_ERR(pdev)) { | ||
683 | pr_err("Could not build omap_device for %s %s\n", | ||
684 | name, oh_name); | ||
685 | kfree(pdata->devices); | ||
686 | kfree(pdata); | ||
687 | return; | ||
688 | } | ||
689 | } | ||
690 | #else | ||
691 | static inline void omap_init_ocp2scp(void) { } | ||
692 | #endif | ||
693 | |||
616 | /*-------------------------------------------------------------------------*/ | 694 | /*-------------------------------------------------------------------------*/ |
617 | 695 | ||
618 | static int __init omap2_init_devices(void) | 696 | static int __init omap2_init_devices(void) |
@@ -640,6 +718,7 @@ static int __init omap2_init_devices(void) | |||
640 | omap_init_sham(); | 718 | omap_init_sham(); |
641 | omap_init_aes(); | 719 | omap_init_aes(); |
642 | omap_init_vout(); | 720 | omap_init_vout(); |
721 | omap_init_ocp2scp(); | ||
643 | 722 | ||
644 | return 0; | 723 | return 0; |
645 | } | 724 | } |
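
omap_init_ocp2scp() packs the hwmod's sentinel-terminated dev_attr array into an omap_ocp2scp_platform_data and hands it to omap_device_build(). A rough, assumed sketch of how an ocp2scp bus driver could consume that platform data follows; the function name and the use of platform_device_register_resndata() are illustrative guesses, not taken from this patch.

#include <linux/platform_device.h>
#include <linux/platform_data/omap_ocp2scp.h>

/* Create one child platform device per entry in pdata->devices. */
static void example_ocp2scp_add_children(struct device *parent,
				struct omap_ocp2scp_platform_data *pdata)
{
	int i;

	for (i = 0; i < pdata->dev_cnt; i++) {
		struct omap_ocp2scp_dev *d = pdata->devices[i];
		unsigned int res_cnt = 0;

		/* The resource table is terminated by an all-zero entry. */
		while (d->res[res_cnt].flags)
			res_cnt++;

		/* Error handling omitted for brevity. */
		platform_device_register_resndata(parent, d->drv_name, i,
						  d->res, res_cnt, NULL, 0);
	}
}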
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index b969ab1d258b..87cc6d058de2 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -422,6 +422,38 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v) | |||
422 | } | 422 | } |
423 | 423 | ||
424 | /** | 424 | /** |
425 | * _wait_softreset_complete - wait for an OCP softreset to complete | ||
426 | * @oh: struct omap_hwmod * to wait on | ||
427 | * | ||
428 | * Wait until the IP block represented by @oh reports that its OCP | ||
429 | * softreset is complete. This can be triggered by software (see | ||
430 | * _ocp_softreset()) or by hardware upon returning from off-mode (one | ||
431 | * example is HSMMC). Waits for up to MAX_MODULE_SOFTRESET_WAIT | ||
432 | * microseconds. Returns the number of microseconds waited. | ||
433 | */ | ||
434 | static int _wait_softreset_complete(struct omap_hwmod *oh) | ||
435 | { | ||
436 | struct omap_hwmod_class_sysconfig *sysc; | ||
437 | u32 softrst_mask; | ||
438 | int c = 0; | ||
439 | |||
440 | sysc = oh->class->sysc; | ||
441 | |||
442 | if (sysc->sysc_flags & SYSS_HAS_RESET_STATUS) | ||
443 | omap_test_timeout((omap_hwmod_read(oh, sysc->syss_offs) | ||
444 | & SYSS_RESETDONE_MASK), | ||
445 | MAX_MODULE_SOFTRESET_WAIT, c); | ||
446 | else if (sysc->sysc_flags & SYSC_HAS_RESET_STATUS) { | ||
447 | softrst_mask = (0x1 << sysc->sysc_fields->srst_shift); | ||
448 | omap_test_timeout(!(omap_hwmod_read(oh, sysc->sysc_offs) | ||
449 | & softrst_mask), | ||
450 | MAX_MODULE_SOFTRESET_WAIT, c); | ||
451 | } | ||
452 | |||
453 | return c; | ||
454 | } | ||
455 | |||
456 | /** | ||
425 | * _set_dmadisable: set OCP_SYSCONFIG.DMADISABLE bit in @v | 457 | * _set_dmadisable: set OCP_SYSCONFIG.DMADISABLE bit in @v |
426 | * @oh: struct omap_hwmod * | 458 | * @oh: struct omap_hwmod * |
427 | * | 459 | * |
@@ -1282,6 +1314,18 @@ static void _enable_sysc(struct omap_hwmod *oh) | |||
1282 | if (!oh->class->sysc) | 1314 | if (!oh->class->sysc) |
1283 | return; | 1315 | return; |
1284 | 1316 | ||
1317 | /* | ||
1318 | * Wait until reset has completed; this is needed as the IP | ||
1319 | * block is reset automatically by hardware in some cases | ||
1320 | * (off-mode, for example), and the drivers require the | ||
1321 | * IP to be ready when they access it. | ||
1322 | */ | ||
1323 | if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET) | ||
1324 | _enable_optional_clocks(oh); | ||
1325 | _wait_softreset_complete(oh); | ||
1326 | if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET) | ||
1327 | _disable_optional_clocks(oh); | ||
1328 | |||
1285 | v = oh->_sysc_cache; | 1329 | v = oh->_sysc_cache; |
1286 | sf = oh->class->sysc->sysc_flags; | 1330 | sf = oh->class->sysc->sysc_flags; |
1287 | 1331 | ||
@@ -1804,7 +1848,7 @@ static int _am33xx_disable_module(struct omap_hwmod *oh) | |||
1804 | */ | 1848 | */ |
1805 | static int _ocp_softreset(struct omap_hwmod *oh) | 1849 | static int _ocp_softreset(struct omap_hwmod *oh) |
1806 | { | 1850 | { |
1807 | u32 v, softrst_mask; | 1851 | u32 v; |
1808 | int c = 0; | 1852 | int c = 0; |
1809 | int ret = 0; | 1853 | int ret = 0; |
1810 | 1854 | ||
@@ -1834,19 +1878,7 @@ static int _ocp_softreset(struct omap_hwmod *oh) | |||
1834 | if (oh->class->sysc->srst_udelay) | 1878 | if (oh->class->sysc->srst_udelay) |
1835 | udelay(oh->class->sysc->srst_udelay); | 1879 | udelay(oh->class->sysc->srst_udelay); |
1836 | 1880 | ||
1837 | if (oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS) | 1881 | c = _wait_softreset_complete(oh); |
1838 | omap_test_timeout((omap_hwmod_read(oh, | ||
1839 | oh->class->sysc->syss_offs) | ||
1840 | & SYSS_RESETDONE_MASK), | ||
1841 | MAX_MODULE_SOFTRESET_WAIT, c); | ||
1842 | else if (oh->class->sysc->sysc_flags & SYSC_HAS_RESET_STATUS) { | ||
1843 | softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift); | ||
1844 | omap_test_timeout(!(omap_hwmod_read(oh, | ||
1845 | oh->class->sysc->sysc_offs) | ||
1846 | & softrst_mask), | ||
1847 | MAX_MODULE_SOFTRESET_WAIT, c); | ||
1848 | } | ||
1849 | |||
1850 | if (c == MAX_MODULE_SOFTRESET_WAIT) | 1882 | if (c == MAX_MODULE_SOFTRESET_WAIT) |
1851 | pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n", | 1883 | pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n", |
1852 | oh->name, MAX_MODULE_SOFTRESET_WAIT); | 1884 | oh->name, MAX_MODULE_SOFTRESET_WAIT); |
@@ -2352,6 +2384,9 @@ static int __init _setup_reset(struct omap_hwmod *oh) | |||
2352 | if (oh->_state != _HWMOD_STATE_INITIALIZED) | 2384 | if (oh->_state != _HWMOD_STATE_INITIALIZED) |
2353 | return -EINVAL; | 2385 | return -EINVAL; |
2354 | 2386 | ||
2387 | if (oh->flags & HWMOD_EXT_OPT_MAIN_CLK) | ||
2388 | return -EPERM; | ||
2389 | |||
2355 | if (oh->rst_lines_cnt == 0) { | 2390 | if (oh->rst_lines_cnt == 0) { |
2356 | r = _enable(oh); | 2391 | r = _enable(oh); |
2357 | if (r) { | 2392 | if (r) { |
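
Both _wait_softreset_complete() and _ocp_softreset() above rely on omap_test_timeout() to bound the poll. That helper is assumed to expand to a simple 1 us busy-wait loop along the lines of the sketch below, which is why _wait_softreset_complete() can report the number of microseconds waited (capped at MAX_MODULE_SOFTRESET_WAIT). The example_* name is for illustration only.

#include <linux/delay.h>

/*
 * Assumed shape of the polling helper (sketch only): spin until cond
 * becomes true or timeout microseconds have elapsed, leaving the loop
 * count in index.
 */
#define example_test_timeout(cond, timeout, index)		\
({								\
	for ((index) = 0; (index) < (timeout); (index)++) {	\
		if (cond)					\
			break;					\
		udelay(1);					\
	}							\
})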
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 652d0285bd6d..0b1249e00398 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/io.h> | 21 | #include <linux/io.h> |
22 | #include <linux/platform_data/gpio-omap.h> | 22 | #include <linux/platform_data/gpio-omap.h> |
23 | #include <linux/power/smartreflex.h> | 23 | #include <linux/power/smartreflex.h> |
24 | #include <linux/platform_data/omap_ocp2scp.h> | ||
24 | 25 | ||
25 | #include <plat/omap_hwmod.h> | 26 | #include <plat/omap_hwmod.h> |
26 | #include <plat/i2c.h> | 27 | #include <plat/i2c.h> |
@@ -2125,6 +2126,14 @@ static struct omap_hwmod omap44xx_mcpdm_hwmod = { | |||
2125 | .name = "mcpdm", | 2126 | .name = "mcpdm", |
2126 | .class = &omap44xx_mcpdm_hwmod_class, | 2127 | .class = &omap44xx_mcpdm_hwmod_class, |
2127 | .clkdm_name = "abe_clkdm", | 2128 | .clkdm_name = "abe_clkdm", |
2129 | /* | ||
2130 | * It's suspected that the McPDM requires an off-chip main | ||
2131 | * functional clock, controlled via I2C. This IP block is | ||
2132 | * currently reset very early during boot, before I2C is | ||
2133 | * available, so it doesn't seem that we have any choice in | ||
2134 | * the kernel other than to avoid resetting it. | ||
2135 | */ | ||
2136 | .flags = HWMOD_EXT_OPT_MAIN_CLK, | ||
2128 | .mpu_irqs = omap44xx_mcpdm_irqs, | 2137 | .mpu_irqs = omap44xx_mcpdm_irqs, |
2129 | .sdma_reqs = omap44xx_mcpdm_sdma_reqs, | 2138 | .sdma_reqs = omap44xx_mcpdm_sdma_reqs, |
2130 | .main_clk = "mcpdm_fck", | 2139 | .main_clk = "mcpdm_fck", |
@@ -2681,6 +2690,32 @@ static struct omap_hwmod_class omap44xx_ocp2scp_hwmod_class = { | |||
2681 | .sysc = &omap44xx_ocp2scp_sysc, | 2690 | .sysc = &omap44xx_ocp2scp_sysc, |
2682 | }; | 2691 | }; |
2683 | 2692 | ||
2693 | /* ocp2scp dev_attr */ | ||
2694 | static struct resource omap44xx_usb_phy_and_pll_addrs[] = { | ||
2695 | { | ||
2696 | .name = "usb_phy", | ||
2697 | .start = 0x4a0ad080, | ||
2698 | .end = 0x4a0ae000, | ||
2699 | .flags = IORESOURCE_MEM, | ||
2700 | }, | ||
2701 | { | ||
2702 | /* XXX: Remove this once control module driver is in place */ | ||
2703 | .name = "ctrl_dev", | ||
2704 | .start = 0x4a002300, | ||
2705 | .end = 0x4a002303, | ||
2706 | .flags = IORESOURCE_MEM, | ||
2707 | }, | ||
2708 | { } | ||
2709 | }; | ||
2710 | |||
2711 | static struct omap_ocp2scp_dev ocp2scp_dev_attr[] = { | ||
2712 | { | ||
2713 | .drv_name = "omap-usb2", | ||
2714 | .res = omap44xx_usb_phy_and_pll_addrs, | ||
2715 | }, | ||
2716 | { } | ||
2717 | }; | ||
2718 | |||
2684 | /* ocp2scp_usb_phy */ | 2719 | /* ocp2scp_usb_phy */ |
2685 | static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = { | 2720 | static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = { |
2686 | .name = "ocp2scp_usb_phy", | 2721 | .name = "ocp2scp_usb_phy", |
@@ -2694,6 +2729,7 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = { | |||
2694 | .modulemode = MODULEMODE_HWCTRL, | 2729 | .modulemode = MODULEMODE_HWCTRL, |
2695 | }, | 2730 | }, |
2696 | }, | 2731 | }, |
2732 | .dev_attr = ocp2scp_dev_attr, | ||
2697 | }; | 2733 | }; |
2698 | 2734 | ||
2699 | /* | 2735 | /* |
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c index 635e109f5ad3..a256135d8e48 100644 --- a/arch/arm/mach-omap2/twl-common.c +++ b/arch/arm/mach-omap2/twl-common.c | |||
@@ -73,6 +73,7 @@ void __init omap4_pmic_init(const char *pmic_type, | |||
73 | { | 73 | { |
74 | /* PMIC part*/ | 74 | /* PMIC part*/ |
75 | omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE); | 75 | omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE); |
76 | omap_mux_init_signal("fref_clk0_out.sys_drm_msecure", OMAP_PIN_OUTPUT); | ||
76 | omap_pmic_init(1, 400, pmic_type, 7 + OMAP44XX_IRQ_GIC_START, pmic_data); | 77 | omap_pmic_init(1, 400, pmic_type, 7 + OMAP44XX_IRQ_GIC_START, pmic_data); |
77 | 78 | ||
78 | /* Register additional devices on i2c1 bus if needed */ | 79 | /* Register additional devices on i2c1 bus if needed */ |
@@ -366,7 +367,7 @@ static struct regulator_init_data omap4_clk32kg_idata = { | |||
366 | }; | 367 | }; |
367 | 368 | ||
368 | static struct regulator_consumer_supply omap4_vdd1_supply[] = { | 369 | static struct regulator_consumer_supply omap4_vdd1_supply[] = { |
369 | REGULATOR_SUPPLY("vcc", "mpu.0"), | 370 | REGULATOR_SUPPLY("vcc", "cpu0"), |
370 | }; | 371 | }; |
371 | 372 | ||
372 | static struct regulator_consumer_supply omap4_vdd2_supply[] = { | 373 | static struct regulator_consumer_supply omap4_vdd2_supply[] = { |
diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c index 880249b17012..75878c37959b 100644 --- a/arch/arm/mach-omap2/vc.c +++ b/arch/arm/mach-omap2/vc.c | |||
@@ -264,7 +264,7 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm) | |||
264 | 264 | ||
265 | if (initialized) { | 265 | if (initialized) { |
266 | if (voltdm->pmic->i2c_high_speed != i2c_high_speed) | 266 | if (voltdm->pmic->i2c_high_speed != i2c_high_speed) |
267 | pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).", | 267 | pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).\n", |
268 | __func__, voltdm->name, i2c_high_speed); | 268 | __func__, voltdm->name, i2c_high_speed); |
269 | return; | 269 | return; |
270 | } | 270 | } |
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c index 5ecbd17b5641..e2c6391863fe 100644 --- a/arch/arm/mach-pxa/hx4700.c +++ b/arch/arm/mach-pxa/hx4700.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/mfd/asic3.h> | 28 | #include <linux/mfd/asic3.h> |
29 | #include <linux/mtd/physmap.h> | 29 | #include <linux/mtd/physmap.h> |
30 | #include <linux/pda_power.h> | 30 | #include <linux/pda_power.h> |
31 | #include <linux/pwm.h> | ||
31 | #include <linux/pwm_backlight.h> | 32 | #include <linux/pwm_backlight.h> |
32 | #include <linux/regulator/driver.h> | 33 | #include <linux/regulator/driver.h> |
33 | #include <linux/regulator/gpio-regulator.h> | 34 | #include <linux/regulator/gpio-regulator.h> |
@@ -556,7 +557,7 @@ static struct platform_device hx4700_lcd = { | |||
556 | */ | 557 | */ |
557 | 558 | ||
558 | static struct platform_pwm_backlight_data backlight_data = { | 559 | static struct platform_pwm_backlight_data backlight_data = { |
559 | .pwm_id = 1, | 560 | .pwm_id = -1, /* Superseded by pwm_lookup */ |
560 | .max_brightness = 200, | 561 | .max_brightness = 200, |
561 | .dft_brightness = 100, | 562 | .dft_brightness = 100, |
562 | .pwm_period_ns = 30923, | 563 | .pwm_period_ns = 30923, |
@@ -571,6 +572,10 @@ static struct platform_device backlight = { | |||
571 | }, | 572 | }, |
572 | }; | 573 | }; |
573 | 574 | ||
575 | static struct pwm_lookup hx4700_pwm_lookup[] = { | ||
576 | PWM_LOOKUP("pxa27x-pwm.1", 0, "pwm-backlight", NULL), | ||
577 | }; | ||
578 | |||
574 | /* | 579 | /* |
575 | * USB "Transceiver" | 580 | * USB "Transceiver" |
576 | */ | 581 | */ |
@@ -872,6 +877,7 @@ static void __init hx4700_init(void) | |||
872 | pxa_set_stuart_info(NULL); | 877 | pxa_set_stuart_info(NULL); |
873 | 878 | ||
874 | platform_add_devices(devices, ARRAY_SIZE(devices)); | 879 | platform_add_devices(devices, ARRAY_SIZE(devices)); |
880 | pwm_add_table(hx4700_pwm_lookup, ARRAY_SIZE(hx4700_pwm_lookup)); | ||
875 | 881 | ||
876 | pxa_set_ficp_info(&ficp_info); | 882 | pxa_set_ficp_info(&ficp_info); |
877 | pxa27x_set_i2c_power_info(NULL); | 883 | pxa27x_set_i2c_power_info(NULL); |
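
With the pwm_lookup table registered in hx4700_init(), the backlight no longer identifies its PWM by a global pwm_id; the pwm-backlight driver is expected to resolve it by device name instead. A hedged consumer-side sketch (not part of this patch; the example_* names are invented) looks roughly like this:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pwm.h>

static int example_backlight_probe(struct device *dev)
{
	struct pwm_device *pwm;

	/* Matched against the PWM_LOOKUP() entry added above. */
	pwm = pwm_get(dev, NULL);
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	/* ~50% duty cycle at the 30923 ns period from the platform data. */
	pwm_config(pwm, 30923 / 2, 30923);
	pwm_enable(pwm);

	return 0;
}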
diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c index 438f02fe122a..842596d4d31e 100644 --- a/arch/arm/mach-pxa/spitz_pm.c +++ b/arch/arm/mach-pxa/spitz_pm.c | |||
@@ -86,10 +86,7 @@ static void spitz_discharge1(int on) | |||
86 | gpio_set_value(SPITZ_GPIO_LED_GREEN, on); | 86 | gpio_set_value(SPITZ_GPIO_LED_GREEN, on); |
87 | } | 87 | } |
88 | 88 | ||
89 | static unsigned long gpio18_config[] = { | 89 | static unsigned long gpio18_config = GPIO18_GPIO; |
90 | GPIO18_RDY, | ||
91 | GPIO18_GPIO, | ||
92 | }; | ||
93 | 90 | ||
94 | static void spitz_presuspend(void) | 91 | static void spitz_presuspend(void) |
95 | { | 92 | { |
@@ -112,7 +109,7 @@ static void spitz_presuspend(void) | |||
112 | PGSR3 &= ~SPITZ_GPIO_G3_STROBE_BIT; | 109 | PGSR3 &= ~SPITZ_GPIO_G3_STROBE_BIT; |
113 | PGSR2 |= GPIO_bit(SPITZ_GPIO_KEY_STROBE0); | 110 | PGSR2 |= GPIO_bit(SPITZ_GPIO_KEY_STROBE0); |
114 | 111 | ||
115 | pxa2xx_mfp_config(&gpio18_config[0], 1); | 112 | pxa2xx_mfp_config(&gpio18_config, 1); |
116 | gpio_request_one(18, GPIOF_OUT_INIT_HIGH, "Unknown"); | 113 | gpio_request_one(18, GPIOF_OUT_INIT_HIGH, "Unknown"); |
117 | gpio_free(18); | 114 | gpio_free(18); |
118 | 115 | ||
@@ -131,7 +128,6 @@ static void spitz_presuspend(void) | |||
131 | 128 | ||
132 | static void spitz_postsuspend(void) | 129 | static void spitz_postsuspend(void) |
133 | { | 130 | { |
134 | pxa2xx_mfp_config(&gpio18_config[1], 1); | ||
135 | } | 131 | } |
136 | 132 | ||
137 | static int spitz_should_wakeup(unsigned int resume_on_alarm) | 133 | static int spitz_should_wakeup(unsigned int resume_on_alarm) |
diff --git a/arch/arm/plat-omap/i2c.c b/arch/arm/plat-omap/i2c.c index a5683a84c6ee..6013831a043e 100644 --- a/arch/arm/plat-omap/i2c.c +++ b/arch/arm/plat-omap/i2c.c | |||
@@ -26,12 +26,14 @@ | |||
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
28 | #include <linux/i2c.h> | 28 | #include <linux/i2c.h> |
29 | #include <linux/i2c-omap.h> | ||
29 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
30 | #include <linux/err.h> | 31 | #include <linux/err.h> |
31 | #include <linux/clk.h> | 32 | #include <linux/clk.h> |
32 | 33 | ||
33 | #include <mach/irqs.h> | 34 | #include <mach/irqs.h> |
34 | #include <plat/i2c.h> | 35 | #include <plat/i2c.h> |
36 | #include <plat/omap-pm.h> | ||
35 | #include <plat/omap_device.h> | 37 | #include <plat/omap_device.h> |
36 | 38 | ||
37 | #define OMAP_I2C_SIZE 0x3f | 39 | #define OMAP_I2C_SIZE 0x3f |
@@ -127,6 +129,16 @@ static inline int omap1_i2c_add_bus(int bus_id) | |||
127 | 129 | ||
128 | 130 | ||
129 | #ifdef CONFIG_ARCH_OMAP2PLUS | 131 | #ifdef CONFIG_ARCH_OMAP2PLUS |
132 | /* | ||
133 | * XXX This function is a temporary compatibility wrapper - only | ||
134 | * needed until the I2C driver can be converted to call | ||
135 | * omap_pm_set_max_dev_wakeup_lat() and handle a return code. | ||
136 | */ | ||
137 | static void omap_pm_set_max_mpu_wakeup_lat_compat(struct device *dev, long t) | ||
138 | { | ||
139 | omap_pm_set_max_mpu_wakeup_lat(dev, t); | ||
140 | } | ||
141 | |||
130 | static inline int omap2_i2c_add_bus(int bus_id) | 142 | static inline int omap2_i2c_add_bus(int bus_id) |
131 | { | 143 | { |
132 | int l; | 144 | int l; |
@@ -158,6 +170,15 @@ static inline int omap2_i2c_add_bus(int bus_id) | |||
158 | dev_attr = (struct omap_i2c_dev_attr *)oh->dev_attr; | 170 | dev_attr = (struct omap_i2c_dev_attr *)oh->dev_attr; |
159 | pdata->flags = dev_attr->flags; | 171 | pdata->flags = dev_attr->flags; |
160 | 172 | ||
173 | /* | ||
174 | * When waiting for completion of an I2C transfer, we need to | ||
175 | * set a wake-up latency constraint for the MPU. This is to | ||
176 | * ensure a quick enough wakeup from idle when the transfer | ||
177 | * completes. | ||
178 | * Only OMAP3 has support for constraints. | ||
179 | */ | ||
180 | if (cpu_is_omap34xx()) | ||
181 | pdata->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat_compat; | ||
161 | pdev = omap_device_build(name, bus_id, oh, pdata, | 182 | pdev = omap_device_build(name, bus_id, oh, pdata, |
162 | sizeof(struct omap_i2c_bus_platform_data), | 183 | sizeof(struct omap_i2c_bus_platform_data), |
163 | NULL, 0, 0); | 184 | NULL, 0, 0); |
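
The set_mpu_wkup_lat callback wired up above is assumed to be used by the i2c-omap driver around each transfer: a latency bound is requested before waiting for completion and released (by passing -1) afterwards. A rough sketch of that assumed usage, with an invented function name:

/* Illustrative only; not the actual i2c-omap transfer path. */
static void example_i2c_xfer_with_latency(struct omap_i2c_bus_platform_data *pdata,
					  struct device *dev, long latency_ns)
{
	if (pdata->set_mpu_wkup_lat)
		pdata->set_mpu_wkup_lat(dev, latency_ns);	/* hold the constraint */

	/* ... start the transfer and wait for the completion interrupt ... */

	if (pdata->set_mpu_wkup_lat)
		pdata->set_mpu_wkup_lat(dev, -1);		/* release it again */
}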
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h index b3349f7b1a2c..1db029438022 100644 --- a/arch/arm/plat-omap/include/plat/omap_hwmod.h +++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h | |||
@@ -443,6 +443,11 @@ struct omap_hwmod_omap4_prcm { | |||
443 | * in order to complete the reset. Optional clocks will be disabled | 443 | * in order to complete the reset. Optional clocks will be disabled |
444 | * again after the reset. | 444 | * again after the reset. |
445 | * HWMOD_16BIT_REG: Module has 16bit registers | 445 | * HWMOD_16BIT_REG: Module has 16bit registers |
446 | * HWMOD_EXT_OPT_MAIN_CLK: The only main functional clock source for | ||
447 | * this IP block comes from an off-chip source and is not always | ||
448 | * enabled. This prevents the hwmod code from being able to | ||
449 | * enable and reset the IP block early. XXX Eventually it should | ||
450 | * be possible to query the clock framework for this information. | ||
446 | */ | 451 | */ |
447 | #define HWMOD_SWSUP_SIDLE (1 << 0) | 452 | #define HWMOD_SWSUP_SIDLE (1 << 0) |
448 | #define HWMOD_SWSUP_MSTANDBY (1 << 1) | 453 | #define HWMOD_SWSUP_MSTANDBY (1 << 1) |
@@ -453,6 +458,7 @@ struct omap_hwmod_omap4_prcm { | |||
453 | #define HWMOD_NO_IDLEST (1 << 6) | 458 | #define HWMOD_NO_IDLEST (1 << 6) |
454 | #define HWMOD_CONTROL_OPT_CLKS_IN_RESET (1 << 7) | 459 | #define HWMOD_CONTROL_OPT_CLKS_IN_RESET (1 << 7) |
455 | #define HWMOD_16BIT_REG (1 << 8) | 460 | #define HWMOD_16BIT_REG (1 << 8) |
461 | #define HWMOD_EXT_OPT_MAIN_CLK (1 << 9) | ||
456 | 462 | ||
457 | /* | 463 | /* |
458 | * omap_hwmod._int_flags definitions | 464 | * omap_hwmod._int_flags definitions |
diff --git a/arch/arm/tools/Makefile b/arch/arm/tools/Makefile index cd60a81163e9..32d05c8219dc 100644 --- a/arch/arm/tools/Makefile +++ b/arch/arm/tools/Makefile | |||
@@ -5,6 +5,6 @@ | |||
5 | # | 5 | # |
6 | 6 | ||
7 | include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types | 7 | include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types |
8 | $(kecho) ' Generating $@' | 8 | @$(kecho) ' Generating $@' |
9 | @mkdir -p $(dir $@) | 9 | @mkdir -p $(dir $@) |
10 | $(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; } | 10 | $(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; } |
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 54f6116697f7..d2f05a608274 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h | |||
@@ -222,7 +222,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot | |||
222 | extern void __iounmap(volatile void __iomem *addr); | 222 | extern void __iounmap(volatile void __iomem *addr); |
223 | 223 | ||
224 | #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY) | 224 | #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY) |
225 | #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_XN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) | 225 | #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) |
226 | #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) | 226 | #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) |
227 | 227 | ||
228 | #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) | 228 | #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) |
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 0f3b4581d925..75fd13d289b9 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h | |||
@@ -38,7 +38,8 @@ | |||
38 | #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) | 38 | #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) |
39 | #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) | 39 | #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) |
40 | #define PMD_SECT_NG (_AT(pmdval_t, 1) << 11) | 40 | #define PMD_SECT_NG (_AT(pmdval_t, 1) << 11) |
41 | #define PMD_SECT_XN (_AT(pmdval_t, 1) << 54) | 41 | #define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53) |
42 | #define PMD_SECT_UXN (_AT(pmdval_t, 1) << 54) | ||
42 | 43 | ||
43 | /* | 44 | /* |
44 | * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). | 45 | * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). |
@@ -57,7 +58,8 @@ | |||
57 | #define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ | 58 | #define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ |
58 | #define PTE_AF (_AT(pteval_t, 1) << 10) /* Access Flag */ | 59 | #define PTE_AF (_AT(pteval_t, 1) << 10) /* Access Flag */ |
59 | #define PTE_NG (_AT(pteval_t, 1) << 11) /* nG */ | 60 | #define PTE_NG (_AT(pteval_t, 1) << 11) /* nG */ |
60 | #define PTE_XN (_AT(pteval_t, 1) << 54) /* XN */ | 61 | #define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */ |
62 | #define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */ | ||
61 | 63 | ||
62 | /* | 64 | /* |
63 | * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). | 65 | * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). |
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 8960239be722..14aba2db6776 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h | |||
@@ -62,23 +62,23 @@ extern pgprot_t pgprot_default; | |||
62 | 62 | ||
63 | #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) | 63 | #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) |
64 | 64 | ||
65 | #define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_XN | PTE_RDONLY) | 65 | #define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) |
66 | #define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN) | 66 | #define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) |
67 | #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG) | 67 | #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN) |
68 | #define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY) | 68 | #define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) |
69 | #define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY) | 69 | #define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) |
70 | #define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY) | 70 | #define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) |
71 | #define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY) | 71 | #define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) |
72 | #define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_XN | PTE_DIRTY) | 72 | #define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY) |
73 | #define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_DIRTY) | 73 | #define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY) |
74 | 74 | ||
75 | #define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_XN | PTE_RDONLY) | 75 | #define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) |
76 | #define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN) | 76 | #define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) |
77 | #define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG) | 77 | #define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) |
78 | #define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY) | 78 | #define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) |
79 | #define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY) | 79 | #define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) |
80 | #define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY) | 80 | #define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) |
81 | #define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY) | 81 | #define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) |
82 | 82 | ||
83 | #endif /* __ASSEMBLY__ */ | 83 | #endif /* __ASSEMBLY__ */ |
84 | 84 | ||
@@ -130,10 +130,10 @@ extern struct page *empty_zero_page; | |||
130 | #define pte_young(pte) (pte_val(pte) & PTE_AF) | 130 | #define pte_young(pte) (pte_val(pte) & PTE_AF) |
131 | #define pte_special(pte) (pte_val(pte) & PTE_SPECIAL) | 131 | #define pte_special(pte) (pte_val(pte) & PTE_SPECIAL) |
132 | #define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY)) | 132 | #define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY)) |
133 | #define pte_exec(pte) (!(pte_val(pte) & PTE_XN)) | 133 | #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) |
134 | 134 | ||
135 | #define pte_present_exec_user(pte) \ | 135 | #define pte_present_exec_user(pte) \ |
136 | ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_XN)) == \ | 136 | ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == \ |
137 | (PTE_VALID | PTE_USER)) | 137 | (PTE_VALID | PTE_USER)) |
138 | 138 | ||
139 | #define PTE_BIT_FUNC(fn,op) \ | 139 | #define PTE_BIT_FUNC(fn,op) \ |
@@ -262,7 +262,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) | |||
262 | 262 | ||
263 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | 263 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
264 | { | 264 | { |
265 | const pteval_t mask = PTE_USER | PTE_XN | PTE_RDONLY; | 265 | const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY; |
266 | pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); | 266 | pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); |
267 | return pte; | 267 | return pte; |
268 | } | 268 | } |
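
The arm64 changes above replace the single XN bit with a PXN/UXN pair so kernel and user execute permission can be controlled independently. The mapping onto the old semantics, using the bit positions from the pgtable-hwdef.h hunk, can be illustrated like this (the EXAMPLE_* names are for illustration only):

#define EXAMPLE_PTE_PXN		(1UL << 53)	/* no execute at EL1 (kernel) */
#define EXAMPLE_PTE_UXN		(1UL << 54)	/* no execute at EL0 (user)   */

/* User-executable mapping: kernel may not execute it, user may. */
#define EXAMPLE_PROT_USER_EXEC		EXAMPLE_PTE_PXN
/* Kernel-executable mapping: user may not execute it, kernel may. */
#define EXAMPLE_PROT_KERNEL_EXEC	EXAMPLE_PTE_UXN
/* Never executable: both bits set, matching the old single PTE_XN. */
#define EXAMPLE_PROT_NO_EXEC		(EXAMPLE_PTE_PXN | EXAMPLE_PTE_UXN)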
diff --git a/arch/ia64/include/asm/device.h b/arch/ia64/include/asm/device.h index d05e78f6db94..f69c32ffbe6a 100644 --- a/arch/ia64/include/asm/device.h +++ b/arch/ia64/include/asm/device.h | |||
@@ -7,9 +7,6 @@ | |||
7 | #define _ASM_IA64_DEVICE_H | 7 | #define _ASM_IA64_DEVICE_H |
8 | 8 | ||
9 | struct dev_archdata { | 9 | struct dev_archdata { |
10 | #ifdef CONFIG_ACPI | ||
11 | void *acpi_handle; | ||
12 | #endif | ||
13 | #ifdef CONFIG_INTEL_IOMMU | 10 | #ifdef CONFIG_INTEL_IOMMU |
14 | void *iommu; /* hook for IOMMU specific extension */ | 11 | void *iommu; /* hook for IOMMU specific extension */ |
15 | #endif | 12 | #endif |
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 440578850ae5..e9682f5be343 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c | |||
@@ -633,6 +633,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int triggering, int polarity) | |||
633 | ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE : | 633 | ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE : |
634 | IOSAPIC_LEVEL); | 634 | IOSAPIC_LEVEL); |
635 | } | 635 | } |
636 | EXPORT_SYMBOL_GPL(acpi_register_gsi); | ||
636 | 637 | ||
637 | void acpi_unregister_gsi(u32 gsi) | 638 | void acpi_unregister_gsi(u32 gsi) |
638 | { | 639 | { |
@@ -644,6 +645,7 @@ void acpi_unregister_gsi(u32 gsi) | |||
644 | 645 | ||
645 | iosapic_unregister_intr(gsi); | 646 | iosapic_unregister_intr(gsi); |
646 | } | 647 | } |
648 | EXPORT_SYMBOL_GPL(acpi_unregister_gsi); | ||
647 | 649 | ||
648 | static int __init acpi_parse_fadt(struct acpi_table_header *table) | 650 | static int __init acpi_parse_fadt(struct acpi_table_header *table) |
649 | { | 651 | { |
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index acd5b68e8871..082e383c1b6f 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c | |||
@@ -637,7 +637,6 @@ mem_init (void) | |||
637 | 637 | ||
638 | high_memory = __va(max_low_pfn * PAGE_SIZE); | 638 | high_memory = __va(max_low_pfn * PAGE_SIZE); |
639 | 639 | ||
640 | reset_zone_present_pages(); | ||
641 | for_each_online_pgdat(pgdat) | 640 | for_each_online_pgdat(pgdat) |
642 | if (pgdat->bdata->node_bootmem_map) | 641 | if (pgdat->bdata->node_bootmem_map) |
643 | totalram_pages += free_all_bootmem_node(pgdat); | 642 | totalram_pages += free_all_bootmem_node(pgdat); |
diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h index 67e489d8d1bd..2df26b57c26a 100644 --- a/arch/m68k/include/asm/signal.h +++ b/arch/m68k/include/asm/signal.h | |||
@@ -41,7 +41,7 @@ struct k_sigaction { | |||
41 | static inline void sigaddset(sigset_t *set, int _sig) | 41 | static inline void sigaddset(sigset_t *set, int _sig) |
42 | { | 42 | { |
43 | asm ("bfset %0{%1,#1}" | 43 | asm ("bfset %0{%1,#1}" |
44 | : "+od" (*set) | 44 | : "+o" (*set) |
45 | : "id" ((_sig - 1) ^ 31) | 45 | : "id" ((_sig - 1) ^ 31) |
46 | : "cc"); | 46 | : "cc"); |
47 | } | 47 | } |
@@ -49,7 +49,7 @@ static inline void sigaddset(sigset_t *set, int _sig) | |||
49 | static inline void sigdelset(sigset_t *set, int _sig) | 49 | static inline void sigdelset(sigset_t *set, int _sig) |
50 | { | 50 | { |
51 | asm ("bfclr %0{%1,#1}" | 51 | asm ("bfclr %0{%1,#1}" |
52 | : "+od" (*set) | 52 | : "+o" (*set) |
53 | : "id" ((_sig - 1) ^ 31) | 53 | : "id" ((_sig - 1) ^ 31) |
54 | : "cc"); | 54 | : "cc"); |
55 | } | 55 | } |
@@ -65,7 +65,7 @@ static inline int __gen_sigismember(sigset_t *set, int _sig) | |||
65 | int ret; | 65 | int ret; |
66 | asm ("bfextu %1{%2,#1},%0" | 66 | asm ("bfextu %1{%2,#1},%0" |
67 | : "=d" (ret) | 67 | : "=d" (ret) |
68 | : "od" (*set), "id" ((_sig-1) ^ 31) | 68 | : "o" (*set), "id" ((_sig-1) ^ 31) |
69 | : "cc"); | 69 | : "cc"); |
70 | return ret; | 70 | return ret; |
71 | } | 71 | } |
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c index d38246e33ddb..9f883bf76953 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c +++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c | |||
@@ -30,6 +30,7 @@ | |||
30 | * measurement, and debugging facilities. | 30 | * measurement, and debugging facilities. |
31 | */ | 31 | */ |
32 | 32 | ||
33 | #include <linux/irqflags.h> | ||
33 | #include <asm/octeon/cvmx.h> | 34 | #include <asm/octeon/cvmx.h> |
34 | #include <asm/octeon/cvmx-l2c.h> | 35 | #include <asm/octeon/cvmx-l2c.h> |
35 | #include <asm/octeon/cvmx-spinlock.h> | 36 | #include <asm/octeon/cvmx-spinlock.h> |
diff --git a/arch/mips/fw/arc/misc.c b/arch/mips/fw/arc/misc.c index 7cf80ca2c1d2..f9f5307434c2 100644 --- a/arch/mips/fw/arc/misc.c +++ b/arch/mips/fw/arc/misc.c | |||
@@ -11,6 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/irqflags.h> | ||
14 | 15 | ||
15 | #include <asm/bcache.h> | 16 | #include <asm/bcache.h> |
16 | 17 | ||
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index 82ad35ce2b45..46ac73abd5ee 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h | |||
@@ -14,7 +14,6 @@ | |||
14 | #endif | 14 | #endif |
15 | 15 | ||
16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
17 | #include <linux/irqflags.h> | ||
18 | #include <linux/types.h> | 17 | #include <linux/types.h> |
19 | #include <asm/barrier.h> | 18 | #include <asm/barrier.h> |
20 | #include <asm/byteorder.h> /* sigh ... */ | 19 | #include <asm/byteorder.h> /* sigh ... */ |
@@ -44,6 +43,24 @@ | |||
44 | #define smp_mb__before_clear_bit() smp_mb__before_llsc() | 43 | #define smp_mb__before_clear_bit() smp_mb__before_llsc() |
45 | #define smp_mb__after_clear_bit() smp_llsc_mb() | 44 | #define smp_mb__after_clear_bit() smp_llsc_mb() |
46 | 45 | ||
46 | |||
47 | /* | ||
48 | * These are the "slower" versions of the functions and are in bitops.c. | ||
49 | * These functions call raw_local_irq_{save,restore}(). | ||
50 | */ | ||
51 | void __mips_set_bit(unsigned long nr, volatile unsigned long *addr); | ||
52 | void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr); | ||
53 | void __mips_change_bit(unsigned long nr, volatile unsigned long *addr); | ||
54 | int __mips_test_and_set_bit(unsigned long nr, | ||
55 | volatile unsigned long *addr); | ||
56 | int __mips_test_and_set_bit_lock(unsigned long nr, | ||
57 | volatile unsigned long *addr); | ||
58 | int __mips_test_and_clear_bit(unsigned long nr, | ||
59 | volatile unsigned long *addr); | ||
60 | int __mips_test_and_change_bit(unsigned long nr, | ||
61 | volatile unsigned long *addr); | ||
62 | |||
63 | |||
47 | /* | 64 | /* |
48 | * set_bit - Atomically set a bit in memory | 65 | * set_bit - Atomically set a bit in memory |
49 | * @nr: the bit to set | 66 | * @nr: the bit to set |
@@ -57,7 +74,7 @@ | |||
57 | static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | 74 | static inline void set_bit(unsigned long nr, volatile unsigned long *addr) |
58 | { | 75 | { |
59 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 76 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
60 | unsigned short bit = nr & SZLONG_MASK; | 77 | int bit = nr & SZLONG_MASK; |
61 | unsigned long temp; | 78 | unsigned long temp; |
62 | 79 | ||
63 | if (kernel_uses_llsc && R10000_LLSC_WAR) { | 80 | if (kernel_uses_llsc && R10000_LLSC_WAR) { |
@@ -92,17 +109,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | |||
92 | : "=&r" (temp), "+m" (*m) | 109 | : "=&r" (temp), "+m" (*m) |
93 | : "ir" (1UL << bit)); | 110 | : "ir" (1UL << bit)); |
94 | } while (unlikely(!temp)); | 111 | } while (unlikely(!temp)); |
95 | } else { | 112 | } else |
96 | volatile unsigned long *a = addr; | 113 | __mips_set_bit(nr, addr); |
97 | unsigned long mask; | ||
98 | unsigned long flags; | ||
99 | |||
100 | a += nr >> SZLONG_LOG; | ||
101 | mask = 1UL << bit; | ||
102 | raw_local_irq_save(flags); | ||
103 | *a |= mask; | ||
104 | raw_local_irq_restore(flags); | ||
105 | } | ||
106 | } | 114 | } |
107 | 115 | ||
108 | /* | 116 | /* |
@@ -118,7 +126,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | |||
118 | static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | 126 | static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) |
119 | { | 127 | { |
120 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 128 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
121 | unsigned short bit = nr & SZLONG_MASK; | 129 | int bit = nr & SZLONG_MASK; |
122 | unsigned long temp; | 130 | unsigned long temp; |
123 | 131 | ||
124 | if (kernel_uses_llsc && R10000_LLSC_WAR) { | 132 | if (kernel_uses_llsc && R10000_LLSC_WAR) { |
@@ -153,17 +161,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | |||
153 | : "=&r" (temp), "+m" (*m) | 161 | : "=&r" (temp), "+m" (*m) |
154 | : "ir" (~(1UL << bit))); | 162 | : "ir" (~(1UL << bit))); |
155 | } while (unlikely(!temp)); | 163 | } while (unlikely(!temp)); |
156 | } else { | 164 | } else |
157 | volatile unsigned long *a = addr; | 165 | __mips_clear_bit(nr, addr); |
158 | unsigned long mask; | ||
159 | unsigned long flags; | ||
160 | |||
161 | a += nr >> SZLONG_LOG; | ||
162 | mask = 1UL << bit; | ||
163 | raw_local_irq_save(flags); | ||
164 | *a &= ~mask; | ||
165 | raw_local_irq_restore(flags); | ||
166 | } | ||
167 | } | 166 | } |
168 | 167 | ||
169 | /* | 168 | /* |
@@ -191,7 +190,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad | |||
191 | */ | 190 | */ |
192 | static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | 191 | static inline void change_bit(unsigned long nr, volatile unsigned long *addr) |
193 | { | 192 | { |
194 | unsigned short bit = nr & SZLONG_MASK; | 193 | int bit = nr & SZLONG_MASK; |
195 | 194 | ||
196 | if (kernel_uses_llsc && R10000_LLSC_WAR) { | 195 | if (kernel_uses_llsc && R10000_LLSC_WAR) { |
197 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 196 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
@@ -220,17 +219,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | |||
220 | : "=&r" (temp), "+m" (*m) | 219 | : "=&r" (temp), "+m" (*m) |
221 | : "ir" (1UL << bit)); | 220 | : "ir" (1UL << bit)); |
222 | } while (unlikely(!temp)); | 221 | } while (unlikely(!temp)); |
223 | } else { | 222 | } else |
224 | volatile unsigned long *a = addr; | 223 | __mips_change_bit(nr, addr); |
225 | unsigned long mask; | ||
226 | unsigned long flags; | ||
227 | |||
228 | a += nr >> SZLONG_LOG; | ||
229 | mask = 1UL << bit; | ||
230 | raw_local_irq_save(flags); | ||
231 | *a ^= mask; | ||
232 | raw_local_irq_restore(flags); | ||
233 | } | ||
234 | } | 224 | } |
235 | 225 | ||
236 | /* | 226 | /* |
@@ -244,7 +234,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | |||
244 | static inline int test_and_set_bit(unsigned long nr, | 234 | static inline int test_and_set_bit(unsigned long nr, |
245 | volatile unsigned long *addr) | 235 | volatile unsigned long *addr) |
246 | { | 236 | { |
247 | unsigned short bit = nr & SZLONG_MASK; | 237 | int bit = nr & SZLONG_MASK; |
248 | unsigned long res; | 238 | unsigned long res; |
249 | 239 | ||
250 | smp_mb__before_llsc(); | 240 | smp_mb__before_llsc(); |
@@ -281,18 +271,8 @@ static inline int test_and_set_bit(unsigned long nr, | |||
281 | } while (unlikely(!res)); | 271 | } while (unlikely(!res)); |
282 | 272 | ||
283 | res = temp & (1UL << bit); | 273 | res = temp & (1UL << bit); |
284 | } else { | 274 | } else |
285 | volatile unsigned long *a = addr; | 275 | res = __mips_test_and_set_bit(nr, addr); |
286 | unsigned long mask; | ||
287 | unsigned long flags; | ||
288 | |||
289 | a += nr >> SZLONG_LOG; | ||
290 | mask = 1UL << bit; | ||
291 | raw_local_irq_save(flags); | ||
292 | res = (mask & *a); | ||
293 | *a |= mask; | ||
294 | raw_local_irq_restore(flags); | ||
295 | } | ||
296 | 276 | ||
297 | smp_llsc_mb(); | 277 | smp_llsc_mb(); |
298 | 278 | ||
@@ -310,7 +290,7 @@ static inline int test_and_set_bit(unsigned long nr, | |||
310 | static inline int test_and_set_bit_lock(unsigned long nr, | 290 | static inline int test_and_set_bit_lock(unsigned long nr, |
311 | volatile unsigned long *addr) | 291 | volatile unsigned long *addr) |
312 | { | 292 | { |
313 | unsigned short bit = nr & SZLONG_MASK; | 293 | int bit = nr & SZLONG_MASK; |
314 | unsigned long res; | 294 | unsigned long res; |
315 | 295 | ||
316 | if (kernel_uses_llsc && R10000_LLSC_WAR) { | 296 | if (kernel_uses_llsc && R10000_LLSC_WAR) { |
@@ -345,18 +325,8 @@ static inline int test_and_set_bit_lock(unsigned long nr, | |||
345 | } while (unlikely(!res)); | 325 | } while (unlikely(!res)); |
346 | 326 | ||
347 | res = temp & (1UL << bit); | 327 | res = temp & (1UL << bit); |
348 | } else { | 328 | } else |
349 | volatile unsigned long *a = addr; | 329 | res = __mips_test_and_set_bit_lock(nr, addr); |
350 | unsigned long mask; | ||
351 | unsigned long flags; | ||
352 | |||
353 | a += nr >> SZLONG_LOG; | ||
354 | mask = 1UL << bit; | ||
355 | raw_local_irq_save(flags); | ||
356 | res = (mask & *a); | ||
357 | *a |= mask; | ||
358 | raw_local_irq_restore(flags); | ||
359 | } | ||
360 | 330 | ||
361 | smp_llsc_mb(); | 331 | smp_llsc_mb(); |
362 | 332 | ||
@@ -373,7 +343,7 @@ static inline int test_and_set_bit_lock(unsigned long nr, | |||
373 | static inline int test_and_clear_bit(unsigned long nr, | 343 | static inline int test_and_clear_bit(unsigned long nr, |
374 | volatile unsigned long *addr) | 344 | volatile unsigned long *addr) |
375 | { | 345 | { |
376 | unsigned short bit = nr & SZLONG_MASK; | 346 | int bit = nr & SZLONG_MASK; |
377 | unsigned long res; | 347 | unsigned long res; |
378 | 348 | ||
379 | smp_mb__before_llsc(); | 349 | smp_mb__before_llsc(); |
@@ -428,18 +398,8 @@ static inline int test_and_clear_bit(unsigned long nr, | |||
428 | } while (unlikely(!res)); | 398 | } while (unlikely(!res)); |
429 | 399 | ||
430 | res = temp & (1UL << bit); | 400 | res = temp & (1UL << bit); |
431 | } else { | 401 | } else |
432 | volatile unsigned long *a = addr; | 402 | res = __mips_test_and_clear_bit(nr, addr); |
433 | unsigned long mask; | ||
434 | unsigned long flags; | ||
435 | |||
436 | a += nr >> SZLONG_LOG; | ||
437 | mask = 1UL << bit; | ||
438 | raw_local_irq_save(flags); | ||
439 | res = (mask & *a); | ||
440 | *a &= ~mask; | ||
441 | raw_local_irq_restore(flags); | ||
442 | } | ||
443 | 403 | ||
444 | smp_llsc_mb(); | 404 | smp_llsc_mb(); |
445 | 405 | ||
@@ -457,7 +417,7 @@ static inline int test_and_clear_bit(unsigned long nr, | |||
457 | static inline int test_and_change_bit(unsigned long nr, | 417 | static inline int test_and_change_bit(unsigned long nr, |
458 | volatile unsigned long *addr) | 418 | volatile unsigned long *addr) |
459 | { | 419 | { |
460 | unsigned short bit = nr & SZLONG_MASK; | 420 | int bit = nr & SZLONG_MASK; |
461 | unsigned long res; | 421 | unsigned long res; |
462 | 422 | ||
463 | smp_mb__before_llsc(); | 423 | smp_mb__before_llsc(); |
@@ -494,18 +454,8 @@ static inline int test_and_change_bit(unsigned long nr, | |||
494 | } while (unlikely(!res)); | 454 | } while (unlikely(!res)); |
495 | 455 | ||
496 | res = temp & (1UL << bit); | 456 | res = temp & (1UL << bit); |
497 | } else { | 457 | } else |
498 | volatile unsigned long *a = addr; | 458 | res = __mips_test_and_change_bit(nr, addr); |
499 | unsigned long mask; | ||
500 | unsigned long flags; | ||
501 | |||
502 | a += nr >> SZLONG_LOG; | ||
503 | mask = 1UL << bit; | ||
504 | raw_local_irq_save(flags); | ||
505 | res = (mask & *a); | ||
506 | *a ^= mask; | ||
507 | raw_local_irq_restore(flags); | ||
508 | } | ||
509 | 459 | ||
510 | smp_llsc_mb(); | 460 | smp_llsc_mb(); |
511 | 461 | ||
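
The declarations added to bitops.h move the interrupt-disabling fallbacks out of line; their bodies are expected to mirror the code this hunk deletes. A sketch of one of them, assumed to live in arch/mips/lib/bitops.c, is shown below; it is simply the removed set_bit() fallback behind a function call.

/* Sketch of the assumed out-of-line fallback (mirrors the removed inline code). */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	int bit = nr & SZLONG_MASK;
	unsigned long mask;
	unsigned long flags;

	a += nr >> SZLONG_LOG;
	mask = 1UL << bit;
	raw_local_irq_save(flags);
	*a |= mask;
	raw_local_irq_restore(flags);
}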
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h index 58277e0e9cd4..3c5d1464b7bd 100644 --- a/arch/mips/include/asm/compat.h +++ b/arch/mips/include/asm/compat.h | |||
@@ -290,7 +290,7 @@ struct compat_shmid64_ds { | |||
290 | 290 | ||
291 | static inline int is_compat_task(void) | 291 | static inline int is_compat_task(void) |
292 | { | 292 | { |
293 | return test_thread_flag(TIF_32BIT); | 293 | return test_thread_flag(TIF_32BIT_ADDR); |
294 | } | 294 | } |
295 | 295 | ||
296 | #endif /* _ASM_COMPAT_H */ | 296 | #endif /* _ASM_COMPAT_H */ |
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 29d9c23c20c7..ff2e0345e013 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/compiler.h> | 15 | #include <linux/compiler.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/types.h> | 17 | #include <linux/types.h> |
18 | #include <linux/irqflags.h> | ||
18 | 19 | ||
19 | #include <asm/addrspace.h> | 20 | #include <asm/addrspace.h> |
20 | #include <asm/bug.h> | 21 | #include <asm/bug.h> |
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h index 309cbcd6909c..9f3384c789d7 100644 --- a/arch/mips/include/asm/irqflags.h +++ b/arch/mips/include/asm/irqflags.h | |||
@@ -16,83 +16,13 @@ | |||
16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
17 | #include <asm/hazards.h> | 17 | #include <asm/hazards.h> |
18 | 18 | ||
19 | __asm__( | 19 | #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) |
20 | " .macro arch_local_irq_enable \n" | ||
21 | " .set push \n" | ||
22 | " .set reorder \n" | ||
23 | " .set noat \n" | ||
24 | #ifdef CONFIG_MIPS_MT_SMTC | ||
25 | " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n" | ||
26 | " ori $1, 0x400 \n" | ||
27 | " xori $1, 0x400 \n" | ||
28 | " mtc0 $1, $2, 1 \n" | ||
29 | #elif defined(CONFIG_CPU_MIPSR2) | ||
30 | " ei \n" | ||
31 | #else | ||
32 | " mfc0 $1,$12 \n" | ||
33 | " ori $1,0x1f \n" | ||
34 | " xori $1,0x1e \n" | ||
35 | " mtc0 $1,$12 \n" | ||
36 | #endif | ||
37 | " irq_enable_hazard \n" | ||
38 | " .set pop \n" | ||
39 | " .endm"); | ||
40 | 20 | ||
41 | extern void smtc_ipi_replay(void); | ||
42 | |||
43 | static inline void arch_local_irq_enable(void) | ||
44 | { | ||
45 | #ifdef CONFIG_MIPS_MT_SMTC | ||
46 | /* | ||
47 | * SMTC kernel needs to do a software replay of queued | ||
48 | * IPIs, at the cost of call overhead on each local_irq_enable() | ||
49 | */ | ||
50 | smtc_ipi_replay(); | ||
51 | #endif | ||
52 | __asm__ __volatile__( | ||
53 | "arch_local_irq_enable" | ||
54 | : /* no outputs */ | ||
55 | : /* no inputs */ | ||
56 | : "memory"); | ||
57 | } | ||
58 | |||
59 | |||
60 | /* | ||
61 | * For cli() we have to insert nops to make sure that the new value | ||
62 | * has actually arrived in the status register before the end of this | ||
63 | * macro. | ||
64 | * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs | ||
65 | * no nops at all. | ||
66 | */ | ||
67 | /* | ||
68 | * For TX49, operating only IE bit is not enough. | ||
69 | * | ||
70 | * If mfc0 $12 follows store and the mfc0 is last instruction of a | ||
71 | * page and fetching the next instruction causes TLB miss, the result | ||
72 | * of the mfc0 might wrongly contain EXL bit. | ||
73 | * | ||
74 | * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008 | ||
75 | * | ||
76 | * Workaround: mask EXL bit of the result or place a nop before mfc0. | ||
77 | */ | ||
78 | __asm__( | 21 | __asm__( |
79 | " .macro arch_local_irq_disable\n" | 22 | " .macro arch_local_irq_disable\n" |
80 | " .set push \n" | 23 | " .set push \n" |
81 | " .set noat \n" | 24 | " .set noat \n" |
82 | #ifdef CONFIG_MIPS_MT_SMTC | ||
83 | " mfc0 $1, $2, 1 \n" | ||
84 | " ori $1, 0x400 \n" | ||
85 | " .set noreorder \n" | ||
86 | " mtc0 $1, $2, 1 \n" | ||
87 | #elif defined(CONFIG_CPU_MIPSR2) | ||
88 | " di \n" | 25 | " di \n" |
89 | #else | ||
90 | " mfc0 $1,$12 \n" | ||
91 | " ori $1,0x1f \n" | ||
92 | " xori $1,0x1f \n" | ||
93 | " .set noreorder \n" | ||
94 | " mtc0 $1,$12 \n" | ||
95 | #endif | ||
96 | " irq_disable_hazard \n" | 26 | " irq_disable_hazard \n" |
97 | " .set pop \n" | 27 | " .set pop \n" |
98 | " .endm \n"); | 28 | " .endm \n"); |
@@ -106,46 +36,14 @@ static inline void arch_local_irq_disable(void) | |||
106 | : "memory"); | 36 | : "memory"); |
107 | } | 37 | } |
108 | 38 | ||
109 | __asm__( | ||
110 | " .macro arch_local_save_flags flags \n" | ||
111 | " .set push \n" | ||
112 | " .set reorder \n" | ||
113 | #ifdef CONFIG_MIPS_MT_SMTC | ||
114 | " mfc0 \\flags, $2, 1 \n" | ||
115 | #else | ||
116 | " mfc0 \\flags, $12 \n" | ||
117 | #endif | ||
118 | " .set pop \n" | ||
119 | " .endm \n"); | ||
120 | |||
121 | static inline unsigned long arch_local_save_flags(void) | ||
122 | { | ||
123 | unsigned long flags; | ||
124 | asm volatile("arch_local_save_flags %0" : "=r" (flags)); | ||
125 | return flags; | ||
126 | } | ||
127 | 39 | ||
128 | __asm__( | 40 | __asm__( |
129 | " .macro arch_local_irq_save result \n" | 41 | " .macro arch_local_irq_save result \n" |
130 | " .set push \n" | 42 | " .set push \n" |
131 | " .set reorder \n" | 43 | " .set reorder \n" |
132 | " .set noat \n" | 44 | " .set noat \n" |
133 | #ifdef CONFIG_MIPS_MT_SMTC | ||
134 | " mfc0 \\result, $2, 1 \n" | ||
135 | " ori $1, \\result, 0x400 \n" | ||
136 | " .set noreorder \n" | ||
137 | " mtc0 $1, $2, 1 \n" | ||
138 | " andi \\result, \\result, 0x400 \n" | ||
139 | #elif defined(CONFIG_CPU_MIPSR2) | ||
140 | " di \\result \n" | 45 | " di \\result \n" |
141 | " andi \\result, 1 \n" | 46 | " andi \\result, 1 \n" |
142 | #else | ||
143 | " mfc0 \\result, $12 \n" | ||
144 | " ori $1, \\result, 0x1f \n" | ||
145 | " xori $1, 0x1f \n" | ||
146 | " .set noreorder \n" | ||
147 | " mtc0 $1, $12 \n" | ||
148 | #endif | ||
149 | " irq_disable_hazard \n" | 47 | " irq_disable_hazard \n" |
150 | " .set pop \n" | 48 | " .set pop \n" |
151 | " .endm \n"); | 49 | " .endm \n"); |
@@ -160,61 +58,37 @@ static inline unsigned long arch_local_irq_save(void) | |||
160 | return flags; | 58 | return flags; |
161 | } | 59 | } |
162 | 60 | ||
61 | |||
163 | __asm__( | 62 | __asm__( |
164 | " .macro arch_local_irq_restore flags \n" | 63 | " .macro arch_local_irq_restore flags \n" |
165 | " .set push \n" | 64 | " .set push \n" |
166 | " .set noreorder \n" | 65 | " .set noreorder \n" |
167 | " .set noat \n" | 66 | " .set noat \n" |
168 | #ifdef CONFIG_MIPS_MT_SMTC | 67 | #if defined(CONFIG_IRQ_CPU) |
169 | "mfc0 $1, $2, 1 \n" | ||
170 | "andi \\flags, 0x400 \n" | ||
171 | "ori $1, 0x400 \n" | ||
172 | "xori $1, 0x400 \n" | ||
173 | "or \\flags, $1 \n" | ||
174 | "mtc0 \\flags, $2, 1 \n" | ||
175 | #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) | ||
176 | /* | 68 | /* |
177 | * Slow, but doesn't suffer from a relatively unlikely race | 69 | * Slow, but doesn't suffer from a relatively unlikely race |
178 | * condition we have had since day 1. | 70 | * condition we have had since day 1. |
179 | */ | 71 | */ |
180 | " beqz \\flags, 1f \n" | 72 | " beqz \\flags, 1f \n" |
181 | " di \n" | 73 | " di \n" |
182 | " ei \n" | 74 | " ei \n" |
183 | "1: \n" | 75 | "1: \n" |
184 | #elif defined(CONFIG_CPU_MIPSR2) | 76 | #else |
185 | /* | 77 | /* |
186 | * Fast, dangerous. Life is fun, life is good. | 78 | * Fast, dangerous. Life is fun, life is good. |
187 | */ | 79 | */ |
188 | " mfc0 $1, $12 \n" | 80 | " mfc0 $1, $12 \n" |
189 | " ins $1, \\flags, 0, 1 \n" | 81 | " ins $1, \\flags, 0, 1 \n" |
190 | " mtc0 $1, $12 \n" | 82 | " mtc0 $1, $12 \n" |
191 | #else | ||
192 | " mfc0 $1, $12 \n" | ||
193 | " andi \\flags, 1 \n" | ||
194 | " ori $1, 0x1f \n" | ||
195 | " xori $1, 0x1f \n" | ||
196 | " or \\flags, $1 \n" | ||
197 | " mtc0 \\flags, $12 \n" | ||
198 | #endif | 83 | #endif |
199 | " irq_disable_hazard \n" | 84 | " irq_disable_hazard \n" |
200 | " .set pop \n" | 85 | " .set pop \n" |
201 | " .endm \n"); | 86 | " .endm \n"); |
202 | 87 | ||
203 | |||
204 | static inline void arch_local_irq_restore(unsigned long flags) | 88 | static inline void arch_local_irq_restore(unsigned long flags) |
205 | { | 89 | { |
206 | unsigned long __tmp1; | 90 | unsigned long __tmp1; |
207 | 91 | ||
208 | #ifdef CONFIG_MIPS_MT_SMTC | ||
209 | /* | ||
210 | * SMTC kernel needs to do a software replay of queued | ||
211 | * IPIs, at the cost of branch and call overhead on each | ||
212 | * local_irq_restore() | ||
213 | */ | ||
214 | if (unlikely(!(flags & 0x0400))) | ||
215 | smtc_ipi_replay(); | ||
216 | #endif | ||
217 | |||
218 | __asm__ __volatile__( | 92 | __asm__ __volatile__( |
219 | "arch_local_irq_restore\t%0" | 93 | "arch_local_irq_restore\t%0" |
220 | : "=r" (__tmp1) | 94 | : "=r" (__tmp1) |
@@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags) | |||
232 | : "0" (flags) | 106 | : "0" (flags) |
233 | : "memory"); | 107 | : "memory"); |
234 | } | 108 | } |
109 | #else | ||
110 | /* Functions that require preempt_{dis,en}able() are in mips-atomic.c */ | ||
111 | void arch_local_irq_disable(void); | ||
112 | unsigned long arch_local_irq_save(void); | ||
113 | void arch_local_irq_restore(unsigned long flags); | ||
114 | void __arch_local_irq_restore(unsigned long flags); | ||
115 | #endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */ | ||
116 | |||
117 | |||
118 | __asm__( | ||
119 | " .macro arch_local_irq_enable \n" | ||
120 | " .set push \n" | ||
121 | " .set reorder \n" | ||
122 | " .set noat \n" | ||
123 | #ifdef CONFIG_MIPS_MT_SMTC | ||
124 | " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n" | ||
125 | " ori $1, 0x400 \n" | ||
126 | " xori $1, 0x400 \n" | ||
127 | " mtc0 $1, $2, 1 \n" | ||
128 | #elif defined(CONFIG_CPU_MIPSR2) | ||
129 | " ei \n" | ||
130 | #else | ||
131 | " mfc0 $1,$12 \n" | ||
132 | " ori $1,0x1f \n" | ||
133 | " xori $1,0x1e \n" | ||
134 | " mtc0 $1,$12 \n" | ||
135 | #endif | ||
136 | " irq_enable_hazard \n" | ||
137 | " .set pop \n" | ||
138 | " .endm"); | ||
139 | |||
140 | extern void smtc_ipi_replay(void); | ||
141 | |||
142 | static inline void arch_local_irq_enable(void) | ||
143 | { | ||
144 | #ifdef CONFIG_MIPS_MT_SMTC | ||
145 | /* | ||
146 | * SMTC kernel needs to do a software replay of queued | ||
147 | * IPIs, at the cost of call overhead on each local_irq_enable() | ||
148 | */ | ||
149 | smtc_ipi_replay(); | ||
150 | #endif | ||
151 | __asm__ __volatile__( | ||
152 | "arch_local_irq_enable" | ||
153 | : /* no outputs */ | ||
154 | : /* no inputs */ | ||
155 | : "memory"); | ||
156 | } | ||
157 | |||
158 | |||
159 | __asm__( | ||
160 | " .macro arch_local_save_flags flags \n" | ||
161 | " .set push \n" | ||
162 | " .set reorder \n" | ||
163 | #ifdef CONFIG_MIPS_MT_SMTC | ||
164 | " mfc0 \\flags, $2, 1 \n" | ||
165 | #else | ||
166 | " mfc0 \\flags, $12 \n" | ||
167 | #endif | ||
168 | " .set pop \n" | ||
169 | " .endm \n"); | ||
170 | |||
171 | static inline unsigned long arch_local_save_flags(void) | ||
172 | { | ||
173 | unsigned long flags; | ||
174 | asm volatile("arch_local_save_flags %0" : "=r" (flags)); | ||
175 | return flags; | ||
176 | } | ||
177 | |||
235 | 178 | ||
236 | static inline int arch_irqs_disabled_flags(unsigned long flags) | 179 | static inline int arch_irqs_disabled_flags(unsigned long flags) |
237 | { | 180 | { |
@@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags) | |||
245 | #endif | 188 | #endif |
246 | } | 189 | } |
247 | 190 | ||
248 | #endif | 191 | #endif /* #ifndef __ASSEMBLY__ */ |
249 | 192 | ||
250 | /* | 193 | /* |
251 | * Do the CPU's IRQ-state tracing from assembly code. | 194 | * Do the CPU's IRQ-state tracing from assembly code. |
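For context on the irqflags.h split above: the MIPSR2 fast paths stay inline, while the variants that need preempt protection move out of line into mips-atomic.c (shown further down). Callers are unchanged either way. A minimal kernel-context sketch of a typical caller follows; the counter and function name are invented for illustration, and only local_irq_save()/local_irq_restore() are the real kernel macros that end up in the arch_* routines declared here.

#include <linux/irqflags.h>

static unsigned int hits;               /* hypothetical counter shared with an IRQ handler */

static void record_hit(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* expands to arch_local_irq_save()          */
        hits++;                         /* read-modify-write with local IRQs masked  */
        local_irq_restore(flags);       /* expands to arch_local_irq_restore(flags)  */
}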
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 8debe9e91754..18806a52061c 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h | |||
@@ -112,12 +112,6 @@ register struct thread_info *__current_thread_info __asm__("$28"); | |||
112 | #define TIF_LOAD_WATCH 25 /* If set, load watch registers */ | 112 | #define TIF_LOAD_WATCH 25 /* If set, load watch registers */ |
113 | #define TIF_SYSCALL_TRACE 31 /* syscall trace active */ | 113 | #define TIF_SYSCALL_TRACE 31 /* syscall trace active */ |
114 | 114 | ||
115 | #ifdef CONFIG_MIPS32_O32 | ||
116 | #define TIF_32BIT TIF_32BIT_REGS | ||
117 | #elif defined(CONFIG_MIPS32_N32) | ||
118 | #define TIF_32BIT _TIF_32BIT_ADDR | ||
119 | #endif /* CONFIG_MIPS32_O32 */ | ||
120 | |||
121 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | 115 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) |
122 | #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) | 116 | #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) |
123 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) | 117 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) |
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index a53f8ec37aac..290dc6a1d7a3 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c | |||
@@ -79,7 +79,7 @@ static struct resource data_resource = { .name = "Kernel data", }; | |||
79 | void __init add_memory_region(phys_t start, phys_t size, long type) | 79 | void __init add_memory_region(phys_t start, phys_t size, long type) |
80 | { | 80 | { |
81 | int x = boot_mem_map.nr_map; | 81 | int x = boot_mem_map.nr_map; |
82 | struct boot_mem_map_entry *prev = boot_mem_map.map + x - 1; | 82 | int i; |
83 | 83 | ||
84 | /* Sanity check */ | 84 | /* Sanity check */ |
85 | if (start + size < start) { | 85 | if (start + size < start) { |
@@ -88,15 +88,29 @@ void __init add_memory_region(phys_t start, phys_t size, long type) | |||
88 | } | 88 | } |
89 | 89 | ||
90 | /* | 90 | /* |
91 | * Try to merge with previous entry if any. This is far less than | 91 | * Try to merge with existing entry, if any. |
92 | * perfect but is sufficient for most real world cases. | ||
93 | */ | 92 | */ |
94 | if (x && prev->addr + prev->size == start && prev->type == type) { | 93 | for (i = 0; i < boot_mem_map.nr_map; i++) { |
95 | prev->size += size; | 94 | struct boot_mem_map_entry *entry = boot_mem_map.map + i; |
95 | unsigned long top; | ||
96 | |||
97 | if (entry->type != type) | ||
98 | continue; | ||
99 | |||
100 | if (start + size < entry->addr) | ||
101 | continue; /* no overlap */ | ||
102 | |||
103 | if (entry->addr + entry->size < start) | ||
104 | continue; /* no overlap */ | ||
105 | |||
106 | top = max(entry->addr + entry->size, start + size); | ||
107 | entry->addr = min(entry->addr, start); | ||
108 | entry->size = top - entry->addr; | ||
109 | |||
96 | return; | 110 | return; |
97 | } | 111 | } |
98 | 112 | ||
99 | if (x == BOOT_MEM_MAP_MAX) { | 113 | if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) { |
100 | pr_err("Ooops! Too many entries in the memory map!\n"); | 114 | pr_err("Ooops! Too many entries in the memory map!\n"); |
101 | return; | 115 | return; |
102 | } | 116 | } |
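The new loop above replaces the old "merge with the previous entry only" heuristic with a full scan that coalesces any overlapping or adjacent region of the same type. A standalone sketch of that merge rule, with invented names (struct region, try_merge) and sample values so it can be checked outside the kernel:

#include <stdio.h>

struct region {
        unsigned long addr;
        unsigned long size;
};

/* Widen *e in place if [start, start + size) overlaps or touches it. */
static int try_merge(struct region *e, unsigned long start, unsigned long size)
{
        unsigned long top;

        if (start + size < e->addr || e->addr + e->size < start)
                return 0;                       /* disjoint, nothing to do */

        top = e->addr + e->size > start + size ? e->addr + e->size : start + size;
        e->addr = e->addr < start ? e->addr : start;
        e->size = top - e->addr;
        return 1;
}

int main(void)
{
        struct region r = { 0x1000, 0x1000 };   /* covers [0x1000, 0x2000) */

        if (try_merge(&r, 0x1800, 0x1000))      /* overlaps the tail       */
                printf("addr=%#lx size=%#lx\n", r.addr, r.size);   /* addr=0x1000 size=0x1800 */
        return 0;
}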
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index c4a82e841c73..eeddc58802e1 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile | |||
@@ -2,8 +2,9 @@ | |||
2 | # Makefile for MIPS-specific library files. | 2 | # Makefile for MIPS-specific library files. |
3 | # | 3 | # |
4 | 4 | ||
5 | lib-y += csum_partial.o delay.o memcpy.o memset.o \ | 5 | lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \ |
6 | strlen_user.o strncpy_user.o strnlen_user.o uncached.o | 6 | mips-atomic.o strlen_user.o strncpy_user.o \ |
7 | strnlen_user.o uncached.o | ||
7 | 8 | ||
8 | obj-y += iomap.o | 9 | obj-y += iomap.o |
9 | obj-$(CONFIG_PCI) += iomap-pci.o | 10 | obj-$(CONFIG_PCI) += iomap-pci.o |
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c new file mode 100644 index 000000000000..239a9c957b02 --- /dev/null +++ b/arch/mips/lib/bitops.c | |||
@@ -0,0 +1,179 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org) | ||
7 | * Copyright (c) 1999, 2000 Silicon Graphics, Inc. | ||
8 | */ | ||
9 | #include <linux/bitops.h> | ||
10 | #include <linux/irqflags.h> | ||
11 | #include <linux/export.h> | ||
12 | |||
13 | |||
14 | /** | ||
15 | * __mips_set_bit - Atomically set a bit in memory. This is called by | ||
16 | * set_bit() if it cannot find a faster solution. | ||
17 | * @nr: the bit to set | ||
18 | * @addr: the address to start counting from | ||
19 | */ | ||
20 | void __mips_set_bit(unsigned long nr, volatile unsigned long *addr) | ||
21 | { | ||
22 | volatile unsigned long *a = addr; | ||
23 | unsigned bit = nr & SZLONG_MASK; | ||
24 | unsigned long mask; | ||
25 | unsigned long flags; | ||
26 | |||
27 | a += nr >> SZLONG_LOG; | ||
28 | mask = 1UL << bit; | ||
29 | raw_local_irq_save(flags); | ||
30 | *a |= mask; | ||
31 | raw_local_irq_restore(flags); | ||
32 | } | ||
33 | EXPORT_SYMBOL(__mips_set_bit); | ||
34 | |||
35 | |||
36 | /** | ||
37 | * __mips_clear_bit - Clears a bit in memory. This is called by clear_bit() if | ||
38 | * it cannot find a faster solution. | ||
39 | * @nr: Bit to clear | ||
40 | * @addr: Address to start counting from | ||
41 | */ | ||
42 | void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr) | ||
43 | { | ||
44 | volatile unsigned long *a = addr; | ||
45 | unsigned bit = nr & SZLONG_MASK; | ||
46 | unsigned long mask; | ||
47 | unsigned long flags; | ||
48 | |||
49 | a += nr >> SZLONG_LOG; | ||
50 | mask = 1UL << bit; | ||
51 | raw_local_irq_save(flags); | ||
52 | *a &= ~mask; | ||
53 | raw_local_irq_restore(flags); | ||
54 | } | ||
55 | EXPORT_SYMBOL(__mips_clear_bit); | ||
56 | |||
57 | |||
58 | /** | ||
59 | * __mips_change_bit - Toggle a bit in memory. This is called by change_bit() | ||
60 | * if it cannot find a faster solution. | ||
61 | * @nr: Bit to change | ||
62 | * @addr: Address to start counting from | ||
63 | */ | ||
64 | void __mips_change_bit(unsigned long nr, volatile unsigned long *addr) | ||
65 | { | ||
66 | volatile unsigned long *a = addr; | ||
67 | unsigned bit = nr & SZLONG_MASK; | ||
68 | unsigned long mask; | ||
69 | unsigned long flags; | ||
70 | |||
71 | a += nr >> SZLONG_LOG; | ||
72 | mask = 1UL << bit; | ||
73 | raw_local_irq_save(flags); | ||
74 | *a ^= mask; | ||
75 | raw_local_irq_restore(flags); | ||
76 | } | ||
77 | EXPORT_SYMBOL(__mips_change_bit); | ||
78 | |||
79 | |||
80 | /** | ||
81 | * __mips_test_and_set_bit - Set a bit and return its old value. This is | ||
82 | * called by test_and_set_bit() if it cannot find a faster solution. | ||
83 | * @nr: Bit to set | ||
84 | * @addr: Address to count from | ||
85 | */ | ||
86 | int __mips_test_and_set_bit(unsigned long nr, | ||
87 | volatile unsigned long *addr) | ||
88 | { | ||
89 | volatile unsigned long *a = addr; | ||
90 | unsigned bit = nr & SZLONG_MASK; | ||
91 | unsigned long mask; | ||
92 | unsigned long flags; | ||
93 | unsigned long res; | ||
94 | |||
95 | a += nr >> SZLONG_LOG; | ||
96 | mask = 1UL << bit; | ||
97 | raw_local_irq_save(flags); | ||
98 | res = (mask & *a); | ||
99 | *a |= mask; | ||
100 | raw_local_irq_restore(flags); | ||
101 | return res; | ||
102 | } | ||
103 | EXPORT_SYMBOL(__mips_test_and_set_bit); | ||
104 | |||
105 | |||
106 | /** | ||
107 | * __mips_test_and_set_bit_lock - Set a bit and return its old value. This is | ||
108 | * called by test_and_set_bit_lock() if it cannot find a faster solution. | ||
109 | * @nr: Bit to set | ||
110 | * @addr: Address to count from | ||
111 | */ | ||
112 | int __mips_test_and_set_bit_lock(unsigned long nr, | ||
113 | volatile unsigned long *addr) | ||
114 | { | ||
115 | volatile unsigned long *a = addr; | ||
116 | unsigned bit = nr & SZLONG_MASK; | ||
117 | unsigned long mask; | ||
118 | unsigned long flags; | ||
119 | unsigned long res; | ||
120 | |||
121 | a += nr >> SZLONG_LOG; | ||
122 | mask = 1UL << bit; | ||
123 | raw_local_irq_save(flags); | ||
124 | res = (mask & *a); | ||
125 | *a |= mask; | ||
126 | raw_local_irq_restore(flags); | ||
127 | return res; | ||
128 | } | ||
129 | EXPORT_SYMBOL(__mips_test_and_set_bit_lock); | ||
130 | |||
131 | |||
132 | /** | ||
133 | * __mips_test_and_clear_bit - Clear a bit and return its old value. This is | ||
134 | * called by test_and_clear_bit() if it cannot find a faster solution. | ||
135 | * @nr: Bit to clear | ||
136 | * @addr: Address to count from | ||
137 | */ | ||
138 | int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) | ||
139 | { | ||
140 | volatile unsigned long *a = addr; | ||
141 | unsigned bit = nr & SZLONG_MASK; | ||
142 | unsigned long mask; | ||
143 | unsigned long flags; | ||
144 | unsigned long res; | ||
145 | |||
146 | a += nr >> SZLONG_LOG; | ||
147 | mask = 1UL << bit; | ||
148 | raw_local_irq_save(flags); | ||
149 | res = (mask & *a); | ||
150 | *a &= ~mask; | ||
151 | raw_local_irq_restore(flags); | ||
152 | return res; | ||
153 | } | ||
154 | EXPORT_SYMBOL(__mips_test_and_clear_bit); | ||
155 | |||
156 | |||
157 | /** | ||
158 | * __mips_test_and_change_bit - Change a bit and return its old value. This is | ||
159 | * called by test_and_change_bit() if it cannot find a faster solution. | ||
160 | * @nr: Bit to change | ||
161 | * @addr: Address to count from | ||
162 | */ | ||
163 | int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr) | ||
164 | { | ||
165 | volatile unsigned long *a = addr; | ||
166 | unsigned bit = nr & SZLONG_MASK; | ||
167 | unsigned long mask; | ||
168 | unsigned long flags; | ||
169 | unsigned long res; | ||
170 | |||
171 | a += nr >> SZLONG_LOG; | ||
172 | mask = 1UL << bit; | ||
173 | raw_local_irq_save(flags); | ||
174 | res = (mask & *a); | ||
175 | *a ^= mask; | ||
176 | raw_local_irq_restore(flags); | ||
177 | return res; | ||
178 | } | ||
179 | EXPORT_SYMBOL(__mips_test_and_change_bit); | ||
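All of the fallback helpers above share the same address arithmetic: the bit number is split into a word index (nr >> SZLONG_LOG) and a bit within that word (nr & SZLONG_MASK) before building the mask. A quick standalone check of that split, assuming 64-bit longs so SZLONG_LOG is 6 and SZLONG_MASK is 63; the values are illustrative, not taken from the patch.

#include <stdio.h>

int main(void)
{
        unsigned long nr   = 70;                /* operate on bit 70 of the bitmap       */
        unsigned long word = nr >> 6;           /* nr >> SZLONG_LOG  -> long word 1      */
        unsigned long bit  = nr & 63;           /* nr & SZLONG_MASK  -> bit 6 in word 1  */
        unsigned long mask = 1UL << bit;

        printf("word=%lu bit=%lu mask=%#lx\n", word, bit, mask);   /* word=1 bit=6 mask=0x40 */
        return 0;
}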
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c new file mode 100644 index 000000000000..cd160be3ce4d --- /dev/null +++ b/arch/mips/lib/mips-atomic.c | |||
@@ -0,0 +1,176 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle | ||
7 | * Copyright (C) 1996 by Paul M. Antoine | ||
8 | * Copyright (C) 1999 Silicon Graphics | ||
9 | * Copyright (C) 2000 MIPS Technologies, Inc. | ||
10 | */ | ||
11 | #include <asm/irqflags.h> | ||
12 | #include <asm/hazards.h> | ||
13 | #include <linux/compiler.h> | ||
14 | #include <linux/preempt.h> | ||
15 | #include <linux/export.h> | ||
16 | |||
17 | #if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) | ||
18 | |||
19 | /* | ||
20 | * For cli() we have to insert nops to make sure that the new value | ||
21 | * has actually arrived in the status register before the end of this | ||
22 | * macro. | ||
23 | * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs | ||
24 | * no nops at all. | ||
25 | */ | ||
26 | /* | ||
27 | * For TX49, operating on only the IE bit is not enough. | ||
28 | * | ||
29 | * If an mfc0 $12 follows a store and the mfc0 is the last instruction | ||
30 | * of a page, and fetching the next instruction causes a TLB miss, the | ||
31 | * result of the mfc0 might wrongly contain the EXL bit. | ||
32 | * | ||
33 | * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008 | ||
34 | * | ||
35 | * Workaround: mask EXL bit of the result or place a nop before mfc0. | ||
36 | */ | ||
37 | __asm__( | ||
38 | " .macro arch_local_irq_disable\n" | ||
39 | " .set push \n" | ||
40 | " .set noat \n" | ||
41 | #ifdef CONFIG_MIPS_MT_SMTC | ||
42 | " mfc0 $1, $2, 1 \n" | ||
43 | " ori $1, 0x400 \n" | ||
44 | " .set noreorder \n" | ||
45 | " mtc0 $1, $2, 1 \n" | ||
46 | #elif defined(CONFIG_CPU_MIPSR2) | ||
47 | /* see irqflags.h for inline function */ | ||
48 | #else | ||
49 | " mfc0 $1,$12 \n" | ||
50 | " ori $1,0x1f \n" | ||
51 | " xori $1,0x1f \n" | ||
52 | " .set noreorder \n" | ||
53 | " mtc0 $1,$12 \n" | ||
54 | #endif | ||
55 | " irq_disable_hazard \n" | ||
56 | " .set pop \n" | ||
57 | " .endm \n"); | ||
58 | |||
59 | notrace void arch_local_irq_disable(void) | ||
60 | { | ||
61 | preempt_disable(); | ||
62 | __asm__ __volatile__( | ||
63 | "arch_local_irq_disable" | ||
64 | : /* no outputs */ | ||
65 | : /* no inputs */ | ||
66 | : "memory"); | ||
67 | preempt_enable(); | ||
68 | } | ||
69 | EXPORT_SYMBOL(arch_local_irq_disable); | ||
70 | |||
71 | |||
72 | __asm__( | ||
73 | " .macro arch_local_irq_save result \n" | ||
74 | " .set push \n" | ||
75 | " .set reorder \n" | ||
76 | " .set noat \n" | ||
77 | #ifdef CONFIG_MIPS_MT_SMTC | ||
78 | " mfc0 \\result, $2, 1 \n" | ||
79 | " ori $1, \\result, 0x400 \n" | ||
80 | " .set noreorder \n" | ||
81 | " mtc0 $1, $2, 1 \n" | ||
82 | " andi \\result, \\result, 0x400 \n" | ||
83 | #elif defined(CONFIG_CPU_MIPSR2) | ||
84 | /* see irqflags.h for inline function */ | ||
85 | #else | ||
86 | " mfc0 \\result, $12 \n" | ||
87 | " ori $1, \\result, 0x1f \n" | ||
88 | " xori $1, 0x1f \n" | ||
89 | " .set noreorder \n" | ||
90 | " mtc0 $1, $12 \n" | ||
91 | #endif | ||
92 | " irq_disable_hazard \n" | ||
93 | " .set pop \n" | ||
94 | " .endm \n"); | ||
95 | |||
96 | notrace unsigned long arch_local_irq_save(void) | ||
97 | { | ||
98 | unsigned long flags; | ||
99 | preempt_disable(); | ||
100 | asm volatile("arch_local_irq_save\t%0" | ||
101 | : "=r" (flags) | ||
102 | : /* no inputs */ | ||
103 | : "memory"); | ||
104 | preempt_enable(); | ||
105 | return flags; | ||
106 | } | ||
107 | EXPORT_SYMBOL(arch_local_irq_save); | ||
108 | |||
109 | |||
110 | __asm__( | ||
111 | " .macro arch_local_irq_restore flags \n" | ||
112 | " .set push \n" | ||
113 | " .set noreorder \n" | ||
114 | " .set noat \n" | ||
115 | #ifdef CONFIG_MIPS_MT_SMTC | ||
116 | "mfc0 $1, $2, 1 \n" | ||
117 | "andi \\flags, 0x400 \n" | ||
118 | "ori $1, 0x400 \n" | ||
119 | "xori $1, 0x400 \n" | ||
120 | "or \\flags, $1 \n" | ||
121 | "mtc0 \\flags, $2, 1 \n" | ||
122 | #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) | ||
123 | /* see irqflags.h for inline function */ | ||
124 | #elif defined(CONFIG_CPU_MIPSR2) | ||
125 | /* see irqflags.h for inline function */ | ||
126 | #else | ||
127 | " mfc0 $1, $12 \n" | ||
128 | " andi \\flags, 1 \n" | ||
129 | " ori $1, 0x1f \n" | ||
130 | " xori $1, 0x1f \n" | ||
131 | " or \\flags, $1 \n" | ||
132 | " mtc0 \\flags, $12 \n" | ||
133 | #endif | ||
134 | " irq_disable_hazard \n" | ||
135 | " .set pop \n" | ||
136 | " .endm \n"); | ||
137 | |||
138 | notrace void arch_local_irq_restore(unsigned long flags) | ||
139 | { | ||
140 | unsigned long __tmp1; | ||
141 | |||
142 | #ifdef CONFIG_MIPS_MT_SMTC | ||
143 | /* | ||
144 | * SMTC kernel needs to do a software replay of queued | ||
145 | * IPIs, at the cost of branch and call overhead on each | ||
146 | * local_irq_restore() | ||
147 | */ | ||
148 | if (unlikely(!(flags & 0x0400))) | ||
149 | smtc_ipi_replay(); | ||
150 | #endif | ||
151 | preempt_disable(); | ||
152 | __asm__ __volatile__( | ||
153 | "arch_local_irq_restore\t%0" | ||
154 | : "=r" (__tmp1) | ||
155 | : "0" (flags) | ||
156 | : "memory"); | ||
157 | preempt_enable(); | ||
158 | } | ||
159 | EXPORT_SYMBOL(arch_local_irq_restore); | ||
160 | |||
161 | |||
162 | notrace void __arch_local_irq_restore(unsigned long flags) | ||
163 | { | ||
164 | unsigned long __tmp1; | ||
165 | |||
166 | preempt_disable(); | ||
167 | __asm__ __volatile__( | ||
168 | "arch_local_irq_restore\t%0" | ||
169 | : "=r" (__tmp1) | ||
170 | : "0" (flags) | ||
171 | : "memory"); | ||
172 | preempt_enable(); | ||
173 | } | ||
174 | EXPORT_SYMBOL(__arch_local_irq_restore); | ||
175 | |||
176 | #endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */ | ||
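In the non-R2 disable/save paths above, the "ori 0x1f" / "xori 0x1f" pair is a two-instruction way to clear the low five Status bits (KSU, ERL, EXL, IE) without needing an extra register for a mask (the enable path uses xori 0x1e so IE stays set). A small standalone check of the identity, with an arbitrary sample value:

#include <stdio.h>

int main(void)
{
        unsigned int status = 0xfc13;                   /* arbitrary sample Status value */
        unsigned int a = (status | 0x1f) ^ 0x1f;        /* what ori + xori compute       */
        unsigned int b = status & ~0x1fu;               /* the obvious equivalent        */

        printf("%#x %#x\n", a, b);                      /* both print 0xfc00             */
        return 0;
}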
diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c index 80562b81f0f2..74732177851c 100644 --- a/arch/mips/mti-malta/malta-platform.c +++ b/arch/mips/mti-malta/malta-platform.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/mtd/partitions.h> | 29 | #include <linux/mtd/partitions.h> |
30 | #include <linux/mtd/physmap.h> | 30 | #include <linux/mtd/physmap.h> |
31 | #include <linux/platform_device.h> | 31 | #include <linux/platform_device.h> |
32 | #include <asm/mips-boards/maltaint.h> | ||
32 | #include <mtd/mtd-abi.h> | 33 | #include <mtd/mtd-abi.h> |
33 | 34 | ||
34 | #define SMC_PORT(base, int) \ | 35 | #define SMC_PORT(base, int) \ |
@@ -48,7 +49,7 @@ static struct plat_serial8250_port uart8250_data[] = { | |||
48 | SMC_PORT(0x2F8, 3), | 49 | SMC_PORT(0x2F8, 3), |
49 | { | 50 | { |
50 | .mapbase = 0x1f000900, /* The CBUS UART */ | 51 | .mapbase = 0x1f000900, /* The CBUS UART */ |
51 | .irq = MIPS_CPU_IRQ_BASE + 2, | 52 | .irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2, |
52 | .uartclk = 3686400, /* Twice the usual clk! */ | 53 | .uartclk = 3686400, /* Twice the usual clk! */ |
53 | .iotype = UPIO_MEM32, | 54 | .iotype = UPIO_MEM32, |
54 | .flags = CBUS_UART_FLAGS, | 55 | .flags = CBUS_UART_FLAGS, |
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c index fd49aeda9eb8..5dede04f2f3e 100644 --- a/arch/parisc/kernel/signal32.c +++ b/arch/parisc/kernel/signal32.c | |||
@@ -65,7 +65,8 @@ put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz) | |||
65 | { | 65 | { |
66 | compat_sigset_t s; | 66 | compat_sigset_t s; |
67 | 67 | ||
68 | if (sz != sizeof *set) panic("put_sigset32()"); | 68 | if (sz != sizeof *set) |
69 | return -EINVAL; | ||
69 | sigset_64to32(&s, set); | 70 | sigset_64to32(&s, set); |
70 | 71 | ||
71 | return copy_to_user(up, &s, sizeof s); | 72 | return copy_to_user(up, &s, sizeof s); |
@@ -77,7 +78,8 @@ get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz) | |||
77 | compat_sigset_t s; | 78 | compat_sigset_t s; |
78 | int r; | 79 | int r; |
79 | 80 | ||
80 | if (sz != sizeof *set) panic("put_sigset32()"); | 81 | if (sz != sizeof *set) |
82 | return -EINVAL; | ||
81 | 83 | ||
82 | if ((r = copy_from_user(&s, up, sz)) == 0) { | 84 | if ((r = copy_from_user(&s, up, sz)) == 0) { |
83 | sigset_32to64(set, &s); | 85 | sigset_32to64(set, &s); |
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index 7426e40699bd..f76c10863c62 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c | |||
@@ -73,6 +73,8 @@ static unsigned long get_shared_area(struct address_space *mapping, | |||
73 | struct vm_area_struct *vma; | 73 | struct vm_area_struct *vma; |
74 | int offset = mapping ? get_offset(mapping) : 0; | 74 | int offset = mapping ? get_offset(mapping) : 0; |
75 | 75 | ||
76 | offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000; | ||
77 | |||
76 | addr = DCACHE_ALIGN(addr - offset) + offset; | 78 | addr = DCACHE_ALIGN(addr - offset) + offset; |
77 | 79 | ||
78 | for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) { | 80 | for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) { |
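The added line folds the mapping's page offset into the alignment offset before DCACHE_ALIGN(), so the intent appears to be that shared mappings of the same object at different file offsets keep a consistent cache colour. A rough standalone illustration of the computation itself, taking PAGE_SHIFT as 12 and invented sample values:

#include <stdio.h>

#define PAGE_SHIFT_DEMO 12

int main(void)
{
        unsigned long offset = 0x2000;          /* colour offset derived from the mapping */
        unsigned long pgoff  = 5;               /* page offset passed to mmap()           */

        offset = (offset + (pgoff << PAGE_SHIFT_DEMO)) & 0x3FF000;
        printf("adjusted offset = %#lx\n", offset);     /* 0x2000 + 0x5000 = 0x7000       */
        return 0;
}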
diff --git a/arch/powerpc/boot/dts/mpc5200b.dtsi b/arch/powerpc/boot/dts/mpc5200b.dtsi index 7ab286ab5300..39ed65a44c5f 100644 --- a/arch/powerpc/boot/dts/mpc5200b.dtsi +++ b/arch/powerpc/boot/dts/mpc5200b.dtsi | |||
@@ -231,6 +231,12 @@ | |||
231 | interrupts = <2 7 0>; | 231 | interrupts = <2 7 0>; |
232 | }; | 232 | }; |
233 | 233 | ||
234 | sclpc@3c00 { | ||
235 | compatible = "fsl,mpc5200-lpbfifo"; | ||
236 | reg = <0x3c00 0x60>; | ||
237 | interrupts = <2 23 0>; | ||
238 | }; | ||
239 | |||
234 | i2c@3d00 { | 240 | i2c@3d00 { |
235 | #address-cells = <1>; | 241 | #address-cells = <1>; |
236 | #size-cells = <0>; | 242 | #size-cells = <0>; |
diff --git a/arch/powerpc/boot/dts/o2d.dtsi b/arch/powerpc/boot/dts/o2d.dtsi index 3444eb8f0ade..24f668039295 100644 --- a/arch/powerpc/boot/dts/o2d.dtsi +++ b/arch/powerpc/boot/dts/o2d.dtsi | |||
@@ -86,12 +86,6 @@ | |||
86 | reg = <0>; | 86 | reg = <0>; |
87 | }; | 87 | }; |
88 | }; | 88 | }; |
89 | |||
90 | sclpc@3c00 { | ||
91 | compatible = "fsl,mpc5200-lpbfifo"; | ||
92 | reg = <0x3c00 0x60>; | ||
93 | interrupts = <3 23 0>; | ||
94 | }; | ||
95 | }; | 89 | }; |
96 | 90 | ||
97 | localbus { | 91 | localbus { |
diff --git a/arch/powerpc/boot/dts/pcm030.dts b/arch/powerpc/boot/dts/pcm030.dts index 9e354997eb7e..96512c058033 100644 --- a/arch/powerpc/boot/dts/pcm030.dts +++ b/arch/powerpc/boot/dts/pcm030.dts | |||
@@ -59,7 +59,7 @@ | |||
59 | #gpio-cells = <2>; | 59 | #gpio-cells = <2>; |
60 | }; | 60 | }; |
61 | 61 | ||
62 | psc@2000 { /* PSC1 in ac97 mode */ | 62 | audioplatform: psc@2000 { /* PSC1 in ac97 mode */ |
63 | compatible = "mpc5200b-psc-ac97","fsl,mpc5200b-psc-ac97"; | 63 | compatible = "mpc5200b-psc-ac97","fsl,mpc5200b-psc-ac97"; |
64 | cell-index = <0>; | 64 | cell-index = <0>; |
65 | }; | 65 | }; |
@@ -134,4 +134,9 @@ | |||
134 | localbus { | 134 | localbus { |
135 | status = "disabled"; | 135 | status = "disabled"; |
136 | }; | 136 | }; |
137 | |||
138 | sound { | ||
139 | compatible = "phytec,pcm030-audio-fabric"; | ||
140 | asoc-platform = <&audioplatform>; | ||
141 | }; | ||
137 | }; | 142 | }; |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 8520b58a5e9a..b89ef65392dc 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c | |||
@@ -372,10 +372,11 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq, | |||
372 | case MPC52xx_IRQ_L1_MAIN: irqchip = &mpc52xx_main_irqchip; break; | 372 | case MPC52xx_IRQ_L1_MAIN: irqchip = &mpc52xx_main_irqchip; break; |
373 | case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break; | 373 | case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break; |
374 | case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break; | 374 | case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break; |
375 | default: | 375 | case MPC52xx_IRQ_L1_CRIT: |
376 | pr_err("%s: invalid irq: virq=%i, l1=%i, l2=%i\n", | 376 | pr_warn("%s: Critical IRQ #%d is unsupported! Nopping it.\n", |
377 | __func__, virq, l1irq, l2irq); | 377 | __func__, l2irq); |
378 | return -EINVAL; | 378 | irq_set_chip(virq, &no_irq_chip); |
379 | return 0; | ||
379 | } | 380 | } |
380 | 381 | ||
381 | irq_set_chip_and_handler(virq, irqchip, handle_level_irq); | 382 | irq_set_chip_and_handler(virq, irqchip, handle_level_irq); |
diff --git a/arch/powerpc/platforms/pseries/eeh_pe.c b/arch/powerpc/platforms/pseries/eeh_pe.c index 797cd181dc3f..d16c8ded1084 100644 --- a/arch/powerpc/platforms/pseries/eeh_pe.c +++ b/arch/powerpc/platforms/pseries/eeh_pe.c | |||
@@ -449,7 +449,7 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe) | |||
449 | if (list_empty(&pe->edevs)) { | 449 | if (list_empty(&pe->edevs)) { |
450 | cnt = 0; | 450 | cnt = 0; |
451 | list_for_each_entry(child, &pe->child_list, child) { | 451 | list_for_each_entry(child, &pe->child_list, child) { |
452 | if (!(pe->type & EEH_PE_INVALID)) { | 452 | if (!(child->type & EEH_PE_INVALID)) { |
453 | cnt++; | 453 | cnt++; |
454 | break; | 454 | break; |
455 | } | 455 | } |
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index d19f4977c834..e5b084723131 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c | |||
@@ -220,7 +220,8 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) | |||
220 | 220 | ||
221 | /* Get the top level device in the PE */ | 221 | /* Get the top level device in the PE */ |
222 | edev = of_node_to_eeh_dev(dn); | 222 | edev = of_node_to_eeh_dev(dn); |
223 | edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list); | 223 | if (edev->pe) |
224 | edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list); | ||
224 | dn = eeh_dev_to_of_node(edev); | 225 | dn = eeh_dev_to_of_node(edev); |
225 | if (!dn) | 226 | if (!dn) |
226 | return NULL; | 227 | return NULL; |
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c index 45d00e5fe14d..4d806b419606 100644 --- a/arch/powerpc/platforms/pseries/processor_idle.c +++ b/arch/powerpc/platforms/pseries/processor_idle.c | |||
@@ -36,7 +36,7 @@ static struct cpuidle_state *cpuidle_state_table; | |||
36 | static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before) | 36 | static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before) |
37 | { | 37 | { |
38 | 38 | ||
39 | *kt_before = ktime_get_real(); | 39 | *kt_before = ktime_get(); |
40 | *in_purr = mfspr(SPRN_PURR); | 40 | *in_purr = mfspr(SPRN_PURR); |
41 | /* | 41 | /* |
42 | * Indicate to the HV that we are idle. Now would be | 42 | * Indicate to the HV that we are idle. Now would be |
@@ -50,7 +50,7 @@ static inline s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before) | |||
50 | get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr; | 50 | get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr; |
51 | get_lppaca()->idle = 0; | 51 | get_lppaca()->idle = 0; |
52 | 52 | ||
53 | return ktime_to_us(ktime_sub(ktime_get_real(), kt_before)); | 53 | return ktime_to_us(ktime_sub(ktime_get(), kt_before)); |
54 | } | 54 | } |
55 | 55 | ||
56 | static int snooze_loop(struct cpuidle_device *dev, | 56 | static int snooze_loop(struct cpuidle_device *dev, |
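The ktime_get_real() to ktime_get() change above swaps the wall clock for the monotonic clock when measuring idle residency: the wall clock can be stepped by settimeofday() or NTP, which would make the measured interval negative or absurdly large. The same distinction in a standalone user-space form, with CLOCK_MONOTONIC playing the role of ktime_get():

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec a, b;

        clock_gettime(CLOCK_MONOTONIC, &a);     /* analogous to ktime_get()         */
        /* ... the interval being measured ... */
        clock_gettime(CLOCK_MONOTONIC, &b);     /* unaffected by wall-clock steps   */

        printf("%ld ns\n", (b.tv_sec - a.tv_sec) * 1000000000L +
                           (b.tv_nsec - a.tv_nsec));
        return 0;
}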
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 5dba755a43e6..d385f396dfee 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -96,6 +96,7 @@ config S390 | |||
96 | select HAVE_MEMBLOCK_NODE_MAP | 96 | select HAVE_MEMBLOCK_NODE_MAP |
97 | select HAVE_CMPXCHG_LOCAL | 97 | select HAVE_CMPXCHG_LOCAL |
98 | select HAVE_CMPXCHG_DOUBLE | 98 | select HAVE_CMPXCHG_DOUBLE |
99 | select HAVE_ALIGNED_STRUCT_PAGE if SLUB | ||
99 | select HAVE_VIRT_CPU_ACCOUNTING | 100 | select HAVE_VIRT_CPU_ACCOUNTING |
100 | select VIRT_CPU_ACCOUNTING | 101 | select VIRT_CPU_ACCOUNTING |
101 | select ARCH_DISCARD_MEMBLOCK | 102 | select ARCH_DISCARD_MEMBLOCK |
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index a34a9d612fc0..18cd6b592650 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h | |||
@@ -20,7 +20,7 @@ | |||
20 | #define PSW32_MASK_CC 0x00003000UL | 20 | #define PSW32_MASK_CC 0x00003000UL |
21 | #define PSW32_MASK_PM 0x00000f00UL | 21 | #define PSW32_MASK_PM 0x00000f00UL |
22 | 22 | ||
23 | #define PSW32_MASK_USER 0x00003F00UL | 23 | #define PSW32_MASK_USER 0x0000FF00UL |
24 | 24 | ||
25 | #define PSW32_ADDR_AMODE 0x80000000UL | 25 | #define PSW32_ADDR_AMODE 0x80000000UL |
26 | #define PSW32_ADDR_INSN 0x7FFFFFFFUL | 26 | #define PSW32_ADDR_INSN 0x7FFFFFFFUL |
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index 9ca305383760..9935cbd6a46f 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h | |||
@@ -8,6 +8,9 @@ struct cpu; | |||
8 | 8 | ||
9 | #ifdef CONFIG_SCHED_BOOK | 9 | #ifdef CONFIG_SCHED_BOOK |
10 | 10 | ||
11 | extern unsigned char cpu_socket_id[NR_CPUS]; | ||
12 | #define topology_physical_package_id(cpu) (cpu_socket_id[cpu]) | ||
13 | |||
11 | extern unsigned char cpu_core_id[NR_CPUS]; | 14 | extern unsigned char cpu_core_id[NR_CPUS]; |
12 | extern cpumask_t cpu_core_map[NR_CPUS]; | 15 | extern cpumask_t cpu_core_map[NR_CPUS]; |
13 | 16 | ||
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h index 705588a16d70..a5ca214b34fd 100644 --- a/arch/s390/include/uapi/asm/ptrace.h +++ b/arch/s390/include/uapi/asm/ptrace.h | |||
@@ -239,7 +239,7 @@ typedef struct | |||
239 | #define PSW_MASK_EA 0x00000000UL | 239 | #define PSW_MASK_EA 0x00000000UL |
240 | #define PSW_MASK_BA 0x00000000UL | 240 | #define PSW_MASK_BA 0x00000000UL |
241 | 241 | ||
242 | #define PSW_MASK_USER 0x00003F00UL | 242 | #define PSW_MASK_USER 0x0000FF00UL |
243 | 243 | ||
244 | #define PSW_ADDR_AMODE 0x80000000UL | 244 | #define PSW_ADDR_AMODE 0x80000000UL |
245 | #define PSW_ADDR_INSN 0x7FFFFFFFUL | 245 | #define PSW_ADDR_INSN 0x7FFFFFFFUL |
@@ -269,7 +269,7 @@ typedef struct | |||
269 | #define PSW_MASK_EA 0x0000000100000000UL | 269 | #define PSW_MASK_EA 0x0000000100000000UL |
270 | #define PSW_MASK_BA 0x0000000080000000UL | 270 | #define PSW_MASK_BA 0x0000000080000000UL |
271 | 271 | ||
272 | #define PSW_MASK_USER 0x00003F8180000000UL | 272 | #define PSW_MASK_USER 0x0000FF8180000000UL |
273 | 273 | ||
274 | #define PSW_ADDR_AMODE 0x0000000000000000UL | 274 | #define PSW_ADDR_AMODE 0x0000000000000000UL |
275 | #define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL | 275 | #define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL |
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index a1e8a8694bb7..593fcc9253fc 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c | |||
@@ -309,6 +309,10 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) | |||
309 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | | 309 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | |
310 | (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 | | 310 | (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 | |
311 | (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE); | 311 | (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE); |
312 | /* Check for invalid user address space control. */ | ||
313 | if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC)) | ||
314 | regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) | | ||
315 | (regs->psw.mask & ~PSW_MASK_ASC); | ||
312 | regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN); | 316 | regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN); |
313 | for (i = 0; i < NUM_GPRS; i++) | 317 | for (i = 0; i < NUM_GPRS; i++) |
314 | regs->gprs[i] = (__u64) regs32.gprs[i]; | 318 | regs->gprs[i] = (__u64) regs32.gprs[i]; |
@@ -481,7 +485,10 @@ static int setup_frame32(int sig, struct k_sigaction *ka, | |||
481 | 485 | ||
482 | /* Set up registers for signal handler */ | 486 | /* Set up registers for signal handler */ |
483 | regs->gprs[15] = (__force __u64) frame; | 487 | regs->gprs[15] = (__force __u64) frame; |
484 | regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */ | 488 | /* Force 31 bit amode and default user address space control. */ |
489 | regs->psw.mask = PSW_MASK_BA | | ||
490 | (psw_user_bits & PSW_MASK_ASC) | | ||
491 | (regs->psw.mask & ~PSW_MASK_ASC); | ||
485 | regs->psw.addr = (__force __u64) ka->sa.sa_handler; | 492 | regs->psw.addr = (__force __u64) ka->sa.sa_handler; |
486 | 493 | ||
487 | regs->gprs[2] = map_signal(sig); | 494 | regs->gprs[2] = map_signal(sig); |
@@ -549,7 +556,10 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
549 | 556 | ||
550 | /* Set up registers for signal handler */ | 557 | /* Set up registers for signal handler */ |
551 | regs->gprs[15] = (__force __u64) frame; | 558 | regs->gprs[15] = (__force __u64) frame; |
552 | regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */ | 559 | /* Force 31 bit amode and default user address space control. */ |
560 | regs->psw.mask = PSW_MASK_BA | | ||
561 | (psw_user_bits & PSW_MASK_ASC) | | ||
562 | (regs->psw.mask & ~PSW_MASK_ASC); | ||
553 | regs->psw.addr = (__u64) ka->sa.sa_handler; | 563 | regs->psw.addr = (__u64) ka->sa.sa_handler; |
554 | 564 | ||
555 | regs->gprs[2] = map_signal(sig); | 565 | regs->gprs[2] = map_signal(sig); |
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index c13a2a37ef00..d1259d875074 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
@@ -136,6 +136,10 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) | |||
136 | /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */ | 136 | /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */ |
137 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | | 137 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | |
138 | (user_sregs.regs.psw.mask & PSW_MASK_USER); | 138 | (user_sregs.regs.psw.mask & PSW_MASK_USER); |
139 | /* Check for invalid user address space control. */ | ||
140 | if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC)) | ||
141 | regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) | | ||
142 | (regs->psw.mask & ~PSW_MASK_ASC); | ||
139 | /* Check for invalid amode */ | 143 | /* Check for invalid amode */ |
140 | if (regs->psw.mask & PSW_MASK_EA) | 144 | if (regs->psw.mask & PSW_MASK_EA) |
141 | regs->psw.mask |= PSW_MASK_BA; | 145 | regs->psw.mask |= PSW_MASK_BA; |
@@ -273,7 +277,10 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
273 | 277 | ||
274 | /* Set up registers for signal handler */ | 278 | /* Set up registers for signal handler */ |
275 | regs->gprs[15] = (unsigned long) frame; | 279 | regs->gprs[15] = (unsigned long) frame; |
276 | regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */ | 280 | /* Force default amode and default user address space control. */ |
281 | regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | | ||
282 | (psw_user_bits & PSW_MASK_ASC) | | ||
283 | (regs->psw.mask & ~PSW_MASK_ASC); | ||
277 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; | 284 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; |
278 | 285 | ||
279 | regs->gprs[2] = map_signal(sig); | 286 | regs->gprs[2] = map_signal(sig); |
@@ -346,7 +353,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
346 | 353 | ||
347 | /* Set up registers for signal handler */ | 354 | /* Set up registers for signal handler */ |
348 | regs->gprs[15] = (unsigned long) frame; | 355 | regs->gprs[15] = (unsigned long) frame; |
349 | regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */ | 356 | /* Force default amode and default user address space control. */ |
357 | regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | | ||
358 | (psw_user_bits & PSW_MASK_ASC) | | ||
359 | (regs->psw.mask & ~PSW_MASK_ASC); | ||
350 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; | 360 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; |
351 | 361 | ||
352 | regs->gprs[2] = map_signal(sig); | 362 | regs->gprs[2] = map_signal(sig); |
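The new check in restore_sigregs() (and its 31-bit counterpart earlier) looks at only the address-space-control field of the user-supplied PSW mask and, if it claims a privileged setting, forces that field back to the default user value while leaving the rest of the mask intact. The shape of that field-sanitising pattern is sketched below with placeholder constants, not the real PSW bit layout:

#include <stdio.h>

#define ASC_MASK        0x0000c000UL    /* placeholder field mask, not the real PSW bits */
#define ASC_KERNEL      0x0000c000UL    /* placeholder privileged value of that field    */
#define ASC_USER        0x00000000UL    /* placeholder default user value                */

static unsigned long sanitize_asc(unsigned long mask)
{
        if ((mask & ASC_MASK) >= ASC_KERNEL)
                mask = ASC_USER | (mask & ~ASC_MASK);   /* reset only the ASC field */
        return mask;
}

int main(void)
{
        printf("%#lx\n", sanitize_asc(0x0000c180UL));   /* field reset, low bits kept: 0x180 */
        printf("%#lx\n", sanitize_asc(0x00004180UL));   /* below the threshold: unchanged    */
        return 0;
}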
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 54d93f4b6818..dd55f7c20104 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c | |||
@@ -40,6 +40,7 @@ static DEFINE_SPINLOCK(topology_lock); | |||
40 | static struct mask_info core_info; | 40 | static struct mask_info core_info; |
41 | cpumask_t cpu_core_map[NR_CPUS]; | 41 | cpumask_t cpu_core_map[NR_CPUS]; |
42 | unsigned char cpu_core_id[NR_CPUS]; | 42 | unsigned char cpu_core_id[NR_CPUS]; |
43 | unsigned char cpu_socket_id[NR_CPUS]; | ||
43 | 44 | ||
44 | static struct mask_info book_info; | 45 | static struct mask_info book_info; |
45 | cpumask_t cpu_book_map[NR_CPUS]; | 46 | cpumask_t cpu_book_map[NR_CPUS]; |
@@ -83,11 +84,12 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu, | |||
83 | cpumask_set_cpu(lcpu, &book->mask); | 84 | cpumask_set_cpu(lcpu, &book->mask); |
84 | cpu_book_id[lcpu] = book->id; | 85 | cpu_book_id[lcpu] = book->id; |
85 | cpumask_set_cpu(lcpu, &core->mask); | 86 | cpumask_set_cpu(lcpu, &core->mask); |
87 | cpu_core_id[lcpu] = rcpu; | ||
86 | if (one_core_per_cpu) { | 88 | if (one_core_per_cpu) { |
87 | cpu_core_id[lcpu] = rcpu; | 89 | cpu_socket_id[lcpu] = rcpu; |
88 | core = core->next; | 90 | core = core->next; |
89 | } else { | 91 | } else { |
90 | cpu_core_id[lcpu] = core->id; | 92 | cpu_socket_id[lcpu] = core->id; |
91 | } | 93 | } |
92 | smp_cpu_set_polarization(lcpu, tl_cpu->pp); | 94 | smp_cpu_set_polarization(lcpu, tl_cpu->pp); |
93 | } | 95 | } |
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 8b8285310b5a..1f5315d1215c 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c | |||
@@ -180,8 +180,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, | |||
180 | addr = start; | 180 | addr = start; |
181 | len = (unsigned long) nr_pages << PAGE_SHIFT; | 181 | len = (unsigned long) nr_pages << PAGE_SHIFT; |
182 | end = start + len; | 182 | end = start + len; |
183 | if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, | 183 | if ((end < start) || (end > TASK_SIZE)) |
184 | (void __user *)start, len))) | ||
185 | return 0; | 184 | return 0; |
186 | 185 | ||
187 | local_irq_save(flags); | 186 | local_irq_save(flags); |
@@ -229,7 +228,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, | |||
229 | addr = start; | 228 | addr = start; |
230 | len = (unsigned long) nr_pages << PAGE_SHIFT; | 229 | len = (unsigned long) nr_pages << PAGE_SHIFT; |
231 | end = start + len; | 230 | end = start + len; |
232 | if (end < start) | 231 | if ((end < start) || (end > TASK_SIZE)) |
233 | goto slow_irqon; | 232 | goto slow_irqon; |
234 | 233 | ||
235 | /* | 234 | /* |
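The bounds test now used in both gup paths above rejects a range for two reasons with one pair of comparisons: end < start catches arithmetic wrap-around of start + len, and end > TASK_SIZE catches ranges that reach into kernel space. A minimal standalone version with an invented TASK_SIZE:

#include <stdio.h>

#define TASK_SIZE_DEMO 0x0000800000000000UL     /* invented limit for the demo */

static int range_ok(unsigned long start, unsigned long len)
{
        unsigned long end = start + len;

        return !(end < start || end > TASK_SIZE_DEMO);
}

int main(void)
{
        printf("%d\n", range_ok(0x1000, 0x2000));                       /* 1: ordinary user range */
        printf("%d\n", range_ok(~0UL - 0x100, 0x1000));                 /* 0: start + len wraps   */
        printf("%d\n", range_ok(TASK_SIZE_DEMO - 0x1000, 0x2000));      /* 0: crosses the limit   */
        return 0;
}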
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h index f93003123bce..67c62578d170 100644 --- a/arch/sparc/include/asm/prom.h +++ b/arch/sparc/include/asm/prom.h | |||
@@ -63,10 +63,13 @@ extern char *of_console_options; | |||
63 | extern void irq_trans_init(struct device_node *dp); | 63 | extern void irq_trans_init(struct device_node *dp); |
64 | extern char *build_path_component(struct device_node *dp); | 64 | extern char *build_path_component(struct device_node *dp); |
65 | 65 | ||
66 | /* SPARC has a local implementation */ | 66 | /* SPARC has local implementations */ |
67 | extern int of_address_to_resource(struct device_node *dev, int index, | 67 | extern int of_address_to_resource(struct device_node *dev, int index, |
68 | struct resource *r); | 68 | struct resource *r); |
69 | #define of_address_to_resource of_address_to_resource | 69 | #define of_address_to_resource of_address_to_resource |
70 | 70 | ||
71 | void __iomem *of_iomap(struct device_node *node, int index); | ||
72 | #define of_iomap of_iomap | ||
73 | |||
71 | #endif /* __KERNEL__ */ | 74 | #endif /* __KERNEL__ */ |
72 | #endif /* _SPARC_PROM_H */ | 75 | #endif /* _SPARC_PROM_H */ |
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 867de2f8189c..689e1ba62809 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c | |||
@@ -295,9 +295,7 @@ void do_rt_sigreturn(struct pt_regs *regs) | |||
295 | err |= restore_fpu_state(regs, fpu_save); | 295 | err |= restore_fpu_state(regs, fpu_save); |
296 | 296 | ||
297 | err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); | 297 | err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); |
298 | err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf); | 298 | if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT) |
299 | |||
300 | if (err) | ||
301 | goto segv; | 299 | goto segv; |
302 | 300 | ||
303 | err |= __get_user(rwin_save, &sf->rwin_save); | 301 | err |= __get_user(rwin_save, &sf->rwin_save); |
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index e5c5473e69ce..c4fbb21e802b 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig | |||
@@ -16,6 +16,8 @@ config UNICORE32 | |||
16 | select ARCH_WANT_FRAME_POINTERS | 16 | select ARCH_WANT_FRAME_POINTERS |
17 | select GENERIC_IOMAP | 17 | select GENERIC_IOMAP |
18 | select MODULES_USE_ELF_REL | 18 | select MODULES_USE_ELF_REL |
19 | select GENERIC_KERNEL_THREAD | ||
20 | select GENERIC_KERNEL_EXECVE | ||
19 | help | 21 | help |
20 | UniCore-32 is a 32-bit Instruction Set Architecture, | 22 | UniCore-32 is a 32-bit Instruction Set Architecture, |
21 | including a series of low-power-consumption RISC chip | 23 | including a series of low-power-consumption RISC chip |
@@ -64,6 +66,9 @@ config GENERIC_CALIBRATE_DELAY | |||
64 | config ARCH_MAY_HAVE_PC_FDC | 66 | config ARCH_MAY_HAVE_PC_FDC |
65 | bool | 67 | bool |
66 | 68 | ||
69 | config ZONE_DMA | ||
70 | def_bool y | ||
71 | |||
67 | config NEED_DMA_MAP_STATE | 72 | config NEED_DMA_MAP_STATE |
68 | def_bool y | 73 | def_bool y |
69 | 74 | ||
@@ -216,7 +221,7 @@ config PUV3_GPIO | |||
216 | bool | 221 | bool |
217 | depends on !ARCH_FPGA | 222 | depends on !ARCH_FPGA |
218 | select GENERIC_GPIO | 223 | select GENERIC_GPIO |
219 | select GPIO_SYSFS if EXPERIMENTAL | 224 | select GPIO_SYSFS |
220 | default y | 225 | default y |
221 | 226 | ||
222 | if PUV3_NB0916 | 227 | if PUV3_NB0916 |
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index c910c9857e11..601e92f18af6 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild | |||
@@ -1,4 +1,3 @@ | |||
1 | include include/asm-generic/Kbuild.asm | ||
2 | 1 | ||
3 | generic-y += atomic.h | 2 | generic-y += atomic.h |
4 | generic-y += auxvec.h | 3 | generic-y += auxvec.h |
diff --git a/arch/unicore32/include/asm/bug.h b/arch/unicore32/include/asm/bug.h index b1ff8cadb086..93a56f3e2344 100644 --- a/arch/unicore32/include/asm/bug.h +++ b/arch/unicore32/include/asm/bug.h | |||
@@ -19,9 +19,4 @@ extern void die(const char *msg, struct pt_regs *regs, int err); | |||
19 | extern void uc32_notify_die(const char *str, struct pt_regs *regs, | 19 | extern void uc32_notify_die(const char *str, struct pt_regs *regs, |
20 | struct siginfo *info, unsigned long err, unsigned long trap); | 20 | struct siginfo *info, unsigned long err, unsigned long trap); |
21 | 21 | ||
22 | extern asmlinkage void __backtrace(void); | ||
23 | extern asmlinkage void c_backtrace(unsigned long fp, int pmode); | ||
24 | |||
25 | extern void __show_regs(struct pt_regs *); | ||
26 | |||
27 | #endif /* __UNICORE_BUG_H__ */ | 22 | #endif /* __UNICORE_BUG_H__ */ |
diff --git a/arch/unicore32/include/asm/cmpxchg.h b/arch/unicore32/include/asm/cmpxchg.h index df4d5acfd19f..8e797ad4fa24 100644 --- a/arch/unicore32/include/asm/cmpxchg.h +++ b/arch/unicore32/include/asm/cmpxchg.h | |||
@@ -35,7 +35,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, | |||
35 | : "memory", "cc"); | 35 | : "memory", "cc"); |
36 | break; | 36 | break; |
37 | default: | 37 | default: |
38 | ret = __xchg_bad_pointer(); | 38 | __xchg_bad_pointer(); |
39 | } | 39 | } |
40 | 40 | ||
41 | return ret; | 41 | return ret; |
diff --git a/arch/unicore32/include/asm/kvm_para.h b/arch/unicore32/include/asm/kvm_para.h deleted file mode 100644 index 14fab8f0b957..000000000000 --- a/arch/unicore32/include/asm/kvm_para.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/kvm_para.h> | ||
diff --git a/arch/unicore32/include/asm/processor.h b/arch/unicore32/include/asm/processor.h index 14382cb09657..4eaa42167667 100644 --- a/arch/unicore32/include/asm/processor.h +++ b/arch/unicore32/include/asm/processor.h | |||
@@ -72,11 +72,6 @@ unsigned long get_wchan(struct task_struct *p); | |||
72 | 72 | ||
73 | #define cpu_relax() barrier() | 73 | #define cpu_relax() barrier() |
74 | 74 | ||
75 | /* | ||
76 | * Create a new kernel thread | ||
77 | */ | ||
78 | extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | ||
79 | |||
80 | #define task_pt_regs(p) \ | 75 | #define task_pt_regs(p) \ |
81 | ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) | 76 | ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) |
82 | 77 | ||
diff --git a/arch/unicore32/include/asm/ptrace.h b/arch/unicore32/include/asm/ptrace.h index b9caf9b0997b..726749dab52f 100644 --- a/arch/unicore32/include/asm/ptrace.h +++ b/arch/unicore32/include/asm/ptrace.h | |||
@@ -12,80 +12,10 @@ | |||
12 | #ifndef __UNICORE_PTRACE_H__ | 12 | #ifndef __UNICORE_PTRACE_H__ |
13 | #define __UNICORE_PTRACE_H__ | 13 | #define __UNICORE_PTRACE_H__ |
14 | 14 | ||
15 | #define PTRACE_GET_THREAD_AREA 22 | 15 | #include <uapi/asm/ptrace.h> |
16 | |||
17 | /* | ||
18 | * PSR bits | ||
19 | */ | ||
20 | #define USER_MODE 0x00000010 | ||
21 | #define REAL_MODE 0x00000011 | ||
22 | #define INTR_MODE 0x00000012 | ||
23 | #define PRIV_MODE 0x00000013 | ||
24 | #define ABRT_MODE 0x00000017 | ||
25 | #define EXTN_MODE 0x0000001b | ||
26 | #define SUSR_MODE 0x0000001f | ||
27 | #define MODE_MASK 0x0000001f | ||
28 | #define PSR_R_BIT 0x00000040 | ||
29 | #define PSR_I_BIT 0x00000080 | ||
30 | #define PSR_V_BIT 0x10000000 | ||
31 | #define PSR_C_BIT 0x20000000 | ||
32 | #define PSR_Z_BIT 0x40000000 | ||
33 | #define PSR_S_BIT 0x80000000 | ||
34 | |||
35 | /* | ||
36 | * Groups of PSR bits | ||
37 | */ | ||
38 | #define PSR_f 0xff000000 /* Flags */ | ||
39 | #define PSR_c 0x000000ff /* Control */ | ||
40 | 16 | ||
41 | #ifndef __ASSEMBLY__ | 17 | #ifndef __ASSEMBLY__ |
42 | 18 | ||
43 | /* | ||
44 | * This struct defines the way the registers are stored on the | ||
45 | * stack during a system call. Note that sizeof(struct pt_regs) | ||
46 | * has to be a multiple of 8. | ||
47 | */ | ||
48 | struct pt_regs { | ||
49 | unsigned long uregs[34]; | ||
50 | }; | ||
51 | |||
52 | #define UCreg_asr uregs[32] | ||
53 | #define UCreg_pc uregs[31] | ||
54 | #define UCreg_lr uregs[30] | ||
55 | #define UCreg_sp uregs[29] | ||
56 | #define UCreg_ip uregs[28] | ||
57 | #define UCreg_fp uregs[27] | ||
58 | #define UCreg_26 uregs[26] | ||
59 | #define UCreg_25 uregs[25] | ||
60 | #define UCreg_24 uregs[24] | ||
61 | #define UCreg_23 uregs[23] | ||
62 | #define UCreg_22 uregs[22] | ||
63 | #define UCreg_21 uregs[21] | ||
64 | #define UCreg_20 uregs[20] | ||
65 | #define UCreg_19 uregs[19] | ||
66 | #define UCreg_18 uregs[18] | ||
67 | #define UCreg_17 uregs[17] | ||
68 | #define UCreg_16 uregs[16] | ||
69 | #define UCreg_15 uregs[15] | ||
70 | #define UCreg_14 uregs[14] | ||
71 | #define UCreg_13 uregs[13] | ||
72 | #define UCreg_12 uregs[12] | ||
73 | #define UCreg_11 uregs[11] | ||
74 | #define UCreg_10 uregs[10] | ||
75 | #define UCreg_09 uregs[9] | ||
76 | #define UCreg_08 uregs[8] | ||
77 | #define UCreg_07 uregs[7] | ||
78 | #define UCreg_06 uregs[6] | ||
79 | #define UCreg_05 uregs[5] | ||
80 | #define UCreg_04 uregs[4] | ||
81 | #define UCreg_03 uregs[3] | ||
82 | #define UCreg_02 uregs[2] | ||
83 | #define UCreg_01 uregs[1] | ||
84 | #define UCreg_00 uregs[0] | ||
85 | #define UCreg_ORIG_00 uregs[33] | ||
86 | |||
87 | #ifdef __KERNEL__ | ||
88 | |||
89 | #define user_mode(regs) \ | 19 | #define user_mode(regs) \ |
90 | (processor_mode(regs) == USER_MODE) | 20 | (processor_mode(regs) == USER_MODE) |
91 | 21 | ||
@@ -125,9 +55,5 @@ static inline int valid_user_regs(struct pt_regs *regs) | |||
125 | 55 | ||
126 | #define instruction_pointer(regs) ((regs)->UCreg_pc) | 56 | #define instruction_pointer(regs) ((regs)->UCreg_pc) |
127 | 57 | ||
128 | #endif /* __KERNEL__ */ | ||
129 | |||
130 | #endif /* __ASSEMBLY__ */ | 58 | #endif /* __ASSEMBLY__ */ |
131 | |||
132 | #endif | 59 | #endif |
133 | |||
diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild index baebb3da1d44..0514d7ad6855 100644 --- a/arch/unicore32/include/uapi/asm/Kbuild +++ b/arch/unicore32/include/uapi/asm/Kbuild | |||
@@ -1,3 +1,10 @@ | |||
1 | # UAPI Header export list | 1 | # UAPI Header export list |
2 | include include/uapi/asm-generic/Kbuild.asm | 2 | include include/uapi/asm-generic/Kbuild.asm |
3 | 3 | ||
4 | header-y += byteorder.h | ||
5 | header-y += kvm_para.h | ||
6 | header-y += ptrace.h | ||
7 | header-y += sigcontext.h | ||
8 | header-y += unistd.h | ||
9 | |||
10 | generic-y += kvm_para.h | ||
diff --git a/arch/unicore32/include/asm/byteorder.h b/arch/unicore32/include/uapi/asm/byteorder.h index ebe1b3fef3e3..ebe1b3fef3e3 100644 --- a/arch/unicore32/include/asm/byteorder.h +++ b/arch/unicore32/include/uapi/asm/byteorder.h | |||
diff --git a/arch/unicore32/include/uapi/asm/ptrace.h b/arch/unicore32/include/uapi/asm/ptrace.h new file mode 100644 index 000000000000..187aa2e98a53 --- /dev/null +++ b/arch/unicore32/include/uapi/asm/ptrace.h | |||
@@ -0,0 +1,90 @@ | |||
1 | /* | ||
2 | * linux/arch/unicore32/include/asm/ptrace.h | ||
3 | * | ||
4 | * Code specific to PKUnity SoC and UniCore ISA | ||
5 | * | ||
6 | * Copyright (C) 2001-2010 GUAN Xue-tao | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | #ifndef _UAPI__UNICORE_PTRACE_H__ | ||
13 | #define _UAPI__UNICORE_PTRACE_H__ | ||
14 | |||
15 | #define PTRACE_GET_THREAD_AREA 22 | ||
16 | |||
17 | /* | ||
18 | * PSR bits | ||
19 | */ | ||
20 | #define USER_MODE 0x00000010 | ||
21 | #define REAL_MODE 0x00000011 | ||
22 | #define INTR_MODE 0x00000012 | ||
23 | #define PRIV_MODE 0x00000013 | ||
24 | #define ABRT_MODE 0x00000017 | ||
25 | #define EXTN_MODE 0x0000001b | ||
26 | #define SUSR_MODE 0x0000001f | ||
27 | #define MODE_MASK 0x0000001f | ||
28 | #define PSR_R_BIT 0x00000040 | ||
29 | #define PSR_I_BIT 0x00000080 | ||
30 | #define PSR_V_BIT 0x10000000 | ||
31 | #define PSR_C_BIT 0x20000000 | ||
32 | #define PSR_Z_BIT 0x40000000 | ||
33 | #define PSR_S_BIT 0x80000000 | ||
34 | |||
35 | /* | ||
36 | * Groups of PSR bits | ||
37 | */ | ||
38 | #define PSR_f 0xff000000 /* Flags */ | ||
39 | #define PSR_c 0x000000ff /* Control */ | ||
40 | |||
41 | #ifndef __ASSEMBLY__ | ||
42 | |||
43 | /* | ||
44 | * This struct defines the way the registers are stored on the | ||
45 | * stack during a system call. Note that sizeof(struct pt_regs) | ||
46 | * has to be a multiple of 8. | ||
47 | */ | ||
48 | struct pt_regs { | ||
49 | unsigned long uregs[34]; | ||
50 | }; | ||
51 | |||
52 | #define UCreg_asr uregs[32] | ||
53 | #define UCreg_pc uregs[31] | ||
54 | #define UCreg_lr uregs[30] | ||
55 | #define UCreg_sp uregs[29] | ||
56 | #define UCreg_ip uregs[28] | ||
57 | #define UCreg_fp uregs[27] | ||
58 | #define UCreg_26 uregs[26] | ||
59 | #define UCreg_25 uregs[25] | ||
60 | #define UCreg_24 uregs[24] | ||
61 | #define UCreg_23 uregs[23] | ||
62 | #define UCreg_22 uregs[22] | ||
63 | #define UCreg_21 uregs[21] | ||
64 | #define UCreg_20 uregs[20] | ||
65 | #define UCreg_19 uregs[19] | ||
66 | #define UCreg_18 uregs[18] | ||
67 | #define UCreg_17 uregs[17] | ||
68 | #define UCreg_16 uregs[16] | ||
69 | #define UCreg_15 uregs[15] | ||
70 | #define UCreg_14 uregs[14] | ||
71 | #define UCreg_13 uregs[13] | ||
72 | #define UCreg_12 uregs[12] | ||
73 | #define UCreg_11 uregs[11] | ||
74 | #define UCreg_10 uregs[10] | ||
75 | #define UCreg_09 uregs[9] | ||
76 | #define UCreg_08 uregs[8] | ||
77 | #define UCreg_07 uregs[7] | ||
78 | #define UCreg_06 uregs[6] | ||
79 | #define UCreg_05 uregs[5] | ||
80 | #define UCreg_04 uregs[4] | ||
81 | #define UCreg_03 uregs[3] | ||
82 | #define UCreg_02 uregs[2] | ||
83 | #define UCreg_01 uregs[1] | ||
84 | #define UCreg_00 uregs[0] | ||
85 | #define UCreg_ORIG_00 uregs[33] | ||
86 | |||
87 | |||
88 | #endif /* __ASSEMBLY__ */ | ||
89 | |||
90 | #endif /* _UAPI__UNICORE_PTRACE_H__ */ | ||
diff --git a/arch/unicore32/include/asm/sigcontext.h b/arch/unicore32/include/uapi/asm/sigcontext.h index 6a2d7671c052..6a2d7671c052 100644 --- a/arch/unicore32/include/asm/sigcontext.h +++ b/arch/unicore32/include/uapi/asm/sigcontext.h | |||
diff --git a/arch/unicore32/include/asm/unistd.h b/arch/unicore32/include/uapi/asm/unistd.h index 2abcf61c615d..d18a3be89b38 100644 --- a/arch/unicore32/include/asm/unistd.h +++ b/arch/unicore32/include/uapi/asm/unistd.h | |||
@@ -12,3 +12,4 @@ | |||
12 | 12 | ||
13 | /* Use the standard ABI for syscalls. */ | 13 | /* Use the standard ABI for syscalls. */ |
14 | #include <asm-generic/unistd.h> | 14 | #include <asm-generic/unistd.h> |
15 | #define __ARCH_WANT_SYS_EXECVE | ||
diff --git a/arch/unicore32/kernel/entry.S b/arch/unicore32/kernel/entry.S index dcb87ab19ddd..7049350c790f 100644 --- a/arch/unicore32/kernel/entry.S +++ b/arch/unicore32/kernel/entry.S | |||
@@ -573,17 +573,16 @@ ENDPROC(ret_to_user) | |||
573 | */ | 573 | */ |
574 | ENTRY(ret_from_fork) | 574 | ENTRY(ret_from_fork) |
575 | b.l schedule_tail | 575 | b.l schedule_tail |
576 | get_thread_info tsk | ||
577 | ldw r1, [tsk+], #TI_FLAGS @ check for syscall tracing | ||
578 | mov why, #1 | ||
579 | cand.a r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls? | ||
580 | beq ret_slow_syscall | ||
581 | mov r1, sp | ||
582 | mov r0, #1 @ trace exit [IP = 1] | ||
583 | b.l syscall_trace | ||
584 | b ret_slow_syscall | 576 | b ret_slow_syscall |
585 | ENDPROC(ret_from_fork) | 577 | ENDPROC(ret_from_fork) |
586 | 578 | ||
579 | ENTRY(ret_from_kernel_thread) | ||
580 | b.l schedule_tail | ||
581 | mov r0, r5 | ||
582 | adr lr, ret_slow_syscall | ||
583 | mov pc, r4 | ||
584 | ENDPROC(ret_from_kernel_thread) | ||
585 | |||
587 | /*============================================================================= | 586 | /*============================================================================= |
588 | * SWI handler | 587 | * SWI handler |
589 | *----------------------------------------------------------------------------- | 588 | *----------------------------------------------------------------------------- |
@@ -669,11 +668,6 @@ __cr_alignment: | |||
669 | #endif | 668 | #endif |
670 | .ltorg | 669 | .ltorg |
671 | 670 | ||
672 | ENTRY(sys_execve) | ||
673 | add r3, sp, #S_OFF | ||
674 | b __sys_execve | ||
675 | ENDPROC(sys_execve) | ||
676 | |||
677 | ENTRY(sys_clone) | 671 | ENTRY(sys_clone) |
678 | add ip, sp, #S_OFF | 672 | add ip, sp, #S_OFF |
679 | stw ip, [sp+], #4 | 673 | stw ip, [sp+], #4 |
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c index b008586dad75..a8fe265ce2c0 100644 --- a/arch/unicore32/kernel/process.c +++ b/arch/unicore32/kernel/process.c | |||
@@ -258,6 +258,7 @@ void release_thread(struct task_struct *dead_task) | |||
258 | } | 258 | } |
259 | 259 | ||
260 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); | 260 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); |
261 | asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread"); | ||
261 | 262 | ||
262 | int | 263 | int |
263 | copy_thread(unsigned long clone_flags, unsigned long stack_start, | 264 | copy_thread(unsigned long clone_flags, unsigned long stack_start, |
@@ -266,17 +267,22 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, | |||
266 | struct thread_info *thread = task_thread_info(p); | 267 | struct thread_info *thread = task_thread_info(p); |
267 | struct pt_regs *childregs = task_pt_regs(p); | 268 | struct pt_regs *childregs = task_pt_regs(p); |
268 | 269 | ||
269 | *childregs = *regs; | ||
270 | childregs->UCreg_00 = 0; | ||
271 | childregs->UCreg_sp = stack_start; | ||
272 | |||
273 | memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); | 270 | memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); |
274 | thread->cpu_context.sp = (unsigned long)childregs; | 271 | thread->cpu_context.sp = (unsigned long)childregs; |
275 | thread->cpu_context.pc = (unsigned long)ret_from_fork; | 272 | if (unlikely(!regs)) { |
276 | 273 | thread->cpu_context.pc = (unsigned long)ret_from_kernel_thread; | |
277 | if (clone_flags & CLONE_SETTLS) | 274 | thread->cpu_context.r4 = stack_start; |
278 | childregs->UCreg_16 = regs->UCreg_03; | 275 | thread->cpu_context.r5 = stk_sz; |
276 | memset(childregs, 0, sizeof(struct pt_regs)); | ||
277 | } else { | ||
278 | thread->cpu_context.pc = (unsigned long)ret_from_fork; | ||
279 | *childregs = *regs; | ||
280 | childregs->UCreg_00 = 0; | ||
281 | childregs->UCreg_sp = stack_start; | ||
279 | 282 | ||
283 | if (clone_flags & CLONE_SETTLS) | ||
284 | childregs->UCreg_16 = regs->UCreg_03; | ||
285 | } | ||
280 | return 0; | 286 | return 0; |
281 | } | 287 | } |
282 | 288 | ||
@@ -305,42 +311,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fp) | |||
305 | } | 311 | } |
306 | EXPORT_SYMBOL(dump_fpu); | 312 | EXPORT_SYMBOL(dump_fpu); |
307 | 313 | ||
308 | /* | ||
309 | * Shuffle the argument into the correct register before calling the | ||
310 | * thread function. r1 is the thread argument, r2 is the pointer to | ||
311 | * the thread function, and r3 points to the exit function. | ||
312 | */ | ||
313 | asm(".pushsection .text\n" | ||
314 | " .align\n" | ||
315 | " .type kernel_thread_helper, #function\n" | ||
316 | "kernel_thread_helper:\n" | ||
317 | " mov.a asr, r7\n" | ||
318 | " mov r0, r4\n" | ||
319 | " mov lr, r6\n" | ||
320 | " mov pc, r5\n" | ||
321 | " .size kernel_thread_helper, . - kernel_thread_helper\n" | ||
322 | " .popsection"); | ||
323 | |||
324 | /* | ||
325 | * Create a kernel thread. | ||
326 | */ | ||
327 | pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) | ||
328 | { | ||
329 | struct pt_regs regs; | ||
330 | |||
331 | memset(&regs, 0, sizeof(regs)); | ||
332 | |||
333 | regs.UCreg_04 = (unsigned long)arg; | ||
334 | regs.UCreg_05 = (unsigned long)fn; | ||
335 | regs.UCreg_06 = (unsigned long)do_exit; | ||
336 | regs.UCreg_07 = PRIV_MODE; | ||
337 | regs.UCreg_pc = (unsigned long)kernel_thread_helper; | ||
338 | regs.UCreg_asr = regs.UCreg_07 | PSR_I_BIT; | ||
339 | |||
340 | return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); | ||
341 | } | ||
342 | EXPORT_SYMBOL(kernel_thread); | ||
343 | |||
344 | unsigned long get_wchan(struct task_struct *p) | 314 | unsigned long get_wchan(struct task_struct *p) |
345 | { | 315 | { |
346 | struct stackframe frame; | 316 | struct stackframe frame; |
diff --git a/arch/unicore32/kernel/setup.h b/arch/unicore32/kernel/setup.h index f23955028a18..30f749da8f73 100644 --- a/arch/unicore32/kernel/setup.h +++ b/arch/unicore32/kernel/setup.h | |||
@@ -30,4 +30,10 @@ extern char __vectors_start[], __vectors_end[]; | |||
30 | extern void kernel_thread_helper(void); | 30 | extern void kernel_thread_helper(void); |
31 | 31 | ||
32 | extern void __init early_signal_init(void); | 32 | extern void __init early_signal_init(void); |
33 | |||
34 | extern asmlinkage void __backtrace(void); | ||
35 | extern asmlinkage void c_backtrace(unsigned long fp, int pmode); | ||
36 | |||
37 | extern void __show_regs(struct pt_regs *); | ||
38 | |||
33 | #endif | 39 | #endif |
diff --git a/arch/unicore32/kernel/sys.c b/arch/unicore32/kernel/sys.c index fabdee96110b..9680134b31f0 100644 --- a/arch/unicore32/kernel/sys.c +++ b/arch/unicore32/kernel/sys.c | |||
@@ -42,69 +42,6 @@ asmlinkage long __sys_clone(unsigned long clone_flags, unsigned long newsp, | |||
42 | parent_tid, child_tid); | 42 | parent_tid, child_tid); |
43 | } | 43 | } |
44 | 44 | ||
45 | /* sys_execve() executes a new program. | ||
46 | * This is called indirectly via a small wrapper | ||
47 | */ | ||
48 | asmlinkage long __sys_execve(const char __user *filename, | ||
49 | const char __user *const __user *argv, | ||
50 | const char __user *const __user *envp, | ||
51 | struct pt_regs *regs) | ||
52 | { | ||
53 | int error; | ||
54 | struct filename *fn; | ||
55 | |||
56 | fn = getname(filename); | ||
57 | error = PTR_ERR(fn); | ||
58 | if (IS_ERR(fn)) | ||
59 | goto out; | ||
60 | error = do_execve(fn->name, argv, envp, regs); | ||
61 | putname(fn); | ||
62 | out: | ||
63 | return error; | ||
64 | } | ||
65 | |||
66 | int kernel_execve(const char *filename, | ||
67 | const char *const argv[], | ||
68 | const char *const envp[]) | ||
69 | { | ||
70 | struct pt_regs regs; | ||
71 | int ret; | ||
72 | |||
73 | memset(®s, 0, sizeof(struct pt_regs)); | ||
74 | ret = do_execve(filename, | ||
75 | (const char __user *const __user *)argv, | ||
76 | (const char __user *const __user *)envp, &regs); | ||
77 | if (ret < 0) | ||
78 | goto out; | ||
79 | |||
80 | /* | ||
81 | * Save argc to the register structure for userspace. | ||
82 | */ | ||
83 | regs.UCreg_00 = ret; | ||
84 | |||
85 | /* | ||
86 | * We were successful. We won't be returning to our caller, but | ||
87 | * instead to user space by manipulating the kernel stack. | ||
88 | */ | ||
89 | asm("add r0, %0, %1\n\t" | ||
90 | "mov r1, %2\n\t" | ||
91 | "mov r2, %3\n\t" | ||
92 | "mov r22, #0\n\t" /* not a syscall */ | ||
93 | "mov r23, %0\n\t" /* thread structure */ | ||
94 | "b.l memmove\n\t" /* copy regs to top of stack */ | ||
95 | "mov sp, r0\n\t" /* reposition stack pointer */ | ||
96 | "b ret_to_user" | ||
97 | : | ||
98 | : "r" (current_thread_info()), | ||
99 | "Ir" (THREAD_START_SP - sizeof(regs)), | ||
100 | "r" (®s), | ||
101 | "Ir" (sizeof(regs)) | ||
102 | : "r0", "r1", "r2", "r3", "ip", "lr", "memory"); | ||
103 | |||
104 | out: | ||
105 | return ret; | ||
106 | } | ||
107 | |||
108 | /* Note: used by the compat code even in 64-bit Linux. */ | 45 | /* Note: used by the compat code even in 64-bit Linux. */ |
109 | SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, | 46 | SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, |
110 | unsigned long, prot, unsigned long, flags, | 47 | unsigned long, prot, unsigned long, flags, |
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c index 2eeb9c04cab0..f9b5c10bccee 100644 --- a/arch/unicore32/mm/fault.c +++ b/arch/unicore32/mm/fault.c | |||
@@ -168,7 +168,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma) | |||
168 | } | 168 | } |
169 | 169 | ||
170 | static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr, | 170 | static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr, |
171 | struct task_struct *tsk) | 171 | unsigned int flags, struct task_struct *tsk) |
172 | { | 172 | { |
173 | struct vm_area_struct *vma; | 173 | struct vm_area_struct *vma; |
174 | int fault; | 174 | int fault; |
@@ -194,14 +194,7 @@ good_area: | |||
194 | * If for any reason at all we couldn't handle the fault, make | 194 | * If for any reason at all we couldn't handle the fault, make |
195 | * sure we exit gracefully rather than endlessly redo the fault. | 195 | * sure we exit gracefully rather than endlessly redo the fault. |
196 | */ | 196 | */ |
197 | fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, | 197 | fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags); |
198 | (!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0); | ||
199 | if (unlikely(fault & VM_FAULT_ERROR)) | ||
200 | return fault; | ||
201 | if (fault & VM_FAULT_MAJOR) | ||
202 | tsk->maj_flt++; | ||
203 | else | ||
204 | tsk->min_flt++; | ||
205 | return fault; | 198 | return fault; |
206 | 199 | ||
207 | check_stack: | 200 | check_stack: |
@@ -216,6 +209,8 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
216 | struct task_struct *tsk; | 209 | struct task_struct *tsk; |
217 | struct mm_struct *mm; | 210 | struct mm_struct *mm; |
218 | int fault, sig, code; | 211 | int fault, sig, code; |
212 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | | ||
213 | ((!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0); | ||
219 | 214 | ||
220 | tsk = current; | 215 | tsk = current; |
221 | mm = tsk->mm; | 216 | mm = tsk->mm; |
@@ -236,6 +231,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
236 | if (!user_mode(regs) | 231 | if (!user_mode(regs) |
237 | && !search_exception_tables(regs->UCreg_pc)) | 232 | && !search_exception_tables(regs->UCreg_pc)) |
238 | goto no_context; | 233 | goto no_context; |
234 | retry: | ||
239 | down_read(&mm->mmap_sem); | 235 | down_read(&mm->mmap_sem); |
240 | } else { | 236 | } else { |
241 | /* | 237 | /* |
@@ -251,7 +247,28 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
251 | #endif | 247 | #endif |
252 | } | 248 | } |
253 | 249 | ||
254 | fault = __do_pf(mm, addr, fsr, tsk); | 250 | fault = __do_pf(mm, addr, fsr, flags, tsk); |
251 | |||
252 | /* If we need to retry but a fatal signal is pending, handle the | ||
253 | * signal first. We do not need to release the mmap_sem because | ||
254 | * it would already be released in __lock_page_or_retry in | ||
255 | * mm/filemap.c. */ | ||
256 | if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) | ||
257 | return 0; | ||
258 | |||
259 | if (!(fault & VM_FAULT_ERROR) && (flags & FAULT_FLAG_ALLOW_RETRY)) { | ||
260 | if (fault & VM_FAULT_MAJOR) | ||
261 | tsk->maj_flt++; | ||
262 | else | ||
263 | tsk->min_flt++; | ||
264 | if (fault & VM_FAULT_RETRY) { | ||
265 | /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk | ||
266 | * of starvation. */ | ||
267 | flags &= ~FAULT_FLAG_ALLOW_RETRY; | ||
268 | goto retry; | ||
269 | } | ||
270 | } | ||
271 | |||
255 | up_read(&mm->mmap_sem); | 272 | up_read(&mm->mmap_sem); |
256 | 273 | ||
257 | /* | 274 | /* |
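The fault.c change above switches unicore32 to the common page-fault retry protocol: the first call into handle_mm_fault() carries FAULT_FLAG_ALLOW_RETRY, and if the handler returns VM_FAULT_RETRY the flag is cleared before the single retry so the loop cannot starve. A minimal stand-alone sketch of that control flow, with handle_fault() and the flag values as illustrative stand-ins rather than the kernel's own:

#include <stdio.h>

#define FAULT_FLAG_ALLOW_RETRY 0x1
#define FAULT_FLAG_WRITE       0x2
#define VM_FAULT_RETRY         0x4

/* Stand-in for handle_mm_fault(): asks for a retry exactly once. */
static int handle_fault(unsigned int flags)
{
        static int asked_once;

        if ((flags & FAULT_FLAG_ALLOW_RETRY) && !asked_once) {
                asked_once = 1;
                return VM_FAULT_RETRY;
        }
        return 0;       /* fault resolved */
}

int main(void)
{
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_WRITE;
        int fault;

retry:
        fault = handle_fault(flags);
        if (fault & VM_FAULT_RETRY) {
                /* Clear ALLOW_RETRY so the second pass cannot loop forever. */
                flags &= ~FAULT_FLAG_ALLOW_RETRY;
                goto retry;
        }
        printf("fault handled after retry path, flags=%#x\n", flags);
        return 0;
}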
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index c760e073963e..e87b0cac14b5 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c | |||
@@ -12,6 +12,8 @@ | |||
12 | #include <asm/setup.h> | 12 | #include <asm/setup.h> |
13 | #include <asm/desc.h> | 13 | #include <asm/desc.h> |
14 | 14 | ||
15 | #undef memcpy /* Use memcpy from misc.c */ | ||
16 | |||
15 | #include "eboot.h" | 17 | #include "eboot.h" |
16 | 18 | ||
17 | static efi_system_table_t *sys_table; | 19 | static efi_system_table_t *sys_table; |
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 2a017441b8b2..8c132a625b94 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S | |||
@@ -476,6 +476,3 @@ die: | |||
476 | setup_corrupt: | 476 | setup_corrupt: |
477 | .byte 7 | 477 | .byte 7 |
478 | .string "No setup signature found...\n" | 478 | .string "No setup signature found...\n" |
479 | |||
480 | .data | ||
481 | dummy: .long 0 | ||
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h index 93e1c55f14ab..03dd72957d2f 100644 --- a/arch/x86/include/asm/device.h +++ b/arch/x86/include/asm/device.h | |||
@@ -2,9 +2,6 @@ | |||
2 | #define _ASM_X86_DEVICE_H | 2 | #define _ASM_X86_DEVICE_H |
3 | 3 | ||
4 | struct dev_archdata { | 4 | struct dev_archdata { |
5 | #ifdef CONFIG_ACPI | ||
6 | void *acpi_handle; | ||
7 | #endif | ||
8 | #ifdef CONFIG_X86_DEV_DMA_OPS | 5 | #ifdef CONFIG_X86_DEV_DMA_OPS |
9 | struct dma_map_ops *dma_ops; | 6 | struct dma_map_ops *dma_ops; |
10 | #endif | 7 | #endif |
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index dcfde52979c3..19f16ebaf4fa 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h | |||
@@ -205,21 +205,14 @@ static inline bool user_64bit_mode(struct pt_regs *regs) | |||
205 | } | 205 | } |
206 | #endif | 206 | #endif |
207 | 207 | ||
208 | /* | ||
209 | * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode | ||
210 | * when it traps. The previous stack will be directly underneath the saved | ||
211 | * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'. | ||
212 | * | ||
213 | * This is valid only for kernel mode traps. | ||
214 | */ | ||
215 | static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) | ||
216 | { | ||
217 | #ifdef CONFIG_X86_32 | 208 | #ifdef CONFIG_X86_32 |
218 | return (unsigned long)(&regs->sp); | 209 | extern unsigned long kernel_stack_pointer(struct pt_regs *regs); |
219 | #else | 210 | #else |
211 | static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) | ||
212 | { | ||
220 | return regs->sp; | 213 | return regs->sp; |
221 | #endif | ||
222 | } | 214 | } |
215 | #endif | ||
223 | 216 | ||
224 | #define GET_IP(regs) ((regs)->ip) | 217 | #define GET_IP(regs) ((regs)->ip) |
225 | #define GET_FP(regs) ((regs)->bp) | 218 | #define GET_FP(regs) ((regs)->bp) |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index e651f7a589ac..e48cafcf92ae 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -574,6 +574,12 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) | |||
574 | 574 | ||
575 | return irq; | 575 | return irq; |
576 | } | 576 | } |
577 | EXPORT_SYMBOL_GPL(acpi_register_gsi); | ||
578 | |||
579 | void acpi_unregister_gsi(u32 gsi) | ||
580 | { | ||
581 | } | ||
582 | EXPORT_SYMBOL_GPL(acpi_unregister_gsi); | ||
577 | 583 | ||
578 | void __init acpi_set_irq_model_pic(void) | 584 | void __init acpi_set_irq_model_pic(void) |
579 | { | 585 | { |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index f7e98a2c0d12..1b7d1656a042 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -631,6 +631,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
631 | } | 631 | } |
632 | } | 632 | } |
633 | 633 | ||
634 | /* | ||
635 | * The way access filter has a performance penalty on some workloads. | ||
636 | * Disable it on the affected CPUs. | ||
637 | */ | ||
638 | if ((c->x86 == 0x15) && | ||
639 | (c->x86_model >= 0x02) && (c->x86_model < 0x20)) { | ||
640 | u64 val; | ||
641 | |||
642 | if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) { | ||
643 | val |= 0x1E; | ||
644 | wrmsrl_safe(0xc0011021, val); | ||
645 | } | ||
646 | } | ||
647 | |||
634 | cpu_detect_cache_sizes(c); | 648 | cpu_detect_cache_sizes(c); |
635 | 649 | ||
636 | /* Multi core CPU? */ | 650 | /* Multi core CPU? */ |
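The init_amd() hunk above is a guarded read-modify-write of MSR 0xc0011021: only if the read succeeds and none of bits 1-4 (mask 0x1E) are set does it set them to disable the way access filter. A small stand-alone sketch of that bit logic, with rd()/wr() as stand-ins for rdmsrl_safe()/wrmsrl_safe() and a plain variable standing in for the MSR:

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_msr;       /* stands in for MSR 0xc0011021 */

static int rd(uint64_t *val) { *val = fake_msr; return 0; }     /* 0 means success */
static int wr(uint64_t val)  { fake_msr = val;  return 0; }

int main(void)
{
        uint64_t val;

        /* Write back only if the read worked and bits 0x1E are all clear. */
        if (!rd(&val) && !(val & 0x1E)) {
                val |= 0x1E;
                wr(val);
        }
        printf("msr = %#llx\n", (unsigned long long)fake_msr);
        return 0;
}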
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 698b6ec12e0f..1ac581f38dfa 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * | 6 | * |
7 | * Written by Jacob Shin - AMD, Inc. | 7 | * Written by Jacob Shin - AMD, Inc. |
8 | * | 8 | * |
9 | * Support: borislav.petkov@amd.com | 9 | * Maintained by: Borislav Petkov <bp@alien8.de> |
10 | * | 10 | * |
11 | * April 2006 | 11 | * April 2006 |
12 | * - added support for AMD Family 0x10 processors | 12 | * - added support for AMD Family 0x10 processors |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c index 5f88abf07e9c..4f9a3cbfc4a3 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c | |||
@@ -285,34 +285,39 @@ void cmci_clear(void) | |||
285 | raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); | 285 | raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); |
286 | } | 286 | } |
287 | 287 | ||
288 | static long cmci_rediscover_work_func(void *arg) | ||
289 | { | ||
290 | int banks; | ||
291 | |||
292 | /* Recheck banks in case CPUs don't all have the same */ | ||
293 | if (cmci_supported(&banks)) | ||
294 | cmci_discover(banks); | ||
295 | |||
296 | return 0; | ||
297 | } | ||
298 | |||
288 | /* | 299 | /* |
289 | * After a CPU went down cycle through all the others and rediscover | 300 | * After a CPU went down cycle through all the others and rediscover |
290 | * Must run in process context. | 301 | * Must run in process context. |
291 | */ | 302 | */ |
292 | void cmci_rediscover(int dying) | 303 | void cmci_rediscover(int dying) |
293 | { | 304 | { |
294 | int banks; | 305 | int cpu, banks; |
295 | int cpu; | ||
296 | cpumask_var_t old; | ||
297 | 306 | ||
298 | if (!cmci_supported(&banks)) | 307 | if (!cmci_supported(&banks)) |
299 | return; | 308 | return; |
300 | if (!alloc_cpumask_var(&old, GFP_KERNEL)) | ||
301 | return; | ||
302 | cpumask_copy(old, ¤t->cpus_allowed); | ||
303 | 309 | ||
304 | for_each_online_cpu(cpu) { | 310 | for_each_online_cpu(cpu) { |
305 | if (cpu == dying) | 311 | if (cpu == dying) |
306 | continue; | 312 | continue; |
307 | if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) | 313 | |
314 | if (cpu == smp_processor_id()) { | ||
315 | cmci_rediscover_work_func(NULL); | ||
308 | continue; | 316 | continue; |
309 | /* Recheck banks in case CPUs don't all have the same */ | 317 | } |
310 | if (cmci_supported(&banks)) | ||
311 | cmci_discover(banks); | ||
312 | } | ||
313 | 318 | ||
314 | set_cpus_allowed_ptr(current, old); | 319 | work_on_cpu(cpu, cmci_rediscover_work_func, NULL); |
315 | free_cpumask_var(old); | 320 | } |
316 | } | 321 | } |
317 | 322 | ||
318 | /* | 323 | /* |
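The cmci_rediscover() rework above stops migrating the current task with set_cpus_allowed_ptr() and instead uses work_on_cpu(), which runs the callback on the target CPU's workqueue and waits for it, calling the function directly when the target is the local CPU. A hedged kernel-style sketch of that pattern, with probe_one_cpu() as a made-up callback rather than the driver's:

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

/* Made-up callback; runs on whichever CPU it is invoked or queued on. */
static long probe_one_cpu(void *arg)
{
        return 0;
}

static void probe_online_cpus(int skip)
{
        int cpu;

        for_each_online_cpu(cpu) {
                if (cpu == skip)
                        continue;
                if (cpu == smp_processor_id())
                        probe_one_cpu(NULL);                    /* already on the target CPU */
                else
                        work_on_cpu(cpu, probe_one_cpu, NULL);  /* run there and wait */
        }
}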
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index b51b2c7ee51f..1328fe49a3f1 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -995,8 +995,8 @@ END(interrupt) | |||
995 | */ | 995 | */ |
996 | .p2align CONFIG_X86_L1_CACHE_SHIFT | 996 | .p2align CONFIG_X86_L1_CACHE_SHIFT |
997 | common_interrupt: | 997 | common_interrupt: |
998 | ASM_CLAC | ||
999 | XCPT_FRAME | 998 | XCPT_FRAME |
999 | ASM_CLAC | ||
1000 | addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ | 1000 | addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ |
1001 | interrupt do_IRQ | 1001 | interrupt do_IRQ |
1002 | /* 0(%rsp): old_rsp-ARGOFFSET */ | 1002 | /* 0(%rsp): old_rsp-ARGOFFSET */ |
@@ -1135,8 +1135,8 @@ END(common_interrupt) | |||
1135 | */ | 1135 | */ |
1136 | .macro apicinterrupt num sym do_sym | 1136 | .macro apicinterrupt num sym do_sym |
1137 | ENTRY(\sym) | 1137 | ENTRY(\sym) |
1138 | ASM_CLAC | ||
1139 | INTR_FRAME | 1138 | INTR_FRAME |
1139 | ASM_CLAC | ||
1140 | pushq_cfi $~(\num) | 1140 | pushq_cfi $~(\num) |
1141 | .Lcommon_\sym: | 1141 | .Lcommon_\sym: |
1142 | interrupt \do_sym | 1142 | interrupt \do_sym |
@@ -1190,8 +1190,8 @@ apicinterrupt IRQ_WORK_VECTOR \ | |||
1190 | */ | 1190 | */ |
1191 | .macro zeroentry sym do_sym | 1191 | .macro zeroentry sym do_sym |
1192 | ENTRY(\sym) | 1192 | ENTRY(\sym) |
1193 | ASM_CLAC | ||
1194 | INTR_FRAME | 1193 | INTR_FRAME |
1194 | ASM_CLAC | ||
1195 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1195 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1196 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ | 1196 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ |
1197 | subq $ORIG_RAX-R15, %rsp | 1197 | subq $ORIG_RAX-R15, %rsp |
@@ -1208,8 +1208,8 @@ END(\sym) | |||
1208 | 1208 | ||
1209 | .macro paranoidzeroentry sym do_sym | 1209 | .macro paranoidzeroentry sym do_sym |
1210 | ENTRY(\sym) | 1210 | ENTRY(\sym) |
1211 | ASM_CLAC | ||
1212 | INTR_FRAME | 1211 | INTR_FRAME |
1212 | ASM_CLAC | ||
1213 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1213 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1214 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ | 1214 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ |
1215 | subq $ORIG_RAX-R15, %rsp | 1215 | subq $ORIG_RAX-R15, %rsp |
@@ -1227,8 +1227,8 @@ END(\sym) | |||
1227 | #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8) | 1227 | #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8) |
1228 | .macro paranoidzeroentry_ist sym do_sym ist | 1228 | .macro paranoidzeroentry_ist sym do_sym ist |
1229 | ENTRY(\sym) | 1229 | ENTRY(\sym) |
1230 | ASM_CLAC | ||
1231 | INTR_FRAME | 1230 | INTR_FRAME |
1231 | ASM_CLAC | ||
1232 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1232 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1233 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ | 1233 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ |
1234 | subq $ORIG_RAX-R15, %rsp | 1234 | subq $ORIG_RAX-R15, %rsp |
@@ -1247,8 +1247,8 @@ END(\sym) | |||
1247 | 1247 | ||
1248 | .macro errorentry sym do_sym | 1248 | .macro errorentry sym do_sym |
1249 | ENTRY(\sym) | 1249 | ENTRY(\sym) |
1250 | ASM_CLAC | ||
1251 | XCPT_FRAME | 1250 | XCPT_FRAME |
1251 | ASM_CLAC | ||
1252 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1252 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1253 | subq $ORIG_RAX-R15, %rsp | 1253 | subq $ORIG_RAX-R15, %rsp |
1254 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 | 1254 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 |
@@ -1266,8 +1266,8 @@ END(\sym) | |||
1266 | /* error code is on the stack already */ | 1266 | /* error code is on the stack already */ |
1267 | .macro paranoiderrorentry sym do_sym | 1267 | .macro paranoiderrorentry sym do_sym |
1268 | ENTRY(\sym) | 1268 | ENTRY(\sym) |
1269 | ASM_CLAC | ||
1270 | XCPT_FRAME | 1269 | XCPT_FRAME |
1270 | ASM_CLAC | ||
1271 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1271 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1272 | subq $ORIG_RAX-R15, %rsp | 1272 | subq $ORIG_RAX-R15, %rsp |
1273 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 | 1273 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 |
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 7720ff5a9ee2..efdec7cd8e01 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c | |||
@@ -8,8 +8,8 @@ | |||
8 | * Tigran Aivazian <tigran@aivazian.fsnet.co.uk> | 8 | * Tigran Aivazian <tigran@aivazian.fsnet.co.uk> |
9 | * | 9 | * |
10 | * Maintainers: | 10 | * Maintainers: |
11 | * Andreas Herrmann <andreas.herrmann3@amd.com> | 11 | * Andreas Herrmann <herrmann.der.user@googlemail.com> |
12 | * Borislav Petkov <borislav.petkov@amd.com> | 12 | * Borislav Petkov <bp@alien8.de> |
13 | * | 13 | * |
14 | * This driver allows to upgrade microcode on F10h AMD | 14 | * This driver allows to upgrade microcode on F10h AMD |
15 | * CPUs and later. | 15 | * CPUs and later. |
@@ -190,6 +190,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size, | |||
190 | #define F1XH_MPB_MAX_SIZE 2048 | 190 | #define F1XH_MPB_MAX_SIZE 2048 |
191 | #define F14H_MPB_MAX_SIZE 1824 | 191 | #define F14H_MPB_MAX_SIZE 1824 |
192 | #define F15H_MPB_MAX_SIZE 4096 | 192 | #define F15H_MPB_MAX_SIZE 4096 |
193 | #define F16H_MPB_MAX_SIZE 3458 | ||
193 | 194 | ||
194 | switch (c->x86) { | 195 | switch (c->x86) { |
195 | case 0x14: | 196 | case 0x14: |
@@ -198,6 +199,9 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size, | |||
198 | case 0x15: | 199 | case 0x15: |
199 | max_size = F15H_MPB_MAX_SIZE; | 200 | max_size = F15H_MPB_MAX_SIZE; |
200 | break; | 201 | break; |
202 | case 0x16: | ||
203 | max_size = F16H_MPB_MAX_SIZE; | ||
204 | break; | ||
201 | default: | 205 | default: |
202 | max_size = F1XH_MPB_MAX_SIZE; | 206 | max_size = F1XH_MPB_MAX_SIZE; |
203 | break; | 207 | break; |
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index b00b33a18390..5e0596b0632e 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/perf_event.h> | 22 | #include <linux/perf_event.h> |
23 | #include <linux/hw_breakpoint.h> | 23 | #include <linux/hw_breakpoint.h> |
24 | #include <linux/rcupdate.h> | 24 | #include <linux/rcupdate.h> |
25 | #include <linux/module.h> | ||
25 | 26 | ||
26 | #include <asm/uaccess.h> | 27 | #include <asm/uaccess.h> |
27 | #include <asm/pgtable.h> | 28 | #include <asm/pgtable.h> |
@@ -166,6 +167,35 @@ static inline bool invalid_selector(u16 value) | |||
166 | 167 | ||
167 | #define FLAG_MASK FLAG_MASK_32 | 168 | #define FLAG_MASK FLAG_MASK_32 |
168 | 169 | ||
170 | /* | ||
171 | * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode | ||
172 | * when it traps. The previous stack will be directly underneath the saved | ||
173 | * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'. | ||
174 | * | ||
175 | * Now, if the stack is empty, '&regs->sp' is out of range. In this | ||
176 | * case we try to take the previous stack. To always return a non-null | ||
177 | * stack pointer we fall back to regs as stack if no previous stack | ||
178 | * exists. | ||
179 | * | ||
180 | * This is valid only for kernel mode traps. | ||
181 | */ | ||
182 | unsigned long kernel_stack_pointer(struct pt_regs *regs) | ||
183 | { | ||
184 | unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1); | ||
185 | unsigned long sp = (unsigned long)&regs->sp; | ||
186 | struct thread_info *tinfo; | ||
187 | |||
188 | if (context == (sp & ~(THREAD_SIZE - 1))) | ||
189 | return sp; | ||
190 | |||
191 | tinfo = (struct thread_info *)context; | ||
192 | if (tinfo->previous_esp) | ||
193 | return tinfo->previous_esp; | ||
194 | |||
195 | return (unsigned long)regs; | ||
196 | } | ||
197 | EXPORT_SYMBOL_GPL(kernel_stack_pointer); | ||
198 | |||
169 | static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) | 199 | static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) |
170 | { | 200 | { |
171 | BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); | 201 | BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); |
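The kernel_stack_pointer() helper moved out of line above relies on one invariant: a task's kernel stack occupies a THREAD_SIZE-aligned block, so masking any address with ~(THREAD_SIZE - 1) yields the block base, and two addresses lie on the same stack exactly when their bases match. A small stand-alone illustration of that mask, with THREAD_SIZE and the heap buffer as stand-ins for the real per-task stack:

#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE 8192UL      /* illustrative; the real value is per-architecture */

static unsigned long region_base(const void *p)
{
        return (unsigned long)p & ~(THREAD_SIZE - 1);
}

int main(void)
{
        /* Carve a THREAD_SIZE-aligned block out of a larger allocation. */
        unsigned char *raw = malloc(3 * THREAD_SIZE);
        unsigned char *stack, *inside, *outside;

        if (!raw)
                return 1;
        stack = (unsigned char *)(((unsigned long)raw + THREAD_SIZE - 1)
                                  & ~(THREAD_SIZE - 1));
        inside  = stack + 100;                  /* same THREAD_SIZE block */
        outside = stack + THREAD_SIZE + 16;     /* next block over        */

        printf("inside  shares the stack block: %d\n",
               region_base(inside) == region_base(stack));
        printf("outside shares the stack block: %d\n",
               region_base(outside) == region_base(stack));
        free(raw);
        return 0;
}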
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index a10e46016851..58fc51488828 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h | |||
@@ -24,6 +24,9 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) | |||
24 | { | 24 | { |
25 | struct kvm_cpuid_entry2 *best; | 25 | struct kvm_cpuid_entry2 *best; |
26 | 26 | ||
27 | if (!static_cpu_has(X86_FEATURE_XSAVE)) | ||
28 | return 0; | ||
29 | |||
27 | best = kvm_find_cpuid_entry(vcpu, 1, 0); | 30 | best = kvm_find_cpuid_entry(vcpu, 1, 0); |
28 | return best && (best->ecx & bit(X86_FEATURE_XSAVE)); | 31 | return best && (best->ecx & bit(X86_FEATURE_XSAVE)); |
29 | } | 32 | } |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index ad6b1dd06f8b..f85815945fc6 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -6549,19 +6549,22 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu) | |||
6549 | } | 6549 | } |
6550 | } | 6550 | } |
6551 | 6551 | ||
6552 | exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); | ||
6553 | /* Exposing INVPCID only when PCID is exposed */ | 6552 | /* Exposing INVPCID only when PCID is exposed */ |
6554 | best = kvm_find_cpuid_entry(vcpu, 0x7, 0); | 6553 | best = kvm_find_cpuid_entry(vcpu, 0x7, 0); |
6555 | if (vmx_invpcid_supported() && | 6554 | if (vmx_invpcid_supported() && |
6556 | best && (best->ebx & bit(X86_FEATURE_INVPCID)) && | 6555 | best && (best->ebx & bit(X86_FEATURE_INVPCID)) && |
6557 | guest_cpuid_has_pcid(vcpu)) { | 6556 | guest_cpuid_has_pcid(vcpu)) { |
6557 | exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); | ||
6558 | exec_control |= SECONDARY_EXEC_ENABLE_INVPCID; | 6558 | exec_control |= SECONDARY_EXEC_ENABLE_INVPCID; |
6559 | vmcs_write32(SECONDARY_VM_EXEC_CONTROL, | 6559 | vmcs_write32(SECONDARY_VM_EXEC_CONTROL, |
6560 | exec_control); | 6560 | exec_control); |
6561 | } else { | 6561 | } else { |
6562 | exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; | 6562 | if (cpu_has_secondary_exec_ctrls()) { |
6563 | vmcs_write32(SECONDARY_VM_EXEC_CONTROL, | 6563 | exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); |
6564 | exec_control); | 6564 | exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; |
6565 | vmcs_write32(SECONDARY_VM_EXEC_CONTROL, | ||
6566 | exec_control); | ||
6567 | } | ||
6565 | if (best) | 6568 | if (best) |
6566 | best->ebx &= ~bit(X86_FEATURE_INVPCID); | 6569 | best->ebx &= ~bit(X86_FEATURE_INVPCID); |
6567 | } | 6570 | } |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 224a7e78cb6c..4f7641756be2 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -5781,6 +5781,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | |||
5781 | int pending_vec, max_bits, idx; | 5781 | int pending_vec, max_bits, idx; |
5782 | struct desc_ptr dt; | 5782 | struct desc_ptr dt; |
5783 | 5783 | ||
5784 | if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE)) | ||
5785 | return -EINVAL; | ||
5786 | |||
5784 | dt.size = sregs->idt.limit; | 5787 | dt.size = sregs->idt.limit; |
5785 | dt.address = sregs->idt.base; | 5788 | dt.address = sregs->idt.base; |
5786 | kvm_x86_ops->set_idt(vcpu, &dt); | 5789 | kvm_x86_ops->set_idt(vcpu, &dt); |
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 0777f042e400..60f926cd8b0e 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
@@ -197,7 +197,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, | |||
197 | } | 197 | } |
198 | 198 | ||
199 | if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 | 199 | if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 |
200 | || vmflag == VM_HUGETLB) { | 200 | || vmflag & VM_HUGETLB) { |
201 | local_flush_tlb(); | 201 | local_flush_tlb(); |
202 | goto flush_all; | 202 | goto flush_all; |
203 | } | 203 | } |
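The one-character tlb.c fix above replaces an equality test with a bitmask test: vmflag is a bitfield, so VM_HUGETLB must be checked with &, while == only matches when no other flag bit happens to be set. A tiny stand-alone demonstration, using illustrative flag values rather than the kernel's:

#include <stdio.h>

#define VM_READ    0x1
#define VM_WRITE   0x2
#define VM_HUGETLB 0x400000     /* illustrative values only */

int main(void)
{
        unsigned long vmflag = VM_READ | VM_WRITE | VM_HUGETLB;

        printf("vmflag == VM_HUGETLB : %d\n", vmflag == VM_HUGETLB);    /* 0: misses the flag */
        printf("vmflag &  VM_HUGETLB : %d\n", !!(vmflag & VM_HUGETLB)); /* 1: finds the flag  */
        return 0;
}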
diff --git a/arch/x86/pci/ce4100.c b/arch/x86/pci/ce4100.c index 41bd2a2d2c50..b914e20b5a00 100644 --- a/arch/x86/pci/ce4100.c +++ b/arch/x86/pci/ce4100.c | |||
@@ -115,6 +115,16 @@ static void sata_revid_read(struct sim_dev_reg *reg, u32 *value) | |||
115 | reg_read(reg, value); | 115 | reg_read(reg, value); |
116 | } | 116 | } |
117 | 117 | ||
118 | static void reg_noirq_read(struct sim_dev_reg *reg, u32 *value) | ||
119 | { | ||
120 | unsigned long flags; | ||
121 | |||
122 | raw_spin_lock_irqsave(&pci_config_lock, flags); | ||
123 | /* force interrupt pin value to 0 */ | ||
124 | *value = reg->sim_reg.value & 0xfff00ff; | ||
125 | raw_spin_unlock_irqrestore(&pci_config_lock, flags); | ||
126 | } | ||
127 | |||
118 | static struct sim_dev_reg bus1_fixups[] = { | 128 | static struct sim_dev_reg bus1_fixups[] = { |
119 | DEFINE_REG(2, 0, 0x10, (16*MB), reg_init, reg_read, reg_write) | 129 | DEFINE_REG(2, 0, 0x10, (16*MB), reg_init, reg_read, reg_write) |
120 | DEFINE_REG(2, 0, 0x14, (256), reg_init, reg_read, reg_write) | 130 | DEFINE_REG(2, 0, 0x14, (256), reg_init, reg_read, reg_write) |
@@ -144,6 +154,7 @@ static struct sim_dev_reg bus1_fixups[] = { | |||
144 | DEFINE_REG(11, 5, 0x10, (64*KB), reg_init, reg_read, reg_write) | 154 | DEFINE_REG(11, 5, 0x10, (64*KB), reg_init, reg_read, reg_write) |
145 | DEFINE_REG(11, 6, 0x10, (256), reg_init, reg_read, reg_write) | 155 | DEFINE_REG(11, 6, 0x10, (256), reg_init, reg_read, reg_write) |
146 | DEFINE_REG(11, 7, 0x10, (64*KB), reg_init, reg_read, reg_write) | 156 | DEFINE_REG(11, 7, 0x10, (64*KB), reg_init, reg_read, reg_write) |
157 | DEFINE_REG(11, 7, 0x3c, 256, reg_init, reg_noirq_read, reg_write) | ||
147 | DEFINE_REG(12, 0, 0x10, (128*KB), reg_init, reg_read, reg_write) | 158 | DEFINE_REG(12, 0, 0x10, (128*KB), reg_init, reg_read, reg_write) |
148 | DEFINE_REG(12, 0, 0x14, (256), reg_init, reg_read, reg_write) | 159 | DEFINE_REG(12, 0, 0x14, (256), reg_init, reg_read, reg_write) |
149 | DEFINE_REG(12, 1, 0x10, (1024), reg_init, reg_read, reg_write) | 160 | DEFINE_REG(12, 1, 0x10, (1024), reg_init, reg_read, reg_write) |
@@ -161,8 +172,10 @@ static struct sim_dev_reg bus1_fixups[] = { | |||
161 | DEFINE_REG(16, 0, 0x10, (64*KB), reg_init, reg_read, reg_write) | 172 | DEFINE_REG(16, 0, 0x10, (64*KB), reg_init, reg_read, reg_write) |
162 | DEFINE_REG(16, 0, 0x14, (64*MB), reg_init, reg_read, reg_write) | 173 | DEFINE_REG(16, 0, 0x14, (64*MB), reg_init, reg_read, reg_write) |
163 | DEFINE_REG(16, 0, 0x18, (64*MB), reg_init, reg_read, reg_write) | 174 | DEFINE_REG(16, 0, 0x18, (64*MB), reg_init, reg_read, reg_write) |
175 | DEFINE_REG(16, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write) | ||
164 | DEFINE_REG(17, 0, 0x10, (128*KB), reg_init, reg_read, reg_write) | 176 | DEFINE_REG(17, 0, 0x10, (128*KB), reg_init, reg_read, reg_write) |
165 | DEFINE_REG(18, 0, 0x10, (1*KB), reg_init, reg_read, reg_write) | 177 | DEFINE_REG(18, 0, 0x10, (1*KB), reg_init, reg_read, reg_write) |
178 | DEFINE_REG(18, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write) | ||
166 | }; | 179 | }; |
167 | 180 | ||
168 | static void __init init_sim_regs(void) | 181 | static void __init init_sim_regs(void) |
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c index 4c61b52191eb..92525cb8e54c 100644 --- a/arch/x86/platform/ce4100/ce4100.c +++ b/arch/x86/platform/ce4100/ce4100.c | |||
@@ -21,12 +21,25 @@ | |||
21 | #include <asm/i8259.h> | 21 | #include <asm/i8259.h> |
22 | #include <asm/io.h> | 22 | #include <asm/io.h> |
23 | #include <asm/io_apic.h> | 23 | #include <asm/io_apic.h> |
24 | #include <asm/emergency-restart.h> | ||
24 | 25 | ||
25 | static int ce4100_i8042_detect(void) | 26 | static int ce4100_i8042_detect(void) |
26 | { | 27 | { |
27 | return 0; | 28 | return 0; |
28 | } | 29 | } |
29 | 30 | ||
31 | /* | ||
32 | * The CE4100 platform has an internal 8051 Microcontroller which is | ||
33 | * responsible for signaling to the external Power Management Unit the | ||
34 | * intention to reset, reboot or power off the system. This 8051 device has | ||
35 | * its command register mapped at I/O port 0xcf9 and the value 0x4 is used | ||
36 | * to power off the system. | ||
37 | */ | ||
38 | static void ce4100_power_off(void) | ||
39 | { | ||
40 | outb(0x4, 0xcf9); | ||
41 | } | ||
42 | |||
30 | #ifdef CONFIG_SERIAL_8250 | 43 | #ifdef CONFIG_SERIAL_8250 |
31 | 44 | ||
32 | static unsigned int mem_serial_in(struct uart_port *p, int offset) | 45 | static unsigned int mem_serial_in(struct uart_port *p, int offset) |
@@ -139,8 +152,19 @@ void __init x86_ce4100_early_setup(void) | |||
139 | x86_init.mpparse.find_smp_config = x86_init_noop; | 152 | x86_init.mpparse.find_smp_config = x86_init_noop; |
140 | x86_init.pci.init = ce4100_pci_init; | 153 | x86_init.pci.init = ce4100_pci_init; |
141 | 154 | ||
155 | /* | ||
156 | * By default, the reboot method is ACPI which is supported by the | ||
157 | * CE4100 bootloader CEFDK using FADT.ResetReg Address and ResetValue | ||
158 | * the bootloader will however issue a system power off instead of | ||
159 | * reboot. By using BOOT_KBD we ensure proper system reboot as | ||
160 | * expected. | ||
161 | */ | ||
162 | reboot_type = BOOT_KBD; | ||
163 | |||
142 | #ifdef CONFIG_X86_IO_APIC | 164 | #ifdef CONFIG_X86_IO_APIC |
143 | x86_init.pci.init_irq = sdv_pci_init; | 165 | x86_init.pci.init_irq = sdv_pci_init; |
144 | x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc_nocheck; | 166 | x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc_nocheck; |
145 | #endif | 167 | #endif |
168 | |||
169 | pm_power_off = ce4100_power_off; | ||
146 | } | 170 | } |
diff --git a/block/blk-exec.c b/block/blk-exec.c index 8b6dc5bd4dd0..f71eac35c1b9 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c | |||
@@ -52,11 +52,17 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, | |||
52 | rq_end_io_fn *done) | 52 | rq_end_io_fn *done) |
53 | { | 53 | { |
54 | int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; | 54 | int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; |
55 | bool is_pm_resume; | ||
55 | 56 | ||
56 | WARN_ON(irqs_disabled()); | 57 | WARN_ON(irqs_disabled()); |
57 | 58 | ||
58 | rq->rq_disk = bd_disk; | 59 | rq->rq_disk = bd_disk; |
59 | rq->end_io = done; | 60 | rq->end_io = done; |
61 | /* | ||
62 | * need to check this before __blk_run_queue(), because rq can | ||
63 | * be freed before that returns. | ||
64 | */ | ||
65 | is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME; | ||
60 | 66 | ||
61 | spin_lock_irq(q->queue_lock); | 67 | spin_lock_irq(q->queue_lock); |
62 | 68 | ||
@@ -71,7 +77,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, | |||
71 | __elv_add_request(q, rq, where); | 77 | __elv_add_request(q, rq, where); |
72 | __blk_run_queue(q); | 78 | __blk_run_queue(q); |
73 | /* the queue is stopped so it won't be run */ | 79 | /* the queue is stopped so it won't be run */ |
74 | if (rq->cmd_type == REQ_TYPE_PM_RESUME) | 80 | if (is_pm_resume) |
75 | q->request_fn(q); | 81 | q->request_fn(q); |
76 | spin_unlock_irq(q->queue_lock); | 82 | spin_unlock_irq(q->queue_lock); |
77 | } | 83 | } |
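The blk-exec.c hunk above closes a use-after-free window: once __blk_run_queue() runs, the request may already have been completed and freed, so the REQ_TYPE_PM_RESUME test is latched into a local bool beforehand. The same defensive pattern as a stand-alone sketch, with struct req, submit() and the type constant as illustrative stand-ins for the block-layer types:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
        int type;
};

#define REQ_TYPE_PM_RESUME 7    /* illustrative */

/* Stand-in for __blk_run_queue(): may complete and free the request. */
static void submit(struct req *rq)
{
        free(rq);
}

int main(void)
{
        struct req *rq = malloc(sizeof(*rq));
        bool is_pm_resume;

        if (!rq)
                return 1;
        rq->type = REQ_TYPE_PM_RESUME;

        /* Latch the answer while rq is still guaranteed to be alive ... */
        is_pm_resume = (rq->type == REQ_TYPE_PM_RESUME);

        submit(rq);             /* rq may be gone once this returns */

        /* ... so the check after submission never touches freed memory. */
        if (is_pm_resume)
                printf("would kick the stopped queue now\n");
        return 0;
}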
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 119d58db8342..0300bf612946 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
@@ -181,6 +181,12 @@ config ACPI_DOCK | |||
181 | This driver supports ACPI-controlled docking stations and removable | 181 | This driver supports ACPI-controlled docking stations and removable |
182 | drive bays such as the IBM Ultrabay and the Dell Module Bay. | 182 | drive bays such as the IBM Ultrabay and the Dell Module Bay. |
183 | 183 | ||
184 | config ACPI_I2C | ||
185 | def_tristate I2C | ||
186 | depends on I2C | ||
187 | help | ||
188 | ACPI I2C enumeration support. | ||
189 | |||
184 | config ACPI_PROCESSOR | 190 | config ACPI_PROCESSOR |
185 | tristate "Processor" | 191 | tristate "Processor" |
186 | select THERMAL | 192 | select THERMAL |
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 82422fe90f81..2a4502becd13 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile | |||
@@ -21,9 +21,10 @@ obj-y += acpi.o \ | |||
21 | acpi-y += osl.o utils.o reboot.o | 21 | acpi-y += osl.o utils.o reboot.o |
22 | acpi-y += nvs.o | 22 | acpi-y += nvs.o |
23 | 23 | ||
24 | # sleep related files | 24 | # Power management related files |
25 | acpi-y += wakeup.o | 25 | acpi-y += wakeup.o |
26 | acpi-y += sleep.o | 26 | acpi-y += sleep.o |
27 | acpi-$(CONFIG_PM) += device_pm.o | ||
27 | acpi-$(CONFIG_ACPI_SLEEP) += proc.o | 28 | acpi-$(CONFIG_ACPI_SLEEP) += proc.o |
28 | 29 | ||
29 | 30 | ||
@@ -32,10 +33,12 @@ acpi-$(CONFIG_ACPI_SLEEP) += proc.o | |||
32 | # | 33 | # |
33 | acpi-y += bus.o glue.o | 34 | acpi-y += bus.o glue.o |
34 | acpi-y += scan.o | 35 | acpi-y += scan.o |
36 | acpi-y += resource.o | ||
35 | acpi-y += processor_core.o | 37 | acpi-y += processor_core.o |
36 | acpi-y += ec.o | 38 | acpi-y += ec.o |
37 | acpi-$(CONFIG_ACPI_DOCK) += dock.o | 39 | acpi-$(CONFIG_ACPI_DOCK) += dock.o |
38 | acpi-y += pci_root.o pci_link.o pci_irq.o pci_bind.o | 40 | acpi-y += pci_root.o pci_link.o pci_irq.o pci_bind.o |
41 | acpi-y += acpi_platform.o | ||
39 | acpi-y += power.o | 42 | acpi-y += power.o |
40 | acpi-y += event.o | 43 | acpi-y += event.o |
41 | acpi-y += sysfs.o | 44 | acpi-y += sysfs.o |
@@ -67,6 +70,7 @@ obj-$(CONFIG_ACPI_HED) += hed.o | |||
67 | obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o | 70 | obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o |
68 | obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o | 71 | obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o |
69 | obj-$(CONFIG_ACPI_BGRT) += bgrt.o | 72 | obj-$(CONFIG_ACPI_BGRT) += bgrt.o |
73 | obj-$(CONFIG_ACPI_I2C) += acpi_i2c.o | ||
70 | 74 | ||
71 | # processor has its own "processor." module_param namespace | 75 | # processor has its own "processor." module_param namespace |
72 | processor-y := processor_driver.o processor_throttling.o | 76 | processor-y := processor_driver.o processor_throttling.o |
diff --git a/drivers/acpi/acpi_i2c.c b/drivers/acpi/acpi_i2c.c new file mode 100644 index 000000000000..82045e3f5cac --- /dev/null +++ b/drivers/acpi/acpi_i2c.c | |||
@@ -0,0 +1,103 @@ | |||
1 | /* | ||
2 | * ACPI I2C enumeration support | ||
3 | * | ||
4 | * Copyright (C) 2012, Intel Corporation | ||
5 | * Author: Mika Westerberg <mika.westerberg@linux.intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/acpi.h> | ||
13 | #include <linux/device.h> | ||
14 | #include <linux/export.h> | ||
15 | #include <linux/i2c.h> | ||
16 | #include <linux/ioport.h> | ||
17 | |||
18 | ACPI_MODULE_NAME("i2c"); | ||
19 | |||
20 | static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data) | ||
21 | { | ||
22 | struct i2c_board_info *info = data; | ||
23 | |||
24 | if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { | ||
25 | struct acpi_resource_i2c_serialbus *sb; | ||
26 | |||
27 | sb = &ares->data.i2c_serial_bus; | ||
28 | if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) { | ||
29 | info->addr = sb->slave_address; | ||
30 | if (sb->access_mode == ACPI_I2C_10BIT_MODE) | ||
31 | info->flags |= I2C_CLIENT_TEN; | ||
32 | } | ||
33 | } else if (info->irq < 0) { | ||
34 | struct resource r; | ||
35 | |||
36 | if (acpi_dev_resource_interrupt(ares, 0, &r)) | ||
37 | info->irq = r.start; | ||
38 | } | ||
39 | |||
40 | /* Tell the ACPI core to skip this resource */ | ||
41 | return 1; | ||
42 | } | ||
43 | |||
44 | static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level, | ||
45 | void *data, void **return_value) | ||
46 | { | ||
47 | struct i2c_adapter *adapter = data; | ||
48 | struct list_head resource_list; | ||
49 | struct i2c_board_info info; | ||
50 | struct acpi_device *adev; | ||
51 | int ret; | ||
52 | |||
53 | if (acpi_bus_get_device(handle, &adev)) | ||
54 | return AE_OK; | ||
55 | if (acpi_bus_get_status(adev) || !adev->status.present) | ||
56 | return AE_OK; | ||
57 | |||
58 | memset(&info, 0, sizeof(info)); | ||
59 | info.acpi_node.handle = handle; | ||
60 | info.irq = -1; | ||
61 | |||
62 | INIT_LIST_HEAD(&resource_list); | ||
63 | ret = acpi_dev_get_resources(adev, &resource_list, | ||
64 | acpi_i2c_add_resource, &info); | ||
65 | acpi_dev_free_resource_list(&resource_list); | ||
66 | |||
67 | if (ret < 0 || !info.addr) | ||
68 | return AE_OK; | ||
69 | |||
70 | strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type)); | ||
71 | if (!i2c_new_device(adapter, &info)) { | ||
72 | dev_err(&adapter->dev, | ||
73 | "failed to add I2C device %s from ACPI\n", | ||
74 | dev_name(&adev->dev)); | ||
75 | } | ||
76 | |||
77 | return AE_OK; | ||
78 | } | ||
79 | |||
80 | /** | ||
81 | * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter | ||
82 | * @adapter: pointer to adapter | ||
83 | * | ||
84 | * Enumerate all I2C slave devices behind this adapter by walking the ACPI | ||
85 | * namespace. When a device is found it will be added to the Linux device | ||
86 | * model and bound to the corresponding ACPI handle. | ||
87 | */ | ||
88 | void acpi_i2c_register_devices(struct i2c_adapter *adapter) | ||
89 | { | ||
90 | acpi_handle handle; | ||
91 | acpi_status status; | ||
92 | |||
93 | handle = ACPI_HANDLE(&adapter->dev); | ||
94 | if (!handle) | ||
95 | return; | ||
96 | |||
97 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, | ||
98 | acpi_i2c_add_device, NULL, | ||
99 | adapter, NULL); | ||
100 | if (ACPI_FAILURE(status)) | ||
101 | dev_warn(&adapter->dev, "failed to enumerate I2C slaves\n"); | ||
102 | } | ||
103 | EXPORT_SYMBOL_GPL(acpi_i2c_register_devices); | ||
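The new acpi_i2c.c above exports a single entry point, acpi_i2c_register_devices(), which walks the ACPI namespace under the adapter's handle and instantiates any I2C slaves described there. A hedged sketch of where an adapter driver might call it, right after registering the adapter; the probe function, the static adapter and its setup are hypothetical, and it assumes the adapter's struct device already carries the controller's ACPI handle so ACPI_HANDLE() resolves:

#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>

static struct i2c_adapter example_adap;         /* set up elsewhere in the driver */

static int example_i2c_probe(struct platform_device *pdev)
{
        int ret;

        ret = i2c_add_adapter(&example_adap);
        if (ret)
                return ret;

        /* Enumerate ACPI-described slaves sitting behind this adapter. */
        acpi_i2c_register_devices(&example_adap);
        return 0;
}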
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c new file mode 100644 index 000000000000..db129b9f52cb --- /dev/null +++ b/drivers/acpi/acpi_platform.c | |||
@@ -0,0 +1,104 @@ | |||
1 | /* | ||
2 | * ACPI support for platform bus type. | ||
3 | * | ||
4 | * Copyright (C) 2012, Intel Corporation | ||
5 | * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> | ||
6 | * Mathias Nyman <mathias.nyman@linux.intel.com> | ||
7 | * Rafael J. Wysocki <rafael.j.wysocki@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #include <linux/acpi.h> | ||
15 | #include <linux/device.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | |||
20 | #include "internal.h" | ||
21 | |||
22 | ACPI_MODULE_NAME("platform"); | ||
23 | |||
24 | /** | ||
25 | * acpi_create_platform_device - Create platform device for ACPI device node | ||
26 | * @adev: ACPI device node to create a platform device for. | ||
27 | * | ||
28 | * Check if the given @adev can be represented as a platform device and, if | ||
29 | * that's the case, create and register a platform device, populate its common | ||
30 | * resources and returns a pointer to it. Otherwise, return %NULL. | ||
31 | * | ||
32 | * The platform device's name will be taken from the @adev's _HID and _UID. | ||
33 | */ | ||
34 | struct platform_device *acpi_create_platform_device(struct acpi_device *adev) | ||
35 | { | ||
36 | struct platform_device *pdev = NULL; | ||
37 | struct acpi_device *acpi_parent; | ||
38 | struct platform_device_info pdevinfo; | ||
39 | struct resource_list_entry *rentry; | ||
40 | struct list_head resource_list; | ||
41 | struct resource *resources; | ||
42 | int count; | ||
43 | |||
44 | /* If the ACPI node already has a physical device attached, skip it. */ | ||
45 | if (adev->physical_node_count) | ||
46 | return NULL; | ||
47 | |||
48 | INIT_LIST_HEAD(&resource_list); | ||
49 | count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); | ||
50 | if (count <= 0) | ||
51 | return NULL; | ||
52 | |||
53 | resources = kmalloc(count * sizeof(struct resource), GFP_KERNEL); | ||
54 | if (!resources) { | ||
55 | dev_err(&adev->dev, "No memory for resources\n"); | ||
56 | acpi_dev_free_resource_list(&resource_list); | ||
57 | return NULL; | ||
58 | } | ||
59 | count = 0; | ||
60 | list_for_each_entry(rentry, &resource_list, node) | ||
61 | resources[count++] = rentry->res; | ||
62 | |||
63 | acpi_dev_free_resource_list(&resource_list); | ||
64 | |||
65 | memset(&pdevinfo, 0, sizeof(pdevinfo)); | ||
66 | /* | ||
67 | * If the ACPI node has a parent and that parent has a physical device | ||
68 | * attached to it, that physical device should be the parent of the | ||
69 | * platform device we are about to create. | ||
70 | */ | ||
71 | pdevinfo.parent = NULL; | ||
72 | acpi_parent = adev->parent; | ||
73 | if (acpi_parent) { | ||
74 | struct acpi_device_physical_node *entry; | ||
75 | struct list_head *list; | ||
76 | |||
77 | mutex_lock(&acpi_parent->physical_node_lock); | ||
78 | list = &acpi_parent->physical_node_list; | ||
79 | if (!list_empty(list)) { | ||
80 | entry = list_first_entry(list, | ||
81 | struct acpi_device_physical_node, | ||
82 | node); | ||
83 | pdevinfo.parent = entry->dev; | ||
84 | } | ||
85 | mutex_unlock(&acpi_parent->physical_node_lock); | ||
86 | } | ||
87 | pdevinfo.name = dev_name(&adev->dev); | ||
88 | pdevinfo.id = -1; | ||
89 | pdevinfo.res = resources; | ||
90 | pdevinfo.num_res = count; | ||
91 | pdevinfo.acpi_node.handle = adev->handle; | ||
92 | pdev = platform_device_register_full(&pdevinfo); | ||
93 | if (IS_ERR(pdev)) { | ||
94 | dev_err(&adev->dev, "platform device creation failed: %ld\n", | ||
95 | PTR_ERR(pdev)); | ||
96 | pdev = NULL; | ||
97 | } else { | ||
98 | dev_dbg(&adev->dev, "created platform device %s\n", | ||
99 | dev_name(&pdev->dev)); | ||
100 | } | ||
101 | |||
102 | kfree(resources); | ||
103 | return pdev; | ||
104 | } | ||
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index 7f1d40797e80..c8bc24bd1f72 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile | |||
@@ -161,3 +161,6 @@ acpi-y += \ | |||
161 | utxfinit.o \ | 161 | utxfinit.o \ |
162 | utxferror.o \ | 162 | utxferror.o \ |
163 | utxfmutex.o | 163 | utxfmutex.o |
164 | |||
165 | acpi-$(ACPI_FUTURE_USAGE) += uttrack.o utcache.o utclib.o | ||
166 | |||
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h index 5e8abb07724f..432a318c9ed1 100644 --- a/drivers/acpi/acpica/acdebug.h +++ b/drivers/acpi/acpica/acdebug.h | |||
@@ -44,17 +44,28 @@ | |||
44 | #ifndef __ACDEBUG_H__ | 44 | #ifndef __ACDEBUG_H__ |
45 | #define __ACDEBUG_H__ | 45 | #define __ACDEBUG_H__ |
46 | 46 | ||
47 | #define ACPI_DEBUG_BUFFER_SIZE 4196 | 47 | #define ACPI_DEBUG_BUFFER_SIZE 0x4000 /* 16K buffer for return objects */ |
48 | 48 | ||
49 | struct command_info { | 49 | struct acpi_db_command_info { |
50 | char *name; /* Command Name */ | 50 | char *name; /* Command Name */ |
51 | u8 min_args; /* Minimum arguments required */ | 51 | u8 min_args; /* Minimum arguments required */ |
52 | }; | 52 | }; |
53 | 53 | ||
54 | struct argument_info { | 54 | struct acpi_db_command_help { |
55 | u8 line_count; /* Number of help lines */ | ||
56 | char *invocation; /* Command Invocation */ | ||
57 | char *description; /* Command Description */ | ||
58 | }; | ||
59 | |||
60 | struct acpi_db_argument_info { | ||
55 | char *name; /* Argument Name */ | 61 | char *name; /* Argument Name */ |
56 | }; | 62 | }; |
57 | 63 | ||
64 | struct acpi_db_execute_walk { | ||
65 | u32 count; | ||
66 | u32 max_count; | ||
67 | }; | ||
68 | |||
58 | #define PARAM_LIST(pl) pl | 69 | #define PARAM_LIST(pl) pl |
59 | #define DBTEST_OUTPUT_LEVEL(lvl) if (acpi_gbl_db_opt_verbose) | 70 | #define DBTEST_OUTPUT_LEVEL(lvl) if (acpi_gbl_db_opt_verbose) |
60 | #define VERBOSE_PRINT(fp) DBTEST_OUTPUT_LEVEL(lvl) {\ | 71 | #define VERBOSE_PRINT(fp) DBTEST_OUTPUT_LEVEL(lvl) {\ |
@@ -77,59 +88,71 @@ acpi_db_single_step(struct acpi_walk_state *walk_state, | |||
77 | /* | 88 | /* |
78 | * dbcmds - debug commands and output routines | 89 | * dbcmds - debug commands and output routines |
79 | */ | 90 | */ |
80 | acpi_status acpi_db_disassemble_method(char *name); | 91 | struct acpi_namespace_node *acpi_db_convert_to_node(char *in_string); |
81 | 92 | ||
82 | void acpi_db_display_table_info(char *table_arg); | 93 | void acpi_db_display_table_info(char *table_arg); |
83 | 94 | ||
84 | void acpi_db_unload_acpi_table(char *table_arg, char *instance_arg); | 95 | void acpi_db_display_template(char *buffer_arg); |
85 | 96 | ||
86 | void | 97 | void acpi_db_unload_acpi_table(char *name); |
87 | acpi_db_set_method_breakpoint(char *location, | ||
88 | struct acpi_walk_state *walk_state, | ||
89 | union acpi_parse_object *op); | ||
90 | 98 | ||
91 | void acpi_db_set_method_call_breakpoint(union acpi_parse_object *op); | 99 | void acpi_db_send_notify(char *name, u32 value); |
92 | 100 | ||
93 | void acpi_db_get_bus_info(void); | 101 | void acpi_db_display_interfaces(char *action_arg, char *interface_name_arg); |
94 | 102 | ||
95 | void acpi_db_disassemble_aml(char *statements, union acpi_parse_object *op); | 103 | acpi_status acpi_db_sleep(char *object_arg); |
96 | 104 | ||
97 | void acpi_db_dump_namespace(char *start_arg, char *depth_arg); | 105 | void acpi_db_display_locks(void); |
98 | 106 | ||
99 | void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg); | 107 | void acpi_db_display_resources(char *object_arg); |
100 | 108 | ||
101 | void acpi_db_send_notify(char *name, u32 value); | 109 | ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_display_gpes(void)) |
110 | |||
111 | void acpi_db_display_handlers(void); | ||
112 | |||
113 | ACPI_HW_DEPENDENT_RETURN_VOID(void | ||
114 | acpi_db_generate_gpe(char *gpe_arg, | ||
115 | char *block_arg)) | ||
116 | |||
117 | /* | ||
118 | * dbmethod - control method commands | ||
119 | */ | ||
120 | void | ||
121 | acpi_db_set_method_breakpoint(char *location, | ||
122 | struct acpi_walk_state *walk_state, | ||
123 | union acpi_parse_object *op); | ||
124 | |||
125 | void acpi_db_set_method_call_breakpoint(union acpi_parse_object *op); | ||
102 | 126 | ||
103 | void acpi_db_set_method_data(char *type_arg, char *index_arg, char *value_arg); | 127 | void acpi_db_set_method_data(char *type_arg, char *index_arg, char *value_arg); |
104 | 128 | ||
105 | acpi_status | 129 | acpi_status acpi_db_disassemble_method(char *name); |
106 | acpi_db_display_objects(char *obj_type_arg, char *display_count_arg); | ||
107 | 130 | ||
108 | void acpi_db_display_interfaces(char *action_arg, char *interface_name_arg); | 131 | void acpi_db_disassemble_aml(char *statements, union acpi_parse_object *op); |
109 | 132 | ||
110 | acpi_status acpi_db_find_name_in_namespace(char *name_arg); | 133 | void acpi_db_batch_execute(char *count_arg); |
111 | 134 | ||
135 | /* | ||
136 | * dbnames - namespace commands | ||
137 | */ | ||
112 | void acpi_db_set_scope(char *name); | 138 | void acpi_db_set_scope(char *name); |
113 | 139 | ||
114 | ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_db_sleep(char *object_arg)) | 140 | void acpi_db_dump_namespace(char *start_arg, char *depth_arg); |
115 | 141 | ||
116 | void acpi_db_find_references(char *object_arg); | 142 | void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg); |
117 | 143 | ||
118 | void acpi_db_display_locks(void); | 144 | acpi_status acpi_db_find_name_in_namespace(char *name_arg); |
119 | 145 | ||
120 | void acpi_db_display_resources(char *object_arg); | 146 | void acpi_db_check_predefined_names(void); |
121 | 147 | ||
122 | ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_display_gpes(void)) | 148 | acpi_status |
149 | acpi_db_display_objects(char *obj_type_arg, char *display_count_arg); | ||
123 | 150 | ||
124 | void acpi_db_check_integrity(void); | 151 | void acpi_db_check_integrity(void); |
125 | 152 | ||
126 | ACPI_HW_DEPENDENT_RETURN_VOID(void | 153 | void acpi_db_find_references(char *object_arg); |
127 | acpi_db_generate_gpe(char *gpe_arg, | ||
128 | char *block_arg)) | ||
129 | |||
130 | void acpi_db_check_predefined_names(void); | ||
131 | 154 | ||
132 | void acpi_db_batch_execute(void); | 155 | void acpi_db_get_bus_info(void); |
133 | 156 | ||
134 | /* | 157 | /* |
135 | * dbdisply - debug display commands | 158 | * dbdisply - debug display commands |
@@ -161,7 +184,8 @@ acpi_db_display_argument_object(union acpi_operand_object *obj_desc, | |||
161 | /* | 184 | /* |
162 | * dbexec - debugger control method execution | 185 | * dbexec - debugger control method execution |
163 | */ | 186 | */ |
164 | void acpi_db_execute(char *name, char **args, u32 flags); | 187 | void |
188 | acpi_db_execute(char *name, char **args, acpi_object_type * types, u32 flags); | ||
165 | 189 | ||
166 | void | 190 | void |
167 | acpi_db_create_execution_threads(char *num_threads_arg, | 191 | acpi_db_create_execution_threads(char *num_threads_arg, |
@@ -175,7 +199,8 @@ u32 acpi_db_get_cache_info(struct acpi_memory_list *cache); | |||
175 | * dbfileio - Debugger file I/O commands | 199 | * dbfileio - Debugger file I/O commands |
176 | */ | 200 | */ |
177 | acpi_object_type | 201 | acpi_object_type |
178 | acpi_db_match_argument(char *user_argument, struct argument_info *arguments); | 202 | acpi_db_match_argument(char *user_argument, |
203 | struct acpi_db_argument_info *arguments); | ||
179 | 204 | ||
180 | void acpi_db_close_debug_file(void); | 205 | void acpi_db_close_debug_file(void); |
181 | 206 | ||
@@ -208,6 +233,11 @@ acpi_db_command_dispatch(char *input_buffer, | |||
208 | 233 | ||
209 | void ACPI_SYSTEM_XFACE acpi_db_execute_thread(void *context); | 234 | void ACPI_SYSTEM_XFACE acpi_db_execute_thread(void *context); |
210 | 235 | ||
236 | acpi_status acpi_db_user_commands(char prompt, union acpi_parse_object *op); | ||
237 | |||
238 | char *acpi_db_get_next_token(char *string, | ||
239 | char **next, acpi_object_type * return_type); | ||
240 | |||
211 | /* | 241 | /* |
212 | * dbstats - Generation and display of ACPI table statistics | 242 | * dbstats - Generation and display of ACPI table statistics |
213 | */ | 243 | */ |
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h index 5935ba6707e2..ed33ebcdaebe 100644 --- a/drivers/acpi/acpica/acdispat.h +++ b/drivers/acpi/acpica/acdispat.h | |||
@@ -309,10 +309,13 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state *walk_state); | |||
309 | acpi_status | 309 | acpi_status |
310 | acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state *walk_state); | 310 | acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state *walk_state); |
311 | 311 | ||
312 | struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union acpi_parse_object | 312 | struct acpi_walk_state * acpi_ds_create_walk_state(acpi_owner_id owner_id, |
313 | *origin, union acpi_operand_object | 313 | union acpi_parse_object |
314 | *mth_desc, struct acpi_thread_state | 314 | *origin, |
315 | *thread); | 315 | union acpi_operand_object |
316 | *mth_desc, | ||
317 | struct acpi_thread_state | ||
318 | *thread); | ||
316 | 319 | ||
317 | acpi_status | 320 | acpi_status |
318 | acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state, | 321 | acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state, |
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index c0a43b38c6a3..e975c6720448 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h | |||
@@ -84,9 +84,11 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info); | |||
84 | 84 | ||
85 | acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); | 85 | acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); |
86 | 86 | ||
87 | acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); | 87 | acpi_status |
88 | acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); | ||
88 | 89 | ||
89 | acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); | 90 | acpi_status |
91 | acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); | ||
90 | 92 | ||
91 | struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, | 93 | struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, |
92 | u32 gpe_number); | 94 | u32 gpe_number); |
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index ce79100fb5eb..64472e4ec329 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h | |||
@@ -70,7 +70,7 @@ | |||
70 | 70 | ||
71 | /* | 71 | /* |
72 | * Enable "slack" in the AML interpreter? Default is FALSE, and the | 72 | * Enable "slack" in the AML interpreter? Default is FALSE, and the |
73 | * interpreter strictly follows the ACPI specification. Setting to TRUE | 73 | * interpreter strictly follows the ACPI specification. Setting to TRUE |
74 | * allows the interpreter to ignore certain errors and/or bad AML constructs. | 74 | * allows the interpreter to ignore certain errors and/or bad AML constructs. |
75 | * | 75 | * |
76 | * Currently, these features are enabled by this flag: | 76 | * Currently, these features are enabled by this flag: |
@@ -155,26 +155,6 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE); | |||
155 | 155 | ||
156 | /***************************************************************************** | 156 | /***************************************************************************** |
157 | * | 157 | * |
158 | * Debug support | ||
159 | * | ||
160 | ****************************************************************************/ | ||
161 | |||
162 | /* Procedure nesting level for debug output */ | ||
163 | |||
164 | extern u32 acpi_gbl_nesting_level; | ||
165 | |||
166 | ACPI_EXTERN u32 acpi_gpe_count; | ||
167 | ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]; | ||
168 | |||
169 | /* Support for dynamic control method tracing mechanism */ | ||
170 | |||
171 | ACPI_EXTERN u32 acpi_gbl_original_dbg_level; | ||
172 | ACPI_EXTERN u32 acpi_gbl_original_dbg_layer; | ||
173 | ACPI_EXTERN u32 acpi_gbl_trace_dbg_level; | ||
174 | ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer; | ||
175 | |||
176 | /***************************************************************************** | ||
177 | * | ||
178 | * ACPI Table globals | 158 | * ACPI Table globals |
179 | * | 159 | * |
180 | ****************************************************************************/ | 160 | ****************************************************************************/ |
@@ -259,15 +239,6 @@ ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE reg | |||
259 | * | 239 | * |
260 | ****************************************************************************/ | 240 | ****************************************************************************/ |
261 | 241 | ||
262 | #ifdef ACPI_DBG_TRACK_ALLOCATIONS | ||
263 | |||
264 | /* Lists for tracking memory allocations */ | ||
265 | |||
266 | ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list; | ||
267 | ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list; | ||
268 | ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats; | ||
269 | #endif | ||
270 | |||
271 | /* Object caches */ | 242 | /* Object caches */ |
272 | 243 | ||
273 | ACPI_EXTERN acpi_cache_t *acpi_gbl_namespace_cache; | 244 | ACPI_EXTERN acpi_cache_t *acpi_gbl_namespace_cache; |
@@ -326,6 +297,15 @@ extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS]; | |||
326 | 297 | ||
327 | #endif | 298 | #endif |
328 | 299 | ||
300 | #ifdef ACPI_DBG_TRACK_ALLOCATIONS | ||
301 | |||
302 | /* Lists for tracking memory allocations */ | ||
303 | |||
304 | ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list; | ||
305 | ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list; | ||
306 | ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats; | ||
307 | #endif | ||
308 | |||
329 | /***************************************************************************** | 309 | /***************************************************************************** |
330 | * | 310 | * |
331 | * Namespace globals | 311 | * Namespace globals |
@@ -396,13 +376,35 @@ ACPI_EXTERN struct acpi_gpe_block_info | |||
396 | #if (!ACPI_REDUCED_HARDWARE) | 376 | #if (!ACPI_REDUCED_HARDWARE) |
397 | 377 | ||
398 | ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized; | 378 | ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized; |
399 | ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler; | 379 | ACPI_EXTERN acpi_gbl_event_handler acpi_gbl_global_event_handler; |
400 | ACPI_EXTERN void *acpi_gbl_global_event_handler_context; | 380 | ACPI_EXTERN void *acpi_gbl_global_event_handler_context; |
401 | 381 | ||
402 | #endif /* !ACPI_REDUCED_HARDWARE */ | 382 | #endif /* !ACPI_REDUCED_HARDWARE */ |
403 | 383 | ||
404 | /***************************************************************************** | 384 | /***************************************************************************** |
405 | * | 385 | * |
386 | * Debug support | ||
387 | * | ||
388 | ****************************************************************************/ | ||
389 | |||
390 | /* Procedure nesting level for debug output */ | ||
391 | |||
392 | extern u32 acpi_gbl_nesting_level; | ||
393 | |||
394 | /* Event counters */ | ||
395 | |||
396 | ACPI_EXTERN u32 acpi_gpe_count; | ||
397 | ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]; | ||
398 | |||
399 | /* Support for dynamic control method tracing mechanism */ | ||
400 | |||
401 | ACPI_EXTERN u32 acpi_gbl_original_dbg_level; | ||
402 | ACPI_EXTERN u32 acpi_gbl_original_dbg_layer; | ||
403 | ACPI_EXTERN u32 acpi_gbl_trace_dbg_level; | ||
404 | ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer; | ||
405 | |||
406 | /***************************************************************************** | ||
407 | * | ||
406 | * Debugger globals | 408 | * Debugger globals |
407 | * | 409 | * |
408 | ****************************************************************************/ | 410 | ****************************************************************************/ |
@@ -426,10 +428,11 @@ ACPI_EXTERN u8 acpi_gbl_db_opt_stats; | |||
426 | ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods; | 428 | ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods; |
427 | 429 | ||
428 | ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]; | 430 | ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]; |
429 | ACPI_EXTERN char acpi_gbl_db_line_buf[80]; | 431 | ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]; |
430 | ACPI_EXTERN char acpi_gbl_db_parsed_buf[80]; | 432 | ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE]; |
431 | ACPI_EXTERN char acpi_gbl_db_scope_buf[40]; | 433 | ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE]; |
432 | ACPI_EXTERN char acpi_gbl_db_debug_filename[40]; | 434 | ACPI_EXTERN char acpi_gbl_db_scope_buf[80]; |
435 | ACPI_EXTERN char acpi_gbl_db_debug_filename[80]; | ||
433 | ACPI_EXTERN u8 acpi_gbl_db_output_to_file; | 436 | ACPI_EXTERN u8 acpi_gbl_db_output_to_file; |
434 | ACPI_EXTERN char *acpi_gbl_db_buffer; | 437 | ACPI_EXTERN char *acpi_gbl_db_buffer; |
435 | ACPI_EXTERN char *acpi_gbl_db_filename; | 438 | ACPI_EXTERN char *acpi_gbl_db_filename; |
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index c816ee675094..ff8bd0061e8b 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h | |||
@@ -262,10 +262,10 @@ struct acpi_create_field_info { | |||
262 | }; | 262 | }; |
263 | 263 | ||
264 | typedef | 264 | typedef |
265 | acpi_status(*ACPI_INTERNAL_METHOD) (struct acpi_walk_state * walk_state); | 265 | acpi_status(*acpi_internal_method) (struct acpi_walk_state * walk_state); |
266 | 266 | ||
267 | /* | 267 | /* |
268 | * Bitmapped ACPI types. Used internally only | 268 | * Bitmapped ACPI types. Used internally only |
269 | */ | 269 | */ |
270 | #define ACPI_BTYPE_ANY 0x00000000 | 270 | #define ACPI_BTYPE_ANY 0x00000000 |
271 | #define ACPI_BTYPE_INTEGER 0x00000001 | 271 | #define ACPI_BTYPE_INTEGER 0x00000001 |
@@ -486,8 +486,10 @@ struct acpi_gpe_device_info { | |||
486 | struct acpi_namespace_node *gpe_device; | 486 | struct acpi_namespace_node *gpe_device; |
487 | }; | 487 | }; |
488 | 488 | ||
489 | typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *gpe_xrupt_info, | 489 | typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info * |
490 | struct acpi_gpe_block_info *gpe_block, void *context); | 490 | gpe_xrupt_info, |
491 | struct acpi_gpe_block_info *gpe_block, | ||
492 | void *context); | ||
491 | 493 | ||
492 | /* Information about each particular fixed event */ | 494 | /* Information about each particular fixed event */ |
493 | 495 | ||
@@ -582,7 +584,7 @@ struct acpi_pscope_state { | |||
582 | }; | 584 | }; |
583 | 585 | ||
584 | /* | 586 | /* |
585 | * Thread state - one per thread across multiple walk states. Multiple walk | 587 | * Thread state - one per thread across multiple walk states. Multiple walk |
586 | * states are created when there are nested control methods executing. | 588 | * states are created when there are nested control methods executing. |
587 | */ | 589 | */ |
588 | struct acpi_thread_state { | 590 | struct acpi_thread_state { |
@@ -645,7 +647,7 @@ union acpi_generic_state { | |||
645 | * | 647 | * |
646 | ****************************************************************************/ | 648 | ****************************************************************************/ |
647 | 649 | ||
648 | typedef acpi_status(*ACPI_EXECUTE_OP) (struct acpi_walk_state * walk_state); | 650 | typedef acpi_status(*acpi_execute_op) (struct acpi_walk_state * walk_state); |
649 | 651 | ||
650 | /* Address Range info block */ | 652 | /* Address Range info block */ |
651 | 653 | ||
@@ -1031,6 +1033,7 @@ struct acpi_db_method_info { | |||
1031 | acpi_handle method; | 1033 | acpi_handle method; |
1032 | acpi_handle main_thread_gate; | 1034 | acpi_handle main_thread_gate; |
1033 | acpi_handle thread_complete_gate; | 1035 | acpi_handle thread_complete_gate; |
1036 | acpi_handle info_gate; | ||
1034 | acpi_thread_id *threads; | 1037 | acpi_thread_id *threads; |
1035 | u32 num_threads; | 1038 | u32 num_threads; |
1036 | u32 num_created; | 1039 | u32 num_created; |
@@ -1041,6 +1044,7 @@ struct acpi_db_method_info { | |||
1041 | u32 num_loops; | 1044 | u32 num_loops; |
1042 | char pathname[128]; | 1045 | char pathname[128]; |
1043 | char **args; | 1046 | char **args; |
1047 | acpi_object_type *types; | ||
1044 | 1048 | ||
1045 | /* | 1049 | /* |
1046 | * Arguments to be passed to method for the command | 1050 | * Arguments to be passed to method for the command |
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index a7f68c47f517..5efad99f2169 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h | |||
@@ -84,29 +84,29 @@ | |||
84 | 84 | ||
85 | /* These macros reverse the bytes during the move, converting little-endian to big endian */ | 85 | /* These macros reverse the bytes during the move, converting little-endian to big endian */ |
86 | 86 | ||
87 | /* Big Endian <== Little Endian */ | 87 | /* Big Endian <== Little Endian */ |
88 | /* Hi...Lo Lo...Hi */ | 88 | /* Hi...Lo Lo...Hi */ |
89 | /* 16-bit source, 16/32/64 destination */ | 89 | /* 16-bit source, 16/32/64 destination */ |
90 | 90 | ||
91 | #define ACPI_MOVE_16_TO_16(d, s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[1];\ | 91 | #define ACPI_MOVE_16_TO_16(d, s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[1];\ |
92 | (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[0];} | 92 | (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[0];} |
93 | 93 | ||
94 | #define ACPI_MOVE_16_TO_32(d, s) {(*(u32 *)(void *)(d))=0;\ | 94 | #define ACPI_MOVE_16_TO_32(d, s) {(*(u32 *)(void *)(d))=0;\ |
95 | ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\ | 95 | ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\ |
96 | ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];} | 96 | ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];} |
97 | 97 | ||
98 | #define ACPI_MOVE_16_TO_64(d, s) {(*(u64 *)(void *)(d))=0;\ | 98 | #define ACPI_MOVE_16_TO_64(d, s) {(*(u64 *)(void *)(d))=0;\ |
99 | ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\ | 99 | ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\ |
100 | ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];} | 100 | ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];} |
101 | 101 | ||
102 | /* 32-bit source, 16/32/64 destination */ | 102 | /* 32-bit source, 16/32/64 destination */ |
103 | 103 | ||
104 | #define ACPI_MOVE_32_TO_16(d, s) ACPI_MOVE_16_TO_16(d, s) /* Truncate to 16 */ | 104 | #define ACPI_MOVE_32_TO_16(d, s) ACPI_MOVE_16_TO_16(d, s) /* Truncate to 16 */ |
105 | 105 | ||
106 | #define ACPI_MOVE_32_TO_32(d, s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[3];\ | 106 | #define ACPI_MOVE_32_TO_32(d, s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[3];\ |
107 | (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[2];\ | 107 | (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[2];\ |
108 | (( u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\ | 108 | (( u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\ |
109 | (( u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];} | 109 | (( u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];} |
110 | 110 | ||
111 | #define ACPI_MOVE_32_TO_64(d, s) {(*(u64 *)(void *)(d))=0;\ | 111 | #define ACPI_MOVE_32_TO_64(d, s) {(*(u64 *)(void *)(d))=0;\ |
112 | ((u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\ | 112 | ((u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\ |
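The hunk above only re-tabs these byte-swapping move macros; their behavior is unchanged. As a minimal standalone sketch of what ACPI_MOVE_16_TO_32 does, the fragment below copies the macro body verbatim from the hunk; the u8/u16/u32 aliases and the printf scaffolding are illustrative stand-ins added only so it builds outside the kernel tree, and it assumes a little-endian host (the case these macros exist to convert from).

#include <stdio.h>

typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;

/* Copied from the hunk above: zero the 32-bit destination, then store the
 * 16-bit source bytes in reversed (big-endian) order. */
#define ACPI_MOVE_16_TO_32(d, s) {(*(u32 *)(void *)(d))=0;\
		((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
		((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}

int main(void)
{
	u16 src = 0x1234;	/* stored as 34 12 on a little-endian host */
	u32 dest;

	ACPI_MOVE_16_TO_32(&dest, &src);

	/* Prints "00 00 12 34": the destination is now in big-endian order */
	printf("%02x %02x %02x %02x\n",
	       ((u8 *)&dest)[0], ((u8 *)&dest)[1],
	       ((u8 *)&dest)[2], ((u8 *)&dest)[3]);
	return 0;
}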
@@ -196,24 +196,12 @@ | |||
196 | #endif | 196 | #endif |
197 | #endif | 197 | #endif |
198 | 198 | ||
199 | /* Macros based on machine integer width */ | ||
200 | |||
201 | #if ACPI_MACHINE_WIDTH == 32 | ||
202 | #define ACPI_MOVE_SIZE_TO_16(d, s) ACPI_MOVE_32_TO_16(d, s) | ||
203 | |||
204 | #elif ACPI_MACHINE_WIDTH == 64 | ||
205 | #define ACPI_MOVE_SIZE_TO_16(d, s) ACPI_MOVE_64_TO_16(d, s) | ||
206 | |||
207 | #else | ||
208 | #error unknown ACPI_MACHINE_WIDTH | ||
209 | #endif | ||
210 | |||
211 | /* | 199 | /* |
212 | * Fast power-of-two math macros for non-optimized compilers | 200 | * Fast power-of-two math macros for non-optimized compilers |
213 | */ | 201 | */ |
214 | #define _ACPI_DIV(value, power_of2) ((u32) ((value) >> (power_of2))) | 202 | #define _ACPI_DIV(value, power_of2) ((u32) ((value) >> (power_of2))) |
215 | #define _ACPI_MUL(value, power_of2) ((u32) ((value) << (power_of2))) | 203 | #define _ACPI_MUL(value, power_of2) ((u32) ((value) << (power_of2))) |
216 | #define _ACPI_MOD(value, divisor) ((u32) ((value) & ((divisor) -1))) | 204 | #define _ACPI_MOD(value, divisor) ((u32) ((value) & ((divisor) -1))) |
217 | 205 | ||
218 | #define ACPI_DIV_2(a) _ACPI_DIV(a, 1) | 206 | #define ACPI_DIV_2(a) _ACPI_DIV(a, 1) |
219 | #define ACPI_MUL_2(a) _ACPI_MUL(a, 1) | 207 | #define ACPI_MUL_2(a) _ACPI_MUL(a, 1) |
@@ -238,12 +226,12 @@ | |||
238 | /* | 226 | /* |
239 | * Rounding macros (Power of two boundaries only) | 227 | * Rounding macros (Power of two boundaries only) |
240 | */ | 228 | */ |
241 | #define ACPI_ROUND_DOWN(value, boundary) (((acpi_size)(value)) & \ | 229 | #define ACPI_ROUND_DOWN(value, boundary) (((acpi_size)(value)) & \ |
242 | (~(((acpi_size) boundary)-1))) | 230 | (~(((acpi_size) boundary)-1))) |
243 | 231 | ||
244 | #define ACPI_ROUND_UP(value, boundary) ((((acpi_size)(value)) + \ | 232 | #define ACPI_ROUND_UP(value, boundary) ((((acpi_size)(value)) + \ |
245 | (((acpi_size) boundary)-1)) & \ | 233 | (((acpi_size) boundary)-1)) & \ |
246 | (~(((acpi_size) boundary)-1))) | 234 | (~(((acpi_size) boundary)-1))) |
247 | 235 | ||
248 | /* Note: sizeof(acpi_size) evaluates to either 4 or 8 (32- vs 64-bit mode) */ | 236 | /* Note: sizeof(acpi_size) evaluates to either 4 or 8 (32- vs 64-bit mode) */ |
249 | 237 | ||
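Likewise, the rounding macros above and the _ACPI_DIV/_ACPI_MOD shift helpers in the previous hunk change only in whitespace. A short worked sketch of their arithmetic follows, with the macro bodies copied from the hunks; the values 37 and 8 are chosen purely for illustration, and the acpi_size/u32 typedefs are stand-ins so the fragment builds on its own.

#include <stdio.h>

typedef unsigned long acpi_size;	/* stand-in for the real typedef */
typedef unsigned int u32;

/* Copied from the hunks above: power-of-two divide/modulo via shift and
 * mask, and rounding to a power-of-two boundary via masking. */
#define _ACPI_DIV(value, power_of2)	((u32) ((value) >> (power_of2)))
#define _ACPI_MOD(value, divisor)	((u32) ((value) & ((divisor) -1)))

#define ACPI_ROUND_DOWN(value, boundary)	(((acpi_size)(value)) & \
						(~(((acpi_size) boundary)-1)))
#define ACPI_ROUND_UP(value, boundary)		((((acpi_size)(value)) + \
						(((acpi_size) boundary)-1)) & \
						(~(((acpi_size) boundary)-1)))

int main(void)
{
	/* 37 / 8 as a right shift by 3; 37 % 8 as a mask with 7 */
	printf("%u %u\n", _ACPI_DIV(37, 3), _ACPI_MOD(37, 8));		/* 4 5 */

	/* Round 37 down and up to an 8-byte boundary */
	printf("%lu %lu\n",
	       ACPI_ROUND_DOWN(37, 8), ACPI_ROUND_UP(37, 8));		/* 32 40 */
	return 0;
}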
@@ -264,7 +252,7 @@ | |||
264 | 252 | ||
265 | #define ACPI_ROUND_UP_TO(value, boundary) (((value) + ((boundary)-1)) / (boundary)) | 253 | #define ACPI_ROUND_UP_TO(value, boundary) (((value) + ((boundary)-1)) / (boundary)) |
266 | 254 | ||
267 | #define ACPI_IS_MISALIGNED(value) (((acpi_size) value) & (sizeof(acpi_size)-1)) | 255 | #define ACPI_IS_MISALIGNED(value) (((acpi_size) value) & (sizeof(acpi_size)-1)) |
268 | 256 | ||
269 | /* | 257 | /* |
270 | * Bitmask creation | 258 | * Bitmask creation |
@@ -355,7 +343,6 @@ | |||
355 | * Ascii error messages can be configured out | 343 | * Ascii error messages can be configured out |
356 | */ | 344 | */ |
357 | #ifndef ACPI_NO_ERROR_MESSAGES | 345 | #ifndef ACPI_NO_ERROR_MESSAGES |
358 | |||
359 | /* | 346 | /* |
360 | * Error reporting. Callers module and line number are inserted by AE_INFO, | 347 | * Error reporting. Callers module and line number are inserted by AE_INFO, |
361 | * the plist contains a set of parens to allow variable-length lists. | 348 | * the plist contains a set of parens to allow variable-length lists. |
@@ -375,18 +362,15 @@ | |||
375 | #define ACPI_WARN_PREDEFINED(plist) | 362 | #define ACPI_WARN_PREDEFINED(plist) |
376 | #define ACPI_INFO_PREDEFINED(plist) | 363 | #define ACPI_INFO_PREDEFINED(plist) |
377 | 364 | ||
378 | #endif /* ACPI_NO_ERROR_MESSAGES */ | 365 | #endif /* ACPI_NO_ERROR_MESSAGES */ |
379 | 366 | ||
380 | /* | 367 | /* |
381 | * Debug macros that are conditionally compiled | 368 | * Debug macros that are conditionally compiled |
382 | */ | 369 | */ |
383 | #ifdef ACPI_DEBUG_OUTPUT | 370 | #ifdef ACPI_DEBUG_OUTPUT |
384 | |||
385 | /* | 371 | /* |
386 | * Function entry tracing | 372 | * Function entry tracing |
387 | */ | 373 | */ |
388 | #ifdef CONFIG_ACPI_DEBUG_FUNC_TRACE | ||
389 | |||
390 | #define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \ | 374 | #define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \ |
391 | acpi_ut_trace(ACPI_DEBUG_PARAMETERS) | 375 | acpi_ut_trace(ACPI_DEBUG_PARAMETERS) |
392 | #define ACPI_FUNCTION_TRACE_PTR(a, b) ACPI_FUNCTION_NAME(a) \ | 376 | #define ACPI_FUNCTION_TRACE_PTR(a, b) ACPI_FUNCTION_NAME(a) \ |
@@ -464,45 +448,19 @@ | |||
464 | 448 | ||
465 | #endif /* ACPI_SIMPLE_RETURN_MACROS */ | 449 | #endif /* ACPI_SIMPLE_RETURN_MACROS */ |
466 | 450 | ||
467 | #else /* !CONFIG_ACPI_DEBUG_FUNC_TRACE */ | ||
468 | |||
469 | #define ACPI_FUNCTION_TRACE(a) | ||
470 | #define ACPI_FUNCTION_TRACE_PTR(a,b) | ||
471 | #define ACPI_FUNCTION_TRACE_U32(a,b) | ||
472 | #define ACPI_FUNCTION_TRACE_STR(a,b) | ||
473 | #define ACPI_FUNCTION_EXIT | ||
474 | #define ACPI_FUNCTION_STATUS_EXIT(s) | ||
475 | #define ACPI_FUNCTION_VALUE_EXIT(s) | ||
476 | #define ACPI_FUNCTION_TRACE(a) | ||
477 | #define ACPI_FUNCTION_ENTRY() | ||
478 | |||
479 | #define return_VOID return | ||
480 | #define return_ACPI_STATUS(s) return(s) | ||
481 | #define return_VALUE(s) return(s) | ||
482 | #define return_UINT8(s) return(s) | ||
483 | #define return_UINT32(s) return(s) | ||
484 | #define return_PTR(s) return(s) | ||
485 | |||
486 | #endif /* CONFIG_ACPI_DEBUG_FUNC_TRACE */ | ||
487 | |||
488 | /* Conditional execution */ | 451 | /* Conditional execution */ |
489 | 452 | ||
490 | #define ACPI_DEBUG_EXEC(a) a | 453 | #define ACPI_DEBUG_EXEC(a) a |
491 | #define ACPI_NORMAL_EXEC(a) | ||
492 | |||
493 | #define ACPI_DEBUG_DEFINE(a) a; | ||
494 | #define ACPI_DEBUG_ONLY_MEMBERS(a) a; | 454 | #define ACPI_DEBUG_ONLY_MEMBERS(a) a; |
495 | #define _VERBOSE_STRUCTURES | 455 | #define _VERBOSE_STRUCTURES |
496 | 456 | ||
497 | /* Stack and buffer dumping */ | 457 | /* Various object display routines for debug */ |
498 | 458 | ||
499 | #define ACPI_DUMP_STACK_ENTRY(a) acpi_ex_dump_operand((a), 0) | 459 | #define ACPI_DUMP_STACK_ENTRY(a) acpi_ex_dump_operand((a), 0) |
500 | #define ACPI_DUMP_OPERANDS(a, b, c) acpi_ex_dump_operands(a, b, c) | 460 | #define ACPI_DUMP_OPERANDS(a, b ,c) acpi_ex_dump_operands(a, b, c) |
501 | |||
502 | #define ACPI_DUMP_ENTRY(a, b) acpi_ns_dump_entry (a, b) | 461 | #define ACPI_DUMP_ENTRY(a, b) acpi_ns_dump_entry (a, b) |
503 | #define ACPI_DUMP_PATHNAME(a, b, c, d) acpi_ns_dump_pathname(a, b, c, d) | 462 | #define ACPI_DUMP_PATHNAME(a, b, c, d) acpi_ns_dump_pathname(a, b, c, d) |
504 | #define ACPI_DUMP_RESOURCE_LIST(a) acpi_rs_dump_resource_list(a) | 463 | #define ACPI_DUMP_BUFFER(a, b) acpi_ut_debug_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT) |
505 | #define ACPI_DUMP_BUFFER(a, b) acpi_ut_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT) | ||
506 | 464 | ||
507 | #else | 465 | #else |
508 | /* | 466 | /* |
@@ -510,25 +468,23 @@ | |||
510 | * leaving no executable debug code! | 468 | * leaving no executable debug code! |
511 | */ | 469 | */ |
512 | #define ACPI_DEBUG_EXEC(a) | 470 | #define ACPI_DEBUG_EXEC(a) |
513 | #define ACPI_NORMAL_EXEC(a) a; | 471 | #define ACPI_DEBUG_ONLY_MEMBERS(a) |
514 | 472 | #define ACPI_FUNCTION_TRACE(a) | |
515 | #define ACPI_DEBUG_DEFINE(a) do { } while(0) | 473 | #define ACPI_FUNCTION_TRACE_PTR(a, b) |
516 | #define ACPI_DEBUG_ONLY_MEMBERS(a) do { } while(0) | 474 | #define ACPI_FUNCTION_TRACE_U32(a, b) |
517 | #define ACPI_FUNCTION_TRACE(a) do { } while(0) | 475 | #define ACPI_FUNCTION_TRACE_STR(a, b) |
518 | #define ACPI_FUNCTION_TRACE_PTR(a, b) do { } while(0) | 476 | #define ACPI_FUNCTION_EXIT |
519 | #define ACPI_FUNCTION_TRACE_U32(a, b) do { } while(0) | 477 | #define ACPI_FUNCTION_STATUS_EXIT(s) |
520 | #define ACPI_FUNCTION_TRACE_STR(a, b) do { } while(0) | 478 | #define ACPI_FUNCTION_VALUE_EXIT(s) |
521 | #define ACPI_FUNCTION_EXIT do { } while(0) | 479 | #define ACPI_FUNCTION_ENTRY() |
522 | #define ACPI_FUNCTION_STATUS_EXIT(s) do { } while(0) | 480 | #define ACPI_DUMP_STACK_ENTRY(a) |
523 | #define ACPI_FUNCTION_VALUE_EXIT(s) do { } while(0) | 481 | #define ACPI_DUMP_OPERANDS(a, b, c) |
524 | #define ACPI_FUNCTION_ENTRY() do { } while(0) | 482 | #define ACPI_DUMP_ENTRY(a, b) |
525 | #define ACPI_DUMP_STACK_ENTRY(a) do { } while(0) | 483 | #define ACPI_DUMP_TABLES(a, b) |
526 | #define ACPI_DUMP_OPERANDS(a, b, c) do { } while(0) | 484 | #define ACPI_DUMP_PATHNAME(a, b, c, d) |
527 | #define ACPI_DUMP_ENTRY(a, b) do { } while(0) | 485 | #define ACPI_DUMP_BUFFER(a, b) |
528 | #define ACPI_DUMP_TABLES(a, b) do { } while(0) | 486 | #define ACPI_DEBUG_PRINT(pl) |
529 | #define ACPI_DUMP_PATHNAME(a, b, c, d) do { } while(0) | 487 | #define ACPI_DEBUG_PRINT_RAW(pl) |
530 | #define ACPI_DUMP_RESOURCE_LIST(a) do { } while(0) | ||
531 | #define ACPI_DUMP_BUFFER(a, b) do { } while(0) | ||
532 | 488 | ||
533 | #define return_VOID return | 489 | #define return_VOID return |
534 | #define return_ACPI_STATUS(s) return(s) | 490 | #define return_ACPI_STATUS(s) return(s) |
@@ -556,18 +512,6 @@ | |||
556 | #define ACPI_DEBUGGER_EXEC(a) | 512 | #define ACPI_DEBUGGER_EXEC(a) |
557 | #endif | 513 | #endif |
558 | 514 | ||
559 | #ifdef ACPI_DEBUG_OUTPUT | ||
560 | /* | ||
561 | * 1) Set name to blanks | ||
562 | * 2) Copy the object name | ||
563 | */ | ||
564 | #define ACPI_ADD_OBJECT_NAME(a,b) ACPI_MEMSET (a->common.name, ' ', sizeof (a->common.name));\ | ||
565 | ACPI_STRNCPY (a->common.name, acpi_gbl_ns_type_names[b], sizeof (a->common.name)) | ||
566 | #else | ||
567 | |||
568 | #define ACPI_ADD_OBJECT_NAME(a,b) | ||
569 | #endif | ||
570 | |||
571 | /* | 515 | /* |
572 | * Memory allocation tracking (DEBUG ONLY) | 516 | * Memory allocation tracking (DEBUG ONLY) |
573 | */ | 517 | */ |
@@ -578,13 +522,13 @@ | |||
578 | /* Memory allocation */ | 522 | /* Memory allocation */ |
579 | 523 | ||
580 | #ifndef ACPI_ALLOCATE | 524 | #ifndef ACPI_ALLOCATE |
581 | #define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size)(a), ACPI_MEM_PARAMETERS) | 525 | #define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size) (a), ACPI_MEM_PARAMETERS) |
582 | #endif | 526 | #endif |
583 | #ifndef ACPI_ALLOCATE_ZEROED | 527 | #ifndef ACPI_ALLOCATE_ZEROED |
584 | #define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size)(a), ACPI_MEM_PARAMETERS) | 528 | #define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size) (a), ACPI_MEM_PARAMETERS) |
585 | #endif | 529 | #endif |
586 | #ifndef ACPI_FREE | 530 | #ifndef ACPI_FREE |
587 | #define ACPI_FREE(a) acpio_os_free(a) | 531 | #define ACPI_FREE(a) acpi_os_free(a) |
588 | #endif | 532 | #endif |
589 | #define ACPI_MEM_TRACKING(a) | 533 | #define ACPI_MEM_TRACKING(a) |
590 | 534 | ||
@@ -592,16 +536,25 @@ | |||
592 | 536 | ||
593 | /* Memory allocation */ | 537 | /* Memory allocation */ |
594 | 538 | ||
595 | #define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size)(a), ACPI_MEM_PARAMETERS) | 539 | #define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS) |
596 | #define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track((acpi_size)(a), ACPI_MEM_PARAMETERS) | 540 | #define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS) |
597 | #define ACPI_FREE(a) acpi_ut_free_and_track(a, ACPI_MEM_PARAMETERS) | 541 | #define ACPI_FREE(a) acpi_ut_free_and_track(a, ACPI_MEM_PARAMETERS) |
598 | #define ACPI_MEM_TRACKING(a) a | 542 | #define ACPI_MEM_TRACKING(a) a |
599 | 543 | ||
600 | #endif /* ACPI_DBG_TRACK_ALLOCATIONS */ | 544 | #endif /* ACPI_DBG_TRACK_ALLOCATIONS */ |
601 | 545 | ||
602 | /* Preemption point */ | 546 | /* |
603 | #ifndef ACPI_PREEMPTION_POINT | 547 | * Macros used for ACPICA utilities only |
604 | #define ACPI_PREEMPTION_POINT() /* no preemption */ | 548 | */ |
605 | #endif | 549 | |
550 | /* Generate a UUID */ | ||
551 | |||
552 | #define ACPI_INIT_UUID(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ | ||
553 | (a) & 0xFF, ((a) >> 8) & 0xFF, ((a) >> 16) & 0xFF, ((a) >> 24) & 0xFF, \ | ||
554 | (b) & 0xFF, ((b) >> 8) & 0xFF, \ | ||
555 | (c) & 0xFF, ((c) >> 8) & 0xFF, \ | ||
556 | (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) | ||
557 | |||
558 | #define ACPI_IS_OCTAL_DIGIT(d) (((char)(d) >= '0') && ((char)(d) <= '7')) | ||
606 | 559 | ||
607 | #endif /* ACMACROS_H */ | 560 | #endif /* ACMACROS_H */ |
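The two helpers appended at the end of acmacros.h are self-contained, so a standalone sketch is straightforward: the macro bodies below are copied from the hunk, while the example UUID and the main() harness are made up purely for illustration.

#include <stdio.h>

typedef unsigned char u8;

/* Copied from the hunk above: expand a UUID into its 16-byte layout
 * (first three fields stored little-endian, final eight bytes as given). */
#define ACPI_INIT_UUID(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
	(a) & 0xFF, ((a) >> 8) & 0xFF, ((a) >> 16) & 0xFF, ((a) >> 24) & 0xFF, \
	(b) & 0xFF, ((b) >> 8) & 0xFF, \
	(c) & 0xFF, ((c) >> 8) & 0xFF, \
	(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7)

#define ACPI_IS_OCTAL_DIGIT(d)	(((char)(d) >= '0') && ((char)(d) <= '7'))

/* Hypothetical UUID 12345678-9abc-def0-1122-334455667788, illustration only */
static const u8 example_uuid[16] = {
	ACPI_INIT_UUID(0x12345678, 0x9ABC, 0xDEF0,
		       0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88)
};

int main(void)
{
	int i;

	/* Prints: 78 56 34 12 bc 9a f0 de 11 22 33 44 55 66 77 88 */
	for (i = 0; i < 16; i++)
		printf("%02x ", example_uuid[i]);
	printf("\n");

	/* Prints: 1 0 */
	printf("%d %d\n", ACPI_IS_OCTAL_DIGIT('7'), ACPI_IS_OCTAL_DIGIT('9'));
	return 0;
}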
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index 364a1303fb8f..24eb9eac9514 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Name: acobject.h - Definition of union acpi_operand_object (Internal object only) | 3 | * Name: acobject.h - Definition of union acpi_operand_object (Internal object only) |
@@ -179,7 +178,7 @@ struct acpi_object_method { | |||
179 | union acpi_operand_object *mutex; | 178 | union acpi_operand_object *mutex; |
180 | u8 *aml_start; | 179 | u8 *aml_start; |
181 | union { | 180 | union { |
182 | ACPI_INTERNAL_METHOD implementation; | 181 | acpi_internal_method implementation; |
183 | union acpi_operand_object *handler; | 182 | union acpi_operand_object *handler; |
184 | } dispatch; | 183 | } dispatch; |
185 | 184 | ||
@@ -198,7 +197,7 @@ struct acpi_object_method { | |||
198 | 197 | ||
199 | /****************************************************************************** | 198 | /****************************************************************************** |
200 | * | 199 | * |
201 | * Objects that can be notified. All share a common notify_info area. | 200 | * Objects that can be notified. All share a common notify_info area. |
202 | * | 201 | * |
203 | *****************************************************************************/ | 202 | *****************************************************************************/ |
204 | 203 | ||
@@ -235,7 +234,7 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO}; | |||
235 | 234 | ||
236 | /****************************************************************************** | 235 | /****************************************************************************** |
237 | * | 236 | * |
238 | * Fields. All share a common header/info field. | 237 | * Fields. All share a common header/info field. |
239 | * | 238 | * |
240 | *****************************************************************************/ | 239 | *****************************************************************************/ |
241 | 240 | ||
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h index 9440d053fbb3..d786a5128b78 100644 --- a/drivers/acpi/acpica/acopcode.h +++ b/drivers/acpi/acpica/acopcode.h | |||
@@ -54,7 +54,7 @@ | |||
54 | #define _UNK 0x6B | 54 | #define _UNK 0x6B |
55 | 55 | ||
56 | /* | 56 | /* |
57 | * Reserved ASCII characters. Do not use any of these for | 57 | * Reserved ASCII characters. Do not use any of these for |
58 | * internal opcodes, since they are used to differentiate | 58 | * internal opcodes, since they are used to differentiate |
59 | * name strings from AML opcodes | 59 | * name strings from AML opcodes |
60 | */ | 60 | */ |
@@ -63,7 +63,7 @@ | |||
63 | #define _PFX 0x6D | 63 | #define _PFX 0x6D |
64 | 64 | ||
65 | /* | 65 | /* |
66 | * All AML opcodes and the parse-time arguments for each. Used by the AML | 66 | * All AML opcodes and the parse-time arguments for each. Used by the AML |
67 | * parser Each list is compressed into a 32-bit number and stored in the | 67 | * parser Each list is compressed into a 32-bit number and stored in the |
68 | * master opcode table (in psopcode.c). | 68 | * master opcode table (in psopcode.c). |
69 | */ | 69 | */ |
@@ -193,7 +193,7 @@ | |||
193 | #define ARGP_ZERO_OP ARG_NONE | 193 | #define ARGP_ZERO_OP ARG_NONE |
194 | 194 | ||
195 | /* | 195 | /* |
196 | * All AML opcodes and the runtime arguments for each. Used by the AML | 196 | * All AML opcodes and the runtime arguments for each. Used by the AML |
197 | * interpreter Each list is compressed into a 32-bit number and stored | 197 | * interpreter Each list is compressed into a 32-bit number and stored |
198 | * in the master opcode table (in psopcode.c). | 198 | * in the master opcode table (in psopcode.c). |
199 | * | 199 | * |
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h index b725d780d34d..eefcf47a61a0 100644 --- a/drivers/acpi/acpica/acparser.h +++ b/drivers/acpi/acpica/acparser.h | |||
@@ -150,8 +150,7 @@ u8 acpi_ps_has_completed_scope(struct acpi_parse_state *parser_state); | |||
150 | 150 | ||
151 | void | 151 | void |
152 | acpi_ps_pop_scope(struct acpi_parse_state *parser_state, | 152 | acpi_ps_pop_scope(struct acpi_parse_state *parser_state, |
153 | union acpi_parse_object **op, | 153 | union acpi_parse_object **op, u32 *arg_list, u32 *arg_count); |
154 | u32 * arg_list, u32 * arg_count); | ||
155 | 154 | ||
156 | acpi_status | 155 | acpi_status |
157 | acpi_ps_push_scope(struct acpi_parse_state *parser_state, | 156 | acpi_ps_push_scope(struct acpi_parse_state *parser_state, |
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h index 3080c017f5ba..9dfa1c83bd4e 100644 --- a/drivers/acpi/acpica/acpredef.h +++ b/drivers/acpi/acpica/acpredef.h | |||
@@ -150,8 +150,7 @@ enum acpi_return_package_types { | |||
150 | * is saved here (rather than in a separate table) in order to minimize the | 150 | * is saved here (rather than in a separate table) in order to minimize the |
151 | * overall size of the stored data. | 151 | * overall size of the stored data. |
152 | */ | 152 | */ |
153 | static const union acpi_predefined_info predefined_names[] = | 153 | static const union acpi_predefined_info predefined_names[] = { |
154 | { | ||
155 | {{"_AC0", 0, ACPI_RTYPE_INTEGER}}, | 154 | {{"_AC0", 0, ACPI_RTYPE_INTEGER}}, |
156 | {{"_AC1", 0, ACPI_RTYPE_INTEGER}}, | 155 | {{"_AC1", 0, ACPI_RTYPE_INTEGER}}, |
157 | {{"_AC2", 0, ACPI_RTYPE_INTEGER}}, | 156 | {{"_AC2", 0, ACPI_RTYPE_INTEGER}}, |
@@ -538,7 +537,8 @@ static const union acpi_predefined_info predefined_names[] = | |||
538 | 537 | ||
539 | /* Acpi 1.0 defined _WAK with no return value. Later, it was changed to return a package */ | 538 | /* Acpi 1.0 defined _WAK with no return value. Later, it was changed to return a package */ |
540 | 539 | ||
541 | {{"_WAK", 1, ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE}}, | 540 | {{"_WAK", 1, |
541 | ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE}}, | ||
542 | {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2,0}, 0,0}}, /* Fixed-length (2 Int), but is optional */ | 542 | {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2,0}, 0,0}}, /* Fixed-length (2 Int), but is optional */ |
543 | 543 | ||
544 | /* _WDG/_WED are MS extensions defined by "Windows Instrumentation" */ | 544 | /* _WDG/_WED are MS extensions defined by "Windows Instrumentation" */ |
@@ -551,11 +551,12 @@ static const union acpi_predefined_info predefined_names[] = | |||
551 | }; | 551 | }; |
552 | 552 | ||
553 | #if 0 | 553 | #if 0 |
554 | |||
554 | /* This is an internally implemented control method, no need to check */ | 555 | /* This is an internally implemented control method, no need to check */ |
555 | {{"_OSI", 1, ACPI_RTYPE_INTEGER}}, | 556 | { { |
557 | "_OSI", 1, ACPI_RTYPE_INTEGER}}, | ||
556 | 558 | ||
557 | /* TBD: */ | 559 | /* TBD: */ |
558 | |||
559 | _PRT - currently ignore reversed entries. attempt to fix here? | 560 | _PRT - currently ignore reversed entries. attempt to fix here? |
560 | think about possibly fixing package elements like _BIF, etc. | 561 | think about possibly fixing package elements like _BIF, etc. |
561 | #endif | 562 | #endif |
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h index f196e2c9a71f..937e66c65d1e 100644 --- a/drivers/acpi/acpica/acstruct.h +++ b/drivers/acpi/acpica/acstruct.h | |||
@@ -53,7 +53,7 @@ | |||
53 | ****************************************************************************/ | 53 | ****************************************************************************/ |
54 | 54 | ||
55 | /* | 55 | /* |
56 | * Walk state - current state of a parse tree walk. Used for both a leisurely | 56 | * Walk state - current state of a parse tree walk. Used for both a leisurely |
57 | * stroll through the tree (for whatever reason), and for control method | 57 | * stroll through the tree (for whatever reason), and for control method |
58 | * execution. | 58 | * execution. |
59 | */ | 59 | */ |
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index 5035327ebccc..b0f5f92b674a 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h | |||
@@ -69,6 +69,22 @@ extern const char *acpi_gbl_siz_decode[]; | |||
69 | extern const char *acpi_gbl_trs_decode[]; | 69 | extern const char *acpi_gbl_trs_decode[]; |
70 | extern const char *acpi_gbl_ttp_decode[]; | 70 | extern const char *acpi_gbl_ttp_decode[]; |
71 | extern const char *acpi_gbl_typ_decode[]; | 71 | extern const char *acpi_gbl_typ_decode[]; |
72 | extern const char *acpi_gbl_ppc_decode[]; | ||
73 | extern const char *acpi_gbl_ior_decode[]; | ||
74 | extern const char *acpi_gbl_dts_decode[]; | ||
75 | extern const char *acpi_gbl_ct_decode[]; | ||
76 | extern const char *acpi_gbl_sbt_decode[]; | ||
77 | extern const char *acpi_gbl_am_decode[]; | ||
78 | extern const char *acpi_gbl_sm_decode[]; | ||
79 | extern const char *acpi_gbl_wm_decode[]; | ||
80 | extern const char *acpi_gbl_cph_decode[]; | ||
81 | extern const char *acpi_gbl_cpo_decode[]; | ||
82 | extern const char *acpi_gbl_dp_decode[]; | ||
83 | extern const char *acpi_gbl_ed_decode[]; | ||
84 | extern const char *acpi_gbl_bpb_decode[]; | ||
85 | extern const char *acpi_gbl_sb_decode[]; | ||
86 | extern const char *acpi_gbl_fc_decode[]; | ||
87 | extern const char *acpi_gbl_pt_decode[]; | ||
72 | #endif | 88 | #endif |
73 | 89 | ||
74 | /* Types for Resource descriptor entries */ | 90 | /* Types for Resource descriptor entries */ |
@@ -79,14 +95,14 @@ extern const char *acpi_gbl_typ_decode[]; | |||
79 | #define ACPI_SMALL_VARIABLE_LENGTH 3 | 95 | #define ACPI_SMALL_VARIABLE_LENGTH 3 |
80 | 96 | ||
81 | typedef | 97 | typedef |
82 | acpi_status(*acpi_walk_aml_callback) (u8 * aml, | 98 | acpi_status(*acpi_walk_aml_callback) (u8 *aml, |
83 | u32 length, | 99 | u32 length, |
84 | u32 offset, | 100 | u32 offset, |
85 | u8 resource_index, void **context); | 101 | u8 resource_index, void **context); |
86 | 102 | ||
87 | typedef | 103 | typedef |
88 | acpi_status(*acpi_pkg_callback) (u8 object_type, | 104 | acpi_status(*acpi_pkg_callback) (u8 object_type, |
89 | union acpi_operand_object * source_object, | 105 | union acpi_operand_object *source_object, |
90 | union acpi_generic_state * state, | 106 | union acpi_generic_state * state, |
91 | void *context); | 107 | void *context); |
92 | 108 | ||
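The acpi_walk_aml_callback typedef above changes only in pointer spacing, but it shows the shape of the callbacks that acpi_ut_walk_aml_resources() (prototyped later in this header) invokes once per AML resource descriptor. Below is a sketch of a conforming callback; the acpi_status/AE_OK/u8/u32 definitions are stubbed-out assumptions so the fragment stands alone, and the body and dummy invocation are illustrative, not ACPICA code.

#include <stdio.h>

typedef unsigned char u8;
typedef unsigned int u32;
typedef u32 acpi_status;		/* stand-ins for the real ACPICA typedefs */
#define AE_OK	((acpi_status) 0)	/* assumed success code */

/* Matches the acpi_walk_aml_callback signature from the hunk above; the
 * resource walker would call this once per descriptor it finds. */
static acpi_status print_descriptor(u8 *aml, u32 length, u32 offset,
				    u8 resource_index, void **context)
{
	(void)aml;			/* unused in this illustration */
	(void)context;

	printf("descriptor index %u at offset %u, length %u bytes\n",
	       resource_index, offset, length);
	return AE_OK;
}

int main(void)
{
	u8 dummy_aml[4] = { 0 };

	/* Invoke directly with made-up arguments just to show the call shape;
	 * in ACPICA, acpi_ut_walk_aml_resources() supplies these values. */
	return (int) print_descriptor(dummy_aml, 4, 0, 0, NULL);
}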
@@ -202,7 +218,9 @@ extern const u8 _acpi_ctype[]; | |||
202 | #define ACPI_IS_PRINT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP | _ACPI_DI | _ACPI_SP | _ACPI_PU)) | 218 | #define ACPI_IS_PRINT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP | _ACPI_DI | _ACPI_SP | _ACPI_PU)) |
203 | #define ACPI_IS_ALPHA(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP)) | 219 | #define ACPI_IS_ALPHA(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP)) |
204 | 220 | ||
205 | #endif /* ACPI_USE_SYSTEM_CLIBRARY */ | 221 | #endif /* !ACPI_USE_SYSTEM_CLIBRARY */ |
222 | |||
223 | #define ACPI_IS_ASCII(c) ((c) < 0x80) | ||
206 | 224 | ||
207 | /* | 225 | /* |
208 | * utcopy - Object construction and conversion interfaces | 226 | * utcopy - Object construction and conversion interfaces |
@@ -210,11 +228,11 @@ extern const u8 _acpi_ctype[]; | |||
210 | acpi_status | 228 | acpi_status |
211 | acpi_ut_build_simple_object(union acpi_operand_object *obj, | 229 | acpi_ut_build_simple_object(union acpi_operand_object *obj, |
212 | union acpi_object *user_obj, | 230 | union acpi_object *user_obj, |
213 | u8 * data_space, u32 * buffer_space_used); | 231 | u8 *data_space, u32 *buffer_space_used); |
214 | 232 | ||
215 | acpi_status | 233 | acpi_status |
216 | acpi_ut_build_package_object(union acpi_operand_object *obj, | 234 | acpi_ut_build_package_object(union acpi_operand_object *obj, |
217 | u8 * buffer, u32 * space_used); | 235 | u8 *buffer, u32 *space_used); |
218 | 236 | ||
219 | acpi_status | 237 | acpi_status |
220 | acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *obj, | 238 | acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *obj, |
@@ -287,9 +305,10 @@ acpi_ut_ptr_exit(u32 line_number, | |||
287 | const char *function_name, | 305 | const char *function_name, |
288 | const char *module_name, u32 component_id, u8 *ptr); | 306 | const char *module_name, u32 component_id, u8 *ptr); |
289 | 307 | ||
290 | void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id); | 308 | void |
309 | acpi_ut_debug_dump_buffer(u8 *buffer, u32 count, u32 display, u32 component_id); | ||
291 | 310 | ||
292 | void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display); | 311 | void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 offset); |
293 | 312 | ||
294 | void acpi_ut_report_error(char *module_name, u32 line_number); | 313 | void acpi_ut_report_error(char *module_name, u32 line_number); |
295 | 314 | ||
@@ -337,15 +356,19 @@ acpi_ut_execute_power_methods(struct acpi_namespace_node *device_node, | |||
337 | */ | 356 | */ |
338 | acpi_status | 357 | acpi_status |
339 | acpi_ut_execute_HID(struct acpi_namespace_node *device_node, | 358 | acpi_ut_execute_HID(struct acpi_namespace_node *device_node, |
340 | struct acpica_device_id **return_id); | 359 | struct acpi_pnp_device_id ** return_id); |
341 | 360 | ||
342 | acpi_status | 361 | acpi_status |
343 | acpi_ut_execute_UID(struct acpi_namespace_node *device_node, | 362 | acpi_ut_execute_UID(struct acpi_namespace_node *device_node, |
344 | struct acpica_device_id **return_id); | 363 | struct acpi_pnp_device_id ** return_id); |
364 | |||
365 | acpi_status | ||
366 | acpi_ut_execute_SUB(struct acpi_namespace_node *device_node, | ||
367 | struct acpi_pnp_device_id **return_id); | ||
345 | 368 | ||
346 | acpi_status | 369 | acpi_status |
347 | acpi_ut_execute_CID(struct acpi_namespace_node *device_node, | 370 | acpi_ut_execute_CID(struct acpi_namespace_node *device_node, |
348 | struct acpica_device_id_list **return_cid_list); | 371 | struct acpi_pnp_device_id_list ** return_cid_list); |
349 | 372 | ||
350 | /* | 373 | /* |
351 | * utlock - reader/writer locks | 374 | * utlock - reader/writer locks |
@@ -479,15 +502,19 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object, | |||
479 | 502 | ||
480 | void acpi_ut_strupr(char *src_string); | 503 | void acpi_ut_strupr(char *src_string); |
481 | 504 | ||
505 | void acpi_ut_strlwr(char *src_string); | ||
506 | |||
507 | int acpi_ut_stricmp(char *string1, char *string2); | ||
508 | |||
482 | void acpi_ut_print_string(char *string, u8 max_length); | 509 | void acpi_ut_print_string(char *string, u8 max_length); |
483 | 510 | ||
484 | u8 acpi_ut_valid_acpi_name(u32 name); | 511 | u8 acpi_ut_valid_acpi_name(u32 name); |
485 | 512 | ||
486 | acpi_name acpi_ut_repair_name(char *name); | 513 | void acpi_ut_repair_name(char *name); |
487 | 514 | ||
488 | u8 acpi_ut_valid_acpi_char(char character, u32 position); | 515 | u8 acpi_ut_valid_acpi_char(char character, u32 position); |
489 | 516 | ||
490 | acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer); | 517 | acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer); |
491 | 518 | ||
492 | /* Values for Base above (16=Hex, 10=Decimal) */ | 519 | /* Values for Base above (16=Hex, 10=Decimal) */ |
493 | 520 | ||
@@ -508,12 +535,12 @@ acpi_ut_display_init_pathname(u8 type, | |||
508 | * utresrc | 535 | * utresrc |
509 | */ | 536 | */ |
510 | acpi_status | 537 | acpi_status |
511 | acpi_ut_walk_aml_resources(u8 * aml, | 538 | acpi_ut_walk_aml_resources(u8 *aml, |
512 | acpi_size aml_length, | 539 | acpi_size aml_length, |
513 | acpi_walk_aml_callback user_function, | 540 | acpi_walk_aml_callback user_function, |
514 | void **context); | 541 | void **context); |
515 | 542 | ||
516 | acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index); | 543 | acpi_status acpi_ut_validate_resource(void *aml, u8 *return_index); |
517 | 544 | ||
518 | u32 acpi_ut_get_descriptor_length(void *aml); | 545 | u32 acpi_ut_get_descriptor_length(void *aml); |
519 | 546 | ||
@@ -524,8 +551,7 @@ u8 acpi_ut_get_resource_header_length(void *aml); | |||
524 | u8 acpi_ut_get_resource_type(void *aml); | 551 | u8 acpi_ut_get_resource_type(void *aml); |
525 | 552 | ||
526 | acpi_status | 553 | acpi_status |
527 | acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, | 554 | acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag); |
528 | u8 ** end_tag); | ||
529 | 555 | ||
530 | /* | 556 | /* |
531 | * utmutex - mutex support | 557 | * utmutex - mutex support |
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h index af4947956ec2..968449685e06 100644 --- a/drivers/acpi/acpica/amlresrc.h +++ b/drivers/acpi/acpica/amlresrc.h | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: amlresrc.h - AML resource descriptors | 3 | * Module Name: amlresrc.h - AML resource descriptors |
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c index 465f02134b89..57895db3231a 100644 --- a/drivers/acpi/acpica/dscontrol.c +++ b/drivers/acpi/acpica/dscontrol.c | |||
@@ -280,7 +280,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state, | |||
280 | 280 | ||
281 | /* | 281 | /* |
282 | * Get the return value and save as the last result | 282 | * Get the return value and save as the last result |
283 | * value. This is the only place where walk_state->return_desc | 283 | * value. This is the only place where walk_state->return_desc |
284 | * is set to anything other than zero! | 284 | * is set to anything other than zero! |
285 | */ | 285 | */ |
286 | walk_state->return_desc = walk_state->operands[0]; | 286 | walk_state->return_desc = walk_state->operands[0]; |
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index 3da6fd8530c5..b5b904ee815f 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c | |||
@@ -277,7 +277,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op, | |||
277 | * | 277 | * |
278 | * RETURN: Status | 278 | * RETURN: Status |
279 | * | 279 | * |
280 | * DESCRIPTION: Process all named fields in a field declaration. Names are | 280 | * DESCRIPTION: Process all named fields in a field declaration. Names are |
281 | * entered into the namespace. | 281 | * entered into the namespace. |
282 | * | 282 | * |
283 | ******************************************************************************/ | 283 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index aa9a5d4e4052..52eb4e01622a 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c | |||
@@ -170,7 +170,7 @@ acpi_ds_create_method_mutex(union acpi_operand_object *method_desc) | |||
170 | * | 170 | * |
171 | * RETURN: Status | 171 | * RETURN: Status |
172 | * | 172 | * |
173 | * DESCRIPTION: Prepare a method for execution. Parses the method if necessary, | 173 | * DESCRIPTION: Prepare a method for execution. Parses the method if necessary, |
174 | * increments the thread count, and waits at the method semaphore | 174 | * increments the thread count, and waits at the method semaphore |
175 | * for clearance to execute. | 175 | * for clearance to execute. |
176 | * | 176 | * |
@@ -444,7 +444,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, | |||
444 | * RETURN: Status | 444 | * RETURN: Status |
445 | * | 445 | * |
446 | * DESCRIPTION: Restart a method that was preempted by another (nested) method | 446 | * DESCRIPTION: Restart a method that was preempted by another (nested) method |
447 | * invocation. Handle the return value (if any) from the callee. | 447 | * invocation. Handle the return value (if any) from the callee. |
448 | * | 448 | * |
449 | ******************************************************************************/ | 449 | ******************************************************************************/ |
450 | 450 | ||
@@ -530,7 +530,7 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state, | |||
530 | * | 530 | * |
531 | * RETURN: None | 531 | * RETURN: None |
532 | * | 532 | * |
533 | * DESCRIPTION: Terminate a control method. Delete everything that the method | 533 | * DESCRIPTION: Terminate a control method. Delete everything that the method |
534 | * created, delete all locals and arguments, and delete the parse | 534 | * created, delete all locals and arguments, and delete the parse |
535 | * tree if requested. | 535 | * tree if requested. |
536 | * | 536 | * |
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c index 8d55cebaa656..9a83b7e0f3ba 100644 --- a/drivers/acpi/acpica/dsmthdat.c +++ b/drivers/acpi/acpica/dsmthdat.c | |||
@@ -76,7 +76,7 @@ acpi_ds_method_data_get_type(u16 opcode, | |||
76 | * RETURN: Status | 76 | * RETURN: Status |
77 | * | 77 | * |
78 | * DESCRIPTION: Initialize the data structures that hold the method's arguments | 78 | * DESCRIPTION: Initialize the data structures that hold the method's arguments |
79 | * and locals. The data struct is an array of namespace nodes for | 79 | * and locals. The data struct is an array of namespace nodes for |
80 | * each - this allows ref_of and de_ref_of to work properly for these | 80 | * each - this allows ref_of and de_ref_of to work properly for these |
81 | * special data types. | 81 | * special data types. |
82 | * | 82 | * |
@@ -129,7 +129,7 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state) | |||
129 | * | 129 | * |
130 | * RETURN: None | 130 | * RETURN: None |
131 | * | 131 | * |
132 | * DESCRIPTION: Delete method locals and arguments. Arguments are only | 132 | * DESCRIPTION: Delete method locals and arguments. Arguments are only |
133 | * deleted if this method was called from another method. | 133 | * deleted if this method was called from another method. |
134 | * | 134 | * |
135 | ******************************************************************************/ | 135 | ******************************************************************************/ |
@@ -183,7 +183,7 @@ void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state) | |||
183 | * | 183 | * |
184 | * RETURN: Status | 184 | * RETURN: Status |
185 | * | 185 | * |
186 | * DESCRIPTION: Initialize arguments for a method. The parameter list is a list | 186 | * DESCRIPTION: Initialize arguments for a method. The parameter list is a list |
187 | * of ACPI operand objects, either null terminated or whose length | 187 | * of ACPI operand objects, either null terminated or whose length |
188 | * is defined by max_param_count. | 188 | * is defined by max_param_count. |
189 | * | 189 | * |
@@ -401,7 +401,7 @@ acpi_ds_method_data_get_value(u8 type, | |||
401 | * This means that either 1) The expected argument was | 401 | * This means that either 1) The expected argument was |
402 | * not passed to the method, or 2) A local variable | 402 | * not passed to the method, or 2) A local variable |
403 | * was referenced by the method (via the ASL) | 403 | * was referenced by the method (via the ASL) |
404 | * before it was initialized. Either case is an error. | 404 | * before it was initialized. Either case is an error. |
405 | */ | 405 | */ |
406 | 406 | ||
407 | /* If slack enabled, init the local_x/arg_x to an Integer of value zero */ | 407 | /* If slack enabled, init the local_x/arg_x to an Integer of value zero */ |
@@ -465,7 +465,7 @@ acpi_ds_method_data_get_value(u8 type, | |||
465 | * | 465 | * |
466 | * RETURN: None | 466 | * RETURN: None |
467 | * | 467 | * |
468 | * DESCRIPTION: Delete the entry at Opcode:Index. Inserts | 468 | * DESCRIPTION: Delete the entry at Opcode:Index. Inserts |
469 | * a null into the stack slot after the object is deleted. | 469 | * a null into the stack slot after the object is deleted. |
470 | * | 470 | * |
471 | ******************************************************************************/ | 471 | ******************************************************************************/ |
@@ -523,7 +523,7 @@ acpi_ds_method_data_delete_value(u8 type, | |||
523 | * | 523 | * |
524 | * RETURN: Status | 524 | * RETURN: Status |
525 | * | 525 | * |
526 | * DESCRIPTION: Store a value in an Arg or Local. The obj_desc is installed | 526 | * DESCRIPTION: Store a value in an Arg or Local. The obj_desc is installed |
527 | * as the new value for the Arg or Local and the reference count | 527 | * as the new value for the Arg or Local and the reference count |
528 | * for obj_desc is incremented. | 528 | * for obj_desc is incremented. |
529 | * | 529 | * |
@@ -566,7 +566,7 @@ acpi_ds_store_object_to_local(u8 type, | |||
566 | 566 | ||
567 | /* | 567 | /* |
568 | * If the reference count on the object is more than one, we must | 568 | * If the reference count on the object is more than one, we must |
569 | * take a copy of the object before we store. A reference count | 569 | * take a copy of the object before we store. A reference count |
570 | * of exactly 1 means that the object was just created during the | 570 | * of exactly 1 means that the object was just created during the |
571 | * evaluation of an expression, and we can safely use it since it | 571 | * evaluation of an expression, and we can safely use it since it |
572 | * is not used anywhere else. | 572 | * is not used anywhere else. |
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c index 68592dd34960..c9f15d3a3686 100644 --- a/drivers/acpi/acpica/dsobject.c +++ b/drivers/acpi/acpica/dsobject.c | |||
@@ -293,7 +293,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state, | |||
293 | 293 | ||
294 | /* | 294 | /* |
295 | * Second arg is the buffer data (optional) byte_list can be either | 295 | * Second arg is the buffer data (optional) byte_list can be either |
296 | * individual bytes or a string initializer. In either case, a | 296 | * individual bytes or a string initializer. In either case, a |
297 | * byte_list appears in the AML. | 297 | * byte_list appears in the AML. |
298 | */ | 298 | */ |
299 | arg = op->common.value.arg; /* skip first arg */ | 299 | arg = op->common.value.arg; /* skip first arg */ |
@@ -568,7 +568,7 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state, | |||
568 | 568 | ||
569 | /* | 569 | /* |
570 | * Because of the execution pass through the non-control-method | 570 | * Because of the execution pass through the non-control-method |
571 | * parts of the table, we can arrive here twice. Only init | 571 | * parts of the table, we can arrive here twice. Only init |
572 | * the named object node the first time through | 572 | * the named object node the first time through |
573 | */ | 573 | */ |
574 | if (acpi_ns_get_attached_object(node)) { | 574 | if (acpi_ns_get_attached_object(node)) { |
@@ -618,7 +618,7 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state, | |||
618 | * RETURN: Status | 618 | * RETURN: Status |
619 | * | 619 | * |
620 | * DESCRIPTION: Initialize a namespace object from a parser Op and its | 620 | * DESCRIPTION: Initialize a namespace object from a parser Op and its |
621 | * associated arguments. The namespace object is a more compact | 621 | * associated arguments. The namespace object is a more compact |
622 | * representation of the Op and its arguments. | 622 | * representation of the Op and its arguments. |
623 | * | 623 | * |
624 | ******************************************************************************/ | 624 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index aa34d8984d34..0df024e5fb63 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c | |||
@@ -649,7 +649,8 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state, | |||
649 | ((op->common.parent->common.aml_opcode != AML_PACKAGE_OP) && | 649 | ((op->common.parent->common.aml_opcode != AML_PACKAGE_OP) && |
650 | (op->common.parent->common.aml_opcode != | 650 | (op->common.parent->common.aml_opcode != |
651 | AML_VAR_PACKAGE_OP) | 651 | AML_VAR_PACKAGE_OP) |
652 | && (op->common.parent->common.aml_opcode != AML_NAME_OP))) { | 652 | && (op->common.parent->common.aml_opcode != |
653 | AML_NAME_OP))) { | ||
653 | walk_state->result_obj = obj_desc; | 654 | walk_state->result_obj = obj_desc; |
654 | } | 655 | } |
655 | } | 656 | } |
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c index 73a5447475f5..afeb99f49482 100644 --- a/drivers/acpi/acpica/dsutils.c +++ b/drivers/acpi/acpica/dsutils.c | |||
@@ -61,7 +61,7 @@ ACPI_MODULE_NAME("dsutils") | |||
61 | * | 61 | * |
62 | * RETURN: None. | 62 | * RETURN: None. |
63 | * | 63 | * |
64 | * DESCRIPTION: Clear and remove a reference on an implicit return value. Used | 64 | * DESCRIPTION: Clear and remove a reference on an implicit return value. Used |
65 | * to delete "stale" return values (if enabled, the return value | 65 | * to delete "stale" return values (if enabled, the return value |
66 | * from every operator is saved at least momentarily, in case the | 66 | * from every operator is saved at least momentarily, in case the |
67 | * parent method exits.) | 67 | * parent method exits.) |
@@ -107,7 +107,7 @@ void acpi_ds_clear_implicit_return(struct acpi_walk_state *walk_state) | |||
107 | * | 107 | * |
108 | * DESCRIPTION: Implements the optional "implicit return". We save the result | 108 | * DESCRIPTION: Implements the optional "implicit return". We save the result |
109 | * of every ASL operator and control method invocation in case the | 109 | * of every ASL operator and control method invocation in case the |
110 | * parent method exit. Before storing a new return value, we | 110 | * parent method exit. Before storing a new return value, we |
111 | * delete the previous return value. | 111 | * delete the previous return value. |
112 | * | 112 | * |
113 | ******************************************************************************/ | 113 | ******************************************************************************/ |
@@ -198,7 +198,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op, | |||
198 | * | 198 | * |
199 | * If there is no parent, or the parent is a scope_op, we are executing | 199 | * If there is no parent, or the parent is a scope_op, we are executing |
200 | * at the method level. An executing method typically has no parent, | 200 | * at the method level. An executing method typically has no parent, |
201 | * since each method is parsed separately. A method invoked externally | 201 | * since each method is parsed separately. A method invoked externally |
202 | * via execute_control_method has a scope_op as the parent. | 202 | * via execute_control_method has a scope_op as the parent. |
203 | */ | 203 | */ |
204 | if ((!op->common.parent) || | 204 | if ((!op->common.parent) || |
@@ -223,7 +223,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op, | |||
223 | } | 223 | } |
224 | 224 | ||
225 | /* | 225 | /* |
226 | * Decide what to do with the result based on the parent. If | 226 | * Decide what to do with the result based on the parent. If |
227 | * the parent opcode will not use the result, delete the object. | 227 | * the parent opcode will not use the result, delete the object. |
228 | * Otherwise leave it as is, it will be deleted when it is used | 228 | * Otherwise leave it as is, it will be deleted when it is used |
229 | * as an operand later. | 229 | * as an operand later. |
@@ -266,7 +266,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op, | |||
266 | 266 | ||
267 | /* | 267 | /* |
268 | * These opcodes allow term_arg(s) as operands and therefore | 268 | * These opcodes allow term_arg(s) as operands and therefore |
269 | * the operands can be method calls. The result is used. | 269 | * the operands can be method calls. The result is used. |
270 | */ | 270 | */ |
271 | goto result_used; | 271 | goto result_used; |
272 | 272 | ||
@@ -284,7 +284,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op, | |||
284 | AML_BANK_FIELD_OP)) { | 284 | AML_BANK_FIELD_OP)) { |
285 | /* | 285 | /* |
286 | * These opcodes allow term_arg(s) as operands and therefore | 286 | * These opcodes allow term_arg(s) as operands and therefore |
287 | * the operands can be method calls. The result is used. | 287 | * the operands can be method calls. The result is used. |
288 | */ | 288 | */ |
289 | goto result_used; | 289 | goto result_used; |
290 | } | 290 | } |
@@ -329,9 +329,9 @@ acpi_ds_is_result_used(union acpi_parse_object * op, | |||
329 | * | 329 | * |
330 | * RETURN: Status | 330 | * RETURN: Status |
331 | * | 331 | * |
332 | * DESCRIPTION: Used after interpretation of an opcode. If there is an internal | 332 | * DESCRIPTION: Used after interpretation of an opcode. If there is an internal |
333 | * result descriptor, check if the parent opcode will actually use | 333 | * result descriptor, check if the parent opcode will actually use |
334 | * this result. If not, delete the result now so that it will | 334 | * this result. If not, delete the result now so that it will |
335 | * not become orphaned. | 335 | * not become orphaned. |
336 | * | 336 | * |
337 | ******************************************************************************/ | 337 | ******************************************************************************/ |
@@ -376,7 +376,7 @@ acpi_ds_delete_result_if_not_used(union acpi_parse_object *op, | |||
376 | * | 376 | * |
377 | * RETURN: Status | 377 | * RETURN: Status |
378 | * | 378 | * |
379 | * DESCRIPTION: Resolve all operands to their values. Used to prepare | 379 | * DESCRIPTION: Resolve all operands to their values. Used to prepare |
380 | * arguments to a control method invocation (a call from one | 380 | * arguments to a control method invocation (a call from one |
381 | * method to another.) | 381 | * method to another.) |
382 | * | 382 | * |
@@ -391,7 +391,7 @@ acpi_status acpi_ds_resolve_operands(struct acpi_walk_state *walk_state) | |||
391 | 391 | ||
392 | /* | 392 | /* |
393 | * Attempt to resolve each of the valid operands | 393 | * Attempt to resolve each of the valid operands |
394 | * Method arguments are passed by reference, not by value. This means | 394 | * Method arguments are passed by reference, not by value. This means |
395 | * that the actual objects are passed, not copies of the objects. | 395 | * that the actual objects are passed, not copies of the objects. |
396 | */ | 396 | */ |
397 | for (i = 0; i < walk_state->num_operands; i++) { | 397 | for (i = 0; i < walk_state->num_operands; i++) { |
@@ -451,7 +451,7 @@ void acpi_ds_clear_operands(struct acpi_walk_state *walk_state) | |||
451 | * RETURN: Status | 451 | * RETURN: Status |
452 | * | 452 | * |
453 | * DESCRIPTION: Translate a parse tree object that is an argument to an AML | 453 | * DESCRIPTION: Translate a parse tree object that is an argument to an AML |
454 | * opcode to the equivalent interpreter object. This may include | 454 | * opcode to the equivalent interpreter object. This may include |
455 | * looking up a name or entering a new name into the internal | 455 | * looking up a name or entering a new name into the internal |
456 | * namespace. | 456 | * namespace. |
457 | * | 457 | * |
@@ -496,9 +496,9 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, | |||
496 | /* | 496 | /* |
497 | * Special handling for buffer_field declarations. This is a deferred | 497 | * Special handling for buffer_field declarations. This is a deferred |
498 | * opcode that unfortunately defines the field name as the last | 498 | * opcode that unfortunately defines the field name as the last |
499 | * parameter instead of the first. We get here when we are performing | 499 | * parameter instead of the first. We get here when we are performing |
500 | * the deferred execution, so the actual name of the field is already | 500 | * the deferred execution, so the actual name of the field is already |
501 | * in the namespace. We don't want to attempt to look it up again | 501 | * in the namespace. We don't want to attempt to look it up again |
502 | * because we may be executing in a different scope than where the | 502 | * because we may be executing in a different scope than where the |
503 | * actual opcode exists. | 503 | * actual opcode exists. |
504 | */ | 504 | */ |
@@ -560,7 +560,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, | |||
560 | * indicate this to the interpreter, set the | 560 | * indicate this to the interpreter, set the |
561 | * object to the root | 561 | * object to the root |
562 | */ | 562 | */ |
563 | obj_desc = ACPI_CAST_PTR(union | 563 | obj_desc = |
564 | ACPI_CAST_PTR(union | ||
564 | acpi_operand_object, | 565 | acpi_operand_object, |
565 | acpi_gbl_root_node); | 566 | acpi_gbl_root_node); |
566 | status = AE_OK; | 567 | status = AE_OK; |
@@ -604,8 +605,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, | |||
604 | /* | 605 | /* |
605 | * If the name is null, this means that this is an | 606 | * If the name is null, this means that this is an |
606 | * optional result parameter that was not specified | 607 | * optional result parameter that was not specified |
607 | * in the original ASL. Create a Zero Constant for a | 608 | * in the original ASL. Create a Zero Constant for a |
608 | * placeholder. (Store to a constant is a Noop.) | 609 | * placeholder. (Store to a constant is a Noop.) |
609 | */ | 610 | */ |
610 | opcode = AML_ZERO_OP; /* Has no arguments! */ | 611 | opcode = AML_ZERO_OP; /* Has no arguments! */ |
611 | 612 | ||
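
Note: the dsutils.c hunks above only reflow comment text, but the "implicit return" mechanism those comments describe is worth spelling out: the interpreter keeps at most one saved result per walk, and saving a new one first releases the stale one. A minimal standalone C sketch of that ownership rule follows; every name in it is illustrative, not ACPICA's.

#include <stdio.h>
#include <stdlib.h>

struct obj { int refcount; int value; };

static struct obj *obj_new(int value)
{
    struct obj *o = malloc(sizeof(*o));
    o->refcount = 1;
    o->value = value;
    return o;
}

static void obj_release(struct obj *o)
{
    if (o && --o->refcount == 0)
        free(o);
}

struct walk_state { struct obj *implicit_return; };

/* Save a new implicit return value, dropping the stale one first --
 * mirrors the "delete the previous return value" rule in the comment. */
static void save_implicit_return(struct walk_state *ws, struct obj *result)
{
    obj_release(ws->implicit_return);   /* clear any stale value */
    result->refcount++;                 /* the walk now holds a reference */
    ws->implicit_return = result;
}

int main(void)
{
    struct walk_state ws = { NULL };
    struct obj *a = obj_new(1), *b = obj_new(2);

    save_implicit_return(&ws, a);       /* walk holds a */
    save_implicit_return(&ws, b);       /* a's extra ref dropped, walk holds b */

    printf("implicit return = %d\n", ws.implicit_return->value);

    obj_release(a);
    obj_release(b);
    obj_release(ws.implicit_return);
    return 0;
}
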
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c index 642f3c053e87..58593931be96 100644 --- a/drivers/acpi/acpica/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c | |||
@@ -57,7 +57,7 @@ ACPI_MODULE_NAME("dswexec") | |||
57 | /* | 57 | /* |
58 | * Dispatch table for opcode classes | 58 | * Dispatch table for opcode classes |
59 | */ | 59 | */ |
60 | static ACPI_EXECUTE_OP acpi_gbl_op_type_dispatch[] = { | 60 | static acpi_execute_op acpi_gbl_op_type_dispatch[] = { |
61 | acpi_ex_opcode_0A_0T_1R, | 61 | acpi_ex_opcode_0A_0T_1R, |
62 | acpi_ex_opcode_1A_0T_0R, | 62 | acpi_ex_opcode_1A_0T_0R, |
63 | acpi_ex_opcode_1A_0T_1R, | 63 | acpi_ex_opcode_1A_0T_1R, |
@@ -204,7 +204,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state, | |||
204 | * RETURN: Status | 204 | * RETURN: Status |
205 | * | 205 | * |
206 | * DESCRIPTION: Descending callback used during the execution of control | 206 | * DESCRIPTION: Descending callback used during the execution of control |
207 | * methods. This is where most operators and operands are | 207 | * methods. This is where most operators and operands are |
208 | * dispatched to the interpreter. | 208 | * dispatched to the interpreter. |
209 | * | 209 | * |
210 | ****************************************************************************/ | 210 | ****************************************************************************/ |
@@ -297,7 +297,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state, | |||
297 | if (walk_state->walk_type & ACPI_WALK_METHOD) { | 297 | if (walk_state->walk_type & ACPI_WALK_METHOD) { |
298 | /* | 298 | /* |
299 | * Found a named object declaration during method execution; | 299 | * Found a named object declaration during method execution; |
300 | * we must enter this object into the namespace. The created | 300 | * we must enter this object into the namespace. The created |
301 | * object is temporary and will be deleted upon completion of | 301 | * object is temporary and will be deleted upon completion of |
302 | * the execution of this method. | 302 | * the execution of this method. |
303 | * | 303 | * |
@@ -348,7 +348,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state, | |||
348 | * RETURN: Status | 348 | * RETURN: Status |
349 | * | 349 | * |
350 | * DESCRIPTION: Ascending callback used during the execution of control | 350 | * DESCRIPTION: Ascending callback used during the execution of control |
351 | * methods. The only thing we really need to do here is to | 351 | * methods. The only thing we really need to do here is to |
352 | * notice the beginning of IF, ELSE, and WHILE blocks. | 352 | * notice the beginning of IF, ELSE, and WHILE blocks. |
353 | * | 353 | * |
354 | ****************************************************************************/ | 354 | ****************************************************************************/ |
@@ -432,7 +432,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) | |||
432 | if (ACPI_SUCCESS(status)) { | 432 | if (ACPI_SUCCESS(status)) { |
433 | /* | 433 | /* |
434 | * Dispatch the request to the appropriate interpreter handler | 434 | * Dispatch the request to the appropriate interpreter handler |
435 | * routine. There is one routine per opcode "type" based upon the | 435 | * routine. There is one routine per opcode "type" based upon the |
436 | * number of opcode arguments and return type. | 436 | * number of opcode arguments and return type. |
437 | */ | 437 | */ |
438 | status = | 438 | status = |
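
Note: the ACPI_EXECUTE_OP -> acpi_execute_op change above is only a typedef rename, but the table it names is a classic dispatch-by-class pattern: one handler per opcode class, selected by the number of arguments and return values. A standalone sketch of the same pattern, with made-up handler names:

#include <stdio.h>

typedef int (*execute_op)(int operand);            /* stand-in for acpi_execute_op */

static int op_0a_0t_1r(int x) { (void)x; return 1; }
static int op_1a_0t_0r(int x) { printf("consumed %d\n", x); return 0; }
static int op_1a_0t_1r(int x) { return -x; }

/* One handler per opcode "class"; the class index is derived from the number
 * of arguments/targets/returns, like the table the hunk renames. */
static execute_op dispatch[] = { op_0a_0t_1r, op_1a_0t_0r, op_1a_0t_1r };

int main(void)
{
    int opcode_class = 2;                           /* pretend the decoder produced this */
    int result = dispatch[opcode_class](7);
    printf("result = %d\n", result);
    return 0;
}
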
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c index 89c0114210c0..379835748357 100644 --- a/drivers/acpi/acpica/dswload2.c +++ b/drivers/acpi/acpica/dswload2.c | |||
@@ -254,7 +254,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state, | |||
254 | acpi_ut_get_type_name(node->type), | 254 | acpi_ut_get_type_name(node->type), |
255 | acpi_ut_get_node_name(node))); | 255 | acpi_ut_get_node_name(node))); |
256 | 256 | ||
257 | return (AE_AML_OPERAND_TYPE); | 257 | return_ACPI_STATUS(AE_AML_OPERAND_TYPE); |
258 | } | 258 | } |
259 | break; | 259 | break; |
260 | 260 | ||
@@ -602,7 +602,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) | |||
602 | region_space, | 602 | region_space, |
603 | walk_state); | 603 | walk_state); |
604 | if (ACPI_FAILURE(status)) { | 604 | if (ACPI_FAILURE(status)) { |
605 | return (status); | 605 | return_ACPI_STATUS(status); |
606 | } | 606 | } |
607 | 607 | ||
608 | acpi_ex_exit_interpreter(); | 608 | acpi_ex_exit_interpreter(); |
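
Note: several hunks in this series (here and in dswstate.c, exdebug.c, evxfgpe.c and exmutex.c below) replace a bare return with return_ACPI_STATUS() or return_VOID. Those ACPICA macros pair with the ACPI_FUNCTION_TRACE entry macro so that function exit is traced; a plain return silently skips the exit message. A rough standalone illustration of why the pairing matters -- the macros below are stand-ins, not ACPICA's real definitions:

#include <stdio.h>

/* Illustrative stand-ins only -- not ACPICA's actual macro definitions. */
#define FUNCTION_TRACE(name)  const char *fn_name_ = (name); \
                              printf("enter %s\n", fn_name_)
#define return_STATUS(s)      do { printf("exit  %s -> %d\n", fn_name_, (int)(s)); \
                                   return (s); } while (0)

static int validate(int handle)
{
    FUNCTION_TRACE("validate");
    if (handle < 0)
        return_STATUS(-1);   /* a bare "return -1;" would skip the exit trace */
    return_STATUS(0);
}

int main(void)
{
    validate(-5);
    validate(3);
    return 0;
}
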
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c index d0e6555061e4..3e65a15a735f 100644 --- a/drivers/acpi/acpica/dswstate.c +++ b/drivers/acpi/acpica/dswstate.c | |||
@@ -51,8 +51,9 @@ | |||
51 | ACPI_MODULE_NAME("dswstate") | 51 | ACPI_MODULE_NAME("dswstate") |
52 | 52 | ||
53 | /* Local prototypes */ | 53 | /* Local prototypes */ |
54 | static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *ws); | 54 | static acpi_status |
55 | static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *ws); | 55 | acpi_ds_result_stack_push(struct acpi_walk_state *walk_state); |
56 | static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *walk_state); | ||
56 | 57 | ||
57 | /******************************************************************************* | 58 | /******************************************************************************* |
58 | * | 59 | * |
@@ -347,7 +348,7 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state) | |||
347 | * | 348 | * |
348 | * RETURN: Status | 349 | * RETURN: Status |
349 | * | 350 | * |
350 | * DESCRIPTION: Pop this walk's object stack. Objects on the stack are NOT | 351 | * DESCRIPTION: Pop this walk's object stack. Objects on the stack are NOT |
351 | * deleted by this routine. | 352 | * deleted by this routine. |
352 | * | 353 | * |
353 | ******************************************************************************/ | 354 | ******************************************************************************/ |
@@ -491,7 +492,7 @@ acpi_ds_push_walk_state(struct acpi_walk_state *walk_state, | |||
491 | * RETURN: A walk_state object popped from the thread's stack | 492 | * RETURN: A walk_state object popped from the thread's stack |
492 | * | 493 | * |
493 | * DESCRIPTION: Remove and return the walkstate object that is at the head of | 494 | * DESCRIPTION: Remove and return the walkstate object that is at the head of |
494 | * the walk stack for the given walk list. NULL indicates that | 495 | * the walk stack for the given walk list. NULL indicates that |
495 | * the list is empty. | 496 | * the list is empty. |
496 | * | 497 | * |
497 | ******************************************************************************/ | 498 | ******************************************************************************/ |
@@ -531,14 +532,17 @@ struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread) | |||
531 | * | 532 | * |
532 | * RETURN: Pointer to the new walk state. | 533 | * RETURN: Pointer to the new walk state. |
533 | * | 534 | * |
534 | * DESCRIPTION: Allocate and initialize a new walk state. The current walk | 535 | * DESCRIPTION: Allocate and initialize a new walk state. The current walk |
535 | * state is set to this new state. | 536 | * state is set to this new state. |
536 | * | 537 | * |
537 | ******************************************************************************/ | 538 | ******************************************************************************/ |
538 | 539 | ||
539 | struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union acpi_parse_object | 540 | struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, |
540 | *origin, union acpi_operand_object | 541 | union acpi_parse_object |
541 | *method_desc, struct acpi_thread_state | 542 | *origin, |
543 | union acpi_operand_object | ||
544 | *method_desc, | ||
545 | struct acpi_thread_state | ||
542 | *thread) | 546 | *thread) |
543 | { | 547 | { |
544 | struct acpi_walk_state *walk_state; | 548 | struct acpi_walk_state *walk_state; |
@@ -653,7 +657,7 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state, | |||
653 | /* | 657 | /* |
654 | * Setup the current scope. | 658 | * Setup the current scope. |
655 | * Find a Named Op that has a namespace node associated with it. | 659 | * Find a Named Op that has a namespace node associated with it. |
656 | * Search upwards from this Op. Current scope is the first | 660 | * Search upwards from this Op. Current scope is the first |
657 | * Op with a namespace node. | 661 | * Op with a namespace node. |
658 | */ | 662 | */ |
659 | extra_op = parser_state->start_op; | 663 | extra_op = parser_state->start_op; |
@@ -704,13 +708,13 @@ void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state) | |||
704 | ACPI_FUNCTION_TRACE_PTR(ds_delete_walk_state, walk_state); | 708 | ACPI_FUNCTION_TRACE_PTR(ds_delete_walk_state, walk_state); |
705 | 709 | ||
706 | if (!walk_state) { | 710 | if (!walk_state) { |
707 | return; | 711 | return_VOID; |
708 | } | 712 | } |
709 | 713 | ||
710 | if (walk_state->descriptor_type != ACPI_DESC_TYPE_WALK) { | 714 | if (walk_state->descriptor_type != ACPI_DESC_TYPE_WALK) { |
711 | ACPI_ERROR((AE_INFO, "%p is not a valid walk state", | 715 | ACPI_ERROR((AE_INFO, "%p is not a valid walk state", |
712 | walk_state)); | 716 | walk_state)); |
713 | return; | 717 | return_VOID; |
714 | } | 718 | } |
715 | 719 | ||
716 | /* There should not be any open scopes */ | 720 | /* There should not be any open scopes */ |
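
Note: the dswstate.c comments above describe the per-thread walk-state stack: push places a state at the head of the thread's list, pop returns the head or NULL when the list is empty, and popping never frees anything. A small standalone sketch of that list discipline (names are illustrative):

#include <stdio.h>
#include <stddef.h>

struct walk_state { struct walk_state *next; int id; };
struct thread_state { struct walk_state *walk_state_list; };

/* Push onto the head of the per-thread list. */
static void push_walk_state(struct thread_state *t, struct walk_state *ws)
{
    ws->next = t->walk_state_list;
    t->walk_state_list = ws;
}

/* Pop the head; NULL means the list is empty. The caller still owns the
 * popped object -- popping never frees, matching the comment in the hunk. */
static struct walk_state *pop_walk_state(struct thread_state *t)
{
    struct walk_state *ws = t->walk_state_list;
    if (ws)
        t->walk_state_list = ws->next;
    return ws;
}

int main(void)
{
    struct thread_state thread = { NULL };
    struct walk_state a = { NULL, 1 }, b = { NULL, 2 };

    push_walk_state(&thread, &a);
    push_walk_state(&thread, &b);

    for (struct walk_state *ws; (ws = pop_walk_state(&thread)) != NULL; )
        printf("popped walk state %d\n", ws->id);
    return 0;
}
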
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index ef0193d74b5d..36d120574423 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c | |||
@@ -89,7 +89,8 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info) | |||
89 | /* Set the mask bit only if there are references to this GPE */ | 89 | /* Set the mask bit only if there are references to this GPE */ |
90 | 90 | ||
91 | if (gpe_event_info->runtime_count) { | 91 | if (gpe_event_info->runtime_count) { |
92 | ACPI_SET_BIT(gpe_register_info->enable_for_run, (u8)register_bit); | 92 | ACPI_SET_BIT(gpe_register_info->enable_for_run, |
93 | (u8)register_bit); | ||
93 | } | 94 | } |
94 | 95 | ||
95 | return_ACPI_STATUS(AE_OK); | 96 | return_ACPI_STATUS(AE_OK); |
@@ -106,8 +107,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info) | |||
106 | * DESCRIPTION: Clear a GPE of stale events and enable it. | 107 | * DESCRIPTION: Clear a GPE of stale events and enable it. |
107 | * | 108 | * |
108 | ******************************************************************************/ | 109 | ******************************************************************************/ |
109 | acpi_status | 110 | acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) |
110 | acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | ||
111 | { | 111 | { |
112 | acpi_status status; | 112 | acpi_status status; |
113 | 113 | ||
@@ -131,8 +131,8 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
131 | } | 131 | } |
132 | 132 | ||
133 | /* Enable the requested GPE */ | 133 | /* Enable the requested GPE */ |
134 | status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); | ||
135 | 134 | ||
135 | status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); | ||
136 | return_ACPI_STATUS(status); | 136 | return_ACPI_STATUS(status); |
137 | } | 137 | } |
138 | 138 | ||
@@ -150,7 +150,8 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
150 | * | 150 | * |
151 | ******************************************************************************/ | 151 | ******************************************************************************/ |
152 | 152 | ||
153 | acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) | 153 | acpi_status |
154 | acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) | ||
154 | { | 155 | { |
155 | acpi_status status = AE_OK; | 156 | acpi_status status = AE_OK; |
156 | 157 | ||
@@ -191,7 +192,8 @@ acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info | |||
191 | * | 192 | * |
192 | ******************************************************************************/ | 193 | ******************************************************************************/ |
193 | 194 | ||
194 | acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) | 195 | acpi_status |
196 | acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) | ||
195 | { | 197 | { |
196 | acpi_status status = AE_OK; | 198 | acpi_status status = AE_OK; |
197 | 199 | ||
@@ -208,7 +210,8 @@ acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_i | |||
208 | 210 | ||
209 | status = acpi_ev_update_gpe_enable_mask(gpe_event_info); | 211 | status = acpi_ev_update_gpe_enable_mask(gpe_event_info); |
210 | if (ACPI_SUCCESS(status)) { | 212 | if (ACPI_SUCCESS(status)) { |
211 | status = acpi_hw_low_set_gpe(gpe_event_info, | 213 | status = |
214 | acpi_hw_low_set_gpe(gpe_event_info, | ||
212 | ACPI_GPE_DISABLE); | 215 | ACPI_GPE_DISABLE); |
213 | } | 216 | } |
214 | 217 | ||
@@ -306,7 +309,8 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, | |||
306 | 309 | ||
307 | /* A Non-NULL gpe_device means this is a GPE Block Device */ | 310 | /* A Non-NULL gpe_device means this is a GPE Block Device */ |
308 | 311 | ||
309 | obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *) | 312 | obj_desc = |
313 | acpi_ns_get_attached_object((struct acpi_namespace_node *) | ||
310 | gpe_device); | 314 | gpe_device); |
311 | if (!obj_desc || !obj_desc->device.gpe_block) { | 315 | if (!obj_desc || !obj_desc->device.gpe_block) { |
312 | return (NULL); | 316 | return (NULL); |
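
Note: the evgpe.c hunks reflow code around GPE reference counting: a GPE's enable bit is set in enable_for_run only while runtime_count is non-zero, and the hardware is touched only on the 0->1 and 1->0 transitions. A standalone sketch of that bookkeeping, using hypothetical structures loosely modeled on the fields visible above:

#include <stdio.h>

/* Hypothetical GPE bookkeeping, not the real ACPICA structures. */
struct gpe_event { unsigned int runtime_count; unsigned char register_bit; };
struct gpe_register { unsigned char enable_for_run; };

static void update_enable_mask(struct gpe_register *reg, struct gpe_event *ev)
{
    if (ev->runtime_count)
        reg->enable_for_run |= ev->register_bit;    /* at least one user left */
    else
        reg->enable_for_run &= (unsigned char)~ev->register_bit; /* no users */
}

static void add_gpe_reference(struct gpe_register *reg, struct gpe_event *ev)
{
    if (ev->runtime_count++ == 0)       /* 0 -> 1: first user enables */
        update_enable_mask(reg, ev);
}

static void remove_gpe_reference(struct gpe_register *reg, struct gpe_event *ev)
{
    if (--ev->runtime_count == 0)       /* 1 -> 0: last user disables */
        update_enable_mask(reg, ev);
}

int main(void)
{
    struct gpe_register reg = { 0 };
    struct gpe_event ev = { 0, 0x04 };

    add_gpe_reference(&reg, &ev);
    add_gpe_reference(&reg, &ev);
    printf("mask after 2 adds:    0x%02X\n", reg.enable_for_run);   /* 0x04 */
    remove_gpe_reference(&reg, &ev);
    remove_gpe_reference(&reg, &ev);
    printf("mask after 2 removes: 0x%02X\n", reg.enable_for_run);   /* 0x00 */
    return 0;
}
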
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index 8cf4c104c7b7..1571a61a7833 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c | |||
@@ -486,7 +486,8 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
486 | if (ACPI_FAILURE(status)) { | 486 | if (ACPI_FAILURE(status)) { |
487 | ACPI_EXCEPTION((AE_INFO, status, | 487 | ACPI_EXCEPTION((AE_INFO, status, |
488 | "Could not enable GPE 0x%02X", | 488 | "Could not enable GPE 0x%02X", |
489 | gpe_index + gpe_block->block_base_number)); | 489 | gpe_index + |
490 | gpe_block->block_base_number)); | ||
490 | continue; | 491 | continue; |
491 | } | 492 | } |
492 | 493 | ||
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c index cb50dd91bc18..228a0c3b1d49 100644 --- a/drivers/acpi/acpica/evgpeutil.c +++ b/drivers/acpi/acpica/evgpeutil.c | |||
@@ -374,7 +374,8 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
374 | gpe_event_info->dispatch.handler = NULL; | 374 | gpe_event_info->dispatch.handler = NULL; |
375 | gpe_event_info->flags &= | 375 | gpe_event_info->flags &= |
376 | ~ACPI_GPE_DISPATCH_MASK; | 376 | ~ACPI_GPE_DISPATCH_MASK; |
377 | } else if ((gpe_event_info-> | 377 | } else |
378 | if ((gpe_event_info-> | ||
378 | flags & ACPI_GPE_DISPATCH_MASK) == | 379 | flags & ACPI_GPE_DISPATCH_MASK) == |
379 | ACPI_GPE_DISPATCH_NOTIFY) { | 380 | ACPI_GPE_DISPATCH_NOTIFY) { |
380 | 381 | ||
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c index 4c1c8261166f..1474241bfc7e 100644 --- a/drivers/acpi/acpica/evrgnini.c +++ b/drivers/acpi/acpica/evrgnini.c | |||
@@ -227,8 +227,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, | |||
227 | 227 | ||
228 | /* Install a handler for this PCI root bridge */ | 228 | /* Install a handler for this PCI root bridge */ |
229 | 229 | ||
230 | status = | 230 | status = acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL); |
231 | acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL); | ||
232 | if (ACPI_FAILURE(status)) { | 231 | if (ACPI_FAILURE(status)) { |
233 | if (status == AE_SAME_HANDLER) { | 232 | if (status == AE_SAME_HANDLER) { |
234 | /* | 233 | /* |
@@ -350,8 +349,8 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, | |||
350 | static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node) | 349 | static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node) |
351 | { | 350 | { |
352 | acpi_status status; | 351 | acpi_status status; |
353 | struct acpica_device_id *hid; | 352 | struct acpi_pnp_device_id *hid; |
354 | struct acpica_device_id_list *cid; | 353 | struct acpi_pnp_device_id_list *cid; |
355 | u32 i; | 354 | u32 i; |
356 | u8 match; | 355 | u8 match; |
357 | 356 | ||
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index 7587eb6c9584..ae668f32cf16 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c | |||
@@ -398,7 +398,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler) | |||
398 | * | 398 | * |
399 | ******************************************************************************/ | 399 | ******************************************************************************/ |
400 | acpi_status | 400 | acpi_status |
401 | acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context) | 401 | acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context) |
402 | { | 402 | { |
403 | acpi_status status; | 403 | acpi_status status; |
404 | 404 | ||
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 87c5f2332260..3f30e753b652 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c | |||
@@ -221,7 +221,8 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device, | |||
221 | if (wake_device == ACPI_ROOT_OBJECT) { | 221 | if (wake_device == ACPI_ROOT_OBJECT) { |
222 | device_node = acpi_gbl_root_node; | 222 | device_node = acpi_gbl_root_node; |
223 | } else { | 223 | } else { |
224 | device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); | 224 | device_node = |
225 | ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); | ||
225 | } | 226 | } |
226 | 227 | ||
227 | /* Validate WakeDevice is of type Device */ | 228 | /* Validate WakeDevice is of type Device */ |
@@ -324,7 +325,8 @@ ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake) | |||
324 | * | 325 | * |
325 | ******************************************************************************/ | 326 | ******************************************************************************/ |
326 | 327 | ||
327 | acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action) | 328 | acpi_status |
329 | acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action) | ||
328 | { | 330 | { |
329 | acpi_status status = AE_OK; | 331 | acpi_status status = AE_OK; |
330 | struct acpi_gpe_event_info *gpe_event_info; | 332 | struct acpi_gpe_event_info *gpe_event_info; |
@@ -567,7 +569,7 @@ acpi_install_gpe_block(acpi_handle gpe_device, | |||
567 | 569 | ||
568 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | 570 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); |
569 | if (ACPI_FAILURE(status)) { | 571 | if (ACPI_FAILURE(status)) { |
570 | return (status); | 572 | return_ACPI_STATUS(status); |
571 | } | 573 | } |
572 | 574 | ||
573 | node = acpi_ns_validate_handle(gpe_device); | 575 | node = acpi_ns_validate_handle(gpe_device); |
@@ -650,7 +652,7 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device) | |||
650 | 652 | ||
651 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | 653 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); |
652 | if (ACPI_FAILURE(status)) { | 654 | if (ACPI_FAILURE(status)) { |
653 | return (status); | 655 | return_ACPI_STATUS(status); |
654 | } | 656 | } |
655 | 657 | ||
656 | node = acpi_ns_validate_handle(gpe_device); | 658 | node = acpi_ns_validate_handle(gpe_device); |
@@ -694,8 +696,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block) | |||
694 | * the FADT-defined gpe blocks. Otherwise, the GPE block device. | 696 | * the FADT-defined gpe blocks. Otherwise, the GPE block device. |
695 | * | 697 | * |
696 | ******************************************************************************/ | 698 | ******************************************************************************/ |
697 | acpi_status | 699 | acpi_status acpi_get_gpe_device(u32 index, acpi_handle * gpe_device) |
698 | acpi_get_gpe_device(u32 index, acpi_handle *gpe_device) | ||
699 | { | 700 | { |
700 | struct acpi_gpe_device_info info; | 701 | struct acpi_gpe_device_info info; |
701 | acpi_status status; | 702 | acpi_status status; |
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c index bfb062e4c4b4..4492a4e03022 100644 --- a/drivers/acpi/acpica/exconvrt.c +++ b/drivers/acpi/acpica/exconvrt.c | |||
@@ -516,8 +516,8 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, | |||
516 | string_length--; | 516 | string_length--; |
517 | } | 517 | } |
518 | 518 | ||
519 | return_desc = acpi_ut_create_string_object((acpi_size) | 519 | return_desc = |
520 | string_length); | 520 | acpi_ut_create_string_object((acpi_size) string_length); |
521 | if (!return_desc) { | 521 | if (!return_desc) { |
522 | return_ACPI_STATUS(AE_NO_MEMORY); | 522 | return_ACPI_STATUS(AE_NO_MEMORY); |
523 | } | 523 | } |
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c index 691d4763102c..66554bc6f9a8 100644 --- a/drivers/acpi/acpica/excreate.c +++ b/drivers/acpi/acpica/excreate.c | |||
@@ -78,7 +78,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state) | |||
78 | (target_node->type == ACPI_TYPE_LOCAL_METHOD_ALIAS)) { | 78 | (target_node->type == ACPI_TYPE_LOCAL_METHOD_ALIAS)) { |
79 | /* | 79 | /* |
80 | * Dereference an existing alias so that we don't create a chain | 80 | * Dereference an existing alias so that we don't create a chain |
81 | * of aliases. With this code, we guarantee that an alias is | 81 | * of aliases. With this code, we guarantee that an alias is |
82 | * always exactly one level of indirection away from the | 82 | * always exactly one level of indirection away from the |
83 | * actual aliased name. | 83 | * actual aliased name. |
84 | */ | 84 | */ |
@@ -90,7 +90,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state) | |||
90 | /* | 90 | /* |
91 | * For objects that can never change (i.e., the NS node will | 91 | * For objects that can never change (i.e., the NS node will |
92 | * permanently point to the same object), we can simply attach | 92 | * permanently point to the same object), we can simply attach |
93 | * the object to the new NS node. For other objects (such as | 93 | * the object to the new NS node. For other objects (such as |
94 | * Integers, buffers, etc.), we have to point the Alias node | 94 | * Integers, buffers, etc.), we have to point the Alias node |
95 | * to the original Node. | 95 | * to the original Node. |
96 | */ | 96 | */ |
@@ -139,7 +139,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state) | |||
139 | 139 | ||
140 | /* | 140 | /* |
141 | * The new alias assumes the type of the target, and it points | 141 | * The new alias assumes the type of the target, and it points |
142 | * to the same object. The reference count of the object has an | 142 | * to the same object. The reference count of the object has an |
143 | * additional reference to prevent deletion out from under either the | 143 | * additional reference to prevent deletion out from under either the |
144 | * target node or the alias Node | 144 | * target node or the alias Node |
145 | */ | 145 | */ |
@@ -243,8 +243,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state) | |||
243 | 243 | ||
244 | /* Init object and attach to NS node */ | 244 | /* Init object and attach to NS node */ |
245 | 245 | ||
246 | obj_desc->mutex.sync_level = | 246 | obj_desc->mutex.sync_level = (u8)walk_state->operands[1]->integer.value; |
247 | (u8) walk_state->operands[1]->integer.value; | ||
248 | obj_desc->mutex.node = | 247 | obj_desc->mutex.node = |
249 | (struct acpi_namespace_node *)walk_state->operands[0]; | 248 | (struct acpi_namespace_node *)walk_state->operands[0]; |
250 | 249 | ||
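
Note: the excreate.c comment above guarantees that an alias is always exactly one level of indirection away from the aliased object: when the target of a new alias is itself an alias, the existing alias is dereferenced first. A minimal sketch of that rule (illustrative types, not ACPICA's namespace nodes):

#include <stdio.h>

/* Simplified node: alias_target is non-NULL only for alias nodes. */
struct ns_node { const char *name; struct ns_node *alias_target; };

/* When creating "alias -> target", never point at another alias; point at
 * what that alias already resolves to. This keeps every alias exactly one
 * hop away from the real object, as the comment in the hunk guarantees. */
static void create_alias(struct ns_node *alias, struct ns_node *target)
{
    if (target->alias_target)
        target = target->alias_target;  /* dereference the existing alias */
    alias->alias_target = target;
}

int main(void)
{
    struct ns_node real = { "REAL", NULL };
    struct ns_node a1 = { "ALI1", NULL };
    struct ns_node a2 = { "ALI2", NULL };

    create_alias(&a1, &real);   /* ALI1 -> REAL */
    create_alias(&a2, &a1);     /* ALI2 -> REAL, not ALI2 -> ALI1 */

    printf("%s resolves to %s\n", a2.name, a2.alias_target->name);
    return 0;
}
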
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c index bc5b9a6a1316..d7c9f51608a7 100644 --- a/drivers/acpi/acpica/exdebug.c +++ b/drivers/acpi/acpica/exdebug.c | |||
@@ -145,10 +145,10 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc, | |||
145 | case ACPI_TYPE_BUFFER: | 145 | case ACPI_TYPE_BUFFER: |
146 | 146 | ||
147 | acpi_os_printf("[0x%.2X]\n", (u32)source_desc->buffer.length); | 147 | acpi_os_printf("[0x%.2X]\n", (u32)source_desc->buffer.length); |
148 | acpi_ut_dump_buffer2(source_desc->buffer.pointer, | 148 | acpi_ut_dump_buffer(source_desc->buffer.pointer, |
149 | (source_desc->buffer.length < 256) ? | 149 | (source_desc->buffer.length < 256) ? |
150 | source_desc->buffer.length : 256, | 150 | source_desc->buffer.length : 256, |
151 | DB_BYTE_DISPLAY); | 151 | DB_BYTE_DISPLAY, 0); |
152 | break; | 152 | break; |
153 | 153 | ||
154 | case ACPI_TYPE_STRING: | 154 | case ACPI_TYPE_STRING: |
@@ -190,7 +190,7 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc, | |||
190 | 190 | ||
191 | acpi_os_printf("Table Index 0x%X\n", | 191 | acpi_os_printf("Table Index 0x%X\n", |
192 | source_desc->reference.value); | 192 | source_desc->reference.value); |
193 | return; | 193 | return_VOID; |
194 | 194 | ||
195 | default: | 195 | default: |
196 | break; | 196 | break; |
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c index 213c081776fc..858b43a7dcf6 100644 --- a/drivers/acpi/acpica/exdump.c +++ b/drivers/acpi/acpica/exdump.c | |||
@@ -464,7 +464,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth) | |||
464 | 464 | ||
465 | ACPI_FUNCTION_NAME(ex_dump_operand) | 465 | ACPI_FUNCTION_NAME(ex_dump_operand) |
466 | 466 | ||
467 | if (!((ACPI_LV_EXEC & acpi_dbg_level) | 467 | if (! |
468 | ((ACPI_LV_EXEC & acpi_dbg_level) | ||
468 | && (_COMPONENT & acpi_dbg_layer))) { | 469 | && (_COMPONENT & acpi_dbg_layer))) { |
469 | return; | 470 | return; |
470 | } | 471 | } |
@@ -777,7 +778,7 @@ acpi_ex_dump_operands(union acpi_operand_object **operands, | |||
777 | * PARAMETERS: title - Descriptive text | 778 | * PARAMETERS: title - Descriptive text |
778 | * value - Value to be displayed | 779 | * value - Value to be displayed |
779 | * | 780 | * |
780 | * DESCRIPTION: Object dump output formatting functions. These functions | 781 | * DESCRIPTION: Object dump output formatting functions. These functions |
781 | * reduce the number of format strings required and keeps them | 782 | * reduce the number of format strings required and keeps them |
782 | * all in one place for easy modification. | 783 | * all in one place for easy modification. |
783 | * | 784 | * |
@@ -810,7 +811,8 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags) | |||
810 | ACPI_FUNCTION_ENTRY(); | 811 | ACPI_FUNCTION_ENTRY(); |
811 | 812 | ||
812 | if (!flags) { | 813 | if (!flags) { |
813 | if (!((ACPI_LV_OBJECTS & acpi_dbg_level) | 814 | if (! |
815 | ((ACPI_LV_OBJECTS & acpi_dbg_level) | ||
814 | && (_COMPONENT & acpi_dbg_layer))) { | 816 | && (_COMPONENT & acpi_dbg_layer))) { |
815 | return; | 817 | return; |
816 | } | 818 | } |
@@ -940,10 +942,11 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc, | |||
940 | acpi_os_printf("[Buffer] Length %.2X = ", | 942 | acpi_os_printf("[Buffer] Length %.2X = ", |
941 | obj_desc->buffer.length); | 943 | obj_desc->buffer.length); |
942 | if (obj_desc->buffer.length) { | 944 | if (obj_desc->buffer.length) { |
943 | acpi_ut_dump_buffer(ACPI_CAST_PTR | 945 | acpi_ut_debug_dump_buffer(ACPI_CAST_PTR |
944 | (u8, obj_desc->buffer.pointer), | 946 | (u8, |
945 | obj_desc->buffer.length, | 947 | obj_desc->buffer.pointer), |
946 | DB_DWORD_DISPLAY, _COMPONENT); | 948 | obj_desc->buffer.length, |
949 | DB_DWORD_DISPLAY, _COMPONENT); | ||
947 | } else { | 950 | } else { |
948 | acpi_os_printf("\n"); | 951 | acpi_os_printf("\n"); |
949 | } | 952 | } |
@@ -996,7 +999,8 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags) | |||
996 | } | 999 | } |
997 | 1000 | ||
998 | if (!flags) { | 1001 | if (!flags) { |
999 | if (!((ACPI_LV_OBJECTS & acpi_dbg_level) | 1002 | if (! |
1003 | ((ACPI_LV_OBJECTS & acpi_dbg_level) | ||
1000 | && (_COMPONENT & acpi_dbg_layer))) { | 1004 | && (_COMPONENT & acpi_dbg_layer))) { |
1001 | return_VOID; | 1005 | return_VOID; |
1002 | } | 1006 | } |
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c index dc092f5b35d6..ebc55fbf3ff7 100644 --- a/drivers/acpi/acpica/exfield.c +++ b/drivers/acpi/acpica/exfield.c | |||
@@ -59,7 +59,7 @@ ACPI_MODULE_NAME("exfield") | |||
59 | * | 59 | * |
60 | * RETURN: Status | 60 | * RETURN: Status |
61 | * | 61 | * |
62 | * DESCRIPTION: Read from a named field. Returns either an Integer or a | 62 | * DESCRIPTION: Read from a named field. Returns either an Integer or a |
63 | * Buffer, depending on the size of the field. | 63 | * Buffer, depending on the size of the field. |
64 | * | 64 | * |
65 | ******************************************************************************/ | 65 | ******************************************************************************/ |
@@ -149,7 +149,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state, | |||
149 | * Allocate a buffer for the contents of the field. | 149 | * Allocate a buffer for the contents of the field. |
150 | * | 150 | * |
151 | * If the field is larger than the current integer width, create | 151 | * If the field is larger than the current integer width, create |
152 | * a BUFFER to hold it. Otherwise, use an INTEGER. This allows | 152 | * a BUFFER to hold it. Otherwise, use an INTEGER. This allows |
153 | * the use of arithmetic operators on the returned value if the | 153 | * the use of arithmetic operators on the returned value if the |
154 | * field size is equal or smaller than an Integer. | 154 | * field size is equal or smaller than an Integer. |
155 | * | 155 | * |
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c index a7784152ed30..aa2ccfb7cb61 100644 --- a/drivers/acpi/acpica/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c | |||
@@ -54,8 +54,7 @@ ACPI_MODULE_NAME("exfldio") | |||
54 | /* Local prototypes */ | 54 | /* Local prototypes */ |
55 | static acpi_status | 55 | static acpi_status |
56 | acpi_ex_field_datum_io(union acpi_operand_object *obj_desc, | 56 | acpi_ex_field_datum_io(union acpi_operand_object *obj_desc, |
57 | u32 field_datum_byte_offset, | 57 | u32 field_datum_byte_offset, u64 *value, u32 read_write); |
58 | u64 *value, u32 read_write); | ||
59 | 58 | ||
60 | static u8 | 59 | static u8 |
61 | acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value); | 60 | acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value); |
@@ -155,7 +154,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, | |||
155 | #endif | 154 | #endif |
156 | 155 | ||
157 | /* | 156 | /* |
158 | * Validate the request. The entire request from the byte offset for a | 157 | * Validate the request. The entire request from the byte offset for a |
159 | * length of one field datum (access width) must fit within the region. | 158 | * length of one field datum (access width) must fit within the region. |
160 | * (Region length is specified in bytes) | 159 | * (Region length is specified in bytes) |
161 | */ | 160 | */ |
@@ -183,7 +182,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, | |||
183 | obj_desc->common_field.access_byte_width) { | 182 | obj_desc->common_field.access_byte_width) { |
184 | /* | 183 | /* |
185 | * This is the case where the access_type (acc_word, etc.) is wider | 184 | * This is the case where the access_type (acc_word, etc.) is wider |
186 | * than the region itself. For example, a region of length one | 185 | * than the region itself. For example, a region of length one |
187 | * byte, and a field with Dword access specified. | 186 | * byte, and a field with Dword access specified. |
188 | */ | 187 | */ |
189 | ACPI_ERROR((AE_INFO, | 188 | ACPI_ERROR((AE_INFO, |
@@ -321,7 +320,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc, | |||
321 | * | 320 | * |
322 | * DESCRIPTION: Check if a value is out of range of the field being written. | 321 | * DESCRIPTION: Check if a value is out of range of the field being written. |
323 | * Used to check if the values written to Index and Bank registers | 322 | * Used to check if the values written to Index and Bank registers |
324 | * are out of range. Normally, the value is simply truncated | 323 | * are out of range. Normally, the value is simply truncated |
325 | * to fit the field, but this case is most likely a serious | 324 | * to fit the field, but this case is most likely a serious |
326 | * coding error in the ASL. | 325 | * coding error in the ASL. |
327 | * | 326 | * |
@@ -370,7 +369,7 @@ acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value) | |||
370 | * | 369 | * |
371 | * RETURN: Status | 370 | * RETURN: Status |
372 | * | 371 | * |
373 | * DESCRIPTION: Read or Write a single datum of a field. The field_type is | 372 | * DESCRIPTION: Read or Write a single datum of a field. The field_type is |
374 | * demultiplexed here to handle the different types of fields | 373 | * demultiplexed here to handle the different types of fields |
375 | * (buffer_field, region_field, index_field, bank_field) | 374 | * (buffer_field, region_field, index_field, bank_field) |
376 | * | 375 | * |
@@ -860,7 +859,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc, | |||
860 | ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length); | 859 | ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length); |
861 | /* | 860 | /* |
862 | * We must have a buffer that is at least as long as the field | 861 | * We must have a buffer that is at least as long as the field |
863 | * we are writing to. This is because individual fields are | 862 | * we are writing to. This is because individual fields are |
864 | * indivisible and partial writes are not supported -- as per | 863 | * indivisible and partial writes are not supported -- as per |
865 | * the ACPI specification. | 864 | * the ACPI specification. |
866 | */ | 865 | */ |
@@ -875,7 +874,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc, | |||
875 | 874 | ||
876 | /* | 875 | /* |
877 | * Copy the original data to the new buffer, starting | 876 | * Copy the original data to the new buffer, starting |
878 | * at Byte zero. All unused (upper) bytes of the | 877 | * at Byte zero. All unused (upper) bytes of the |
879 | * buffer will be 0. | 878 | * buffer will be 0. |
880 | */ | 879 | */ |
881 | ACPI_MEMCPY((char *)new_buffer, (char *)buffer, buffer_length); | 880 | ACPI_MEMCPY((char *)new_buffer, (char *)buffer, buffer_length); |
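
Note: exfldio.c's register-overflow comment describes a sanity check: a value written to an Index or Bank register that does not fit in the field would normally just be truncated, which almost always indicates an ASL coding error. A standalone sketch of such a width check:

#include <stdio.h>

/* Check whether a value would be truncated when written into a field of the
 * given bit width -- the situation the exfldio comment calls a likely ASL
 * coding error. Illustrative only. */
static int register_overflow(unsigned int bit_length, unsigned long long value)
{
    if (bit_length >= 64)
        return 0;                            /* any 64-bit value fits */
    return value >= (1ULL << bit_length);    /* high bits would be lost */
}

int main(void)
{
    printf("0xFF into 8 bits:  %s\n", register_overflow(8, 0xFF)  ? "overflow" : "ok");
    printf("0x100 into 8 bits: %s\n", register_overflow(8, 0x100) ? "overflow" : "ok");
    return 0;
}
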
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c index 271c0c57ea10..84058705ed12 100644 --- a/drivers/acpi/acpica/exmisc.c +++ b/drivers/acpi/acpica/exmisc.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes | 3 | * Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes |
@@ -254,7 +253,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, | |||
254 | ACPI_FUNCTION_TRACE(ex_do_concatenate); | 253 | ACPI_FUNCTION_TRACE(ex_do_concatenate); |
255 | 254 | ||
256 | /* | 255 | /* |
257 | * Convert the second operand if necessary. The first operand | 256 | * Convert the second operand if necessary. The first operand |
258 | * determines the type of the second operand, (See the Data Types | 257 | * determines the type of the second operand, (See the Data Types |
259 | * section of the ACPI specification.) Both object types are | 258 | * section of the ACPI specification.) Both object types are |
260 | * guaranteed to be either Integer/String/Buffer by the operand | 259 | * guaranteed to be either Integer/String/Buffer by the operand |
@@ -573,7 +572,7 @@ acpi_ex_do_logical_op(u16 opcode, | |||
573 | ACPI_FUNCTION_TRACE(ex_do_logical_op); | 572 | ACPI_FUNCTION_TRACE(ex_do_logical_op); |
574 | 573 | ||
575 | /* | 574 | /* |
576 | * Convert the second operand if necessary. The first operand | 575 | * Convert the second operand if necessary. The first operand |
577 | * determines the type of the second operand, (See the Data Types | 576 | * determines the type of the second operand, (See the Data Types |
578 | * section of the ACPI 3.0+ specification.) Both object types are | 577 | * section of the ACPI 3.0+ specification.) Both object types are |
579 | * guaranteed to be either Integer/String/Buffer by the operand | 578 | * guaranteed to be either Integer/String/Buffer by the operand |
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c index bcceda5be9e3..d1f449d93dcf 100644 --- a/drivers/acpi/acpica/exmutex.c +++ b/drivers/acpi/acpica/exmutex.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exmutex - ASL Mutex Acquire/Release functions | 3 | * Module Name: exmutex - ASL Mutex Acquire/Release functions |
@@ -305,7 +304,7 @@ acpi_status acpi_ex_release_mutex_object(union acpi_operand_object *obj_desc) | |||
305 | ACPI_FUNCTION_TRACE(ex_release_mutex_object); | 304 | ACPI_FUNCTION_TRACE(ex_release_mutex_object); |
306 | 305 | ||
307 | if (obj_desc->mutex.acquisition_depth == 0) { | 306 | if (obj_desc->mutex.acquisition_depth == 0) { |
308 | return (AE_NOT_ACQUIRED); | 307 | return_ACPI_STATUS(AE_NOT_ACQUIRED); |
309 | } | 308 | } |
310 | 309 | ||
311 | /* Match multiple Acquires with multiple Releases */ | 310 | /* Match multiple Acquires with multiple Releases */ |
@@ -462,7 +461,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread) | |||
462 | union acpi_operand_object *next = thread->acquired_mutex_list; | 461 | union acpi_operand_object *next = thread->acquired_mutex_list; |
463 | union acpi_operand_object *obj_desc; | 462 | union acpi_operand_object *obj_desc; |
464 | 463 | ||
465 | ACPI_FUNCTION_ENTRY(); | 464 | ACPI_FUNCTION_NAME(ex_release_all_mutexes); |
466 | 465 | ||
467 | /* Traverse the list of owned mutexes, releasing each one */ | 466 | /* Traverse the list of owned mutexes, releasing each one */ |
468 | 467 | ||
@@ -474,6 +473,10 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread) | |||
474 | obj_desc->mutex.next = NULL; | 473 | obj_desc->mutex.next = NULL; |
475 | obj_desc->mutex.acquisition_depth = 0; | 474 | obj_desc->mutex.acquisition_depth = 0; |
476 | 475 | ||
476 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, | ||
477 | "Force-releasing held mutex: %p\n", | ||
478 | obj_desc)); | ||
479 | |||
477 | /* Release the mutex, special case for Global Lock */ | 480 | /* Release the mutex, special case for Global Lock */ |
478 | 481 | ||
479 | if (obj_desc == acpi_gbl_global_lock_mutex) { | 482 | if (obj_desc == acpi_gbl_global_lock_mutex) { |
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c index fcc75fa27d32..2ff578a16adc 100644 --- a/drivers/acpi/acpica/exnames.c +++ b/drivers/acpi/acpica/exnames.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exnames - interpreter/scanner name load/execute | 3 | * Module Name: exnames - interpreter/scanner name load/execute |
@@ -53,8 +52,7 @@ ACPI_MODULE_NAME("exnames") | |||
53 | /* Local prototypes */ | 52 | /* Local prototypes */ |
54 | static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs); | 53 | static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs); |
55 | 54 | ||
56 | static acpi_status | 55 | static acpi_status acpi_ex_name_segment(u8 **in_aml_address, char *name_string); |
57 | acpi_ex_name_segment(u8 ** in_aml_address, char *name_string); | ||
58 | 56 | ||
59 | /******************************************************************************* | 57 | /******************************************************************************* |
60 | * | 58 | * |
@@ -64,7 +62,7 @@ acpi_ex_name_segment(u8 ** in_aml_address, char *name_string); | |||
64 | * (-1)==root, 0==none | 62 | * (-1)==root, 0==none |
65 | * num_name_segs - count of 4-character name segments | 63 | * num_name_segs - count of 4-character name segments |
66 | * | 64 | * |
67 | * RETURN: A pointer to the allocated string segment. This segment must | 65 | * RETURN: A pointer to the allocated string segment. This segment must |
68 | * be deleted by the caller. | 66 | * be deleted by the caller. |
69 | * | 67 | * |
70 | * DESCRIPTION: Allocate a buffer for a name string. Ensure allocated name | 68 | * DESCRIPTION: Allocate a buffer for a name string. Ensure allocated name |
@@ -178,7 +176,8 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string) | |||
178 | 176 | ||
179 | ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Bytes from stream:\n")); | 177 | ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Bytes from stream:\n")); |
180 | 178 | ||
181 | for (index = 0; (index < ACPI_NAME_SIZE) | 179 | for (index = 0; |
180 | (index < ACPI_NAME_SIZE) | ||
182 | && (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) { | 181 | && (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) { |
183 | char_buf[index] = *aml_address++; | 182 | char_buf[index] = *aml_address++; |
184 | ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index])); | 183 | ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index])); |
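
Note: the exnames.c loop shown above copies one name segment from the AML stream: at most four characters, stopping early at the first invalid character. A standalone sketch of the same parsing step; the validity test here is a rough stand-in for acpi_ut_valid_acpi_char, based on the ACPI rule that name segments use 'A'-'Z', '0'-'9' and '_', with no leading digit:

#include <stdio.h>

#define NAME_SIZE 4   /* ACPI name segments are always four characters */

static int valid_name_char(char c, int position)
{
    if (c >= 'A' && c <= 'Z') return 1;
    if (c == '_')             return 1;
    if (c >= '0' && c <= '9') return position != 0;   /* digits cannot lead */
    return 0;
}

/* Copy one 4-character name segment out of an AML-like byte stream. */
static int read_name_segment(const char **aml, char out[NAME_SIZE + 1])
{
    int i;
    for (i = 0; i < NAME_SIZE && valid_name_char(**aml, i); i++)
        out[i] = *(*aml)++;
    out[i] = '\0';
    return i == NAME_SIZE;    /* a short segment is an error in real AML */
}

int main(void)
{
    const char *stream = "_SB_PCI0";
    char seg[NAME_SIZE + 1];

    while (read_name_segment(&stream, seg))
        printf("segment: %s\n", seg);
    return 0;
}
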
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c index 9ba8c73cea16..bbf01e9bf057 100644 --- a/drivers/acpi/acpica/exoparg1.c +++ b/drivers/acpi/acpica/exoparg1.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exoparg1 - AML execution - opcodes with 1 argument | 3 | * Module Name: exoparg1 - AML execution - opcodes with 1 argument |
@@ -606,7 +605,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) | |||
606 | } | 605 | } |
607 | 606 | ||
608 | /* | 607 | /* |
609 | * Set result to ONES (TRUE) if Value == 0. Note: | 608 | * Set result to ONES (TRUE) if Value == 0. Note: |
610 | * return_desc->Integer.Value is initially == 0 (FALSE) from above. | 609 | * return_desc->Integer.Value is initially == 0 (FALSE) from above. |
611 | */ | 610 | */ |
612 | if (!operand[0]->integer.value) { | 611 | if (!operand[0]->integer.value) { |
@@ -618,7 +617,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) | |||
618 | case AML_INCREMENT_OP: /* Increment (Operand) */ | 617 | case AML_INCREMENT_OP: /* Increment (Operand) */ |
619 | 618 | ||
620 | /* | 619 | /* |
621 | * Create a new integer. Can't just get the base integer and | 620 | * Create a new integer. Can't just get the base integer and |
622 | * increment it because it may be an Arg or Field. | 621 | * increment it because it may be an Arg or Field. |
623 | */ | 622 | */ |
624 | return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER); | 623 | return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER); |
@@ -686,7 +685,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) | |||
686 | 685 | ||
687 | /* | 686 | /* |
688 | * Note: The operand is not resolved at this point because we want to | 687 | * Note: The operand is not resolved at this point because we want to |
689 | * get the associated object, not its value. For example, we don't | 688 | * get the associated object, not its value. For example, we don't |
690 | * want to resolve a field_unit to its value, we want the actual | 689 | * want to resolve a field_unit to its value, we want the actual |
691 | * field_unit object. | 690 | * field_unit object. |
692 | */ | 691 | */ |
@@ -727,7 +726,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) | |||
727 | 726 | ||
728 | /* | 727 | /* |
729 | * The type of the base object must be integer, buffer, string, or | 728 | * The type of the base object must be integer, buffer, string, or |
730 | * package. All others are not supported. | 729 | * package. All others are not supported. |
731 | * | 730 | * |
732 | * NOTE: Integer is not specifically supported by the ACPI spec, | 731 | * NOTE: Integer is not specifically supported by the ACPI spec, |
733 | * but is supported implicitly via implicit operand conversion. | 732 | * but is supported implicitly via implicit operand conversion. |
@@ -965,7 +964,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) | |||
965 | case ACPI_TYPE_PACKAGE: | 964 | case ACPI_TYPE_PACKAGE: |
966 | 965 | ||
967 | /* | 966 | /* |
968 | * Return the referenced element of the package. We must | 967 | * Return the referenced element of the package. We must |
969 | * add another reference to the referenced object, however. | 968 | * add another reference to the referenced object, however. |
970 | */ | 969 | */ |
971 | return_desc = | 970 | return_desc = |
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c index 879e8a277b94..ee5634a074c4 100644 --- a/drivers/acpi/acpica/exoparg2.c +++ b/drivers/acpi/acpica/exoparg2.c | |||
@@ -123,7 +123,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state) | |||
123 | /* | 123 | /* |
124 | * Dispatch the notify to the appropriate handler | 124 | * Dispatch the notify to the appropriate handler |
125 | * NOTE: the request is queued for execution after this method | 125 | * NOTE: the request is queued for execution after this method |
126 | * completes. The notify handlers are NOT invoked synchronously | 126 | * completes. The notify handlers are NOT invoked synchronously |
127 | * from this thread -- because handlers may in turn run other | 127 | * from this thread -- because handlers may in turn run other |
128 | * control methods. | 128 | * control methods. |
129 | */ | 129 | */ |
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c index 71fcc65c9ffa..2c89b4651f08 100644 --- a/drivers/acpi/acpica/exoparg3.c +++ b/drivers/acpi/acpica/exoparg3.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exoparg3 - AML execution - opcodes with 3 arguments | 3 | * Module Name: exoparg3 - AML execution - opcodes with 3 arguments |
@@ -158,7 +157,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state) | |||
158 | case AML_MID_OP: /* Mid (Source[0], Index[1], Length[2], Result[3]) */ | 157 | case AML_MID_OP: /* Mid (Source[0], Index[1], Length[2], Result[3]) */ |
159 | 158 | ||
160 | /* | 159 | /* |
161 | * Create the return object. The Source operand is guaranteed to be | 160 | * Create the return object. The Source operand is guaranteed to be |
162 | * either a String or a Buffer, so just use its type. | 161 | * either a String or a Buffer, so just use its type. |
163 | */ | 162 | */ |
164 | return_desc = acpi_ut_create_internal_object((operand[0])-> | 163 | return_desc = acpi_ut_create_internal_object((operand[0])-> |
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c index 0786b8659061..3e08695c3b30 100644 --- a/drivers/acpi/acpica/exoparg6.c +++ b/drivers/acpi/acpica/exoparg6.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exoparg6 - AML execution - opcodes with 6 arguments | 3 | * Module Name: exoparg6 - AML execution - opcodes with 6 arguments |
@@ -198,7 +197,7 @@ acpi_ex_do_match(u32 match_op, | |||
198 | return (FALSE); | 197 | return (FALSE); |
199 | } | 198 | } |
200 | 199 | ||
201 | return logical_result; | 200 | return (logical_result); |
202 | } | 201 | } |
203 | 202 | ||
204 | /******************************************************************************* | 203 | /******************************************************************************* |
@@ -269,7 +268,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state) | |||
269 | * and the next should be examined. | 268 | * and the next should be examined. |
270 | * | 269 | * |
271 | * Upon finding a match, the loop will terminate via "break" at | 270 | * Upon finding a match, the loop will terminate via "break" at |
272 | * the bottom. If it terminates "normally", match_value will be | 271 | * the bottom. If it terminates "normally", match_value will be |
273 | * ACPI_UINT64_MAX (Ones) (its initial value) indicating that no | 272 | * ACPI_UINT64_MAX (Ones) (its initial value) indicating that no |
274 | * match was found. | 273 | * match was found. |
275 | */ | 274 | */ |
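The Match loop described above relies on a sentinel: match_value starts as ACPI_UINT64_MAX ("Ones") and is overwritten only when an element satisfies the predicate, so seeing the sentinel after the loop means no match was found. A tiny stand-alone rendering of that pattern, with made-up package contents:

#include <stdio.h>
#include <stdint.h>

#define NOT_FOUND UINT64_MAX   /* "Ones": sentinel meaning no match yet */

int main(void)
{
    const uint64_t package[] = { 3, 7, 42, 9 };
    uint64_t match_value = NOT_FOUND;

    for (unsigned i = 0; i < 4; i++) {
        if (package[i] == 42) {
            match_value = package[i];
            break;                      /* terminate via "break" on a match */
        }
    }

    if (match_value == NOT_FOUND)
        puts("no match");
    else
        printf("matched %llu\n", (unsigned long long)match_value);
    return 0;
}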
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index 81eca60d2748..ba9db4de7c89 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exprep - ACPI AML (p-code) execution - field prep utilities | 3 | * Module Name: exprep - ACPI AML (p-code) execution - field prep utilities |
@@ -78,8 +77,8 @@ acpi_ex_generate_access(u32 field_bit_offset, | |||
78 | * any_acc keyword. | 77 | * any_acc keyword. |
79 | * | 78 | * |
80 | * NOTE: Need to have the region_length in order to check for boundary | 79 | * NOTE: Need to have the region_length in order to check for boundary |
81 | * conditions (end-of-region). However, the region_length is a deferred | 80 | * conditions (end-of-region). However, the region_length is a deferred |
82 | * operation. Therefore, to complete this implementation, the generation | 81 | * operation. Therefore, to complete this implementation, the generation |
83 | * of this access width must be deferred until the region length has | 82 | * of this access width must be deferred until the region length has |
84 | * been evaluated. | 83 | * been evaluated. |
85 | * | 84 | * |
@@ -308,7 +307,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc, | |||
308 | * RETURN: Status | 307 | * RETURN: Status |
309 | * | 308 | * |
310 | * DESCRIPTION: Initialize the areas of the field object that are common | 309 | * DESCRIPTION: Initialize the areas of the field object that are common |
311 | * to the various types of fields. Note: This is very "sensitive" | 310 | * to the various types of fields. Note: This is very "sensitive" |
312 | * code because we are solving the general case for field | 311 | * code because we are solving the general case for field |
313 | * alignment. | 312 | * alignment. |
314 | * | 313 | * |
@@ -336,13 +335,13 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc, | |||
336 | obj_desc->common_field.bit_length = field_bit_length; | 335 | obj_desc->common_field.bit_length = field_bit_length; |
337 | 336 | ||
338 | /* | 337 | /* |
339 | * Decode the access type so we can compute offsets. The access type gives | 338 | * Decode the access type so we can compute offsets. The access type gives |
340 | * two pieces of information - the width of each field access and the | 339 | * two pieces of information - the width of each field access and the |
341 | * necessary byte_alignment (address granularity) of the access. | 340 | * necessary byte_alignment (address granularity) of the access. |
342 | * | 341 | * |
343 | * For any_acc, the access_bit_width is the largest width that is both | 342 | * For any_acc, the access_bit_width is the largest width that is both |
344 | * necessary and possible in an attempt to access the whole field in one | 343 | * necessary and possible in an attempt to access the whole field in one |
345 | * I/O operation. However, for any_acc, the byte_alignment is always one | 344 | * I/O operation. However, for any_acc, the byte_alignment is always one |
346 | * byte. | 345 | * byte. |
347 | * | 346 | * |
348 | * For all Buffer Fields, the byte_alignment is always one byte. | 347 | * For all Buffer Fields, the byte_alignment is always one byte. |
@@ -363,7 +362,7 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc, | |||
363 | 362 | ||
364 | /* | 363 | /* |
365 | * base_byte_offset is the address of the start of the field within the | 364 | * base_byte_offset is the address of the start of the field within the |
366 | * region. It is the byte address of the first *datum* (field-width data | 365 | * region. It is the byte address of the first *datum* (field-width data |
367 | * unit) of the field. (i.e., the first datum that contains at least the | 366 | * unit) of the field. (i.e., the first datum that contains at least the |
368 | * first *bit* of the field.) | 367 | * first *bit* of the field.) |
369 | * | 368 | * |
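The two comments above (access-width decoding and base_byte_offset) describe how a field's starting byte offset and its bit offset within the first datum are derived. A rough stand-alone illustration of that arithmetic is below; it assumes a power-of-two access width, and the function name and exact rounding are illustrative rather than the ACPICA code.

#include <stdint.h>
#include <stdio.h>

/* Derive the byte address of the first datum containing the field and
 * the field's bit offset within that datum. */
static void field_offsets(uint32_t field_bit_position, uint32_t access_bit_width,
                          uint32_t *base_byte_offset, uint32_t *start_bit)
{
    uint32_t byte_alignment = access_bit_width / 8;

    /* Round the bit position down to an access-width boundary, in bytes */
    *base_byte_offset = (field_bit_position / access_bit_width) * byte_alignment;

    /* The remaining bits locate the field inside that first datum */
    *start_bit = field_bit_position % access_bit_width;
}

int main(void)
{
    uint32_t base, bit;

    field_offsets(37, 16, &base, &bit);   /* field starts at bit 37, word access */
    printf("base byte %u, start bit %u\n", base, bit);   /* base byte 4, start bit 5 */
    return 0;
}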
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c index 1f1ce0c3d2f8..1db2c0bfde0b 100644 --- a/drivers/acpi/acpica/exregion.c +++ b/drivers/acpi/acpica/exregion.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exregion - ACPI default op_region (address space) handlers | 3 | * Module Name: exregion - ACPI default op_region (address space) handlers |
@@ -202,7 +201,7 @@ acpi_ex_system_memory_space_handler(u32 function, | |||
202 | * Perform the memory read or write | 201 | * Perform the memory read or write |
203 | * | 202 | * |
204 | * Note: For machines that do not support non-aligned transfers, the target | 203 | * Note: For machines that do not support non-aligned transfers, the target |
205 | * address was checked for alignment above. We do not attempt to break the | 204 | * address was checked for alignment above. We do not attempt to break the |
206 | * transfer up into smaller (byte-size) chunks because the AML specifically | 205 | * transfer up into smaller (byte-size) chunks because the AML specifically |
207 | * asked for a transfer width that the hardware may require. | 206 | * asked for a transfer width that the hardware may require. |
208 | */ | 207 | */ |
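The note above says the handler performs one access of exactly the width the AML asked for, rather than splitting the transfer into byte-sized chunks, since alignment was already checked. A simplified width-switched read is sketched below in plain user-space C; it is not the kernel's MMIO accessor path.

#include <stdint.h>
#include <stdio.h>

/* Perform exactly one access of the requested width; the caller has
 * already verified that 'addr' is suitably aligned. */
static uint64_t read_one_access(const volatile void *addr, unsigned bit_width)
{
    switch (bit_width) {
    case 8:  return *(const volatile uint8_t  *)addr;
    case 16: return *(const volatile uint16_t *)addr;
    case 32: return *(const volatile uint32_t *)addr;
    case 64: return *(const volatile uint64_t *)addr;
    default: return 0;   /* unsupported width */
    }
}

int main(void)
{
    uint32_t reg = 0x11223344;   /* stands in for a mapped register */

    printf("0x%llx\n", (unsigned long long)read_one_access(&reg, 32));
    return 0;
}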
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c index fa50e77e64a8..6239956786eb 100644 --- a/drivers/acpi/acpica/exresnte.c +++ b/drivers/acpi/acpica/exresnte.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exresnte - AML Interpreter object resolution | 3 | * Module Name: exresnte - AML Interpreter object resolution |
@@ -58,8 +57,8 @@ ACPI_MODULE_NAME("exresnte") | |||
58 | * PARAMETERS: object_ptr - Pointer to a location that contains | 57 | * PARAMETERS: object_ptr - Pointer to a location that contains |
59 | * a pointer to a NS node, and will receive a | 58 | * a pointer to a NS node, and will receive a |
60 | * pointer to the resolved object. | 59 | * pointer to the resolved object. |
61 | * walk_state - Current state. Valid only if executing AML | 60 | * walk_state - Current state. Valid only if executing AML |
62 | * code. NULL if simply resolving an object | 61 | * code. NULL if simply resolving an object |
63 | * | 62 | * |
64 | * RETURN: Status | 63 | * RETURN: Status |
65 | * | 64 | * |
@@ -67,7 +66,7 @@ ACPI_MODULE_NAME("exresnte") | |||
67 | * | 66 | * |
68 | * Note: for some of the data types, the pointer attached to the Node | 67 | * Note: for some of the data types, the pointer attached to the Node |
69 | * can be either a pointer to an actual internal object or a pointer into the | 68 | * can be either a pointer to an actual internal object or a pointer into the |
70 | * AML stream itself. These types are currently: | 69 | * AML stream itself. These types are currently: |
71 | * | 70 | * |
72 | * ACPI_TYPE_INTEGER | 71 | * ACPI_TYPE_INTEGER |
73 | * ACPI_TYPE_STRING | 72 | * ACPI_TYPE_STRING |
@@ -89,7 +88,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr, | |||
89 | ACPI_FUNCTION_TRACE(ex_resolve_node_to_value); | 88 | ACPI_FUNCTION_TRACE(ex_resolve_node_to_value); |
90 | 89 | ||
91 | /* | 90 | /* |
92 | * The stack pointer points to a struct acpi_namespace_node (Node). Get the | 91 | * The stack pointer points to a struct acpi_namespace_node (Node). Get the |
93 | * object that is attached to the Node. | 92 | * object that is attached to the Node. |
94 | */ | 93 | */ |
95 | node = *object_ptr; | 94 | node = *object_ptr; |
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c index bbf40ac27585..cc176b245e22 100644 --- a/drivers/acpi/acpica/exresolv.c +++ b/drivers/acpi/acpica/exresolv.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exresolv - AML Interpreter object resolution | 3 | * Module Name: exresolv - AML Interpreter object resolution |
@@ -327,7 +326,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr, | |||
327 | * | 326 | * |
328 | * RETURN: Status | 327 | * RETURN: Status |
329 | * | 328 | * |
330 | * DESCRIPTION: Return the base object and type. Traverse a reference list if | 329 | * DESCRIPTION: Return the base object and type. Traverse a reference list if |
331 | * necessary to get to the base object. | 330 | * necessary to get to the base object. |
332 | * | 331 | * |
333 | ******************************************************************************/ | 332 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c index f232fbabdea8..b9ebff2f6a09 100644 --- a/drivers/acpi/acpica/exresop.c +++ b/drivers/acpi/acpica/exresop.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exresop - AML Interpreter operand/object resolution | 3 | * Module Name: exresop - AML Interpreter operand/object resolution |
@@ -87,7 +86,7 @@ acpi_ex_check_object_type(acpi_object_type type_needed, | |||
87 | if (type_needed == ACPI_TYPE_LOCAL_REFERENCE) { | 86 | if (type_needed == ACPI_TYPE_LOCAL_REFERENCE) { |
88 | /* | 87 | /* |
89 | * Allow the AML "Constant" opcodes (Zero, One, etc.) to be reference | 88 | * Allow the AML "Constant" opcodes (Zero, One, etc.) to be reference |
90 | * objects and thus allow them to be targets. (As per the ACPI | 89 | * objects and thus allow them to be targets. (As per the ACPI |
91 | * specification, a store to a constant is a noop.) | 90 | * specification, a store to a constant is a noop.) |
92 | */ | 91 | */ |
93 | if ((this_type == ACPI_TYPE_INTEGER) && | 92 | if ((this_type == ACPI_TYPE_INTEGER) && |
@@ -337,7 +336,8 @@ acpi_ex_resolve_operands(u16 opcode, | |||
337 | if ((opcode == AML_STORE_OP) && | 336 | if ((opcode == AML_STORE_OP) && |
338 | ((*stack_ptr)->common.type == | 337 | ((*stack_ptr)->common.type == |
339 | ACPI_TYPE_LOCAL_REFERENCE) | 338 | ACPI_TYPE_LOCAL_REFERENCE) |
340 | && ((*stack_ptr)->reference.class == ACPI_REFCLASS_INDEX)) { | 339 | && ((*stack_ptr)->reference.class == |
340 | ACPI_REFCLASS_INDEX)) { | ||
341 | goto next_operand; | 341 | goto next_operand; |
342 | } | 342 | } |
343 | break; | 343 | break; |
@@ -638,7 +638,7 @@ acpi_ex_resolve_operands(u16 opcode, | |||
638 | if (acpi_gbl_enable_interpreter_slack) { | 638 | if (acpi_gbl_enable_interpreter_slack) { |
639 | /* | 639 | /* |
640 | * Enable original behavior of Store(), allowing any and all | 640 | * Enable original behavior of Store(), allowing any and all |
641 | * objects as the source operand. The ACPI spec does not | 641 | * objects as the source operand. The ACPI spec does not |
642 | * allow this, however. | 642 | * allow this, however. |
643 | */ | 643 | */ |
644 | break; | 644 | break; |
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c index 5fffe7ab5ece..90431f12f831 100644 --- a/drivers/acpi/acpica/exstore.c +++ b/drivers/acpi/acpica/exstore.c | |||
@@ -374,7 +374,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc, | |||
374 | * with the input value. | 374 | * with the input value. |
375 | * | 375 | * |
376 | * When storing into an object the data is converted to the | 376 | * When storing into an object the data is converted to the |
377 | * target object type then stored in the object. This means | 377 | * target object type then stored in the object. This means |
378 | * that the target object type (for an initialized target) will | 378 | * that the target object type (for an initialized target) will |
379 | * not be changed by a store operation. | 379 | * not be changed by a store operation. |
380 | * | 380 | * |
@@ -491,7 +491,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc, | |||
491 | acpi_ut_get_object_type_name(source_desc), | 491 | acpi_ut_get_object_type_name(source_desc), |
492 | source_desc, node)); | 492 | source_desc, node)); |
493 | 493 | ||
494 | /* No conversions for all other types. Just attach the source object */ | 494 | /* No conversions for all other types. Just attach the source object */ |
495 | 495 | ||
496 | status = acpi_ns_attach_object(node, source_desc, | 496 | status = acpi_ns_attach_object(node, source_desc, |
497 | source_desc->common.type); | 497 | source_desc->common.type); |
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c index b35bed52e061..87153bbc4b43 100644 --- a/drivers/acpi/acpica/exstoren.c +++ b/drivers/acpi/acpica/exstoren.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exstoren - AML Interpreter object store support, | 3 | * Module Name: exstoren - AML Interpreter object store support, |
@@ -61,7 +60,7 @@ ACPI_MODULE_NAME("exstoren") | |||
61 | * | 60 | * |
62 | * RETURN: Status, resolved object in source_desc_ptr. | 61 | * RETURN: Status, resolved object in source_desc_ptr. |
63 | * | 62 | * |
64 | * DESCRIPTION: Resolve an object. If the object is a reference, dereference | 63 | * DESCRIPTION: Resolve an object. If the object is a reference, dereference |
65 | * it and return the actual object in the source_desc_ptr. | 64 | * it and return the actual object in the source_desc_ptr. |
66 | * | 65 | * |
67 | ******************************************************************************/ | 66 | ******************************************************************************/ |
@@ -93,7 +92,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr, | |||
93 | 92 | ||
94 | /* | 93 | /* |
95 | * Stores into a Field/Region or into an Integer/Buffer/String | 94 | * Stores into a Field/Region or into an Integer/Buffer/String |
96 | * are all essentially the same. This case handles the | 95 | * are all essentially the same. This case handles the |
97 | * "interchangeable" types Integer, String, and Buffer. | 96 | * "interchangeable" types Integer, String, and Buffer. |
98 | */ | 97 | */ |
99 | if (source_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE) { | 98 | if (source_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE) { |
@@ -167,7 +166,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr, | |||
167 | * | 166 | * |
168 | * RETURN: Status | 167 | * RETURN: Status |
169 | * | 168 | * |
170 | * DESCRIPTION: "Store" an object to another object. This may include | 169 | * DESCRIPTION: "Store" an object to another object. This may include |
171 | * converting the source type to the target type (implicit | 170 | * converting the source type to the target type (implicit |
172 | * conversion), and a copy of the value of the source to | 171 | * conversion), and a copy of the value of the source to |
173 | * the target. | 172 | * the target. |
@@ -178,14 +177,14 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr, | |||
178 | * with the input value. | 177 | * with the input value. |
179 | * | 178 | * |
180 | * When storing into an object the data is converted to the | 179 | * When storing into an object the data is converted to the |
181 | * target object type then stored in the object. This means | 180 | * target object type then stored in the object. This means |
182 | * that the target object type (for an initialized target) will | 181 | * that the target object type (for an initialized target) will |
183 | * not be changed by a store operation. | 182 | * not be changed by a store operation. |
184 | * | 183 | * |
185 | * This module allows destination types of Number, String, | 184 | * This module allows destination types of Number, String, |
186 | * Buffer, and Package. | 185 | * Buffer, and Package. |
187 | * | 186 | * |
188 | * Assumes parameters are already validated. NOTE: source_desc | 187 | * Assumes parameters are already validated. NOTE: source_desc |
189 | * resolution (from a reference object) must be performed by | 188 | * resolution (from a reference object) must be performed by |
190 | * the caller if necessary. | 189 | * the caller if necessary. |
191 | * | 190 | * |
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c index 53c248473547..b5f339cb1305 100644 --- a/drivers/acpi/acpica/exstorob.c +++ b/drivers/acpi/acpica/exstorob.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exstorob - AML Interpreter object store support, store to object | 3 | * Module Name: exstorob - AML Interpreter object store support, store to object |
@@ -108,7 +107,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc, | |||
108 | #ifdef ACPI_OBSOLETE_BEHAVIOR | 107 | #ifdef ACPI_OBSOLETE_BEHAVIOR |
109 | /* | 108 | /* |
110 | * NOTE: ACPI versions up to 3.0 specified that the buffer must be | 109 | * NOTE: ACPI versions up to 3.0 specified that the buffer must be |
111 | * truncated if the string is smaller than the buffer. However, "other" | 110 | * truncated if the string is smaller than the buffer. However, "other" |
112 | * implementations of ACPI never did this and thus became the de facto | 111 | * implementations of ACPI never did this and thus became the de facto |
113 | * standard. ACPI 3.0A changes this behavior such that the buffer | 112 | * standard. ACPI 3.0A changes this behavior such that the buffer |
114 | * is no longer truncated. | 113 | * is no longer truncated. |
@@ -117,7 +116,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc, | |||
117 | /* | 116 | /* |
118 | * OBSOLETE BEHAVIOR: | 117 | * OBSOLETE BEHAVIOR: |
119 | * If the original source was a string, we must truncate the buffer, | 118 | * If the original source was a string, we must truncate the buffer, |
120 | * according to the ACPI spec. Integer-to-Buffer and Buffer-to-Buffer | 119 | * according to the ACPI spec. Integer-to-Buffer and Buffer-to-Buffer |
121 | * copy must not truncate the original buffer. | 120 | * copy must not truncate the original buffer. |
122 | */ | 121 | */ |
123 | if (original_src_type == ACPI_TYPE_STRING) { | 122 | if (original_src_type == ACPI_TYPE_STRING) { |
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c index b760641e2fc6..c8a0ad5c1f55 100644 --- a/drivers/acpi/acpica/exsystem.c +++ b/drivers/acpi/acpica/exsystem.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exsystem - Interface to OS services | 3 | * Module Name: exsystem - Interface to OS services |
@@ -59,7 +58,7 @@ ACPI_MODULE_NAME("exsystem") | |||
59 | * RETURN: Status | 58 | * RETURN: Status |
60 | * | 59 | * |
61 | * DESCRIPTION: Implements a semaphore wait with a check to see if the | 60 | * DESCRIPTION: Implements a semaphore wait with a check to see if the |
62 | * semaphore is available immediately. If it is not, the | 61 | * semaphore is available immediately. If it is not, the |
63 | * interpreter is released before waiting. | 62 | * interpreter is released before waiting. |
64 | * | 63 | * |
65 | ******************************************************************************/ | 64 | ******************************************************************************/ |
@@ -104,7 +103,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout) | |||
104 | * RETURN: Status | 103 | * RETURN: Status |
105 | * | 104 | * |
106 | * DESCRIPTION: Implements a mutex wait with a check to see if the | 105 | * DESCRIPTION: Implements a mutex wait with a check to see if the |
107 | * mutex is available immediately. If it is not, the | 106 | * mutex is available immediately. If it is not, the |
108 | * interpreter is released before waiting. | 107 | * interpreter is released before waiting. |
109 | * | 108 | * |
110 | ******************************************************************************/ | 109 | ******************************************************************************/ |
@@ -152,7 +151,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout) | |||
152 | * DESCRIPTION: Suspend running thread for specified amount of time. | 151 | * DESCRIPTION: Suspend running thread for specified amount of time. |
153 | * Note: ACPI specification requires that Stall() does not | 152 | * Note: ACPI specification requires that Stall() does not |
154 | * relinquish the processor, and delays longer than 100 usec | 153 | * relinquish the processor, and delays longer than 100 usec |
155 | * should use Sleep() instead. We allow stalls up to 255 usec | 154 | * should use Sleep() instead. We allow stalls up to 255 usec |
156 | * for compatibility with other interpreters and existing BIOSs. | 155 | * for compatibility with other interpreters and existing BIOSs. |
157 | * | 156 | * |
158 | ******************************************************************************/ | 157 | ******************************************************************************/ |
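The Stall() note above sets a 255 usec ceiling on busy-waiting; anything longer is expected to use Sleep(). A hedged sketch of that policy check follows; the helper name, limit macro, and error handling are illustrative only.

#include <stdio.h>
#include <stdint.h>

#define STALL_LIMIT_US 255u

/* Return 0 on success, -1 if the request should have used Sleep() instead. */
static int do_stall(uint32_t how_long_us)
{
    if (how_long_us > STALL_LIMIT_US) {
        fprintf(stderr, "stall of %u us too long, use Sleep()\n", how_long_us);
        return -1;
    }
    /* busy-wait for how_long_us microseconds without yielding the CPU */
    return 0;
}

int main(void)
{
    return (do_stall(100) == 0 && do_stall(1000) == -1) ? 0 : 1;
}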
@@ -254,7 +253,7 @@ acpi_status acpi_ex_system_signal_event(union acpi_operand_object * obj_desc) | |||
254 | * RETURN: Status | 253 | * RETURN: Status |
255 | * | 254 | * |
256 | * DESCRIPTION: Provides an access point to perform synchronization operations | 255 | * DESCRIPTION: Provides an access point to perform synchronization operations |
257 | * within the AML. This operation is a request to wait for an | 256 | * within the AML. This operation is a request to wait for an |
258 | * event. | 257 | * event. |
259 | * | 258 | * |
260 | ******************************************************************************/ | 259 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c index d1ab7917eed7..264d22d8018c 100644 --- a/drivers/acpi/acpica/exutils.c +++ b/drivers/acpi/acpica/exutils.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: exutils - interpreter/scanner utilities | 3 | * Module Name: exutils - interpreter/scanner utilities |
@@ -45,12 +44,12 @@ | |||
45 | /* | 44 | /* |
46 | * DEFINE_AML_GLOBALS is tested in amlcode.h | 45 | * DEFINE_AML_GLOBALS is tested in amlcode.h |
47 | * to determine whether certain global names should be "defined" or only | 46 | * to determine whether certain global names should be "defined" or only |
48 | * "declared" in the current compilation. This enhances maintainability | 47 | * "declared" in the current compilation. This enhances maintainability |
49 | * by enabling a single header file to embody all knowledge of the names | 48 | * by enabling a single header file to embody all knowledge of the names |
50 | * in question. | 49 | * in question. |
51 | * | 50 | * |
52 | * Exactly one module of any executable should #define DEFINE_GLOBALS | 51 | * Exactly one module of any executable should #define DEFINE_GLOBALS |
53 | * before #including the header files which use this convention. The | 52 | * before #including the header files which use this convention. The |
54 | * names in question will be defined and initialized in that module, | 53 | * names in question will be defined and initialized in that module, |
55 | * and declared as extern in all other modules which #include those | 54 | * and declared as extern in all other modules which #include those |
56 | * header files. | 55 | * header files. |
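The comment above describes the define-once/declare-elsewhere convention driven by DEFINE_AML_GLOBALS. A generic stand-alone version of that pattern is shown below, using made-up macro names (GLOBAL, INIT, DEFINE_GLOBALS) rather than ACPICA's.

/* Sketch of the convention: exactly one module defines DEFINE_GLOBALS
 * before including the shared header; every other module gets 'extern'. */
#include <stdio.h>

#define DEFINE_GLOBALS      /* this file is the single defining module */

#ifdef DEFINE_GLOBALS
#define GLOBAL              /* emit a real definition */
#define INIT(v) = (v)
#else
#define GLOBAL extern       /* other modules only declare */
#define INIT(v)
#endif

GLOBAL unsigned int g_debug_level INIT(3);

int main(void)
{
    printf("g_debug_level = %u\n", g_debug_level);
    return 0;
}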
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c index a1e71d0ef57b..90a9aea1cee9 100644 --- a/drivers/acpi/acpica/hwacpi.c +++ b/drivers/acpi/acpica/hwacpi.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface | 3 | * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface |
@@ -136,7 +135,7 @@ acpi_status acpi_hw_set_mode(u32 mode) | |||
136 | * | 135 | * |
137 | * RETURN: SYS_MODE_ACPI or SYS_MODE_LEGACY | 136 | * RETURN: SYS_MODE_ACPI or SYS_MODE_LEGACY |
138 | * | 137 | * |
139 | * DESCRIPTION: Return current operating state of system. Determined by | 138 | * DESCRIPTION: Return current operating state of system. Determined by |
140 | * querying the SCI_EN bit. | 139 | * querying the SCI_EN bit. |
141 | * | 140 | * |
142 | ******************************************************************************/ | 141 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index db4076580e2b..64560045052d 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: hwgpe - Low level GPE enable/disable/clear functions | 3 | * Module Name: hwgpe - Low level GPE enable/disable/clear functions |
@@ -339,7 +338,8 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
339 | 338 | ||
340 | acpi_status | 339 | acpi_status |
341 | acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | 340 | acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
342 | struct acpi_gpe_block_info *gpe_block, void *context) | 341 | struct acpi_gpe_block_info * gpe_block, |
342 | void *context) | ||
343 | { | 343 | { |
344 | u32 i; | 344 | u32 i; |
345 | acpi_status status; | 345 | acpi_status status; |
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c index 1455ddcdc32c..65bc3453a29c 100644 --- a/drivers/acpi/acpica/hwpci.c +++ b/drivers/acpi/acpica/hwpci.c | |||
@@ -259,7 +259,7 @@ acpi_hw_process_pci_list(struct acpi_pci_id *pci_id, | |||
259 | status = acpi_hw_get_pci_device_info(pci_id, info->device, | 259 | status = acpi_hw_get_pci_device_info(pci_id, info->device, |
260 | &bus_number, &is_bridge); | 260 | &bus_number, &is_bridge); |
261 | if (ACPI_FAILURE(status)) { | 261 | if (ACPI_FAILURE(status)) { |
262 | return_ACPI_STATUS(status); | 262 | return (status); |
263 | } | 263 | } |
264 | 264 | ||
265 | info = info->next; | 265 | info = info->next; |
@@ -271,7 +271,7 @@ acpi_hw_process_pci_list(struct acpi_pci_id *pci_id, | |||
271 | pci_id->segment, pci_id->bus, pci_id->device, | 271 | pci_id->segment, pci_id->bus, pci_id->device, |
272 | pci_id->function, status, bus_number, is_bridge)); | 272 | pci_id->function, status, bus_number, is_bridge)); |
273 | 273 | ||
274 | return_ACPI_STATUS(AE_OK); | 274 | return (AE_OK); |
275 | } | 275 | } |
276 | 276 | ||
277 | /******************************************************************************* | 277 | /******************************************************************************* |
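These hunks (like the earlier "return (logical_result)" change) replace return_ACPI_STATUS() with a plain parenthesized return, presumably because the helpers involved do not emit a function-entry trace, so an exit-trace macro would be unmatched. A toy illustration of why the two forms are paired, with entirely made-up macro names:

#include <stdio.h>

#define TRACE_ENTRY()    printf("enter %s\n", __func__)
#define RETURN_STATUS(s) do { printf("exit %s -> %d\n", __func__, (s)); return (s); } while (0)

static int traced_function(void)
{
    TRACE_ENTRY();
    RETURN_STATUS(0);   /* paired with the entry trace above */
}

static int plain_function(void)
{
    return (0);         /* no entry trace, so a plain return */
}

int main(void)
{
    return traced_function() + plain_function();
}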
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index 4af6d20ef077..f4e57503576b 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /******************************************************************************* | 1 | /******************************************************************************* |
3 | * | 2 | * |
4 | * Module Name: hwregs - Read/write access functions for the various ACPI | 3 | * Module Name: hwregs - Read/write access functions for the various ACPI |
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c index b6411f16832f..bfdce22f3798 100644 --- a/drivers/acpi/acpica/hwtimer.c +++ b/drivers/acpi/acpica/hwtimer.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Name: hwtimer.c - ACPI Power Management Timer Interface | 3 | * Name: hwtimer.c - ACPI Power Management Timer Interface |
@@ -101,8 +100,7 @@ acpi_status acpi_get_timer(u32 * ticks) | |||
101 | return_ACPI_STATUS(AE_BAD_PARAMETER); | 100 | return_ACPI_STATUS(AE_BAD_PARAMETER); |
102 | } | 101 | } |
103 | 102 | ||
104 | status = | 103 | status = acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block); |
105 | acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block); | ||
106 | 104 | ||
107 | return_ACPI_STATUS(status); | 105 | return_ACPI_STATUS(status); |
108 | } | 106 | } |
@@ -129,7 +127,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_timer) | |||
129 | * a versatile and accurate timer. | 127 | * a versatile and accurate timer. |
130 | * | 128 | * |
131 | * Note that this function accommodates only a single timer | 129 | * Note that this function accommodates only a single timer |
132 | * rollover. Thus for 24-bit timers, this function should only | 130 | * rollover. Thus for 24-bit timers, this function should only |
133 | * be used for calculating durations less than ~4.6 seconds | 131 | * be used for calculating durations less than ~4.6 seconds |
134 | * (~20 minutes for 32-bit timers) -- calculations below: | 132 | * (~20 minutes for 32-bit timers) -- calculations below: |
135 | * | 133 | * |
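The rollover note above quotes ~4.6 seconds for 24-bit timers and ~20 minutes for 32-bit timers; both figures follow from the fixed 3.579545 MHz ACPI PM timer frequency. A quick stand-alone check of that arithmetic (the macro name here is illustrative):

#include <stdio.h>

#define PM_TIMER_HZ 3579545.0   /* ACPI PM timer frequency */

int main(void)
{
    double s24 = 16777216.0   / PM_TIMER_HZ;   /* 2^24 ticks */
    double s32 = 4294967296.0 / PM_TIMER_HZ;   /* 2^32 ticks */

    printf("24-bit rollover: %.2f s\n", s24);                 /* ~4.69 s */
    printf("32-bit rollover: %.1f s (~%.1f min)\n",
           s32, s32 / 60.0);                                  /* ~20 min */
    return 0;
}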
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c index c99d546b217f..b6aae58299dc 100644 --- a/drivers/acpi/acpica/hwvalid.c +++ b/drivers/acpi/acpica/hwvalid.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: hwvalid - I/O request validation | 3 | * Module Name: hwvalid - I/O request validation |
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index 7bfd649d1996..05a154c3c9ac 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Module Name: hwxface - Public ACPICA hardware interfaces | 3 | * Module Name: hwxface - Public ACPICA hardware interfaces |
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index 0ff1ecea5c3a..ae443fe2ebf6 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c | |||
@@ -49,8 +49,7 @@ | |||
49 | ACPI_MODULE_NAME("hwxfsleep") | 49 | ACPI_MODULE_NAME("hwxfsleep") |
50 | 50 | ||
51 | /* Local prototypes */ | 51 | /* Local prototypes */ |
52 | static acpi_status | 52 | static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id); |
53 | acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id); | ||
54 | 53 | ||
55 | /* | 54 | /* |
56 | * Dispatch table used to efficiently branch to the various sleep | 55 | * Dispatch table used to efficiently branch to the various sleep |
@@ -234,8 +233,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios) | |||
234 | * function. | 233 | * function. |
235 | * | 234 | * |
236 | ******************************************************************************/ | 235 | ******************************************************************************/ |
237 | static acpi_status | 236 | static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id) |
238 | acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id) | ||
239 | { | 237 | { |
240 | acpi_status status; | 238 | acpi_status status; |
241 | struct acpi_sleep_functions *sleep_functions = | 239 | struct acpi_sleep_functions *sleep_functions = |
@@ -369,8 +367,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state) | |||
369 | return_ACPI_STATUS(AE_AML_OPERAND_VALUE); | 367 | return_ACPI_STATUS(AE_AML_OPERAND_VALUE); |
370 | } | 368 | } |
371 | 369 | ||
372 | status = | 370 | status = acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID); |
373 | acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID); | ||
374 | return_ACPI_STATUS(status); | 371 | return_ACPI_STATUS(status); |
375 | } | 372 | } |
376 | 373 | ||
@@ -396,8 +393,7 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state) | |||
396 | ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep); | 393 | ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep); |
397 | 394 | ||
398 | status = | 395 | status = |
399 | acpi_hw_sleep_dispatch(sleep_state, | 396 | acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_PREP_FUNCTION_ID); |
400 | ACPI_WAKE_PREP_FUNCTION_ID); | ||
401 | return_ACPI_STATUS(status); | 397 | return_ACPI_STATUS(status); |
402 | } | 398 | } |
403 | 399 | ||
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c index 23db53ce2293..d70eaf39dfdf 100644 --- a/drivers/acpi/acpica/nsaccess.c +++ b/drivers/acpi/acpica/nsaccess.c | |||
@@ -110,11 +110,11 @@ acpi_status acpi_ns_root_initialize(void) | |||
110 | status = acpi_ns_lookup(NULL, init_val->name, init_val->type, | 110 | status = acpi_ns_lookup(NULL, init_val->name, init_val->type, |
111 | ACPI_IMODE_LOAD_PASS2, | 111 | ACPI_IMODE_LOAD_PASS2, |
112 | ACPI_NS_NO_UPSEARCH, NULL, &new_node); | 112 | ACPI_NS_NO_UPSEARCH, NULL, &new_node); |
113 | 113 | if (ACPI_FAILURE(status)) { | |
114 | if (ACPI_FAILURE(status) || (!new_node)) { /* Must be on same line for code converter */ | ||
115 | ACPI_EXCEPTION((AE_INFO, status, | 114 | ACPI_EXCEPTION((AE_INFO, status, |
116 | "Could not create predefined name %s", | 115 | "Could not create predefined name %s", |
117 | init_val->name)); | 116 | init_val->name)); |
117 | continue; | ||
118 | } | 118 | } |
119 | 119 | ||
120 | /* | 120 | /* |
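This hunk changes the predefined-name loop so that a failed acpi_ns_lookup() logs the exception and skips to the next entry, instead of relying on the combined failure/NULL check. A compact stand-alone rendering of that control flow, with invented helper and table names:

#include <stdio.h>

struct predefined { const char *name; int should_fail; };

static int create_predefined(const struct predefined *p)
{
    return p->should_fail ? -1 : 0;   /* stand-in for the namespace lookup */
}

int main(void)
{
    static const struct predefined names[] = {
        { "_GPE", 0 }, { "_BAD", 1 }, { "_SB_", 0 }, { 0 }
    };

    for (const struct predefined *p = names; p->name; p++) {
        if (create_predefined(p)) {
            fprintf(stderr, "Could not create predefined name %s\n", p->name);
            continue;               /* move on to the next predefined name */
        }
        printf("created %s\n", p->name);
        /* ... attach any initial value object here ... */
    }
    return 0;
}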
@@ -179,8 +179,7 @@ acpi_status acpi_ns_root_initialize(void) | |||
179 | 179 | ||
180 | /* Build an object around the static string */ | 180 | /* Build an object around the static string */ |
181 | 181 | ||
182 | obj_desc->string.length = | 182 | obj_desc->string.length = (u32)ACPI_STRLEN(val); |
183 | (u32) ACPI_STRLEN(val); | ||
184 | obj_desc->string.pointer = val; | 183 | obj_desc->string.pointer = val; |
185 | obj_desc->common.flags |= AOPOBJ_STATIC_POINTER; | 184 | obj_desc->common.flags |= AOPOBJ_STATIC_POINTER; |
186 | break; | 185 | break; |
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c index ac389e5bb594..15143c44f5e5 100644 --- a/drivers/acpi/acpica/nsalloc.c +++ b/drivers/acpi/acpica/nsalloc.c | |||
@@ -332,7 +332,7 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node) | |||
332 | * | 332 | * |
333 | * RETURN: None. | 333 | * RETURN: None. |
334 | * | 334 | * |
335 | * DESCRIPTION: Delete a subtree of the namespace. This includes all objects | 335 | * DESCRIPTION: Delete a subtree of the namespace. This includes all objects |
336 | * stored within the subtree. | 336 | * stored within the subtree. |
337 | * | 337 | * |
338 | ******************************************************************************/ | 338 | ******************************************************************************/ |
@@ -418,7 +418,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node) | |||
418 | * RETURN: Status | 418 | * RETURN: Status |
419 | * | 419 | * |
420 | * DESCRIPTION: Delete entries within the namespace that are owned by a | 420 | * DESCRIPTION: Delete entries within the namespace that are owned by a |
421 | * specific ID. Used to delete entire ACPI tables. All | 421 | * specific ID. Used to delete entire ACPI tables. All |
422 | * reference counts are updated. | 422 | * reference counts are updated. |
423 | * | 423 | * |
424 | * MUTEX: Locks namespace during deletion walk. | 424 | * MUTEX: Locks namespace during deletion walk. |
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c index 2526aaf945ee..924b3c71473a 100644 --- a/drivers/acpi/acpica/nsdump.c +++ b/drivers/acpi/acpica/nsdump.c | |||
@@ -209,14 +209,6 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, | |||
209 | "Invalid ACPI Object Type 0x%08X", type)); | 209 | "Invalid ACPI Object Type 0x%08X", type)); |
210 | } | 210 | } |
211 | 211 | ||
212 | if (!acpi_ut_valid_acpi_name(this_node->name.integer)) { | ||
213 | this_node->name.integer = | ||
214 | acpi_ut_repair_name(this_node->name.ascii); | ||
215 | |||
216 | ACPI_WARNING((AE_INFO, "Invalid ACPI Name %08X", | ||
217 | this_node->name.integer)); | ||
218 | } | ||
219 | |||
220 | acpi_os_printf("%4.4s", acpi_ut_get_node_name(this_node)); | 212 | acpi_os_printf("%4.4s", acpi_ut_get_node_name(this_node)); |
221 | } | 213 | } |
222 | 214 | ||
@@ -700,7 +692,7 @@ void acpi_ns_dump_entry(acpi_handle handle, u32 debug_level) | |||
700 | * | 692 | * |
701 | * PARAMETERS: search_base - Root of subtree to be dumped, or | 693 | * PARAMETERS: search_base - Root of subtree to be dumped, or |
702 | * NS_ALL to dump the entire namespace | 694 | * NS_ALL to dump the entire namespace |
703 | * max_depth - Maximum depth of dump. Use INT_MAX | 695 | * max_depth - Maximum depth of dump. Use INT_MAX |
704 | * for an effectively unlimited depth. | 696 | * for an effectively unlimited depth. |
705 | * | 697 | * |
706 | * RETURN: None | 698 | * RETURN: None |
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c index 95ffe8dfa1f1..4328e2adfeb9 100644 --- a/drivers/acpi/acpica/nsinit.c +++ b/drivers/acpi/acpica/nsinit.c | |||
@@ -96,8 +96,8 @@ acpi_status acpi_ns_initialize_objects(void) | |||
96 | /* Walk entire namespace from the supplied root */ | 96 | /* Walk entire namespace from the supplied root */ |
97 | 97 | ||
98 | status = acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, | 98 | status = acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, |
99 | ACPI_UINT32_MAX, acpi_ns_init_one_object, NULL, | 99 | ACPI_UINT32_MAX, acpi_ns_init_one_object, |
100 | &info, NULL); | 100 | NULL, &info, NULL); |
101 | if (ACPI_FAILURE(status)) { | 101 | if (ACPI_FAILURE(status)) { |
102 | ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace")); | 102 | ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace")); |
103 | } | 103 | } |
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c index 76935ff29289..911f99127b99 100644 --- a/drivers/acpi/acpica/nsload.c +++ b/drivers/acpi/acpica/nsload.c | |||
@@ -80,8 +80,8 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node) | |||
80 | 80 | ||
81 | /* | 81 | /* |
82 | * Parse the table and load the namespace with all named | 82 | * Parse the table and load the namespace with all named |
83 | * objects found within. Control methods are NOT parsed | 83 | * objects found within. Control methods are NOT parsed |
84 | * at this time. In fact, the control methods cannot be | 84 | * at this time. In fact, the control methods cannot be |
85 | * parsed until the entire namespace is loaded, because | 85 | * parsed until the entire namespace is loaded, because |
86 | * if a control method makes a forward reference (call) | 86 | * if a control method makes a forward reference (call) |
87 | * to another control method, we can't continue parsing | 87 | * to another control method, we can't continue parsing |
@@ -122,7 +122,7 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node) | |||
122 | } | 122 | } |
123 | 123 | ||
124 | /* | 124 | /* |
125 | * Now we can parse the control methods. We always parse | 125 | * Now we can parse the control methods. We always parse |
126 | * them here for a sanity check, and if configured for | 126 | * them here for a sanity check, and if configured for |
127 | * just-in-time parsing, we delete the control method | 127 | * just-in-time parsing, we delete the control method |
128 | * parse trees. | 128 | * parse trees. |
@@ -166,7 +166,7 @@ acpi_status acpi_ns_load_namespace(void) | |||
166 | } | 166 | } |
167 | 167 | ||
168 | /* | 168 | /* |
169 | * Load the namespace. The DSDT is required, | 169 | * Load the namespace. The DSDT is required, |
170 | * but the SSDT and PSDT tables are optional. | 170 | * but the SSDT and PSDT tables are optional. |
171 | */ | 171 | */ |
172 | status = acpi_ns_load_table_by_type(ACPI_TABLE_ID_DSDT); | 172 | status = acpi_ns_load_table_by_type(ACPI_TABLE_ID_DSDT); |
@@ -283,7 +283,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle) | |||
283 | * RETURN: Status | 283 | * RETURN: Status |
284 | * | 284 | * |
285 | * DESCRIPTION: Shrinks the namespace, typically in response to an undocking | 285 | * DESCRIPTION: Shrinks the namespace, typically in response to an undocking |
286 | * event. Deletes an entire subtree starting from (and | 286 | * event. Deletes an entire subtree starting from (and |
287 | * including) the given handle. | 287 | * including) the given handle. |
288 | * | 288 | * |
289 | ******************************************************************************/ | 289 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c index 96e0eb609bb4..55a175eadcc3 100644 --- a/drivers/acpi/acpica/nsnames.c +++ b/drivers/acpi/acpica/nsnames.c | |||
@@ -195,7 +195,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node) | |||
195 | ACPI_ERROR((AE_INFO, | 195 | ACPI_ERROR((AE_INFO, |
196 | "Invalid Namespace Node (%p) while traversing namespace", | 196 | "Invalid Namespace Node (%p) while traversing namespace", |
197 | next_node)); | 197 | next_node)); |
198 | return 0; | 198 | return (0); |
199 | } | 199 | } |
200 | size += ACPI_PATH_SEGMENT_LENGTH; | 200 | size += ACPI_PATH_SEGMENT_LENGTH; |
201 | next_node = next_node->parent; | 201 | next_node = next_node->parent; |
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c index d6c9a3cc6716..e69f7fa2579d 100644 --- a/drivers/acpi/acpica/nsobject.c +++ b/drivers/acpi/acpica/nsobject.c | |||
@@ -61,7 +61,7 @@ ACPI_MODULE_NAME("nsobject") | |||
61 | * RETURN: Status | 61 | * RETURN: Status |
62 | * | 62 | * |
63 | * DESCRIPTION: Record the given object as the value associated with the | 63 | * DESCRIPTION: Record the given object as the value associated with the |
64 | * name whose acpi_handle is passed. If Object is NULL | 64 | * name whose acpi_handle is passed. If Object is NULL |
65 | * and Type is ACPI_TYPE_ANY, set the name as having no value. | 65 | * and Type is ACPI_TYPE_ANY, set the name as having no value. |
66 | * Note: Future may require that the Node->Flags field be passed | 66 | * Note: Future may require that the Node->Flags field be passed |
67 | * as a parameter. | 67 | * as a parameter. |
@@ -133,7 +133,7 @@ acpi_ns_attach_object(struct acpi_namespace_node *node, | |||
133 | ((struct acpi_namespace_node *)object)->object) { | 133 | ((struct acpi_namespace_node *)object)->object) { |
134 | /* | 134 | /* |
135 | * Value passed is a name handle and that name has a | 135 | * Value passed is a name handle and that name has a |
136 | * non-null value. Use that name's value and type. | 136 | * non-null value. Use that name's value and type. |
137 | */ | 137 | */ |
138 | obj_desc = ((struct acpi_namespace_node *)object)->object; | 138 | obj_desc = ((struct acpi_namespace_node *)object)->object; |
139 | object_type = ((struct acpi_namespace_node *)object)->type; | 139 | object_type = ((struct acpi_namespace_node *)object)->type; |
@@ -321,7 +321,7 @@ union acpi_operand_object *acpi_ns_get_secondary_object(union | |||
321 | * | 321 | * |
322 | * RETURN: Status | 322 | * RETURN: Status |
323 | * | 323 | * |
324 | * DESCRIPTION: Low-level attach data. Create and attach a Data object. | 324 | * DESCRIPTION: Low-level attach data. Create and attach a Data object. |
325 | * | 325 | * |
326 | ******************************************************************************/ | 326 | ******************************************************************************/ |
327 | 327 | ||
@@ -377,7 +377,7 @@ acpi_ns_attach_data(struct acpi_namespace_node *node, | |||
377 | * | 377 | * |
378 | * RETURN: Status | 378 | * RETURN: Status |
379 | * | 379 | * |
380 | * DESCRIPTION: Low-level detach data. Delete the data node, but the caller | 380 | * DESCRIPTION: Low-level detach data. Delete the data node, but the caller |
381 | * is responsible for the actual data. | 381 | * is responsible for the actual data. |
382 | * | 382 | * |
383 | ******************************************************************************/ | 383 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c index ec7ba2d3463c..233f756d5cfa 100644 --- a/drivers/acpi/acpica/nsparse.c +++ b/drivers/acpi/acpica/nsparse.c | |||
@@ -168,11 +168,11 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node) | |||
168 | /* | 168 | /* |
169 | * AML Parse, pass 1 | 169 | * AML Parse, pass 1 |
170 | * | 170 | * |
171 | * In this pass, we load most of the namespace. Control methods | 171 | * In this pass, we load most of the namespace. Control methods |
172 | * are not parsed until later. A parse tree is not created. Instead, | 172 | * are not parsed until later. A parse tree is not created. Instead, |
173 | * each Parser Op subtree is deleted when it is finished. This saves | 173 | * each Parser Op subtree is deleted when it is finished. This saves |
174 | * a great deal of memory, and allows a small cache of parse objects | 174 | * a great deal of memory, and allows a small cache of parse objects |
175 | * to service the entire parse. The second pass of the parse then | 175 | * to service the entire parse. The second pass of the parse then |
176 | * performs another complete parse of the AML. | 176 | * performs another complete parse of the AML. |
177 | */ | 177 | */ |
178 | ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n")); | 178 | ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n")); |
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c index 456cc859f869..1d2d8ffc1bc5 100644 --- a/drivers/acpi/acpica/nssearch.c +++ b/drivers/acpi/acpica/nssearch.c | |||
@@ -314,22 +314,7 @@ acpi_ns_search_and_enter(u32 target_name, | |||
314 | * this problem, and we want to be able to enable ACPI support for them, | 314 | * this problem, and we want to be able to enable ACPI support for them, |
315 | * even though there are a few bad names. | 315 | * even though there are a few bad names. |
316 | */ | 316 | */ |
317 | if (!acpi_ut_valid_acpi_name(target_name)) { | 317 | acpi_ut_repair_name(ACPI_CAST_PTR(char, &target_name)); |
318 | target_name = | ||
319 | acpi_ut_repair_name(ACPI_CAST_PTR(char, &target_name)); | ||
320 | |||
321 | /* Report warning only if in strict mode or debug mode */ | ||
322 | |||
323 | if (!acpi_gbl_enable_interpreter_slack) { | ||
324 | ACPI_WARNING((AE_INFO, | ||
325 | "Found bad character(s) in name, repaired: [%4.4s]\n", | ||
326 | ACPI_CAST_PTR(char, &target_name))); | ||
327 | } else { | ||
328 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
329 | "Found bad character(s) in name, repaired: [%4.4s]\n", | ||
330 | ACPI_CAST_PTR(char, &target_name))); | ||
331 | } | ||
332 | } | ||
333 | 318 | ||
334 | /* Try to find the name in the namespace level specified by the caller */ | 319 | /* Try to find the name in the namespace level specified by the caller */ |
335 | 320 | ||
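With this hunk the name repair is applied unconditionally and silently, instead of only (and noisily) when validation fails. Below is a stand-alone sketch of what such a repair pass looks like; ACPICA's acpi_ut_repair_name() replaces invalid bytes, modeled here with '*', and the validity rules are simplified.

#include <stdio.h>

/* A valid ACPI name character is 'A'-'Z', '0'-'9', or '_'. */
static int valid_name_char(char c)
{
    return (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_';
}

/* Repair a 4-character ACPI name in place, replacing bad bytes. */
static void repair_name(char name[4])
{
    for (int i = 0; i < 4; i++)
        if (!valid_name_char(name[i]))
            name[i] = '*';
}

int main(void)
{
    char name[5] = "AB?1";      /* one invalid byte */

    repair_name(name);
    printf("%.4s\n", name);     /* prints AB*1 */
    return 0;
}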
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c index ef753a41e087..b5b4cb72a8a8 100644 --- a/drivers/acpi/acpica/nsutils.c +++ b/drivers/acpi/acpica/nsutils.c | |||
@@ -530,7 +530,7 @@ acpi_ns_externalize_name(u32 internal_name_length, | |||
530 | ((num_segments > 0) ? (num_segments - 1) : 0) + 1; | 530 | ((num_segments > 0) ? (num_segments - 1) : 0) + 1; |
531 | 531 | ||
532 | /* | 532 | /* |
533 | * Check to see if we're still in bounds. If not, there's a problem | 533 | * Check to see if we're still in bounds. If not, there's a problem |
534 | * with internal_name (invalid format). | 534 | * with internal_name (invalid format). |
535 | */ | 535 | */ |
536 | if (required_length > internal_name_length) { | 536 | if (required_length > internal_name_length) { |
@@ -557,10 +557,14 @@ acpi_ns_externalize_name(u32 internal_name_length, | |||
557 | (*converted_name)[j++] = '.'; | 557 | (*converted_name)[j++] = '.'; |
558 | } | 558 | } |
559 | 559 | ||
560 | (*converted_name)[j++] = internal_name[names_index++]; | 560 | /* Copy and validate the 4-char name segment */ |
561 | (*converted_name)[j++] = internal_name[names_index++]; | 561 | |
562 | (*converted_name)[j++] = internal_name[names_index++]; | 562 | ACPI_MOVE_NAME(&(*converted_name)[j], |
563 | (*converted_name)[j++] = internal_name[names_index++]; | 563 | &internal_name[names_index]); |
564 | acpi_ut_repair_name(&(*converted_name)[j]); | ||
565 | |||
566 | j += ACPI_NAME_SIZE; | ||
567 | names_index += ACPI_NAME_SIZE; | ||
564 | } | 568 | } |
565 | } | 569 | } |
566 | 570 | ||
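The replacement above copies each 4-character segment with ACPI_MOVE_NAME() and repairs it, rather than copying byte by byte. A minimal user-space rendering of building the dotted external name from packed 4-char internal segments is below; the helper names are invented and the repair step is only indicated.

#include <stdio.h>
#include <string.h>

#define NAME_SIZE 4

/* Convert packed 4-char segments ("_SB_PCI0") to dotted form ("_SB_.PCI0"). */
static void externalize(const char *internal, int segments, char *out)
{
    int j = 0;

    for (int i = 0; i < segments; i++) {
        if (i)
            out[j++] = '.';                          /* separator between segments */
        memcpy(&out[j], &internal[i * NAME_SIZE], NAME_SIZE);
        /* a repair pass over out[j..j+3] would go here */
        j += NAME_SIZE;
    }
    out[j] = '\0';
}

int main(void)
{
    char buf[16];

    externalize("_SB_PCI0", 2, buf);
    printf("%s\n", buf);                             /* _SB_.PCI0 */
    return 0;
}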
@@ -681,7 +685,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type) | |||
681 | * \ (backslash) and ^ (caret) prefixes, and the | 685 | * \ (backslash) and ^ (caret) prefixes, and the |
682 | * . (period) to separate segments are supported. | 686 | * . (period) to separate segments are supported. |
683 | * prefix_node - Root of subtree to be searched, or NS_ALL for the | 687 | * prefix_node - Root of subtree to be searched, or NS_ALL for the |
684 | * root of the name space. If Name is fully | 688 | * root of the name space. If Name is fully |
685 | * qualified (first s8 is '\'), the passed value | 689 | * qualified (first s8 is '\'), the passed value |
686 | * of Scope will not be accessed. | 690 | * of Scope will not be accessed. |
687 | * flags - Used to indicate whether to perform upsearch or | 691 | * flags - Used to indicate whether to perform upsearch or |
@@ -689,7 +693,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type) | |||
689 | * return_node - Where the Node is returned | 693 | * return_node - Where the Node is returned |
690 | * | 694 | * |
691 | * DESCRIPTION: Look up a name relative to a given scope and return the | 695 | * DESCRIPTION: Look up a name relative to a given scope and return the |
692 | * corresponding Node. NOTE: Scope can be null. | 696 | * corresponding Node. NOTE: Scope can be null. |
693 | * | 697 | * |
694 | * MUTEX: Locks namespace | 698 | * MUTEX: Locks namespace |
695 | * | 699 | * |
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c index 730bccc5e7f7..0483877f26b8 100644 --- a/drivers/acpi/acpica/nswalk.c +++ b/drivers/acpi/acpica/nswalk.c | |||
@@ -60,8 +60,8 @@ ACPI_MODULE_NAME("nswalk") | |||
60 | * RETURN: struct acpi_namespace_node - Pointer to the NEXT child or NULL if | 60 | * RETURN: struct acpi_namespace_node - Pointer to the NEXT child or NULL if |
61 | * none is found. | 61 | * none is found. |
62 | * | 62 | * |
63 | * DESCRIPTION: Return the next peer node within the namespace. If Handle | 63 | * DESCRIPTION: Return the next peer node within the namespace. If Handle |
64 | * is valid, Scope is ignored. Otherwise, the first node | 64 | * is valid, Scope is ignored. Otherwise, the first node |
65 | * within Scope is returned. | 65 | * within Scope is returned. |
66 | * | 66 | * |
67 | ******************************************************************************/ | 67 | ******************************************************************************/ |
@@ -97,8 +97,8 @@ struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node | |||
97 | * RETURN: struct acpi_namespace_node - Pointer to the NEXT child or NULL if | 97 | * RETURN: struct acpi_namespace_node - Pointer to the NEXT child or NULL if |
98 | * none is found. | 98 | * none is found. |
99 | * | 99 | * |
100 | * DESCRIPTION: Return the next peer node within the namespace. If Handle | 100 | * DESCRIPTION: Return the next peer node within the namespace. If Handle |
101 | * is valid, Scope is ignored. Otherwise, the first node | 101 | * is valid, Scope is ignored. Otherwise, the first node |
102 | * within Scope is returned. | 102 | * within Scope is returned. |
103 | * | 103 | * |
104 | ******************************************************************************/ | 104 | ******************************************************************************/ |
@@ -305,7 +305,7 @@ acpi_ns_walk_namespace(acpi_object_type type, | |||
305 | 305 | ||
306 | /* | 306 | /* |
307 | * Depth first search: Attempt to go down another level in the | 307 | * Depth first search: Attempt to go down another level in the |
308 | * namespace if we are allowed to. Don't go any further if we have | 308 | * namespace if we are allowed to. Don't go any further if we have |
309 | * reached the caller specified maximum depth or if the user | 309 | * reached the caller specified maximum depth or if the user |
310 | * function has specified that the maximum depth has been reached. | 310 | * function has specified that the maximum depth has been reached. |
311 | */ | 311 | */ |
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c index 9692e6702333..d6a9f77972b6 100644 --- a/drivers/acpi/acpica/nsxfeval.c +++ b/drivers/acpi/acpica/nsxfeval.c | |||
@@ -61,16 +61,16 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info); | |||
61 | * PARAMETERS: handle - Object handle (optional) | 61 | * PARAMETERS: handle - Object handle (optional) |
62 | * pathname - Object pathname (optional) | 62 | * pathname - Object pathname (optional) |
63 | * external_params - List of parameters to pass to method, | 63 | * external_params - List of parameters to pass to method, |
64 | * terminated by NULL. May be NULL | 64 | * terminated by NULL. May be NULL |
65 | * if no parameters are being passed. | 65 | * if no parameters are being passed. |
66 | * return_buffer - Where to put method's return value (if | 66 | * return_buffer - Where to put method's return value (if |
67 | * any). If NULL, no value is returned. | 67 | * any). If NULL, no value is returned. |
68 | * return_type - Expected type of return object | 68 | * return_type - Expected type of return object |
69 | * | 69 | * |
70 | * RETURN: Status | 70 | * RETURN: Status |
71 | * | 71 | * |
72 | * DESCRIPTION: Find and evaluate the given object, passing the given | 72 | * DESCRIPTION: Find and evaluate the given object, passing the given |
73 | * parameters if necessary. One of "Handle" or "Pathname" must | 73 | * parameters if necessary. One of "Handle" or "Pathname" must |
74 | * be valid (non-null) | 74 | * be valid (non-null) |
75 | * | 75 | * |
76 | ******************************************************************************/ | 76 | ******************************************************************************/ |
@@ -155,15 +155,15 @@ ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed) | |||
155 | * PARAMETERS: handle - Object handle (optional) | 155 | * PARAMETERS: handle - Object handle (optional) |
156 | * pathname - Object pathname (optional) | 156 | * pathname - Object pathname (optional) |
157 | * external_params - List of parameters to pass to method, | 157 | * external_params - List of parameters to pass to method, |
158 | * terminated by NULL. May be NULL | 158 | * terminated by NULL. May be NULL |
159 | * if no parameters are being passed. | 159 | * if no parameters are being passed. |
160 | * return_buffer - Where to put method's return value (if | 160 | * return_buffer - Where to put method's return value (if |
161 | * any). If NULL, no value is returned. | 161 | * any). If NULL, no value is returned. |
162 | * | 162 | * |
163 | * RETURN: Status | 163 | * RETURN: Status |
164 | * | 164 | * |
165 | * DESCRIPTION: Find and evaluate the given object, passing the given | 165 | * DESCRIPTION: Find and evaluate the given object, passing the given |
166 | * parameters if necessary. One of "Handle" or "Pathname" must | 166 | * parameters if necessary. One of "Handle" or "Pathname" must |
167 | * be valid (non-null) | 167 | * be valid (non-null) |
168 | * | 168 | * |
169 | ******************************************************************************/ | 169 | ******************************************************************************/ |
@@ -542,15 +542,15 @@ acpi_ns_get_device_callback(acpi_handle obj_handle, | |||
542 | acpi_status status; | 542 | acpi_status status; |
543 | struct acpi_namespace_node *node; | 543 | struct acpi_namespace_node *node; |
544 | u32 flags; | 544 | u32 flags; |
545 | struct acpica_device_id *hid; | 545 | struct acpi_pnp_device_id *hid; |
546 | struct acpica_device_id_list *cid; | 546 | struct acpi_pnp_device_id_list *cid; |
547 | u32 i; | 547 | u32 i; |
548 | u8 found; | 548 | u8 found; |
549 | int no_match; | 549 | int no_match; |
550 | 550 | ||
551 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | 551 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); |
552 | if (ACPI_FAILURE(status)) { | 552 | if (ACPI_FAILURE(status)) { |
553 | return (status); | 553 | return_ACPI_STATUS(status); |
554 | } | 554 | } |
555 | 555 | ||
556 | node = acpi_ns_validate_handle(obj_handle); | 556 | node = acpi_ns_validate_handle(obj_handle); |
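The switch from return() to return_ACPI_STATUS() here keeps the ACPICA function-trace output balanced: functions that declare ACPI_FUNCTION_TRACE are expected to exit through return_ACPI_STATUS()/return_VOID so a matching exit record is logged. A hedged sketch of the convention, compilable only inside the ACPICA core where "accommon.h" is available (the helper and its parameter are illustrative):

#include <acpi/acpi.h>
#include "accommon.h"            /* ACPICA-internal; provides the trace macros */

ACPI_MODULE_NAME("example")

/* Hedged sketch: every exit path of a traced function uses
 * return_ACPI_STATUS() so the debug facility can log the exit. */
static acpi_status example_traced_helper(void *pointer)
{
        ACPI_FUNCTION_TRACE(example_traced_helper);

        if (!pointer) {
                return_ACPI_STATUS(AE_BAD_PARAMETER);
        }

        return_ACPI_STATUS(AE_OK);
}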
@@ -656,7 +656,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle, | |||
656 | * DESCRIPTION: Performs a modified depth-first walk of the namespace tree, | 656 | * DESCRIPTION: Performs a modified depth-first walk of the namespace tree, |
657 | * starting (and ending) at the object specified by start_handle. | 657 | * starting (and ending) at the object specified by start_handle. |
658 | * The user_function is called whenever an object of type | 658 | * The user_function is called whenever an object of type |
659 | * Device is found. If the user function returns | 659 | * Device is found. If the user function returns |
660 | * a non-zero value, the search is terminated immediately and this | 660 | * a non-zero value, the search is terminated immediately and this |
661 | * value is returned to the caller. | 661 | * value is returned to the caller. |
662 | * | 662 | * |
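A minimal sketch of the device walk described above, using acpi_get_devices() with a HID filter. "PNP0A03" (the PCI host bridge ID) is used purely as an example; the callback and counter names are illustrative:

#include <acpi/acpi.h>

/* Hedged sketch: called once per matching Device; returning a non-zero
 * status would terminate the search, as the description above states. */
static acpi_status found_bridge(acpi_handle handle, u32 nesting_level,
                                void *context, void **return_value)
{
        int *count = context;

        (*count)++;
        return (AE_OK);
}

static int count_pci_root_bridges(void)
{
        int count = 0;

        (void)acpi_get_devices("PNP0A03", found_bridge, &count, NULL);
        return (count);
}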
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c index 08e9610b34ca..811c6f13f476 100644 --- a/drivers/acpi/acpica/nsxfname.c +++ b/drivers/acpi/acpica/nsxfname.c | |||
@@ -53,8 +53,8 @@ | |||
53 | ACPI_MODULE_NAME("nsxfname") | 53 | ACPI_MODULE_NAME("nsxfname") |
54 | 54 | ||
55 | /* Local prototypes */ | 55 | /* Local prototypes */ |
56 | static char *acpi_ns_copy_device_id(struct acpica_device_id *dest, | 56 | static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest, |
57 | struct acpica_device_id *source, | 57 | struct acpi_pnp_device_id *source, |
58 | char *string_area); | 58 | char *string_area); |
59 | 59 | ||
60 | /****************************************************************************** | 60 | /****************************************************************************** |
@@ -69,8 +69,8 @@ static char *acpi_ns_copy_device_id(struct acpica_device_id *dest, | |||
69 | * RETURN: Status | 69 | * RETURN: Status |
70 | * | 70 | * |
71 | * DESCRIPTION: This routine will search for a caller specified name in the | 71 | * DESCRIPTION: This routine will search for a caller specified name in the |
72 | * name space. The caller can restrict the search region by | 72 | * name space. The caller can restrict the search region by |
73 | * specifying a non NULL parent. The parent value is itself a | 73 | * specifying a non NULL parent. The parent value is itself a |
74 | * namespace handle. | 74 | * namespace handle. |
75 | * | 75 | * |
76 | ******************************************************************************/ | 76 | ******************************************************************************/ |
@@ -149,7 +149,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_handle) | |||
149 | * RETURN: Pointer to a string containing the fully qualified Name. | 149 | * RETURN: Pointer to a string containing the fully qualified Name. |
150 | * | 150 | * |
151 | * DESCRIPTION: This routine returns the fully qualified name associated with | 151 | * DESCRIPTION: This routine returns the fully qualified name associated with |
152 | * the Handle parameter. This and the acpi_pathname_to_handle are | 152 | * the Handle parameter. This and the acpi_pathname_to_handle are |
153 | * complementary functions. | 153 | * complementary functions. |
154 | * | 154 | * |
155 | ******************************************************************************/ | 155 | ******************************************************************************/ |
@@ -202,8 +202,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer) | |||
202 | 202 | ||
203 | /* Just copy the ACPI name from the Node and zero terminate it */ | 203 | /* Just copy the ACPI name from the Node and zero terminate it */ |
204 | 204 | ||
205 | ACPI_STRNCPY(buffer->pointer, acpi_ut_get_node_name(node), | 205 | ACPI_MOVE_NAME(buffer->pointer, acpi_ut_get_node_name(node)); |
206 | ACPI_NAME_SIZE); | ||
207 | ((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0; | 206 | ((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0; |
208 | status = AE_OK; | 207 | status = AE_OK; |
209 | 208 | ||
@@ -219,20 +218,21 @@ ACPI_EXPORT_SYMBOL(acpi_get_name) | |||
219 | * | 218 | * |
220 | * FUNCTION: acpi_ns_copy_device_id | 219 | * FUNCTION: acpi_ns_copy_device_id |
221 | * | 220 | * |
222 | * PARAMETERS: dest - Pointer to the destination DEVICE_ID | 221 | * PARAMETERS: dest - Pointer to the destination PNP_DEVICE_ID |
223 | * source - Pointer to the source DEVICE_ID | 222 | * source - Pointer to the source PNP_DEVICE_ID |
224 | * string_area - Pointer to where to copy the dest string | 223 | * string_area - Pointer to where to copy the dest string |
225 | * | 224 | * |
226 | * RETURN: Pointer to the next string area | 225 | * RETURN: Pointer to the next string area |
227 | * | 226 | * |
228 | * DESCRIPTION: Copy a single DEVICE_ID, including the string data. | 227 | * DESCRIPTION: Copy a single PNP_DEVICE_ID, including the string data. |
229 | * | 228 | * |
230 | ******************************************************************************/ | 229 | ******************************************************************************/ |
231 | static char *acpi_ns_copy_device_id(struct acpica_device_id *dest, | 230 | static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest, |
232 | struct acpica_device_id *source, | 231 | struct acpi_pnp_device_id *source, |
233 | char *string_area) | 232 | char *string_area) |
234 | { | 233 | { |
235 | /* Create the destination DEVICE_ID */ | 234 | |
235 | /* Create the destination PNP_DEVICE_ID */ | ||
236 | 236 | ||
237 | dest->string = string_area; | 237 | dest->string = string_area; |
238 | dest->length = source->length; | 238 | dest->length = source->length; |
@@ -256,8 +256,8 @@ static char *acpi_ns_copy_device_id(struct acpica_device_id *dest, | |||
256 | * namespace node and possibly by running several standard | 256 | * namespace node and possibly by running several standard |
257 | * control methods (Such as in the case of a device.) | 257 | * control methods (Such as in the case of a device.) |
258 | * | 258 | * |
259 | * For Device and Processor objects, run the Device _HID, _UID, _CID, _STA, | 259 | * For Device and Processor objects, run the Device _HID, _UID, _CID, _SUB, |
260 | * _ADR, _sx_w, and _sx_d methods. | 260 | * _STA, _ADR, _sx_w, and _sx_d methods. |
261 | * | 261 | * |
262 | * Note: Allocates the return buffer, must be freed by the caller. | 262 | * Note: Allocates the return buffer, must be freed by the caller. |
263 | * | 263 | * |
@@ -269,9 +269,10 @@ acpi_get_object_info(acpi_handle handle, | |||
269 | { | 269 | { |
270 | struct acpi_namespace_node *node; | 270 | struct acpi_namespace_node *node; |
271 | struct acpi_device_info *info; | 271 | struct acpi_device_info *info; |
272 | struct acpica_device_id_list *cid_list = NULL; | 272 | struct acpi_pnp_device_id_list *cid_list = NULL; |
273 | struct acpica_device_id *hid = NULL; | 273 | struct acpi_pnp_device_id *hid = NULL; |
274 | struct acpica_device_id *uid = NULL; | 274 | struct acpi_pnp_device_id *uid = NULL; |
275 | struct acpi_pnp_device_id *sub = NULL; | ||
275 | char *next_id_string; | 276 | char *next_id_string; |
276 | acpi_object_type type; | 277 | acpi_object_type type; |
277 | acpi_name name; | 278 | acpi_name name; |
@@ -316,7 +317,7 @@ acpi_get_object_info(acpi_handle handle, | |||
316 | if ((type == ACPI_TYPE_DEVICE) || (type == ACPI_TYPE_PROCESSOR)) { | 317 | if ((type == ACPI_TYPE_DEVICE) || (type == ACPI_TYPE_PROCESSOR)) { |
317 | /* | 318 | /* |
318 | * Get extra info for ACPI Device/Processor objects only: | 319 | * Get extra info for ACPI Device/Processor objects only: |
319 | * Run the Device _HID, _UID, and _CID methods. | 320 | * Run the Device _HID, _UID, _SUB, and _CID methods. |
320 | * | 321 | * |
321 | * Note: none of these methods are required, so they may or may | 322 | * Note: none of these methods are required, so they may or may |
322 | * not be present for this device. The Info->Valid bitfield is used | 323 | * not be present for this device. The Info->Valid bitfield is used |
@@ -339,6 +340,14 @@ acpi_get_object_info(acpi_handle handle, | |||
339 | valid |= ACPI_VALID_UID; | 340 | valid |= ACPI_VALID_UID; |
340 | } | 341 | } |
341 | 342 | ||
343 | /* Execute the Device._SUB method */ | ||
344 | |||
345 | status = acpi_ut_execute_SUB(node, &sub); | ||
346 | if (ACPI_SUCCESS(status)) { | ||
347 | info_size += sub->length; | ||
348 | valid |= ACPI_VALID_SUB; | ||
349 | } | ||
350 | |||
342 | /* Execute the Device._CID method */ | 351 | /* Execute the Device._CID method */ |
343 | 352 | ||
344 | status = acpi_ut_execute_CID(node, &cid_list); | 353 | status = acpi_ut_execute_CID(node, &cid_list); |
@@ -348,7 +357,7 @@ acpi_get_object_info(acpi_handle handle, | |||
348 | 357 | ||
349 | info_size += | 358 | info_size += |
350 | (cid_list->list_size - | 359 | (cid_list->list_size - |
351 | sizeof(struct acpica_device_id_list)); | 360 | sizeof(struct acpi_pnp_device_id_list)); |
352 | valid |= ACPI_VALID_CID; | 361 | valid |= ACPI_VALID_CID; |
353 | } | 362 | } |
354 | } | 363 | } |
@@ -418,16 +427,17 @@ acpi_get_object_info(acpi_handle handle, | |||
418 | next_id_string = ACPI_CAST_PTR(char, info->compatible_id_list.ids); | 427 | next_id_string = ACPI_CAST_PTR(char, info->compatible_id_list.ids); |
419 | if (cid_list) { | 428 | if (cid_list) { |
420 | 429 | ||
421 | /* Point past the CID DEVICE_ID array */ | 430 | /* Point past the CID PNP_DEVICE_ID array */ |
422 | 431 | ||
423 | next_id_string += | 432 | next_id_string += |
424 | ((acpi_size) cid_list->count * | 433 | ((acpi_size) cid_list->count * |
425 | sizeof(struct acpica_device_id)); | 434 | sizeof(struct acpi_pnp_device_id)); |
426 | } | 435 | } |
427 | 436 | ||
428 | /* | 437 | /* |
429 | * Copy the HID, UID, and CIDs to the return buffer. The variable-length | 438 | * Copy the HID, UID, SUB, and CIDs to the return buffer. |
430 | * strings are copied to the reserved area at the end of the buffer. | 439 | * The variable-length strings are copied to the reserved area |
440 | * at the end of the buffer. | ||
431 | * | 441 | * |
432 | * For HID and CID, check if the ID is a PCI Root Bridge. | 442 | * For HID and CID, check if the ID is a PCI Root Bridge. |
433 | */ | 443 | */ |
@@ -445,6 +455,11 @@ acpi_get_object_info(acpi_handle handle, | |||
445 | uid, next_id_string); | 455 | uid, next_id_string); |
446 | } | 456 | } |
447 | 457 | ||
458 | if (sub) { | ||
459 | next_id_string = acpi_ns_copy_device_id(&info->subsystem_id, | ||
460 | sub, next_id_string); | ||
461 | } | ||
462 | |||
448 | if (cid_list) { | 463 | if (cid_list) { |
449 | info->compatible_id_list.count = cid_list->count; | 464 | info->compatible_id_list.count = cid_list->count; |
450 | info->compatible_id_list.list_size = cid_list->list_size; | 465 | info->compatible_id_list.list_size = cid_list->list_size; |
@@ -481,6 +496,9 @@ acpi_get_object_info(acpi_handle handle, | |||
481 | if (uid) { | 496 | if (uid) { |
482 | ACPI_FREE(uid); | 497 | ACPI_FREE(uid); |
483 | } | 498 | } |
499 | if (sub) { | ||
500 | ACPI_FREE(sub); | ||
501 | } | ||
484 | if (cid_list) { | 502 | if (cid_list) { |
485 | ACPI_FREE(cid_list); | 503 | ACPI_FREE(cid_list); |
486 | } | 504 | } |
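With the _SUB support added above, callers of acpi_get_object_info() can test the new ACPI_VALID_SUB bit and read info->subsystem_id. A hedged usage sketch, assuming <linux/slab.h> for kfree() of the ACPICA-allocated info block:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <acpi/acpi.h>

/* Hedged sketch: retrieve the device info block and, when the _SUB
 * result is present, log the subsystem ID string. */
static void show_subsystem_id(acpi_handle handle)
{
        struct acpi_device_info *info;

        if (ACPI_FAILURE(acpi_get_object_info(handle, &info))) {
                return;
        }

        if (info->valid & ACPI_VALID_SUB) {
                printk(KERN_DEBUG "_SUB: %s\n", info->subsystem_id.string);
        }

        kfree(info);
}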
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c index 6766fc4f088f..9d029dac6b64 100644 --- a/drivers/acpi/acpica/nsxfobj.c +++ b/drivers/acpi/acpica/nsxfobj.c | |||
@@ -220,8 +220,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_parent) | |||
220 | * | 220 | * |
221 | * RETURN: Status | 221 | * RETURN: Status |
222 | * | 222 | * |
223 | * DESCRIPTION: Return the next peer object within the namespace. If Handle is | 223 | * DESCRIPTION: Return the next peer object within the namespace. If Handle is |
224 | * valid, Scope is ignored. Otherwise, the first object within | 224 | * valid, Scope is ignored. Otherwise, the first object within |
225 | * Scope is returned. | 225 | * Scope is returned. |
226 | * | 226 | * |
227 | ******************************************************************************/ | 227 | ******************************************************************************/ |
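A hedged sketch of the peer iteration described above: passing NULL as the child returns the first object under the parent, and passing the previous result back returns the next peer until AE_NOT_FOUND ends the loop:

#include <acpi/acpi.h>

/* Hedged sketch: visit each direct child of 'parent' in turn. */
static void for_each_child(acpi_handle parent)
{
        acpi_handle child = NULL;

        while (ACPI_SUCCESS(acpi_get_next_object(ACPI_TYPE_ANY, parent,
                                                 child, &child))) {
                /* 'child' now refers to the next peer object */
        }
}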
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index 844464c4f901..cb79e2d4d743 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c | |||
@@ -120,7 +120,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state) | |||
120 | * RETURN: Pointer to end-of-package +1 | 120 | * RETURN: Pointer to end-of-package +1 |
121 | * | 121 | * |
122 | * DESCRIPTION: Get next package length and return a pointer past the end of | 122 | * DESCRIPTION: Get next package length and return a pointer past the end of |
123 | * the package. Consumes the package length field | 123 | * the package. Consumes the package length field |
124 | * | 124 | * |
125 | ******************************************************************************/ | 125 | ******************************************************************************/ |
126 | 126 | ||
@@ -147,8 +147,8 @@ u8 *acpi_ps_get_next_package_end(struct acpi_parse_state *parser_state) | |||
147 | * RETURN: Pointer to the start of the name string (pointer points into | 147 | * RETURN: Pointer to the start of the name string (pointer points into |
148 | * the AML. | 148 | * the AML. |
149 | * | 149 | * |
150 | * DESCRIPTION: Get next raw namestring within the AML stream. Handles all name | 150 | * DESCRIPTION: Get next raw namestring within the AML stream. Handles all name |
151 | * prefix characters. Set parser state to point past the string. | 151 | * prefix characters. Set parser state to point past the string. |
152 | * (Name is consumed from the AML.) | 152 | * (Name is consumed from the AML.) |
153 | * | 153 | * |
154 | ******************************************************************************/ | 154 | ******************************************************************************/ |
@@ -220,7 +220,7 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state) | |||
220 | * | 220 | * |
221 | * DESCRIPTION: Get next name (if method call, return # of required args). | 221 | * DESCRIPTION: Get next name (if method call, return # of required args). |
222 | * Names are looked up in the internal namespace to determine | 222 | * Names are looked up in the internal namespace to determine |
223 | * if the name represents a control method. If a method | 223 | * if the name represents a control method. If a method |
224 | * is found, the number of arguments to the method is returned. | 224 | * is found, the number of arguments to the method is returned. |
225 | * This information is critical for parsing to continue correctly. | 225 | * This information is critical for parsing to continue correctly. |
226 | * | 226 | * |
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index 799162c1b6df..5607805aab26 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c | |||
@@ -133,18 +133,46 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state) | |||
133 | 133 | ||
134 | case AML_CLASS_UNKNOWN: | 134 | case AML_CLASS_UNKNOWN: |
135 | 135 | ||
136 | /* The opcode is unrecognized. Just skip unknown opcodes */ | 136 | /* The opcode is unrecognized. Complain and skip unknown opcodes */ |
137 | 137 | ||
138 | ACPI_ERROR((AE_INFO, | 138 | if (walk_state->pass_number == 2) { |
139 | "Found unknown opcode 0x%X at AML address %p offset 0x%X, ignoring", | 139 | ACPI_ERROR((AE_INFO, |
140 | walk_state->opcode, walk_state->parser_state.aml, | 140 | "Unknown opcode 0x%.2X at table offset 0x%.4X, ignoring", |
141 | walk_state->aml_offset)); | 141 | walk_state->opcode, |
142 | (u32)(walk_state->aml_offset + | ||
143 | sizeof(struct acpi_table_header)))); | ||
142 | 144 | ||
143 | ACPI_DUMP_BUFFER(walk_state->parser_state.aml, 128); | 145 | ACPI_DUMP_BUFFER(walk_state->parser_state.aml - 16, 48); |
144 | 146 | ||
145 | /* Assume one-byte bad opcode */ | 147 | #ifdef ACPI_ASL_COMPILER |
148 | /* | ||
149 | * This is executed for the disassembler only. Output goes | ||
150 | * to the disassembled ASL output file. | ||
151 | */ | ||
152 | acpi_os_printf | ||
153 | ("/*\nError: Unknown opcode 0x%.2X at table offset 0x%.4X, context:\n", | ||
154 | walk_state->opcode, | ||
155 | (u32)(walk_state->aml_offset + | ||
156 | sizeof(struct acpi_table_header))); | ||
157 | |||
158 | /* Dump the context surrounding the invalid opcode */ | ||
159 | |||
160 | acpi_ut_dump_buffer(((u8 *)walk_state->parser_state. | ||
161 | aml - 16), 48, DB_BYTE_DISPLAY, | ||
162 | walk_state->aml_offset + | ||
163 | sizeof(struct acpi_table_header) - | ||
164 | 16); | ||
165 | acpi_os_printf(" */\n"); | ||
166 | #endif | ||
167 | } | ||
168 | |||
169 | /* Increment past one-byte or two-byte opcode */ | ||
146 | 170 | ||
147 | walk_state->parser_state.aml++; | 171 | walk_state->parser_state.aml++; |
172 | if (walk_state->opcode > 0xFF) { /* Can only happen if first byte is 0x5B */ | ||
173 | walk_state->parser_state.aml++; | ||
174 | } | ||
175 | |||
148 | return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE); | 176 | return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE); |
149 | 177 | ||
150 | default: | 178 | default: |
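The new skip logic advances the parser pointer twice for opcodes above 0xFF because extended AML opcodes are encoded as the 0x5B prefix followed by a second byte. A hedged illustration (this helper is not part of the patch):

#include <acpi/acpi.h>

/* Hedged illustration: a 16-bit opcode value above 0xFF implies the
 * two-byte 0x5B-prefixed encoding in the AML stream. */
static u32 aml_opcode_byte_length(u16 opcode)
{
        return ((opcode > 0x00FF) ? 2 : 1);
}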
@@ -519,11 +547,18 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state, | |||
519 | if ((op_info->class == | 547 | if ((op_info->class == |
520 | AML_CLASS_EXECUTE) && (!arg)) { | 548 | AML_CLASS_EXECUTE) && (!arg)) { |
521 | ACPI_WARNING((AE_INFO, | 549 | ACPI_WARNING((AE_INFO, |
522 | "Detected an unsupported executable opcode " | 550 | "Unsupported module-level executable opcode " |
523 | "at module-level: [0x%.4X] at table offset 0x%.4X", | 551 | "0x%.2X at table offset 0x%.4X", |
524 | op->common.aml_opcode, | 552 | op->common. |
525 | (u32)((aml_op_start - walk_state->parser_state.aml_start) | 553 | aml_opcode, |
526 | + sizeof(struct acpi_table_header)))); | 554 | (u32) |
555 | (ACPI_PTR_DIFF | ||
556 | (aml_op_start, | ||
557 | walk_state-> | ||
558 | parser_state. | ||
559 | aml_start) + | ||
560 | sizeof(struct | ||
561 | acpi_table_header)))); | ||
527 | } | 562 | } |
528 | } | 563 | } |
529 | break; | 564 | break; |
@@ -843,8 +878,6 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state, | |||
843 | *op = NULL; | 878 | *op = NULL; |
844 | } | 879 | } |
845 | 880 | ||
846 | ACPI_PREEMPTION_POINT(); | ||
847 | |||
848 | return_ACPI_STATUS(AE_OK); | 881 | return_ACPI_STATUS(AE_OK); |
849 | } | 882 | } |
850 | 883 | ||
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c index ed1d457bd5ca..1793d934aa30 100644 --- a/drivers/acpi/acpica/psopcode.c +++ b/drivers/acpi/acpica/psopcode.c | |||
@@ -59,7 +59,7 @@ static const u8 acpi_gbl_argument_count[] = | |||
59 | * | 59 | * |
60 | * DESCRIPTION: Opcode table. Each entry contains <opcode, type, name, operands> | 60 | * DESCRIPTION: Opcode table. Each entry contains <opcode, type, name, operands> |
61 | * The name is a simple ascii string, the operand specifier is an | 61 | * The name is a simple ascii string, the operand specifier is an |
62 | * ascii string with one letter per operand. The letter specifies | 62 | * ascii string with one letter per operand. The letter specifies |
63 | * the operand type. | 63 | * the operand type. |
64 | * | 64 | * |
65 | ******************************************************************************/ | 65 | ******************************************************************************/ |
@@ -183,7 +183,7 @@ static const u8 acpi_gbl_argument_count[] = | |||
183 | ******************************************************************************/ | 183 | ******************************************************************************/ |
184 | 184 | ||
185 | /* | 185 | /* |
186 | * Master Opcode information table. A summary of everything we know about each | 186 | * Master Opcode information table. A summary of everything we know about each |
187 | * opcode, all in one place. | 187 | * opcode, all in one place. |
188 | */ | 188 | */ |
189 | const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { | 189 | const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { |
@@ -392,10 +392,12 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { | |||
392 | AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE), | 392 | AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE), |
393 | /* 38 */ ACPI_OP("LAnd", ARGP_LAND_OP, ARGI_LAND_OP, ACPI_TYPE_ANY, | 393 | /* 38 */ ACPI_OP("LAnd", ARGP_LAND_OP, ARGI_LAND_OP, ACPI_TYPE_ANY, |
394 | AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, | 394 | AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, |
395 | AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT), | 395 | AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | |
396 | AML_CONSTANT), | ||
396 | /* 39 */ ACPI_OP("LOr", ARGP_LOR_OP, ARGI_LOR_OP, ACPI_TYPE_ANY, | 397 | /* 39 */ ACPI_OP("LOr", ARGP_LOR_OP, ARGI_LOR_OP, ACPI_TYPE_ANY, |
397 | AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, | 398 | AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, |
398 | AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT), | 399 | AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | |
400 | AML_CONSTANT), | ||
399 | /* 3A */ ACPI_OP("LNot", ARGP_LNOT_OP, ARGI_LNOT_OP, ACPI_TYPE_ANY, | 401 | /* 3A */ ACPI_OP("LNot", ARGP_LNOT_OP, ARGI_LNOT_OP, ACPI_TYPE_ANY, |
400 | AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, | 402 | AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, |
401 | AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), | 403 | AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), |
@@ -495,7 +497,8 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { | |||
495 | AML_NSNODE | AML_NAMED | AML_DEFER), | 497 | AML_NSNODE | AML_NAMED | AML_DEFER), |
496 | /* 59 */ ACPI_OP("Field", ARGP_FIELD_OP, ARGI_FIELD_OP, ACPI_TYPE_ANY, | 498 | /* 59 */ ACPI_OP("Field", ARGP_FIELD_OP, ARGI_FIELD_OP, ACPI_TYPE_ANY, |
497 | AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, | 499 | AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, |
498 | AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD), | 500 | AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | |
501 | AML_FIELD), | ||
499 | /* 5A */ ACPI_OP("Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP, | 502 | /* 5A */ ACPI_OP("Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP, |
500 | ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT, | 503 | ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT, |
501 | AML_TYPE_NAMED_NO_OBJ, | 504 | AML_TYPE_NAMED_NO_OBJ, |
@@ -519,12 +522,13 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { | |||
519 | /* 5E */ ACPI_OP("IndexField", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP, | 522 | /* 5E */ ACPI_OP("IndexField", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP, |
520 | ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, | 523 | ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, |
521 | AML_TYPE_NAMED_FIELD, | 524 | AML_TYPE_NAMED_FIELD, |
522 | AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD), | 525 | AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | |
526 | AML_FIELD), | ||
523 | /* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP, | 527 | /* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP, |
524 | ACPI_TYPE_LOCAL_BANK_FIELD, AML_CLASS_NAMED_OBJECT, | 528 | ACPI_TYPE_LOCAL_BANK_FIELD, |
525 | AML_TYPE_NAMED_FIELD, | 529 | AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, |
526 | AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD | | 530 | AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | |
527 | AML_DEFER), | 531 | AML_FIELD | AML_DEFER), |
528 | 532 | ||
529 | /* Internal opcodes that map to invalid AML opcodes */ | 533 | /* Internal opcodes that map to invalid AML opcodes */ |
530 | 534 | ||
@@ -632,7 +636,8 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { | |||
632 | /* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP, | 636 | /* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP, |
633 | ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, | 637 | ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, |
634 | AML_TYPE_NAMED_NO_OBJ, | 638 | AML_TYPE_NAMED_NO_OBJ, |
635 | AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE), | 639 | AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | |
640 | AML_NSNODE), | ||
636 | 641 | ||
637 | /* ACPI 3.0 opcodes */ | 642 | /* ACPI 3.0 opcodes */ |
638 | 643 | ||
@@ -695,7 +700,7 @@ static const u8 acpi_gbl_short_op_index[256] = { | |||
695 | 700 | ||
696 | /* | 701 | /* |
697 | * This table is indexed by the second opcode of the extended opcode | 702 | * This table is indexed by the second opcode of the extended opcode |
698 | * pair. It returns an index into the opcode table (acpi_gbl_aml_op_info) | 703 | * pair. It returns an index into the opcode table (acpi_gbl_aml_op_info) |
699 | */ | 704 | */ |
700 | static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = { | 705 | static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = { |
701 | /* 0 1 2 3 4 5 6 7 */ | 706 | /* 0 1 2 3 4 5 6 7 */ |
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c index 01985703bb98..2494caf47755 100644 --- a/drivers/acpi/acpica/psparse.c +++ b/drivers/acpi/acpica/psparse.c | |||
@@ -43,9 +43,9 @@ | |||
43 | 43 | ||
44 | /* | 44 | /* |
45 | * Parse the AML and build an operation tree as most interpreters, | 45 | * Parse the AML and build an operation tree as most interpreters, |
46 | * like Perl, do. Parsing is done by hand rather than with a YACC | 46 | * like Perl, do. Parsing is done by hand rather than with a YACC |
47 | * generated parser to tightly constrain stack and dynamic memory | 47 | * generated parser to tightly constrain stack and dynamic memory |
48 | * usage. At the same time, parsing is kept flexible and the code | 48 | * usage. At the same time, parsing is kept flexible and the code |
49 | * fairly compact by parsing based on a list of AML opcode | 49 | * fairly compact by parsing based on a list of AML opcode |
50 | * templates in aml_op_info[] | 50 | * templates in aml_op_info[] |
51 | */ | 51 | */ |
@@ -379,7 +379,7 @@ acpi_ps_next_parse_state(struct acpi_walk_state *walk_state, | |||
379 | case AE_CTRL_FALSE: | 379 | case AE_CTRL_FALSE: |
380 | /* | 380 | /* |
381 | * Either an IF/WHILE Predicate was false or we encountered a BREAK | 381 | * Either an IF/WHILE Predicate was false or we encountered a BREAK |
382 | * opcode. In both cases, we do not execute the rest of the | 382 | * opcode. In both cases, we do not execute the rest of the |
383 | * package; We simply close out the parent (finishing the walk of | 383 | * package; We simply close out the parent (finishing the walk of |
384 | * this branch of the tree) and continue execution at the parent | 384 | * this branch of the tree) and continue execution at the parent |
385 | * level. | 385 | * level. |
@@ -459,8 +459,9 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state) | |||
459 | 459 | ||
460 | /* Executing a control method - additional cleanup */ | 460 | /* Executing a control method - additional cleanup */ |
461 | 461 | ||
462 | acpi_ds_terminate_control_method( | 462 | acpi_ds_terminate_control_method(walk_state-> |
463 | walk_state->method_desc, walk_state); | 463 | method_desc, |
464 | walk_state); | ||
464 | } | 465 | } |
465 | 466 | ||
466 | acpi_ds_delete_walk_state(walk_state); | 467 | acpi_ds_delete_walk_state(walk_state); |
@@ -487,7 +488,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state) | |||
487 | acpi_gbl_current_walk_list = thread; | 488 | acpi_gbl_current_walk_list = thread; |
488 | 489 | ||
489 | /* | 490 | /* |
490 | * Execute the walk loop as long as there is a valid Walk State. This | 491 | * Execute the walk loop as long as there is a valid Walk State. This |
491 | * handles nested control method invocations without recursion. | 492 | * handles nested control method invocations without recursion. |
492 | */ | 493 | */ |
493 | ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "State=%p\n", walk_state)); | 494 | ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "State=%p\n", walk_state)); |
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c index 8736ad5f04d3..4137dcb352d1 100644 --- a/drivers/acpi/acpica/psutils.c +++ b/drivers/acpi/acpica/psutils.c | |||
@@ -108,7 +108,7 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode) | |||
108 | * RETURN: Pointer to the new Op, null on failure | 108 | * RETURN: Pointer to the new Op, null on failure |
109 | * | 109 | * |
110 | * DESCRIPTION: Allocate an acpi_op, choose op type (and thus size) based on | 110 | * DESCRIPTION: Allocate an acpi_op, choose op type (and thus size) based on |
111 | * opcode. A cache of opcodes is available for the pure | 111 | * opcode. A cache of opcodes is available for the pure |
112 | * GENERIC_OP, since this is by far the most commonly used. | 112 | * GENERIC_OP, since this is by far the most commonly used. |
113 | * | 113 | * |
114 | ******************************************************************************/ | 114 | ******************************************************************************/ |
@@ -164,7 +164,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode) | |||
164 | * | 164 | * |
165 | * RETURN: None. | 165 | * RETURN: None. |
166 | * | 166 | * |
167 | * DESCRIPTION: Free an Op object. Either put it on the GENERIC_OP cache list | 167 | * DESCRIPTION: Free an Op object. Either put it on the GENERIC_OP cache list |
168 | * or actually free it. | 168 | * or actually free it. |
169 | * | 169 | * |
170 | ******************************************************************************/ | 170 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c index de12469d1c9c..147feb6aa2a0 100644 --- a/drivers/acpi/acpica/rscalc.c +++ b/drivers/acpi/acpica/rscalc.c | |||
@@ -457,6 +457,15 @@ acpi_rs_get_list_length(u8 * aml_buffer, | |||
457 | * Get the number of vendor data bytes | 457 | * Get the number of vendor data bytes |
458 | */ | 458 | */ |
459 | extra_struct_bytes = resource_length; | 459 | extra_struct_bytes = resource_length; |
460 | |||
461 | /* | ||
462 | * There is already one byte included in the minimum | ||
463 | * descriptor size. If there are extra struct bytes, | ||
464 | * subtract one from the count. | ||
465 | */ | ||
466 | if (extra_struct_bytes) { | ||
467 | extra_struct_bytes--; | ||
468 | } | ||
460 | break; | 469 | break; |
461 | 470 | ||
462 | case ACPI_RESOURCE_NAME_END_TAG: | 471 | case ACPI_RESOURCE_NAME_END_TAG: |
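A hedged worked example of the adjustment above, assuming the minimum vendor resource structure already accounts for one data byte: a small vendor descriptor carrying 4 data bytes contributes only 3 extra bytes on top of the minimum structure size.

#include <acpi/acpi.h>

/* Hedged sketch of the computation: subtract the one byte already
 * included in the minimum descriptor size, but never go below zero. */
static u32 vendor_extra_struct_bytes(u32 resource_length)
{
        return (resource_length ? resource_length - 1 : 0);
}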
@@ -601,7 +610,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object, | |||
601 | /* | 610 | /* |
602 | * Calculate the size of the return buffer. | 611 | * Calculate the size of the return buffer. |
603 | * The base size is the number of elements * the sizes of the | 612 | * The base size is the number of elements * the sizes of the |
604 | * structures. Additional space for the strings is added below. | 613 | * structures. Additional space for the strings is added below. |
605 | * The minus one is to subtract the size of the u8 Source[1] | 614 | * The minus one is to subtract the size of the u8 Source[1] |
606 | * member because it is added below. | 615 | * member because it is added below. |
607 | * | 616 | * |
@@ -664,8 +673,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object, | |||
664 | (*sub_object_list)->string. | 673 | (*sub_object_list)->string. |
665 | length + 1); | 674 | length + 1); |
666 | } else { | 675 | } else { |
667 | temp_size_needed += | 676 | temp_size_needed += acpi_ns_get_pathname_length((*sub_object_list)->reference.node); |
668 | acpi_ns_get_pathname_length((*sub_object_list)->reference.node); | ||
669 | } | 677 | } |
670 | } else { | 678 | } else { |
671 | /* | 679 | /* |
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c index 46b5324b22d6..8b64db9a3fd2 100644 --- a/drivers/acpi/acpica/rslist.c +++ b/drivers/acpi/acpica/rslist.c | |||
@@ -109,7 +109,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml, | |||
109 | ACPI_ERROR((AE_INFO, | 109 | ACPI_ERROR((AE_INFO, |
110 | "Invalid/unsupported resource descriptor: Type 0x%2.2X", | 110 | "Invalid/unsupported resource descriptor: Type 0x%2.2X", |
111 | resource_index)); | 111 | resource_index)); |
112 | return (AE_AML_INVALID_RESOURCE_TYPE); | 112 | return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE); |
113 | } | 113 | } |
114 | 114 | ||
115 | /* Convert the AML byte stream resource to a local resource struct */ | 115 | /* Convert the AML byte stream resource to a local resource struct */ |
@@ -200,7 +200,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource, | |||
200 | ACPI_ERROR((AE_INFO, | 200 | ACPI_ERROR((AE_INFO, |
201 | "Invalid/unsupported resource descriptor: Type 0x%2.2X", | 201 | "Invalid/unsupported resource descriptor: Type 0x%2.2X", |
202 | resource->type)); | 202 | resource->type)); |
203 | return (AE_AML_INVALID_RESOURCE_TYPE); | 203 | return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE); |
204 | } | 204 | } |
205 | 205 | ||
206 | status = acpi_rs_convert_resource_to_aml(resource, | 206 | status = acpi_rs_convert_resource_to_aml(resource, |
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c index 57deae166577..77d1db29a725 100644 --- a/drivers/acpi/acpica/tbfind.c +++ b/drivers/acpi/acpica/tbfind.c | |||
@@ -77,7 +77,7 @@ acpi_tb_find_table(char *signature, | |||
77 | /* Normalize the input strings */ | 77 | /* Normalize the input strings */ |
78 | 78 | ||
79 | ACPI_MEMSET(&header, 0, sizeof(struct acpi_table_header)); | 79 | ACPI_MEMSET(&header, 0, sizeof(struct acpi_table_header)); |
80 | ACPI_STRNCPY(header.signature, signature, ACPI_NAME_SIZE); | 80 | ACPI_MOVE_NAME(header.signature, signature); |
81 | ACPI_STRNCPY(header.oem_id, oem_id, ACPI_OEM_ID_SIZE); | 81 | ACPI_STRNCPY(header.oem_id, oem_id, ACPI_OEM_ID_SIZE); |
82 | ACPI_STRNCPY(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE); | 82 | ACPI_STRNCPY(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE); |
83 | 83 | ||
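The ACPI_MOVE_NAME change is safe here because a table signature is exactly ACPI_NAME_SIZE (4) bytes and is not NUL-terminated, so a fixed 4-byte copy is sufficient. A hedged, open-coded equivalent (illustrative only, not the macro's actual definition):

#include <linux/string.h>
#include <acpi/acpi.h>

/* Hedged sketch: copy a 4-byte ACPI name/signature without relying on
 * NUL termination in either buffer. */
static void copy_table_signature(char dest[ACPI_NAME_SIZE], const char *src)
{
        memcpy(dest, src, ACPI_NAME_SIZE);
}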
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index 70f9d787c82c..f540ae462925 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c | |||
@@ -526,6 +526,8 @@ void acpi_tb_terminate(void) | |||
526 | 526 | ||
527 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n")); | 527 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n")); |
528 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | 528 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); |
529 | |||
530 | return_VOID; | ||
529 | } | 531 | } |
530 | 532 | ||
531 | /******************************************************************************* | 533 | /******************************************************************************* |
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index b6cea30da638..285e24b97382 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c | |||
@@ -354,7 +354,7 @@ u8 acpi_tb_checksum(u8 *buffer, u32 length) | |||
354 | sum = (u8) (sum + *(buffer++)); | 354 | sum = (u8) (sum + *(buffer++)); |
355 | } | 355 | } |
356 | 356 | ||
357 | return sum; | 357 | return (sum); |
358 | } | 358 | } |
359 | 359 | ||
360 | /******************************************************************************* | 360 | /******************************************************************************* |
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index 21101262e47a..f5632780421d 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c | |||
@@ -236,7 +236,7 @@ acpi_get_table_header(char *signature, | |||
236 | sizeof(struct | 236 | sizeof(struct |
237 | acpi_table_header)); | 237 | acpi_table_header)); |
238 | if (!header) { | 238 | if (!header) { |
239 | return AE_NO_MEMORY; | 239 | return (AE_NO_MEMORY); |
240 | } | 240 | } |
241 | ACPI_MEMCPY(out_table_header, header, | 241 | ACPI_MEMCPY(out_table_header, header, |
242 | sizeof(struct acpi_table_header)); | 242 | sizeof(struct acpi_table_header)); |
@@ -244,7 +244,7 @@ acpi_get_table_header(char *signature, | |||
244 | sizeof(struct | 244 | sizeof(struct |
245 | acpi_table_header)); | 245 | acpi_table_header)); |
246 | } else { | 246 | } else { |
247 | return AE_NOT_FOUND; | 247 | return (AE_NOT_FOUND); |
248 | } | 248 | } |
249 | } else { | 249 | } else { |
250 | ACPI_MEMCPY(out_table_header, | 250 | ACPI_MEMCPY(out_table_header, |
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c index f87cc63e69a1..a5e1e4e47098 100644 --- a/drivers/acpi/acpica/tbxfload.c +++ b/drivers/acpi/acpica/tbxfload.c | |||
@@ -211,7 +211,7 @@ static acpi_status acpi_tb_load_namespace(void) | |||
211 | * DESCRIPTION: Dynamically load an ACPI table from the caller's buffer. Must | 211 | * DESCRIPTION: Dynamically load an ACPI table from the caller's buffer. Must |
212 | * be a valid ACPI table with a valid ACPI table header. | 212 | * be a valid ACPI table with a valid ACPI table header. |
213 | * Note1: Mainly intended to support hotplug addition of SSDTs. | 213 | * Note1: Mainly intended to support hotplug addition of SSDTs. |
214 | * Note2: Does not copy the incoming table. User is reponsible | 214 | * Note2: Does not copy the incoming table. User is responsible |
215 | * to ensure that the table is not deleted or unmapped. | 215 | * to ensure that the table is not deleted or unmapped. |
216 | * | 216 | * |
217 | ******************************************************************************/ | 217 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c index 74e720800037..28f330230f99 100644 --- a/drivers/acpi/acpica/tbxfroot.c +++ b/drivers/acpi/acpica/tbxfroot.c | |||
@@ -67,7 +67,6 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp); | |||
67 | 67 | ||
68 | static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) | 68 | static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) |
69 | { | 69 | { |
70 | ACPI_FUNCTION_ENTRY(); | ||
71 | 70 | ||
72 | /* | 71 | /* |
73 | * The signature and checksum must both be correct | 72 | * The signature and checksum must both be correct |
@@ -108,7 +107,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) | |||
108 | * RETURN: Status, RSDP physical address | 107 | * RETURN: Status, RSDP physical address |
109 | * | 108 | * |
110 | * DESCRIPTION: Search lower 1Mbyte of memory for the root system descriptor | 109 | * DESCRIPTION: Search lower 1Mbyte of memory for the root system descriptor |
111 | * pointer structure. If it is found, set *RSDP to point to it. | 110 | * pointer structure. If it is found, set *RSDP to point to it. |
112 | * | 111 | * |
113 | * NOTE1: The RSDP must be either in the first 1K of the Extended | 112 | * NOTE1: The RSDP must be either in the first 1K of the Extended |
114 | * BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.) | 113 | * BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.) |
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c new file mode 100644 index 000000000000..e1d40ed26390 --- /dev/null +++ b/drivers/acpi/acpica/utcache.c | |||
@@ -0,0 +1,323 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: utcache - local cache allocation routines | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2012, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #include <acpi/acpi.h> | ||
45 | #include "accommon.h" | ||
46 | |||
47 | #define _COMPONENT ACPI_UTILITIES | ||
48 | ACPI_MODULE_NAME("utcache") | ||
49 | |||
50 | #ifdef ACPI_USE_LOCAL_CACHE | ||
51 | /******************************************************************************* | ||
52 | * | ||
53 | * FUNCTION: acpi_os_create_cache | ||
54 | * | ||
55 | * PARAMETERS: cache_name - Ascii name for the cache | ||
56 | * object_size - Size of each cached object | ||
57 | * max_depth - Maximum depth of the cache (in objects) | ||
58 | * return_cache - Where the new cache object is returned | ||
59 | * | ||
60 | * RETURN: Status | ||
61 | * | ||
62 | * DESCRIPTION: Create a cache object | ||
63 | * | ||
64 | ******************************************************************************/ | ||
65 | acpi_status | ||
66 | acpi_os_create_cache(char *cache_name, | ||
67 | u16 object_size, | ||
68 | u16 max_depth, struct acpi_memory_list ** return_cache) | ||
69 | { | ||
70 | struct acpi_memory_list *cache; | ||
71 | |||
72 | ACPI_FUNCTION_ENTRY(); | ||
73 | |||
74 | if (!cache_name || !return_cache || (object_size < 16)) { | ||
75 | return (AE_BAD_PARAMETER); | ||
76 | } | ||
77 | |||
78 | /* Create the cache object */ | ||
79 | |||
80 | cache = acpi_os_allocate(sizeof(struct acpi_memory_list)); | ||
81 | if (!cache) { | ||
82 | return (AE_NO_MEMORY); | ||
83 | } | ||
84 | |||
85 | /* Populate the cache object and return it */ | ||
86 | |||
87 | ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list)); | ||
88 | cache->link_offset = 8; | ||
89 | cache->list_name = cache_name; | ||
90 | cache->object_size = object_size; | ||
91 | cache->max_depth = max_depth; | ||
92 | |||
93 | *return_cache = cache; | ||
94 | return (AE_OK); | ||
95 | } | ||
96 | |||
97 | /******************************************************************************* | ||
98 | * | ||
99 | * FUNCTION: acpi_os_purge_cache | ||
100 | * | ||
101 | * PARAMETERS: cache - Handle to cache object | ||
102 | * | ||
103 | * RETURN: Status | ||
104 | * | ||
105 | * DESCRIPTION: Free all objects within the requested cache. | ||
106 | * | ||
107 | ******************************************************************************/ | ||
108 | |||
109 | acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache) | ||
110 | { | ||
111 | char *next; | ||
112 | acpi_status status; | ||
113 | |||
114 | ACPI_FUNCTION_ENTRY(); | ||
115 | |||
116 | if (!cache) { | ||
117 | return (AE_BAD_PARAMETER); | ||
118 | } | ||
119 | |||
120 | status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES); | ||
121 | if (ACPI_FAILURE(status)) { | ||
122 | return (status); | ||
123 | } | ||
124 | |||
125 | /* Walk the list of objects in this cache */ | ||
126 | |||
127 | while (cache->list_head) { | ||
128 | |||
129 | /* Delete and unlink one cached state object */ | ||
130 | |||
131 | next = *(ACPI_CAST_INDIRECT_PTR(char, | ||
132 | &(((char *)cache-> | ||
133 | list_head)[cache-> | ||
134 | link_offset]))); | ||
135 | ACPI_FREE(cache->list_head); | ||
136 | |||
137 | cache->list_head = next; | ||
138 | cache->current_depth--; | ||
139 | } | ||
140 | |||
141 | (void)acpi_ut_release_mutex(ACPI_MTX_CACHES); | ||
142 | return (AE_OK); | ||
143 | } | ||
144 | |||
145 | /******************************************************************************* | ||
146 | * | ||
147 | * FUNCTION: acpi_os_delete_cache | ||
148 | * | ||
149 | * PARAMETERS: cache - Handle to cache object | ||
150 | * | ||
151 | * RETURN: Status | ||
152 | * | ||
153 | * DESCRIPTION: Free all objects within the requested cache and delete the | ||
154 | * cache object. | ||
155 | * | ||
156 | ******************************************************************************/ | ||
157 | |||
158 | acpi_status acpi_os_delete_cache(struct acpi_memory_list * cache) | ||
159 | { | ||
160 | acpi_status status; | ||
161 | |||
162 | ACPI_FUNCTION_ENTRY(); | ||
163 | |||
164 | /* Purge all objects in the cache */ | ||
165 | |||
166 | status = acpi_os_purge_cache(cache); | ||
167 | if (ACPI_FAILURE(status)) { | ||
168 | return (status); | ||
169 | } | ||
170 | |||
171 | /* Now we can delete the cache object */ | ||
172 | |||
173 | acpi_os_free(cache); | ||
174 | return (AE_OK); | ||
175 | } | ||
176 | |||
177 | /******************************************************************************* | ||
178 | * | ||
179 | * FUNCTION: acpi_os_release_object | ||
180 | * | ||
181 | * PARAMETERS: cache - Handle to cache object | ||
182 | * object - The object to be released | ||
183 | * | ||
184 | * RETURN: None | ||
185 | * | ||
186 | * DESCRIPTION: Release an object to the specified cache. If cache is full, | ||
187 | * the object is deleted. | ||
188 | * | ||
189 | ******************************************************************************/ | ||
190 | |||
191 | acpi_status | ||
192 | acpi_os_release_object(struct acpi_memory_list * cache, void *object) | ||
193 | { | ||
194 | acpi_status status; | ||
195 | |||
196 | ACPI_FUNCTION_ENTRY(); | ||
197 | |||
198 | if (!cache || !object) { | ||
199 | return (AE_BAD_PARAMETER); | ||
200 | } | ||
201 | |||
202 | /* If cache is full, just free this object */ | ||
203 | |||
204 | if (cache->current_depth >= cache->max_depth) { | ||
205 | ACPI_FREE(object); | ||
206 | ACPI_MEM_TRACKING(cache->total_freed++); | ||
207 | } | ||
208 | |||
209 | /* Otherwise put this object back into the cache */ | ||
210 | |||
211 | else { | ||
212 | status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES); | ||
213 | if (ACPI_FAILURE(status)) { | ||
214 | return (status); | ||
215 | } | ||
216 | |||
217 | /* Mark the object as cached */ | ||
218 | |||
219 | ACPI_MEMSET(object, 0xCA, cache->object_size); | ||
220 | ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_CACHED); | ||
221 | |||
222 | /* Put the object at the head of the cache list */ | ||
223 | |||
224 | *(ACPI_CAST_INDIRECT_PTR(char, | ||
225 | &(((char *)object)[cache-> | ||
226 | link_offset]))) = | ||
227 | cache->list_head; | ||
228 | cache->list_head = object; | ||
229 | cache->current_depth++; | ||
230 | |||
231 | (void)acpi_ut_release_mutex(ACPI_MTX_CACHES); | ||
232 | } | ||
233 | |||
234 | return (AE_OK); | ||
235 | } | ||
236 | |||
237 | /******************************************************************************* | ||
238 | * | ||
239 | * FUNCTION: acpi_os_acquire_object | ||
240 | * | ||
241 | * PARAMETERS: cache - Handle to cache object | ||
242 | * | ||
243 | * RETURN: the acquired object. NULL on error | ||
244 | * | ||
245 | * DESCRIPTION: Get an object from the specified cache. If cache is empty, | ||
246 | * the object is allocated. | ||
247 | * | ||
248 | ******************************************************************************/ | ||
249 | |||
250 | void *acpi_os_acquire_object(struct acpi_memory_list *cache) | ||
251 | { | ||
252 | acpi_status status; | ||
253 | void *object; | ||
254 | |||
255 | ACPI_FUNCTION_NAME(os_acquire_object); | ||
256 | |||
257 | if (!cache) { | ||
258 | return (NULL); | ||
259 | } | ||
260 | |||
261 | status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES); | ||
262 | if (ACPI_FAILURE(status)) { | ||
263 | return (NULL); | ||
264 | } | ||
265 | |||
266 | ACPI_MEM_TRACKING(cache->requests++); | ||
267 | |||
268 | /* Check the cache first */ | ||
269 | |||
270 | if (cache->list_head) { | ||
271 | |||
272 | /* There is an object available, use it */ | ||
273 | |||
274 | object = cache->list_head; | ||
275 | cache->list_head = *(ACPI_CAST_INDIRECT_PTR(char, | ||
276 | &(((char *) | ||
277 | object)[cache-> | ||
278 | link_offset]))); | ||
279 | |||
280 | cache->current_depth--; | ||
281 | |||
282 | ACPI_MEM_TRACKING(cache->hits++); | ||
283 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, | ||
284 | "Object %p from %s cache\n", object, | ||
285 | cache->list_name)); | ||
286 | |||
287 | status = acpi_ut_release_mutex(ACPI_MTX_CACHES); | ||
288 | if (ACPI_FAILURE(status)) { | ||
289 | return (NULL); | ||
290 | } | ||
291 | |||
292 | /* Clear (zero) the previously used Object */ | ||
293 | |||
294 | ACPI_MEMSET(object, 0, cache->object_size); | ||
295 | } else { | ||
296 | /* The cache is empty, create a new object */ | ||
297 | |||
298 | ACPI_MEM_TRACKING(cache->total_allocated++); | ||
299 | |||
300 | #ifdef ACPI_DBG_TRACK_ALLOCATIONS | ||
301 | if ((cache->total_allocated - cache->total_freed) > | ||
302 | cache->max_occupied) { | ||
303 | cache->max_occupied = | ||
304 | cache->total_allocated - cache->total_freed; | ||
305 | } | ||
306 | #endif | ||
307 | |||
308 | /* Avoid deadlock with ACPI_ALLOCATE_ZEROED */ | ||
309 | |||
310 | status = acpi_ut_release_mutex(ACPI_MTX_CACHES); | ||
311 | if (ACPI_FAILURE(status)) { | ||
312 | return (NULL); | ||
313 | } | ||
314 | |||
315 | object = ACPI_ALLOCATE_ZEROED(cache->object_size); | ||
316 | if (!object) { | ||
317 | return (NULL); | ||
318 | } | ||
319 | } | ||
320 | |||
321 | return (object); | ||
322 | } | ||
323 | #endif /* ACPI_USE_LOCAL_CACHE */ | ||
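A hedged sketch of the lifecycle these four entry points provide, applicable only when ACPI_USE_LOCAL_CACHE is defined; the cache name, object size, and depth are arbitrary example values:

#include <acpi/acpi.h>

/* Hedged sketch: create a cache, acquire and release one object, then
 * delete the cache (which purges any cached objects first). */
static acpi_status cache_example(void)
{
        struct acpi_memory_list *cache;
        void *object;
        acpi_status status;

        status = acpi_os_create_cache("Example", 64, 16, &cache);
        if (ACPI_FAILURE(status)) {
                return (status);
        }

        object = acpi_os_acquire_object(cache);        /* allocates when empty */
        if (object) {
                (void)acpi_os_release_object(cache, object);  /* back to cache */
        }

        return (acpi_os_delete_cache(cache));          /* purges, then frees */
}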
diff --git a/drivers/acpi/acpica/utclib.c b/drivers/acpi/acpica/utclib.c new file mode 100644 index 000000000000..19ea4755aa73 --- /dev/null +++ b/drivers/acpi/acpica/utclib.c | |||
@@ -0,0 +1,749 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: cmclib - Local implementation of C library functions | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2012, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #include <acpi/acpi.h> | ||
45 | #include "accommon.h" | ||
46 | |||
47 | /* | ||
48 | * These implementations of standard C Library routines can optionally be | ||
49 | * used if a C library is not available. In general, they are less efficient | ||
50 | * than an inline or assembly implementation | ||
51 | */ | ||
52 | |||
53 | #define _COMPONENT ACPI_UTILITIES | ||
54 | ACPI_MODULE_NAME("cmclib") | ||
55 | |||
56 | #ifndef ACPI_USE_SYSTEM_CLIBRARY | ||
57 | #define NEGATIVE 1 | ||
58 | #define POSITIVE 0 | ||
59 | /******************************************************************************* | ||
60 | * | ||
61 | * FUNCTION: acpi_ut_memcmp (memcmp) | ||
62 | * | ||
63 | * PARAMETERS: buffer1 - First Buffer | ||
64 | * buffer2 - Second Buffer | ||
65 | * count - Maximum # of bytes to compare | ||
66 | * | ||
67 | * RETURN: Index where Buffers mismatched, or 0 if Buffers matched | ||
68 | * | ||
69 | * DESCRIPTION: Compare two Buffers, with a maximum length | ||
70 | * | ||
71 | ******************************************************************************/ | ||
72 | int acpi_ut_memcmp(const char *buffer1, const char *buffer2, acpi_size count) | ||
73 | { | ||
74 | |||
75 | return ((count == ACPI_SIZE_MAX) ? 0 : ((unsigned char)*buffer1 - | ||
76 | (unsigned char)*buffer2)); | ||
77 | } | ||
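As shown, acpi_ut_memcmp returns after examining only the first byte of each buffer, yet its (count == ACPI_SIZE_MAX) test only makes sense once count has been decremented past zero by a byte-comparison loop. A hedged reconstruction of the loop that the return expression implies (an assumption, not the code as committed above):

#include <acpi/acpi.h>

/* Hedged reconstruction: compare up to 'count' bytes; 'count' wraps to
 * ACPI_SIZE_MAX only when every byte matched, which yields the 0 return. */
int acpi_ut_memcmp_sketch(const char *buffer1, const char *buffer2,
                          acpi_size count)
{
        for ( ; count-- && (*buffer1 == *buffer2); buffer1++, buffer2++) {
                ;
        }

        return ((count == ACPI_SIZE_MAX) ? 0 : ((unsigned char)*buffer1 -
                                                (unsigned char)*buffer2));
}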
78 | |||
79 | /******************************************************************************* | ||
80 | * | ||
81 | * FUNCTION: acpi_ut_memcpy (memcpy) | ||
82 | * | ||
83 | * PARAMETERS: dest - Target of the copy | ||
84 | * src - Source buffer to copy | ||
85 | * count - Number of bytes to copy | ||
86 | * | ||
87 | * RETURN: Dest | ||
88 | * | ||
89 | * DESCRIPTION: Copy arbitrary bytes of memory | ||
90 | * | ||
91 | ******************************************************************************/ | ||
92 | |||
93 | void *acpi_ut_memcpy(void *dest, const void *src, acpi_size count) | ||
94 | { | ||
95 | char *new = (char *)dest; | ||
96 | char *old = (char *)src; | ||
97 | |||
98 | while (count) { | ||
99 | *new = *old; | ||
100 | new++; | ||
101 | old++; | ||
102 | count--; | ||
103 | } | ||
104 | |||
105 | return (dest); | ||
106 | } | ||
107 | |||
108 | /******************************************************************************* | ||
109 | * | ||
110 | * FUNCTION: acpi_ut_memset (memset) | ||
111 | * | ||
112 | * PARAMETERS: dest - Buffer to set | ||
113 | * value - Value to set each byte of memory | ||
114 | * count - Number of bytes to set | ||
115 | * | ||
116 | * RETURN: Dest | ||
117 | * | ||
118 | * DESCRIPTION: Initialize a buffer to a known value. | ||
119 | * | ||
120 | ******************************************************************************/ | ||
121 | |||
122 | void *acpi_ut_memset(void *dest, u8 value, acpi_size count) | ||
123 | { | ||
124 | char *new = (char *)dest; | ||
125 | |||
126 | while (count) { | ||
127 | *new = (char)value; | ||
128 | new++; | ||
129 | count--; | ||
130 | } | ||
131 | |||
132 | return (dest); | ||
133 | } | ||
134 | |||
135 | /******************************************************************************* | ||
136 | * | ||
137 | * FUNCTION: acpi_ut_strlen (strlen) | ||
138 | * | ||
139 | * PARAMETERS: string - Null terminated string | ||
140 | * | ||
141 | * RETURN: Length | ||
142 | * | ||
143 | * DESCRIPTION: Returns the length of the input string | ||
144 | * | ||
145 | ******************************************************************************/ | ||
146 | |||
147 | acpi_size acpi_ut_strlen(const char *string) | ||
148 | { | ||
149 | u32 length = 0; | ||
150 | |||
151 | /* Count the string until a null is encountered */ | ||
152 | |||
153 | while (*string) { | ||
154 | length++; | ||
155 | string++; | ||
156 | } | ||
157 | |||
158 | return (length); | ||
159 | } | ||
160 | |||
161 | /******************************************************************************* | ||
162 | * | ||
163 | * FUNCTION: acpi_ut_strcpy (strcpy) | ||
164 | * | ||
165 | * PARAMETERS: dst_string - Target of the copy | ||
166 | * src_string - The source string to copy | ||
167 | * | ||
168 | * RETURN: dst_string | ||
169 | * | ||
170 | * DESCRIPTION: Copy a null terminated string | ||
171 | * | ||
172 | ******************************************************************************/ | ||
173 | |||
174 | char *acpi_ut_strcpy(char *dst_string, const char *src_string) | ||
175 | { | ||
176 | char *string = dst_string; | ||
177 | |||
178 | /* Move bytes brute force */ | ||
179 | |||
180 | while (*src_string) { | ||
181 | *string = *src_string; | ||
182 | |||
183 | string++; | ||
184 | src_string++; | ||
185 | } | ||
186 | |||
187 | /* Null terminate */ | ||
188 | |||
189 | *string = 0; | ||
190 | return (dst_string); | ||
191 | } | ||
192 | |||
193 | /******************************************************************************* | ||
194 | * | ||
195 | * FUNCTION: acpi_ut_strncpy (strncpy) | ||
196 | * | ||
197 | * PARAMETERS: dst_string - Target of the copy | ||
198 | * src_string - The source string to copy | ||
199 | * count - Maximum # of bytes to copy | ||
200 | * | ||
201 | * RETURN: dst_string | ||
202 | * | ||
203 | * DESCRIPTION: Copy a null terminated string, with a maximum length | ||
204 | * | ||
205 | ******************************************************************************/ | ||
206 | |||
207 | char *acpi_ut_strncpy(char *dst_string, const char *src_string, acpi_size count) | ||
208 | { | ||
209 | char *string = dst_string; | ||
210 | |||
211 | /* Copy the string */ | ||
212 | |||
213 | for (string = dst_string; | ||
214 | count && (count--, (*string++ = *src_string++));) {; | ||
215 | } | ||
216 | |||
217 | /* Pad with nulls if necessary */ | ||
218 | |||
219 | while (count--) { | ||
220 | *string = 0; | ||
221 | string++; | ||
222 | } | ||
223 | |||
224 | /* Return original pointer */ | ||
225 | |||
226 | return (dst_string); | ||
227 | } | ||
228 | |||
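For readers tracing the loop above: the padding rule is the standard strncpy contract, i.e. a short source leaves the tail of the destination zero-filled, while an over-long source leaves the destination unterminated. A standalone sketch of that behavior using the C library strncpy (not the ACPICA helper; buffer sizes are arbitrary):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[8];

	/* Source shorter than the count: the remaining bytes are zero-filled */
	strncpy(buf, "ab", sizeof(buf));
	printf("len=%zu last=%d\n", strlen(buf), buf[7]);	/* len=2 last=0 */

	/* Source longer than the count: no terminator is written */
	strncpy(buf, "abcdefghij", sizeof(buf));
	printf("first 8 bytes: %.8s\n", buf);	/* buf is not null-terminated here */
	return 0;
}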
229 | /******************************************************************************* | ||
230 | * | ||
231 | * FUNCTION: acpi_ut_strcmp (strcmp) | ||
232 | * | ||
233 | * PARAMETERS: string1 - First string | ||
234 | * string2 - Second string | ||
235 | * | ||
236 | * RETURN: Difference of the first mismatched characters, or 0 if strings matched | ||
237 | * | ||
238 | * DESCRIPTION: Compare two null terminated strings | ||
239 | * | ||
240 | ******************************************************************************/ | ||
241 | |||
242 | int acpi_ut_strcmp(const char *string1, const char *string2) | ||
243 | { | ||
244 | |||
245 | for (; (*string1 == *string2); string2++) { | ||
246 | if (!*string1++) { | ||
247 | return (0); | ||
248 | } | ||
249 | } | ||
250 | |||
251 | return ((unsigned char)*string1 - (unsigned char)*string2); | ||
252 | } | ||
253 | |||
254 | #ifdef ACPI_FUTURE_IMPLEMENTATION | ||
255 | /* Not used at this time */ | ||
256 | /******************************************************************************* | ||
257 | * | ||
258 | * FUNCTION: acpi_ut_strchr (strchr) | ||
259 | * | ||
260 | * PARAMETERS: string - Search string | ||
261 | * ch - character to search for | ||
262 | * | ||
263 | * RETURN: Ptr to char or NULL if not found | ||
264 | * | ||
265 | * DESCRIPTION: Search a string for a character | ||
266 | * | ||
267 | ******************************************************************************/ | ||
268 | |||
269 | char *acpi_ut_strchr(const char *string, int ch) | ||
270 | { | ||
271 | |||
272 | for (; (*string); string++) { | ||
273 | if ((*string) == (char)ch) { | ||
274 | return ((char *)string); | ||
275 | } | ||
276 | } | ||
277 | |||
278 | return (NULL); | ||
279 | } | ||
280 | #endif | ||
281 | |||
282 | /******************************************************************************* | ||
283 | * | ||
284 | * FUNCTION: acpi_ut_strncmp (strncmp) | ||
285 | * | ||
286 | * PARAMETERS: string1 - First string | ||
287 | * string2 - Second string | ||
288 | * count - Maximum # of bytes to compare | ||
289 | * | ||
290 | * RETURN: Difference of the first mismatched characters, or 0 if strings matched | ||
291 | * | ||
292 | * DESCRIPTION: Compare two null terminated strings, with a maximum length | ||
293 | * | ||
294 | ******************************************************************************/ | ||
295 | |||
296 | int acpi_ut_strncmp(const char *string1, const char *string2, acpi_size count) | ||
297 | { | ||
298 | |||
299 | for (; count-- && (*string1 == *string2); string2++) { | ||
300 | if (!*string1++) { | ||
301 | return (0); | ||
302 | } | ||
303 | } | ||
304 | |||
305 | return ((count == ACPI_SIZE_MAX) ? 0 : ((unsigned char)*string1 - | ||
306 | (unsigned char)*string2)); | ||
307 | } | ||
308 | |||
309 | /******************************************************************************* | ||
310 | * | ||
311 | * FUNCTION: acpi_ut_strcat (Strcat) | ||
312 | * | ||
313 | * PARAMETERS: dst_string - Target of the copy | ||
314 | * src_string - The source string to copy | ||
315 | * | ||
316 | * RETURN: dst_string | ||
317 | * | ||
318 | * DESCRIPTION: Append a null terminated string to a null terminated string | ||
319 | * | ||
320 | ******************************************************************************/ | ||
321 | |||
322 | char *acpi_ut_strcat(char *dst_string, const char *src_string) | ||
323 | { | ||
324 | char *string; | ||
325 | |||
326 | /* Find end of the destination string */ | ||
327 | |||
328 | for (string = dst_string; *string++;) {; | ||
329 | } | ||
330 | |||
331 | /* Concatenate the string */ | ||
332 | |||
333 | for (--string; (*string++ = *src_string++);) {; | ||
334 | } | ||
335 | |||
336 | return (dst_string); | ||
337 | } | ||
338 | |||
339 | /******************************************************************************* | ||
340 | * | ||
341 | * FUNCTION: acpi_ut_strncat (strncat) | ||
342 | * | ||
343 | * PARAMETERS: dst_string - Target of the copy | ||
344 | * src_string - The source string to copy | ||
345 | * count - Maximum # of bytes to copy | ||
346 | * | ||
347 | * RETURN: dst_string | ||
348 | * | ||
349 | * DESCRIPTION: Append a null terminated string to a null terminated string, | ||
350 | * with a maximum count. | ||
351 | * | ||
352 | ******************************************************************************/ | ||
353 | |||
354 | char *acpi_ut_strncat(char *dst_string, const char *src_string, acpi_size count) | ||
355 | { | ||
356 | char *string; | ||
357 | |||
358 | if (count) { | ||
359 | |||
360 | /* Find end of the destination string */ | ||
361 | |||
362 | for (string = dst_string; *string++;) {; | ||
363 | } | ||
364 | |||
365 | /* Concatenate the string */ | ||
366 | |||
367 | for (--string; (*string++ = *src_string++) && --count;) {; | ||
368 | } | ||
369 | |||
370 | /* Null terminate if necessary */ | ||
371 | |||
372 | if (!count) { | ||
373 | *string = 0; | ||
374 | } | ||
375 | } | ||
376 | |||
377 | return (dst_string); | ||
378 | } | ||
379 | |||
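Like the standard strncat, the routine above appends at most count characters and then a terminator (either the copied null when the source ends early, or the explicit store when count runs out). A short standalone reminder of those semantics using the C library call, not the ACPICA helper:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char dst[16] = "acpi";

	/* Appends at most 3 characters of "_tables", then a terminator */
	strncat(dst, "_tables", 3);
	printf("%s\n", dst);	/* prints "acpi_ta" */
	return 0;
}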
380 | /******************************************************************************* | ||
381 | * | ||
382 | * FUNCTION: acpi_ut_strstr (strstr) | ||
383 | * | ||
384 | * PARAMETERS: string1 - Target string | ||
385 | * string2 - Substring to search for | ||
386 | * | ||
387 | * RETURN: Where substring match starts, Null if no match found | ||
388 | * | ||
389 | * DESCRIPTION: Checks if String1 begins with String2. This is not a full | ||
390 | * implementation of strstr (it matches only at the start of | ||
391 | * String1), but is sufficient for command matching | ||
392 | * | ||
393 | ******************************************************************************/ | ||
394 | |||
395 | char *acpi_ut_strstr(char *string1, char *string2) | ||
396 | { | ||
397 | char *string; | ||
398 | |||
399 | if (acpi_ut_strlen(string2) > acpi_ut_strlen(string1)) { | ||
400 | return (NULL); | ||
401 | } | ||
402 | |||
403 | /* Compare the leading characters of String1 against all of String2 */ | ||
404 | |||
405 | for (string = string1; *string2;) { | ||
406 | if (*string2 != *string) { | ||
407 | return (NULL); | ||
408 | } | ||
409 | |||
410 | string2++; | ||
411 | string++; | ||
412 | } | ||
413 | |||
414 | return (string1); | ||
415 | } | ||
416 | |||
417 | /******************************************************************************* | ||
418 | * | ||
419 | * FUNCTION: acpi_ut_strtoul (strtoul) | ||
420 | * | ||
421 | * PARAMETERS: string - Null terminated string | ||
422 | * terminator - Where a pointer to the terminating byte is | ||
423 | * returned | ||
424 | * base - Radix of the string | ||
425 | * | ||
426 | * RETURN: Converted value | ||
427 | * | ||
428 | * DESCRIPTION: Convert a string into a 32-bit unsigned value. | ||
429 | * Note: use acpi_ut_strtoul64 for 64-bit integers. | ||
430 | * | ||
431 | ******************************************************************************/ | ||
432 | |||
433 | u32 acpi_ut_strtoul(const char *string, char **terminator, u32 base) | ||
434 | { | ||
435 | u32 converted = 0; | ||
436 | u32 index; | ||
437 | u32 sign; | ||
438 | const char *string_start; | ||
439 | u32 return_value = 0; | ||
440 | acpi_status status = AE_OK; | ||
441 | |||
442 | /* | ||
443 | * Save the value of the pointer to the buffer's first | ||
444 | * character, and then skip over any white space | ||
445 | * in the buffer: | ||
446 | */ | ||
447 | string_start = string; | ||
448 | while (ACPI_IS_SPACE(*string) || *string == '\t') { | ||
449 | ++string; | ||
450 | } | ||
451 | |||
452 | /* | ||
453 | * The buffer may contain an optional plus or minus sign. | ||
454 | * If it does, then skip over it but remember what it was: | ||
455 | */ | ||
456 | if (*string == '-') { | ||
457 | sign = NEGATIVE; | ||
458 | ++string; | ||
459 | } else if (*string == '+') { | ||
460 | ++string; | ||
461 | sign = POSITIVE; | ||
462 | } else { | ||
463 | sign = POSITIVE; | ||
464 | } | ||
465 | |||
466 | /* | ||
467 | * If the input parameter Base is zero, then we need to | ||
468 | * determine if it is octal, decimal, or hexadecimal: | ||
469 | */ | ||
470 | if (base == 0) { | ||
471 | if (*string == '0') { | ||
472 | if (acpi_ut_to_lower(*(++string)) == 'x') { | ||
473 | base = 16; | ||
474 | ++string; | ||
475 | } else { | ||
476 | base = 8; | ||
477 | } | ||
478 | } else { | ||
479 | base = 10; | ||
480 | } | ||
481 | } else if (base < 2 || base > 36) { | ||
482 | /* | ||
483 | * The specified Base parameter is not in the domain of | ||
484 | * this function: | ||
485 | */ | ||
486 | goto done; | ||
487 | } | ||
488 | |||
489 | /* | ||
490 | * For octal and hexadecimal bases, skip over the leading | ||
491 | * 0 or 0x, if they are present. | ||
492 | */ | ||
493 | if (base == 8 && *string == '0') { | ||
494 | string++; | ||
495 | } | ||
496 | |||
497 | if (base == 16 && | ||
498 | *string == '0' && acpi_ut_to_lower(*(++string)) == 'x') { | ||
499 | string++; | ||
500 | } | ||
501 | |||
502 | /* | ||
503 | * Main loop: convert the string to an unsigned long: | ||
504 | */ | ||
505 | while (*string) { | ||
506 | if (ACPI_IS_DIGIT(*string)) { | ||
507 | index = (u32)((u8)*string - '0'); | ||
508 | } else { | ||
509 | index = (u32)acpi_ut_to_upper(*string); | ||
510 | if (ACPI_IS_UPPER(index)) { | ||
511 | index = index - 'A' + 10; | ||
512 | } else { | ||
513 | goto done; | ||
514 | } | ||
515 | } | ||
516 | |||
517 | if (index >= base) { | ||
518 | goto done; | ||
519 | } | ||
520 | |||
521 | /* | ||
522 | * Check to see if value is out of range: | ||
523 | */ | ||
524 | |||
525 | if (return_value > ((ACPI_UINT32_MAX - (u32)index) / (u32)base)) { | ||
526 | status = AE_ERROR; | ||
527 | return_value = 0; /* reset */ | ||
528 | } else { | ||
529 | return_value *= base; | ||
530 | return_value += index; | ||
531 | converted = 1; | ||
532 | } | ||
533 | |||
534 | ++string; | ||
535 | } | ||
536 | |||
537 | done: | ||
538 | /* | ||
539 | * If appropriate, update the caller's pointer to the next | ||
540 | * unconverted character in the buffer. | ||
541 | */ | ||
542 | if (terminator) { | ||
543 | if (converted == 0 && return_value == 0 && string != NULL) { | ||
544 | *terminator = (char *)string_start; | ||
545 | } else { | ||
546 | *terminator = (char *)string; | ||
547 | } | ||
548 | } | ||
549 | |||
550 | if (status == AE_ERROR) { | ||
551 | return_value = ACPI_UINT32_MAX; | ||
552 | } | ||
553 | |||
554 | /* | ||
555 | * If a minus sign was present, then "the conversion is negated": | ||
556 | */ | ||
557 | if (sign == NEGATIVE) { | ||
558 | return_value = (ACPI_UINT32_MAX - return_value) + 1; | ||
559 | } | ||
560 | |||
561 | return (return_value); | ||
562 | } | ||
563 | |||
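The base-selection rules above for base == 0 (hexadecimal after a "0x" prefix, octal after a bare leading "0", decimal otherwise) and the terminator-pointer handling follow the usual strtoul contract; a standalone sketch with the C library routine rather than acpi_ut_strtoul, input strings chosen arbitrarily:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *end;

	/* Base 0: the prefix decides the radix */
	printf("%lu\n", strtoul("0x1A", &end, 0));	/* 26 (hex)     */
	printf("%lu\n", strtoul("017", &end, 0));	/* 15 (octal)   */
	printf("%lu\n", strtoul("17", &end, 0));	/* 17 (decimal) */

	/* The terminator pointer is left at the first unconverted byte */
	(void)strtoul("42abc", &end, 10);
	printf("stopped at \"%s\"\n", end);		/* "abc" */
	return 0;
}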
564 | /******************************************************************************* | ||
565 | * | ||
566 | * FUNCTION: acpi_ut_to_upper (TOUPPER) | ||
567 | * | ||
568 | * PARAMETERS: c - Character to convert | ||
569 | * | ||
570 | * RETURN: Converted character as an int | ||
571 | * | ||
572 | * DESCRIPTION: Convert character to uppercase | ||
573 | * | ||
574 | ******************************************************************************/ | ||
575 | |||
576 | int acpi_ut_to_upper(int c) | ||
577 | { | ||
578 | |||
579 | return (ACPI_IS_LOWER(c) ? ((c) - 0x20) : (c)); | ||
580 | } | ||
581 | |||
582 | /******************************************************************************* | ||
583 | * | ||
584 | * FUNCTION: acpi_ut_to_lower (TOLOWER) | ||
585 | * | ||
586 | * PARAMETERS: c - Character to convert | ||
587 | * | ||
588 | * RETURN: Converted character as an int | ||
589 | * | ||
590 | * DESCRIPTION: Convert character to lowercase | ||
591 | * | ||
592 | ******************************************************************************/ | ||
593 | |||
594 | int acpi_ut_to_lower(int c) | ||
595 | { | ||
596 | |||
597 | return (ACPI_IS_UPPER(c) ? ((c) + 0x20) : (c)); | ||
598 | } | ||
599 | |||
600 | /******************************************************************************* | ||
601 | * | ||
602 | * FUNCTION: is* functions | ||
603 | * | ||
604 | * DESCRIPTION: is* functions use the ctype table below | ||
605 | * | ||
606 | ******************************************************************************/ | ||
607 | |||
608 | const u8 _acpi_ctype[257] = { | ||
609 | _ACPI_CN, /* 0x00 0 NUL */ | ||
610 | _ACPI_CN, /* 0x01 1 SOH */ | ||
611 | _ACPI_CN, /* 0x02 2 STX */ | ||
612 | _ACPI_CN, /* 0x03 3 ETX */ | ||
613 | _ACPI_CN, /* 0x04 4 EOT */ | ||
614 | _ACPI_CN, /* 0x05 5 ENQ */ | ||
615 | _ACPI_CN, /* 0x06 6 ACK */ | ||
616 | _ACPI_CN, /* 0x07 7 BEL */ | ||
617 | _ACPI_CN, /* 0x08 8 BS */ | ||
618 | _ACPI_CN | _ACPI_SP, /* 0x09 9 TAB */ | ||
619 | _ACPI_CN | _ACPI_SP, /* 0x0A 10 LF */ | ||
620 | _ACPI_CN | _ACPI_SP, /* 0x0B 11 VT */ | ||
621 | _ACPI_CN | _ACPI_SP, /* 0x0C 12 FF */ | ||
622 | _ACPI_CN | _ACPI_SP, /* 0x0D 13 CR */ | ||
623 | _ACPI_CN, /* 0x0E 14 SO */ | ||
624 | _ACPI_CN, /* 0x0F 15 SI */ | ||
625 | _ACPI_CN, /* 0x10 16 DLE */ | ||
626 | _ACPI_CN, /* 0x11 17 DC1 */ | ||
627 | _ACPI_CN, /* 0x12 18 DC2 */ | ||
628 | _ACPI_CN, /* 0x13 19 DC3 */ | ||
629 | _ACPI_CN, /* 0x14 20 DC4 */ | ||
630 | _ACPI_CN, /* 0x15 21 NAK */ | ||
631 | _ACPI_CN, /* 0x16 22 SYN */ | ||
632 | _ACPI_CN, /* 0x17 23 ETB */ | ||
633 | _ACPI_CN, /* 0x18 24 CAN */ | ||
634 | _ACPI_CN, /* 0x19 25 EM */ | ||
635 | _ACPI_CN, /* 0x1A 26 SUB */ | ||
636 | _ACPI_CN, /* 0x1B 27 ESC */ | ||
637 | _ACPI_CN, /* 0x1C 28 FS */ | ||
638 | _ACPI_CN, /* 0x1D 29 GS */ | ||
639 | _ACPI_CN, /* 0x1E 30 RS */ | ||
640 | _ACPI_CN, /* 0x1F 31 US */ | ||
641 | _ACPI_XS | _ACPI_SP, /* 0x20 32 ' ' */ | ||
642 | _ACPI_PU, /* 0x21 33 '!' */ | ||
643 | _ACPI_PU, /* 0x22 34 '"' */ | ||
644 | _ACPI_PU, /* 0x23 35 '#' */ | ||
645 | _ACPI_PU, /* 0x24 36 '$' */ | ||
646 | _ACPI_PU, /* 0x25 37 '%' */ | ||
647 | _ACPI_PU, /* 0x26 38 '&' */ | ||
648 | _ACPI_PU, /* 0x27 39 ''' */ | ||
649 | _ACPI_PU, /* 0x28 40 '(' */ | ||
650 | _ACPI_PU, /* 0x29 41 ')' */ | ||
651 | _ACPI_PU, /* 0x2A 42 '*' */ | ||
652 | _ACPI_PU, /* 0x2B 43 '+' */ | ||
653 | _ACPI_PU, /* 0x2C 44 ',' */ | ||
654 | _ACPI_PU, /* 0x2D 45 '-' */ | ||
655 | _ACPI_PU, /* 0x2E 46 '.' */ | ||
656 | _ACPI_PU, /* 0x2F 47 '/' */ | ||
657 | _ACPI_XD | _ACPI_DI, /* 0x30 48 '0' */ | ||
658 | _ACPI_XD | _ACPI_DI, /* 0x31 49 '1' */ | ||
659 | _ACPI_XD | _ACPI_DI, /* 0x32 50 '2' */ | ||
660 | _ACPI_XD | _ACPI_DI, /* 0x33 51 '3' */ | ||
661 | _ACPI_XD | _ACPI_DI, /* 0x34 52 '4' */ | ||
662 | _ACPI_XD | _ACPI_DI, /* 0x35 53 '5' */ | ||
663 | _ACPI_XD | _ACPI_DI, /* 0x36 54 '6' */ | ||
664 | _ACPI_XD | _ACPI_DI, /* 0x37 55 '7' */ | ||
665 | _ACPI_XD | _ACPI_DI, /* 0x38 56 '8' */ | ||
666 | _ACPI_XD | _ACPI_DI, /* 0x39 57 '9' */ | ||
667 | _ACPI_PU, /* 0x3A 58 ':' */ | ||
668 | _ACPI_PU, /* 0x3B 59 ';' */ | ||
669 | _ACPI_PU, /* 0x3C 60 '<' */ | ||
670 | _ACPI_PU, /* 0x3D 61 '=' */ | ||
671 | _ACPI_PU, /* 0x3E 62 '>' */ | ||
672 | _ACPI_PU, /* 0x3F 63 '?' */ | ||
673 | _ACPI_PU, /* 0x40 64 '@' */ | ||
674 | _ACPI_XD | _ACPI_UP, /* 0x41 65 'A' */ | ||
675 | _ACPI_XD | _ACPI_UP, /* 0x42 66 'B' */ | ||
676 | _ACPI_XD | _ACPI_UP, /* 0x43 67 'C' */ | ||
677 | _ACPI_XD | _ACPI_UP, /* 0x44 68 'D' */ | ||
678 | _ACPI_XD | _ACPI_UP, /* 0x45 69 'E' */ | ||
679 | _ACPI_XD | _ACPI_UP, /* 0x46 70 'F' */ | ||
680 | _ACPI_UP, /* 0x47 71 'G' */ | ||
681 | _ACPI_UP, /* 0x48 72 'H' */ | ||
682 | _ACPI_UP, /* 0x49 73 'I' */ | ||
683 | _ACPI_UP, /* 0x4A 74 'J' */ | ||
684 | _ACPI_UP, /* 0x4B 75 'K' */ | ||
685 | _ACPI_UP, /* 0x4C 76 'L' */ | ||
686 | _ACPI_UP, /* 0x4D 77 'M' */ | ||
687 | _ACPI_UP, /* 0x4E 78 'N' */ | ||
688 | _ACPI_UP, /* 0x4F 79 'O' */ | ||
689 | _ACPI_UP, /* 0x50 80 'P' */ | ||
690 | _ACPI_UP, /* 0x51 81 'Q' */ | ||
691 | _ACPI_UP, /* 0x52 82 'R' */ | ||
692 | _ACPI_UP, /* 0x53 83 'S' */ | ||
693 | _ACPI_UP, /* 0x54 84 'T' */ | ||
694 | _ACPI_UP, /* 0x55 85 'U' */ | ||
695 | _ACPI_UP, /* 0x56 86 'V' */ | ||
696 | _ACPI_UP, /* 0x57 87 'W' */ | ||
697 | _ACPI_UP, /* 0x58 88 'X' */ | ||
698 | _ACPI_UP, /* 0x59 89 'Y' */ | ||
699 | _ACPI_UP, /* 0x5A 90 'Z' */ | ||
700 | _ACPI_PU, /* 0x5B 91 '[' */ | ||
701 | _ACPI_PU, /* 0x5C 92 '\' */ | ||
702 | _ACPI_PU, /* 0x5D 93 ']' */ | ||
703 | _ACPI_PU, /* 0x5E 94 '^' */ | ||
704 | _ACPI_PU, /* 0x5F 95 '_' */ | ||
705 | _ACPI_PU, /* 0x60 96 '`' */ | ||
706 | _ACPI_XD | _ACPI_LO, /* 0x61 97 'a' */ | ||
707 | _ACPI_XD | _ACPI_LO, /* 0x62 98 'b' */ | ||
708 | _ACPI_XD | _ACPI_LO, /* 0x63 99 'c' */ | ||
709 | _ACPI_XD | _ACPI_LO, /* 0x64 100 'd' */ | ||
710 | _ACPI_XD | _ACPI_LO, /* 0x65 101 'e' */ | ||
711 | _ACPI_XD | _ACPI_LO, /* 0x66 102 'f' */ | ||
712 | _ACPI_LO, /* 0x67 103 'g' */ | ||
713 | _ACPI_LO, /* 0x68 104 'h' */ | ||
714 | _ACPI_LO, /* 0x69 105 'i' */ | ||
715 | _ACPI_LO, /* 0x6A 106 'j' */ | ||
716 | _ACPI_LO, /* 0x6B 107 'k' */ | ||
717 | _ACPI_LO, /* 0x6C 108 'l' */ | ||
718 | _ACPI_LO, /* 0x6D 109 'm' */ | ||
719 | _ACPI_LO, /* 0x6E 110 'n' */ | ||
720 | _ACPI_LO, /* 0x6F 111 'o' */ | ||
721 | _ACPI_LO, /* 0x70 112 'p' */ | ||
722 | _ACPI_LO, /* 0x71 113 'q' */ | ||
723 | _ACPI_LO, /* 0x72 114 'r' */ | ||
724 | _ACPI_LO, /* 0x73 115 's' */ | ||
725 | _ACPI_LO, /* 0x74 116 't' */ | ||
726 | _ACPI_LO, /* 0x75 117 'u' */ | ||
727 | _ACPI_LO, /* 0x76 118 'v' */ | ||
728 | _ACPI_LO, /* 0x77 119 'w' */ | ||
729 | _ACPI_LO, /* 0x78 120 'x' */ | ||
730 | _ACPI_LO, /* 0x79 121 'y' */ | ||
731 | _ACPI_LO, /* 0x7A 122 'z' */ | ||
732 | _ACPI_PU, /* 0x7B 123 '{' */ | ||
733 | _ACPI_PU, /* 0x7C 124 '|' */ | ||
734 | _ACPI_PU, /* 0x7D 125 '}' */ | ||
735 | _ACPI_PU, /* 0x7E 126 '~' */ | ||
736 | _ACPI_CN, /* 0x7F 127 DEL */ | ||
737 | |||
738 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x80 to 0x8F */ | ||
739 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x90 to 0x9F */ | ||
740 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xA0 to 0xAF */ | ||
741 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xB0 to 0xBF */ | ||
742 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xC0 to 0xCF */ | ||
743 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xD0 to 0xDF */ | ||
744 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xE0 to 0xEF */ | ||
745 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xF0 to 0xFF */ | ||
746 | 0 /* 0x100 */ | ||
747 | }; | ||
748 | |||
749 | #endif /* ACPI_USE_SYSTEM_CLIBRARY */ | ||
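The _ACPI_* flag values used by the table live in an ACPICA header that is not part of this hunk, but the mechanism is simply a flag byte indexed by character value. A minimal standalone sketch with invented names and values (MY_DI/MY_XD/MY_SP are illustrative, not the real ACPICA constants), and only a few table entries filled in:

#include <stdio.h>

#define MY_DI 0x04	/* decimal digit */
#define MY_XD 0x80	/* hex digit     */
#define MY_SP 0x08	/* whitespace    */

/* Indexed directly by byte value; the spare slot mirrors entry 0x100 above */
static const unsigned char my_ctype[257] = {
	['\t'] = MY_SP, [' '] = MY_SP,
	['0'] = MY_DI | MY_XD, ['9'] = MY_DI | MY_XD,
	['a'] = MY_XD, ['A'] = MY_XD,
};

#define MY_IS_DIGIT(c)	(my_ctype[(unsigned char)(c)] & MY_DI)
#define MY_IS_SPACE(c)	(my_ctype[(unsigned char)(c)] & MY_SP)

int main(void)
{
	printf("%d %d %d\n", !!MY_IS_DIGIT('9'), !!MY_IS_DIGIT('z'),
	       !!MY_IS_SPACE(' '));	/* prints "1 0 1" */
	return 0;
}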
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c index e810894149ae..5d95166245ae 100644 --- a/drivers/acpi/acpica/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c | |||
@@ -47,8 +47,9 @@ | |||
47 | 47 | ||
48 | #define _COMPONENT ACPI_UTILITIES | 48 | #define _COMPONENT ACPI_UTILITIES |
49 | ACPI_MODULE_NAME("utdebug") | 49 | ACPI_MODULE_NAME("utdebug") |
50 | |||
50 | #ifdef ACPI_DEBUG_OUTPUT | 51 | #ifdef ACPI_DEBUG_OUTPUT |
51 | static acpi_thread_id acpi_gbl_prev_thread_id; | 52 | static acpi_thread_id acpi_gbl_prev_thread_id = (acpi_thread_id) 0xFFFFFFFF; |
52 | static char *acpi_gbl_fn_entry_str = "----Entry"; | 53 | static char *acpi_gbl_fn_entry_str = "----Entry"; |
53 | static char *acpi_gbl_fn_exit_str = "----Exit-"; | 54 | static char *acpi_gbl_fn_exit_str = "----Exit-"; |
54 | 55 | ||
@@ -109,7 +110,7 @@ void acpi_ut_track_stack_ptr(void) | |||
109 | * RETURN: Updated pointer to the function name | 110 | * RETURN: Updated pointer to the function name |
110 | * | 111 | * |
111 | * DESCRIPTION: Remove the "Acpi" prefix from the function name, if present. | 112 | * DESCRIPTION: Remove the "Acpi" prefix from the function name, if present. |
112 | * This allows compiler macros such as __func__ to be used | 113 | * This allows compiler macros such as __FUNCTION__ to be used |
113 | * with no change to the debug output. | 114 | * with no change to the debug output. |
114 | * | 115 | * |
115 | ******************************************************************************/ | 116 | ******************************************************************************/ |
@@ -222,7 +223,7 @@ ACPI_EXPORT_SYMBOL(acpi_debug_print) | |||
222 | * | 223 | * |
223 | * RETURN: None | 224 | * RETURN: None |
224 | * | 225 | * |
225 | * DESCRIPTION: Print message with no headers. Has same interface as | 226 | * DESCRIPTION: Print message with no headers. Has same interface as |
226 | * debug_print so that the same macros can be used. | 227 | * debug_print so that the same macros can be used. |
227 | * | 228 | * |
228 | ******************************************************************************/ | 229 | ******************************************************************************/ |
@@ -258,7 +259,7 @@ ACPI_EXPORT_SYMBOL(acpi_debug_print_raw) | |||
258 | * | 259 | * |
259 | * RETURN: None | 260 | * RETURN: None |
260 | * | 261 | * |
261 | * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is | 262 | * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is |
262 | * set in debug_level | 263 | * set in debug_level |
263 | * | 264 | * |
264 | ******************************************************************************/ | 265 | ******************************************************************************/ |
@@ -290,7 +291,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_trace) | |||
290 | * | 291 | * |
291 | * RETURN: None | 292 | * RETURN: None |
292 | * | 293 | * |
293 | * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is | 294 | * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is |
294 | * set in debug_level | 295 | * set in debug_level |
295 | * | 296 | * |
296 | ******************************************************************************/ | 297 | ******************************************************************************/ |
@@ -299,6 +300,7 @@ acpi_ut_trace_ptr(u32 line_number, | |||
299 | const char *function_name, | 300 | const char *function_name, |
300 | const char *module_name, u32 component_id, void *pointer) | 301 | const char *module_name, u32 component_id, void *pointer) |
301 | { | 302 | { |
303 | |||
302 | acpi_gbl_nesting_level++; | 304 | acpi_gbl_nesting_level++; |
303 | acpi_ut_track_stack_ptr(); | 305 | acpi_ut_track_stack_ptr(); |
304 | 306 | ||
@@ -319,7 +321,7 @@ acpi_ut_trace_ptr(u32 line_number, | |||
319 | * | 321 | * |
320 | * RETURN: None | 322 | * RETURN: None |
321 | * | 323 | * |
322 | * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is | 324 | * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is |
323 | * set in debug_level | 325 | * set in debug_level |
324 | * | 326 | * |
325 | ******************************************************************************/ | 327 | ******************************************************************************/ |
@@ -350,7 +352,7 @@ acpi_ut_trace_str(u32 line_number, | |||
350 | * | 352 | * |
351 | * RETURN: None | 353 | * RETURN: None |
352 | * | 354 | * |
353 | * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is | 355 | * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is |
354 | * set in debug_level | 356 | * set in debug_level |
355 | * | 357 | * |
356 | ******************************************************************************/ | 358 | ******************************************************************************/ |
@@ -380,7 +382,7 @@ acpi_ut_trace_u32(u32 line_number, | |||
380 | * | 382 | * |
381 | * RETURN: None | 383 | * RETURN: None |
382 | * | 384 | * |
383 | * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is | 385 | * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is |
384 | * set in debug_level | 386 | * set in debug_level |
385 | * | 387 | * |
386 | ******************************************************************************/ | 388 | ******************************************************************************/ |
@@ -412,7 +414,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_exit) | |||
412 | * | 414 | * |
413 | * RETURN: None | 415 | * RETURN: None |
414 | * | 416 | * |
415 | * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is | 417 | * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is |
416 | * set in debug_level. Prints exit status also. | 418 | * set in debug_level. Prints exit status also. |
417 | * | 419 | * |
418 | ******************************************************************************/ | 420 | ******************************************************************************/ |
@@ -453,7 +455,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_status_exit) | |||
453 | * | 455 | * |
454 | * RETURN: None | 456 | * RETURN: None |
455 | * | 457 | * |
456 | * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is | 458 | * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is |
457 | * set in debug_level. Prints exit value also. | 459 | * set in debug_level. Prints exit value also. |
458 | * | 460 | * |
459 | ******************************************************************************/ | 461 | ******************************************************************************/ |
@@ -485,7 +487,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_value_exit) | |||
485 | * | 487 | * |
486 | * RETURN: None | 488 | * RETURN: None |
487 | * | 489 | * |
488 | * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is | 490 | * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is |
489 | * set in debug_level. Prints exit value also. | 491 | * set in debug_level. Prints exit value also. |
490 | * | 492 | * |
491 | ******************************************************************************/ | 493 | ******************************************************************************/ |
@@ -511,7 +513,7 @@ acpi_ut_ptr_exit(u32 line_number, | |||
511 | * PARAMETERS: buffer - Buffer to dump | 513 | * PARAMETERS: buffer - Buffer to dump |
512 | * count - Amount to dump, in bytes | 514 | * count - Amount to dump, in bytes |
513 | * display - BYTE, WORD, DWORD, or QWORD display | 515 | * display - BYTE, WORD, DWORD, or QWORD display |
514 | * component_ID - Caller's component ID | 516 | * offset - Beginning buffer offset (display only) |
515 | * | 517 | * |
516 | * RETURN: None | 518 | * RETURN: None |
517 | * | 519 | * |
@@ -519,7 +521,7 @@ acpi_ut_ptr_exit(u32 line_number, | |||
519 | * | 521 | * |
520 | ******************************************************************************/ | 522 | ******************************************************************************/ |
521 | 523 | ||
522 | void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) | 524 | void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset) |
523 | { | 525 | { |
524 | u32 i = 0; | 526 | u32 i = 0; |
525 | u32 j; | 527 | u32 j; |
@@ -541,7 +543,7 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) | |||
541 | 543 | ||
542 | /* Print current offset */ | 544 | /* Print current offset */ |
543 | 545 | ||
544 | acpi_os_printf("%6.4X: ", i); | 546 | acpi_os_printf("%6.4X: ", (base_offset + i)); |
545 | 547 | ||
546 | /* Print 16 hex chars */ | 548 | /* Print 16 hex chars */ |
547 | 549 | ||
@@ -623,7 +625,7 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) | |||
623 | 625 | ||
624 | /******************************************************************************* | 626 | /******************************************************************************* |
625 | * | 627 | * |
626 | * FUNCTION: acpi_ut_dump_buffer | 628 | * FUNCTION: acpi_ut_debug_dump_buffer |
627 | * | 629 | * |
628 | * PARAMETERS: buffer - Buffer to dump | 630 | * PARAMETERS: buffer - Buffer to dump |
629 | * count - Amount to dump, in bytes | 631 | * count - Amount to dump, in bytes |
@@ -636,7 +638,8 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) | |||
636 | * | 638 | * |
637 | ******************************************************************************/ | 639 | ******************************************************************************/ |
638 | 640 | ||
639 | void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id) | 641 | void |
642 | acpi_ut_debug_dump_buffer(u8 *buffer, u32 count, u32 display, u32 component_id) | ||
640 | { | 643 | { |
641 | 644 | ||
642 | /* Only dump the buffer if tracing is enabled */ | 645 | /* Only dump the buffer if tracing is enabled */ |
@@ -646,5 +649,5 @@ void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id) | |||
646 | return; | 649 | return; |
647 | } | 650 | } |
648 | 651 | ||
649 | acpi_ut_dump_buffer2(buffer, count, display); | 652 | acpi_ut_dump_buffer(buffer, count, display, 0); |
650 | } | 653 | } |
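The only externally visible change from the new base_offset argument is the value printed in the offset column at the start of each row. A standalone sketch of that idea, reduced to a byte-only dump (an illustration, not the ACPICA routine; the 0x40 base is arbitrary):

#include <stdio.h>

/* Print count bytes, 16 per row, labelling rows starting at base_offset */
static void dump_bytes(const unsigned char *buf, unsigned int count,
		       unsigned int base_offset)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		if ((i % 16) == 0)
			printf("%s%6.4X: ", i ? "\n" : "", base_offset + i);
		printf("%02X ", buf[i]);
	}
	printf("\n");
}

int main(void)
{
	unsigned char data[32] = { 0xDE, 0xAD, 0xBE, 0xEF };

	dump_bytes(data, sizeof(data), 0x40);	/* rows labelled 0040:, 0050: */
	return 0;
}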
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c index 5d84e1954575..774c3aefbf5d 100644 --- a/drivers/acpi/acpica/utids.c +++ b/drivers/acpi/acpica/utids.c | |||
@@ -67,10 +67,10 @@ ACPI_MODULE_NAME("utids") | |||
67 | ******************************************************************************/ | 67 | ******************************************************************************/ |
68 | acpi_status | 68 | acpi_status |
69 | acpi_ut_execute_HID(struct acpi_namespace_node *device_node, | 69 | acpi_ut_execute_HID(struct acpi_namespace_node *device_node, |
70 | struct acpica_device_id **return_id) | 70 | struct acpi_pnp_device_id **return_id) |
71 | { | 71 | { |
72 | union acpi_operand_object *obj_desc; | 72 | union acpi_operand_object *obj_desc; |
73 | struct acpica_device_id *hid; | 73 | struct acpi_pnp_device_id *hid; |
74 | u32 length; | 74 | u32 length; |
75 | acpi_status status; | 75 | acpi_status status; |
76 | 76 | ||
@@ -94,16 +94,17 @@ acpi_ut_execute_HID(struct acpi_namespace_node *device_node, | |||
94 | /* Allocate a buffer for the HID */ | 94 | /* Allocate a buffer for the HID */ |
95 | 95 | ||
96 | hid = | 96 | hid = |
97 | ACPI_ALLOCATE_ZEROED(sizeof(struct acpica_device_id) + | 97 | ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) + |
98 | (acpi_size) length); | 98 | (acpi_size) length); |
99 | if (!hid) { | 99 | if (!hid) { |
100 | status = AE_NO_MEMORY; | 100 | status = AE_NO_MEMORY; |
101 | goto cleanup; | 101 | goto cleanup; |
102 | } | 102 | } |
103 | 103 | ||
104 | /* Area for the string starts after DEVICE_ID struct */ | 104 | /* Area for the string starts after PNP_DEVICE_ID struct */ |
105 | 105 | ||
106 | hid->string = ACPI_ADD_PTR(char, hid, sizeof(struct acpica_device_id)); | 106 | hid->string = |
107 | ACPI_ADD_PTR(char, hid, sizeof(struct acpi_pnp_device_id)); | ||
107 | 108 | ||
108 | /* Convert EISAID to a string or simply copy existing string */ | 109 | /* Convert EISAID to a string or simply copy existing string */ |
109 | 110 | ||
@@ -126,6 +127,73 @@ cleanup: | |||
126 | 127 | ||
127 | /******************************************************************************* | 128 | /******************************************************************************* |
128 | * | 129 | * |
130 | * FUNCTION: acpi_ut_execute_SUB | ||
131 | * | ||
132 | * PARAMETERS: device_node - Node for the device | ||
133 | * return_id - Where the _SUB is returned | ||
134 | * | ||
135 | * RETURN: Status | ||
136 | * | ||
137 | * DESCRIPTION: Executes the _SUB control method that returns the subsystem | ||
138 | * ID of the device. The _SUB value is always a string containing | ||
139 | * either a valid PNP or ACPI ID. | ||
140 | * | ||
141 | * NOTE: Internal function, no parameter validation | ||
142 | * | ||
143 | ******************************************************************************/ | ||
144 | |||
145 | acpi_status | ||
146 | acpi_ut_execute_SUB(struct acpi_namespace_node *device_node, | ||
147 | struct acpi_pnp_device_id **return_id) | ||
148 | { | ||
149 | union acpi_operand_object *obj_desc; | ||
150 | struct acpi_pnp_device_id *sub; | ||
151 | u32 length; | ||
152 | acpi_status status; | ||
153 | |||
154 | ACPI_FUNCTION_TRACE(ut_execute_SUB); | ||
155 | |||
156 | status = acpi_ut_evaluate_object(device_node, METHOD_NAME__SUB, | ||
157 | ACPI_BTYPE_STRING, &obj_desc); | ||
158 | if (ACPI_FAILURE(status)) { | ||
159 | return_ACPI_STATUS(status); | ||
160 | } | ||
161 | |||
162 | /* Get the size of the String to be returned, includes null terminator */ | ||
163 | |||
164 | length = obj_desc->string.length + 1; | ||
165 | |||
166 | /* Allocate a buffer for the SUB */ | ||
167 | |||
168 | sub = | ||
169 | ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) + | ||
170 | (acpi_size) length); | ||
171 | if (!sub) { | ||
172 | status = AE_NO_MEMORY; | ||
173 | goto cleanup; | ||
174 | } | ||
175 | |||
176 | /* Area for the string starts after PNP_DEVICE_ID struct */ | ||
177 | |||
178 | sub->string = | ||
179 | ACPI_ADD_PTR(char, sub, sizeof(struct acpi_pnp_device_id)); | ||
180 | |||
181 | /* Simply copy existing string */ | ||
182 | |||
183 | ACPI_STRCPY(sub->string, obj_desc->string.pointer); | ||
184 | sub->length = length; | ||
185 | *return_id = sub; | ||
186 | |||
187 | cleanup: | ||
188 | |||
189 | /* On exit, we must delete the return object */ | ||
190 | |||
191 | acpi_ut_remove_reference(obj_desc); | ||
192 | return_ACPI_STATUS(status); | ||
193 | } | ||
194 | |||
195 | /******************************************************************************* | ||
196 | * | ||
129 | * FUNCTION: acpi_ut_execute_UID | 197 | * FUNCTION: acpi_ut_execute_UID |
130 | * | 198 | * |
131 | * PARAMETERS: device_node - Node for the device | 199 | * PARAMETERS: device_node - Node for the device |
@@ -144,10 +212,10 @@ cleanup: | |||
144 | 212 | ||
145 | acpi_status | 213 | acpi_status |
146 | acpi_ut_execute_UID(struct acpi_namespace_node *device_node, | 214 | acpi_ut_execute_UID(struct acpi_namespace_node *device_node, |
147 | struct acpica_device_id **return_id) | 215 | struct acpi_pnp_device_id **return_id) |
148 | { | 216 | { |
149 | union acpi_operand_object *obj_desc; | 217 | union acpi_operand_object *obj_desc; |
150 | struct acpica_device_id *uid; | 218 | struct acpi_pnp_device_id *uid; |
151 | u32 length; | 219 | u32 length; |
152 | acpi_status status; | 220 | acpi_status status; |
153 | 221 | ||
@@ -171,16 +239,17 @@ acpi_ut_execute_UID(struct acpi_namespace_node *device_node, | |||
171 | /* Allocate a buffer for the UID */ | 239 | /* Allocate a buffer for the UID */ |
172 | 240 | ||
173 | uid = | 241 | uid = |
174 | ACPI_ALLOCATE_ZEROED(sizeof(struct acpica_device_id) + | 242 | ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) + |
175 | (acpi_size) length); | 243 | (acpi_size) length); |
176 | if (!uid) { | 244 | if (!uid) { |
177 | status = AE_NO_MEMORY; | 245 | status = AE_NO_MEMORY; |
178 | goto cleanup; | 246 | goto cleanup; |
179 | } | 247 | } |
180 | 248 | ||
181 | /* Area for the string starts after DEVICE_ID struct */ | 249 | /* Area for the string starts after PNP_DEVICE_ID struct */ |
182 | 250 | ||
183 | uid->string = ACPI_ADD_PTR(char, uid, sizeof(struct acpica_device_id)); | 251 | uid->string = |
252 | ACPI_ADD_PTR(char, uid, sizeof(struct acpi_pnp_device_id)); | ||
184 | 253 | ||
185 | /* Convert an Integer to string, or just copy an existing string */ | 254 | /* Convert an Integer to string, or just copy an existing string */ |
186 | 255 | ||
@@ -226,11 +295,11 @@ cleanup: | |||
226 | 295 | ||
227 | acpi_status | 296 | acpi_status |
228 | acpi_ut_execute_CID(struct acpi_namespace_node *device_node, | 297 | acpi_ut_execute_CID(struct acpi_namespace_node *device_node, |
229 | struct acpica_device_id_list **return_cid_list) | 298 | struct acpi_pnp_device_id_list **return_cid_list) |
230 | { | 299 | { |
231 | union acpi_operand_object **cid_objects; | 300 | union acpi_operand_object **cid_objects; |
232 | union acpi_operand_object *obj_desc; | 301 | union acpi_operand_object *obj_desc; |
233 | struct acpica_device_id_list *cid_list; | 302 | struct acpi_pnp_device_id_list *cid_list; |
234 | char *next_id_string; | 303 | char *next_id_string; |
235 | u32 string_area_size; | 304 | u32 string_area_size; |
236 | u32 length; | 305 | u32 length; |
@@ -288,11 +357,12 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node, | |||
288 | /* | 357 | /* |
289 | * Now that we know the length of the CIDs, allocate return buffer: | 358 | * Now that we know the length of the CIDs, allocate return buffer: |
290 | * 1) Size of the base structure + | 359 | * 1) Size of the base structure + |
291 | * 2) Size of the CID DEVICE_ID array + | 360 | * 2) Size of the CID PNP_DEVICE_ID array + |
292 | * 3) Size of the actual CID strings | 361 | * 3) Size of the actual CID strings |
293 | */ | 362 | */ |
294 | cid_list_size = sizeof(struct acpica_device_id_list) + | 363 | cid_list_size = sizeof(struct acpi_pnp_device_id_list) + |
295 | ((count - 1) * sizeof(struct acpica_device_id)) + string_area_size; | 364 | ((count - 1) * sizeof(struct acpi_pnp_device_id)) + |
365 | string_area_size; | ||
296 | 366 | ||
297 | cid_list = ACPI_ALLOCATE_ZEROED(cid_list_size); | 367 | cid_list = ACPI_ALLOCATE_ZEROED(cid_list_size); |
298 | if (!cid_list) { | 368 | if (!cid_list) { |
@@ -300,10 +370,10 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node, | |||
300 | goto cleanup; | 370 | goto cleanup; |
301 | } | 371 | } |
302 | 372 | ||
303 | /* Area for CID strings starts after the CID DEVICE_ID array */ | 373 | /* Area for CID strings starts after the CID PNP_DEVICE_ID array */ |
304 | 374 | ||
305 | next_id_string = ACPI_CAST_PTR(char, cid_list->ids) + | 375 | next_id_string = ACPI_CAST_PTR(char, cid_list->ids) + |
306 | ((acpi_size) count * sizeof(struct acpica_device_id)); | 376 | ((acpi_size) count * sizeof(struct acpi_pnp_device_id)); |
307 | 377 | ||
308 | /* Copy/convert the CIDs to the return buffer */ | 378 | /* Copy/convert the CIDs to the return buffer */ |
309 | 379 | ||
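The _HID, new _SUB, and _UID helpers all share one allocation pattern: a single ACPI_ALLOCATE_ZEROED call sized for the acpi_pnp_device_id header plus the string, with the string pointer aimed just past the header, so the caller frees both with one call. A standalone sketch of that layout in plain C (calloc/free and a simplified struct stand in for the ACPICA macros and types):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for struct acpi_pnp_device_id */
struct pnp_id {
	unsigned int length;
	char *string;
};

static struct pnp_id *make_id(const char *src)
{
	size_t len = strlen(src) + 1;		/* includes the terminator */
	struct pnp_id *id = calloc(1, sizeof(*id) + len);

	if (!id)
		return NULL;

	/* String area starts immediately after the header */
	id->string = (char *)(id + 1);
	memcpy(id->string, src, len);
	id->length = (unsigned int)len;
	return id;
}

int main(void)
{
	struct pnp_id *id = make_id("PNP0A08");	/* example ID string */

	if (id) {
		printf("%s (%u bytes)\n", id->string, id->length);
		free(id);	/* one free releases header and string together */
	}
	return 0;
}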
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c index d88a8aaab2a6..49563674833a 100644 --- a/drivers/acpi/acpica/utmath.c +++ b/drivers/acpi/acpica/utmath.c | |||
@@ -81,7 +81,7 @@ typedef union uint64_overlay { | |||
81 | * RETURN: Status (Checks for divide-by-zero) | 81 | * RETURN: Status (Checks for divide-by-zero) |
82 | * | 82 | * |
83 | * DESCRIPTION: Perform a short (maximum 64 bits divided by 32 bits) | 83 | * DESCRIPTION: Perform a short (maximum 64 bits divided by 32 bits) |
84 | * divide and modulo. The result is a 64-bit quotient and a | 84 | * divide and modulo. The result is a 64-bit quotient and a |
85 | * 32-bit remainder. | 85 | * 32-bit remainder. |
86 | * | 86 | * |
87 | ******************************************************************************/ | 87 | ******************************************************************************/ |
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c index 33c6cf7ff467..9286a69eb9aa 100644 --- a/drivers/acpi/acpica/utmisc.c +++ b/drivers/acpi/acpica/utmisc.c | |||
@@ -41,8 +41,6 @@ | |||
41 | * POSSIBILITY OF SUCH DAMAGES. | 41 | * POSSIBILITY OF SUCH DAMAGES. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/module.h> | ||
45 | |||
46 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
47 | #include "accommon.h" | 45 | #include "accommon.h" |
48 | #include "acnamesp.h" | 46 | #include "acnamesp.h" |
@@ -201,8 +199,8 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id) | |||
201 | */ | 199 | */ |
202 | acpi_gbl_owner_id_mask[j] |= (1 << k); | 200 | acpi_gbl_owner_id_mask[j] |= (1 << k); |
203 | 201 | ||
204 | acpi_gbl_last_owner_id_index = (u8) j; | 202 | acpi_gbl_last_owner_id_index = (u8)j; |
205 | acpi_gbl_next_owner_id_offset = (u8) (k + 1); | 203 | acpi_gbl_next_owner_id_offset = (u8)(k + 1); |
206 | 204 | ||
207 | /* | 205 | /* |
208 | * Construct encoded ID from the index and bit position | 206 | * Construct encoded ID from the index and bit position |
@@ -252,7 +250,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id) | |||
252 | * control method or unloading a table. Either way, we would | 250 | * control method or unloading a table. Either way, we would |
253 | * ignore any error anyway. | 251 | * ignore any error anyway. |
254 | * | 252 | * |
255 | * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255 | 253 | * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255 |
256 | * | 254 | * |
257 | ******************************************************************************/ | 255 | ******************************************************************************/ |
258 | 256 | ||
@@ -339,6 +337,73 @@ void acpi_ut_strupr(char *src_string) | |||
339 | return; | 337 | return; |
340 | } | 338 | } |
341 | 339 | ||
340 | #ifdef ACPI_ASL_COMPILER | ||
341 | /******************************************************************************* | ||
342 | * | ||
343 | * FUNCTION: acpi_ut_strlwr (strlwr) | ||
344 | * | ||
345 | * PARAMETERS: src_string - The source string to convert | ||
346 | * | ||
347 | * RETURN: None | ||
348 | * | ||
349 | * DESCRIPTION: Convert string to lowercase | ||
350 | * | ||
351 | * NOTE: This is not a POSIX function, so it appears here, not in utclib.c | ||
352 | * | ||
353 | ******************************************************************************/ | ||
354 | |||
355 | void acpi_ut_strlwr(char *src_string) | ||
356 | { | ||
357 | char *string; | ||
358 | |||
359 | ACPI_FUNCTION_ENTRY(); | ||
360 | |||
361 | if (!src_string) { | ||
362 | return; | ||
363 | } | ||
364 | |||
365 | /* Walk entire string, lowercasing the letters */ | ||
366 | |||
367 | for (string = src_string; *string; string++) { | ||
368 | *string = (char)ACPI_TOLOWER(*string); | ||
369 | } | ||
370 | |||
371 | return; | ||
372 | } | ||
373 | |||
374 | /****************************************************************************** | ||
375 | * | ||
376 | * FUNCTION: acpi_ut_stricmp | ||
377 | * | ||
378 | * PARAMETERS: string1 - first string to compare | ||
379 | * string2 - second string to compare | ||
380 | * | ||
381 | * RETURN: int that signifies string relationship. Zero means strings | ||
382 | * are equal. | ||
383 | * | ||
384 | * DESCRIPTION: Implementation of the non-ANSI stricmp function (compare | ||
385 | * strings with no case sensitivity) | ||
386 | * | ||
387 | ******************************************************************************/ | ||
388 | |||
389 | int acpi_ut_stricmp(char *string1, char *string2) | ||
390 | { | ||
391 | int c1; | ||
392 | int c2; | ||
393 | |||
394 | do { | ||
395 | c1 = tolower((int)*string1); | ||
396 | c2 = tolower((int)*string2); | ||
397 | |||
398 | string1++; | ||
399 | string2++; | ||
400 | } | ||
401 | while ((c1 == c2) && (c1)); | ||
402 | |||
403 | return (c1 - c2); | ||
404 | } | ||
405 | #endif | ||
406 | |||
342 | /******************************************************************************* | 407 | /******************************************************************************* |
343 | * | 408 | * |
344 | * FUNCTION: acpi_ut_print_string | 409 | * FUNCTION: acpi_ut_print_string |
@@ -469,8 +534,8 @@ u32 acpi_ut_dword_byte_swap(u32 value) | |||
469 | * RETURN: None | 534 | * RETURN: None |
470 | * | 535 | * |
471 | * DESCRIPTION: Set the global integer bit width based upon the revision | 536 | * DESCRIPTION: Set the global integer bit width based upon the revision |
472 | * of the DSDT. For Revision 1 and 0, Integers are 32 bits. | 537 | * of the DSDT. For Revision 1 and 0, Integers are 32 bits. |
473 | * For Revision 2 and above, Integers are 64 bits. Yes, this | 538 | * For Revision 2 and above, Integers are 64 bits. Yes, this |
474 | * makes a difference. | 539 | * makes a difference. |
475 | * | 540 | * |
476 | ******************************************************************************/ | 541 | ******************************************************************************/ |
@@ -606,7 +671,7 @@ u8 acpi_ut_valid_acpi_char(char character, u32 position) | |||
606 | * | 671 | * |
607 | * RETURN: TRUE if the name is valid, FALSE otherwise | 672 | * RETURN: TRUE if the name is valid, FALSE otherwise |
608 | * | 673 | * |
609 | * DESCRIPTION: Check for a valid ACPI name. Each character must be one of: | 674 | * DESCRIPTION: Check for a valid ACPI name. Each character must be one of: |
610 | * 1) Upper case alpha | 675 | * 1) Upper case alpha |
611 | * 2) numeric | 676 | * 2) numeric |
612 | * 3) underscore | 677 | * 3) underscore |
@@ -638,29 +703,59 @@ u8 acpi_ut_valid_acpi_name(u32 name) | |||
638 | * RETURN: Repaired version of the name | 703 | * RETURN: Repaired version of the name |
639 | * | 704 | * |
640 | * DESCRIPTION: Repair an ACPI name: Change invalid characters to '*' and | 705 | * DESCRIPTION: Repair an ACPI name: Change invalid characters to '*' and |
641 | * return the new name. | 706 | * return the new name. NOTE: the Name parameter must reside in |
707 | * read/write memory, cannot be a const. | ||
708 | * | ||
709 | * An ACPI Name must consist of valid ACPI characters. We will repair the name | ||
710 | * if necessary because we don't want to abort because of this, but we want | ||
711 | * all namespace names to be printable. A warning message is appropriate. | ||
712 | * | ||
713 | * This issue came up because there are in fact machines that exhibit | ||
714 | * this problem, and we want to be able to enable ACPI support for them, | ||
715 | * even though there are a few bad names. | ||
642 | * | 716 | * |
643 | ******************************************************************************/ | 717 | ******************************************************************************/ |
644 | 718 | ||
645 | acpi_name acpi_ut_repair_name(char *name) | 719 | void acpi_ut_repair_name(char *name) |
646 | { | 720 | { |
647 | u32 i; | 721 | u32 i; |
648 | char new_name[ACPI_NAME_SIZE]; | 722 | u8 found_bad_char = FALSE; |
723 | u32 original_name; | ||
724 | |||
725 | ACPI_FUNCTION_NAME(ut_repair_name); | ||
726 | |||
727 | ACPI_MOVE_NAME(&original_name, name); | ||
728 | |||
729 | /* Check each character in the name */ | ||
649 | 730 | ||
650 | for (i = 0; i < ACPI_NAME_SIZE; i++) { | 731 | for (i = 0; i < ACPI_NAME_SIZE; i++) { |
651 | new_name[i] = name[i]; | 732 | if (acpi_ut_valid_acpi_char(name[i], i)) { |
733 | continue; | ||
734 | } | ||
652 | 735 | ||
653 | /* | 736 | /* |
654 | * Replace a bad character with something printable, yet technically | 737 | * Replace a bad character with something printable, yet technically |
655 | * still invalid. This prevents any collisions with existing "good" | 738 | * still invalid. This prevents any collisions with existing "good" |
656 | * names in the namespace. | 739 | * names in the namespace. |
657 | */ | 740 | */ |
658 | if (!acpi_ut_valid_acpi_char(name[i], i)) { | 741 | name[i] = '*'; |
659 | new_name[i] = '*'; | 742 | found_bad_char = TRUE; |
660 | } | ||
661 | } | 743 | } |
662 | 744 | ||
663 | return (*(u32 *) new_name); | 745 | if (found_bad_char) { |
746 | |||
747 | /* Report warning only if in strict mode or debug mode */ | ||
748 | |||
749 | if (!acpi_gbl_enable_interpreter_slack) { | ||
750 | ACPI_WARNING((AE_INFO, | ||
751 | "Found bad character(s) in name, repaired: [%4.4s]\n", | ||
752 | name)); | ||
753 | } else { | ||
754 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
755 | "Found bad character(s) in name, repaired: [%4.4s]\n", | ||
756 | name)); | ||
757 | } | ||
758 | } | ||
664 | } | 759 | } |
665 | 760 | ||
666 | /******************************************************************************* | 761 | /******************************************************************************* |
@@ -681,7 +776,7 @@ acpi_name acpi_ut_repair_name(char *name) | |||
681 | * | 776 | * |
682 | ******************************************************************************/ | 777 | ******************************************************************************/ |
683 | 778 | ||
684 | acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer) | 779 | acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer) |
685 | { | 780 | { |
686 | u32 this_digit = 0; | 781 | u32 this_digit = 0; |
687 | u64 return_value = 0; | 782 | u64 return_value = 0; |
@@ -754,14 +849,14 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer) | |||
754 | 849 | ||
755 | /* Convert ASCII 0-9 to Decimal value */ | 850 | /* Convert ASCII 0-9 to Decimal value */ |
756 | 851 | ||
757 | this_digit = ((u8) * string) - '0'; | 852 | this_digit = ((u8)*string) - '0'; |
758 | } else if (base == 10) { | 853 | } else if (base == 10) { |
759 | 854 | ||
760 | /* Digit is out of range; possible in to_integer case only */ | 855 | /* Digit is out of range; possible in to_integer case only */ |
761 | 856 | ||
762 | term = 1; | 857 | term = 1; |
763 | } else { | 858 | } else { |
764 | this_digit = (u8) ACPI_TOUPPER(*string); | 859 | this_digit = (u8)ACPI_TOUPPER(*string); |
765 | if (ACPI_IS_XDIGIT((char)this_digit)) { | 860 | if (ACPI_IS_XDIGIT((char)this_digit)) { |
766 | 861 | ||
767 | /* Convert ASCII Hex char to value */ | 862 | /* Convert ASCII Hex char to value */ |
@@ -788,8 +883,9 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer) | |||
788 | 883 | ||
789 | valid_digits++; | 884 | valid_digits++; |
790 | 885 | ||
791 | if (sign_of0x && ((valid_digits > 16) | 886 | if (sign_of0x |
792 | || ((valid_digits > 8) && mode32))) { | 887 | && ((valid_digits > 16) |
888 | || ((valid_digits > 8) && mode32))) { | ||
793 | /* | 889 | /* |
794 | * This is to_integer operation case. | 890 | * This is to_integer operation case. |
795 | * No any restrictions for string-to-integer conversion, | 891 | * No any restrictions for string-to-integer conversion, |
@@ -800,7 +896,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer) | |||
800 | 896 | ||
801 | /* Divide the digit into the correct position */ | 897 | /* Divide the digit into the correct position */ |
802 | 898 | ||
803 | (void)acpi_ut_short_divide((dividend - (u64) this_digit), | 899 | (void)acpi_ut_short_divide((dividend - (u64)this_digit), |
804 | base, "ient, NULL); | 900 | base, "ient, NULL); |
805 | 901 | ||
806 | if (return_value > quotient) { | 902 | if (return_value > quotient) { |
@@ -890,7 +986,7 @@ acpi_ut_create_update_state_and_push(union acpi_operand_object *object, | |||
890 | ******************************************************************************/ | 986 | ******************************************************************************/ |
891 | 987 | ||
892 | acpi_status | 988 | acpi_status |
893 | acpi_ut_walk_package_tree(union acpi_operand_object * source_object, | 989 | acpi_ut_walk_package_tree(union acpi_operand_object *source_object, |
894 | void *target_object, | 990 | void *target_object, |
895 | acpi_pkg_callback walk_callback, void *context) | 991 | acpi_pkg_callback walk_callback, void *context) |
896 | { | 992 | { |
@@ -917,10 +1013,10 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object, | |||
917 | 1013 | ||
918 | /* | 1014 | /* |
919 | * Check for: | 1015 | * Check for: |
920 | * 1) An uninitialized package element. It is completely | 1016 | * 1) An uninitialized package element. It is completely |
921 | * legal to declare a package and leave it uninitialized | 1017 | * legal to declare a package and leave it uninitialized |
922 | * 2) Not an internal object - can be a namespace node instead | 1018 | * 2) Not an internal object - can be a namespace node instead |
923 | * 3) Any type other than a package. Packages are handled in else | 1019 | * 3) Any type other than a package. Packages are handled in else |
924 | * case below. | 1020 | * case below. |
925 | */ | 1021 | */ |
926 | if ((!this_source_obj) || | 1022 | if ((!this_source_obj) || |
@@ -939,7 +1035,7 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object, | |||
939 | state->pkg.source_object->package.count) { | 1035 | state->pkg.source_object->package.count) { |
940 | /* | 1036 | /* |
941 | * We've handled all of the objects at this level, This means | 1037 | * We've handled all of the objects at this level, This means |
942 | * that we have just completed a package. That package may | 1038 | * that we have just completed a package. That package may |
943 | * have contained one or more packages itself. | 1039 | * have contained one or more packages itself. |
944 | * | 1040 | * |
945 | * Delete this state and pop the previous state (package). | 1041 | * Delete this state and pop the previous state (package). |
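The reworked acpi_ut_repair_name above now edits the name in place; its core is a loop that replaces every invalid byte of the 4-character name with a printable '*'. A standalone sketch of that loop, using a simplified validity rule in place of acpi_ut_valid_acpi_char (uppercase letter or underscore anywhere, digits allowed except in the first position):

#include <stdio.h>
#include <ctype.h>

#define NAME_SIZE 4

/* Simplified rule: A-Z, '_', or a digit in any position but the first */
static int valid_acpi_char(char c, unsigned int pos)
{
	if (c == '_' || isupper((unsigned char)c))
		return 1;
	return pos > 0 && isdigit((unsigned char)c);
}

static void repair_name(char name[NAME_SIZE])
{
	unsigned int i;

	for (i = 0; i < NAME_SIZE; i++)
		if (!valid_acpi_char(name[i], i))
			name[i] = '*';	/* printable, yet still invalid */
}

int main(void)
{
	char name[NAME_SIZE + 1] = "a_0\x7f";

	repair_name(name);
	printf("%.4s\n", name);		/* prints "*_0*" */
	return 0;
}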
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c index 296baa676bc5..5ccf57c0d87e 100644 --- a/drivers/acpi/acpica/utmutex.c +++ b/drivers/acpi/acpica/utmutex.c | |||
@@ -193,6 +193,8 @@ static void acpi_ut_delete_mutex(acpi_mutex_handle mutex_id) | |||
193 | 193 | ||
194 | acpi_gbl_mutex_info[mutex_id].mutex = NULL; | 194 | acpi_gbl_mutex_info[mutex_id].mutex = NULL; |
195 | acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED; | 195 | acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED; |
196 | |||
197 | return_VOID; | ||
196 | } | 198 | } |
197 | 199 | ||
198 | /******************************************************************************* | 200 | /******************************************************************************* |
@@ -226,9 +228,9 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id) | |||
226 | /* | 228 | /* |
227 | * Mutex debug code, for internal debugging only. | 229 | * Mutex debug code, for internal debugging only. |
228 | * | 230 | * |
229 | * Deadlock prevention. Check if this thread owns any mutexes of value | 231 | * Deadlock prevention. Check if this thread owns any mutexes of value |
230 | * greater than or equal to this one. If so, the thread has violated | 232 | * greater than or equal to this one. If so, the thread has violated |
231 | * the mutex ordering rule. This indicates a coding error somewhere in | 233 | * the mutex ordering rule. This indicates a coding error somewhere in |
232 | * the ACPI subsystem code. | 234 | * the ACPI subsystem code. |
233 | */ | 235 | */ |
234 | for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) { | 236 | for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) { |
@@ -319,9 +321,9 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id) | |||
319 | /* | 321 | /* |
320 | * Mutex debug code, for internal debugging only. | 322 | * Mutex debug code, for internal debugging only. |
321 | * | 323 | * |
322 | * Deadlock prevention. Check if this thread owns any mutexes of value | 324 | * Deadlock prevention. Check if this thread owns any mutexes of value |
323 | * greater than this one. If so, the thread has violated the mutex | 325 | * greater than this one. If so, the thread has violated the mutex |
324 | * ordering rule. This indicates a coding error somewhere in | 326 | * ordering rule. This indicates a coding error somewhere in |
325 | * the ACPI subsystem code. | 327 | * the ACPI subsystem code. |
326 | */ | 328 | */ |
327 | for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) { | 329 | for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) { |
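The deadlock-prevention comments reflowed above describe a lock-ordering rule: mutexes carry numeric IDs and a thread may only acquire them in ascending ID order; holding any mutex with an ID greater than or equal to the one being requested is a violation. A minimal sketch of such a check, using a per-thread bitmask instead of the ACPICA ownership table (all names here are illustrative):

#include <stdint.h>
#include <stdio.h>

#define NUM_MUTEX 8

/* Bitmask of mutex IDs currently held by the calling thread. */
static _Thread_local uint32_t held_mask;

/* Refuse to acquire mutex_id if this thread already holds a mutex with
 * an ID greater than or equal to mutex_id (the ordering rule above). */
static int check_mutex_order(unsigned int mutex_id)
{
	if (held_mask >> mutex_id) {
		fprintf(stderr, "mutex ordering violation on id %u\n", mutex_id);
		return -1;
	}
	held_mask |= 1u << mutex_id;
	return 0;
}

static void mark_mutex_released(unsigned int mutex_id)
{
	held_mask &= ~(1u << mutex_id);
}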
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c index 655f0799a391..5c52ca78f6fa 100644 --- a/drivers/acpi/acpica/utobject.c +++ b/drivers/acpi/acpica/utobject.c | |||
@@ -77,7 +77,7 @@ acpi_ut_get_element_length(u8 object_type, | |||
77 | * | 77 | * |
78 | * NOTE: We always allocate the worst-case object descriptor because | 78 | * NOTE: We always allocate the worst-case object descriptor because |
79 | * these objects are cached, and we want them to be | 79 | * these objects are cached, and we want them to be |
80 | * one-size-satisifies-any-request. This in itself may not be | 80 | * one-size-satisifies-any-request. This in itself may not be |
81 | * the most memory efficient, but the efficiency of the object | 81 | * the most memory efficient, but the efficiency of the object |
82 | * cache should more than make up for this! | 82 | * cache should more than make up for this! |
83 | * | 83 | * |
@@ -370,9 +370,9 @@ u8 acpi_ut_valid_internal_object(void *object) | |||
370 | * line_number - Caller's line number (for error output) | 370 | * line_number - Caller's line number (for error output) |
371 | * component_id - Caller's component ID (for error output) | 371 | * component_id - Caller's component ID (for error output) |
372 | * | 372 | * |
373 | * RETURN: Pointer to newly allocated object descriptor. Null on error | 373 | * RETURN: Pointer to newly allocated object descriptor. Null on error |
374 | * | 374 | * |
375 | * DESCRIPTION: Allocate a new object descriptor. Gracefully handle | 375 | * DESCRIPTION: Allocate a new object descriptor. Gracefully handle |
376 | * error conditions. | 376 | * error conditions. |
377 | * | 377 | * |
378 | ******************************************************************************/ | 378 | ******************************************************************************/ |
@@ -554,7 +554,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object, | |||
554 | 554 | ||
555 | /* | 555 | /* |
556 | * Account for the space required by the object rounded up to the next | 556 | * Account for the space required by the object rounded up to the next |
557 | * multiple of the machine word size. This keeps each object aligned | 557 | * multiple of the machine word size. This keeps each object aligned |
558 | * on a machine word boundary. (preventing alignment faults on some | 558 | * on a machine word boundary. (preventing alignment faults on some |
559 | * machines.) | 559 | * machines.) |
560 | */ | 560 | */ |
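The utobject.c comment retained above explains that each object's size is rounded up to the next multiple of the machine word size so that every object stays word-aligned. A generic form of that rounding, shown only as an illustration (this is not the ACPICA macro):

#include <stdio.h>
#include <stddef.h>

#define WORD_SIZE		sizeof(void *)
#define ROUND_UP_TO_WORD(len)	(((len) + WORD_SIZE - 1) & ~(WORD_SIZE - 1))

int main(void)
{
	/* On a 64-bit machine this prints 16 16 24. */
	printf("%zu %zu %zu\n",
	       (size_t)ROUND_UP_TO_WORD(13),
	       (size_t)ROUND_UP_TO_WORD(16),
	       (size_t)ROUND_UP_TO_WORD(17));
	return 0;
}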
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c index a1c988260073..cee0473ba813 100644 --- a/drivers/acpi/acpica/utstate.c +++ b/drivers/acpi/acpica/utstate.c | |||
@@ -147,7 +147,7 @@ union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state | |||
147 | * | 147 | * |
148 | * RETURN: The new state object. NULL on failure. | 148 | * RETURN: The new state object. NULL on failure. |
149 | * | 149 | * |
150 | * DESCRIPTION: Create a generic state object. Attempt to obtain one from | 150 | * DESCRIPTION: Create a generic state object. Attempt to obtain one from |
151 | * the global state cache; If none available, create a new one. | 151 | * the global state cache; If none available, create a new one. |
152 | * | 152 | * |
153 | ******************************************************************************/ | 153 | ******************************************************************************/ |
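The utstate.c description above ("attempt to obtain one from the global state cache; if none available, create a new one") is the classic free-list object cache. A compact illustration of that pattern with made-up types, not the ACPICA cache code:

#include <stdlib.h>

struct state_obj {
	struct state_obj *next;		/* free-list link while cached */
	int data;
};

static struct state_obj *cache_head;

/* Reuse a cached object if one is available, otherwise allocate. */
static struct state_obj *state_alloc(void)
{
	struct state_obj *obj = cache_head;

	if (obj) {
		cache_head = obj->next;
		obj->next = NULL;
		return obj;
	}
	return calloc(1, sizeof(*obj));
}

/* Return an object to the cache instead of freeing it. */
static void state_release(struct state_obj *obj)
{
	obj->next = cache_head;
	cache_head = obj;
}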
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c new file mode 100644 index 000000000000..a424a9e3fea4 --- /dev/null +++ b/drivers/acpi/acpica/uttrack.c | |||
@@ -0,0 +1,692 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: uttrack - Memory allocation tracking routines (debug only) | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2012, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | /* | ||
45 | * These procedures are used for tracking memory leaks in the subsystem, and | ||
46 | * they get compiled out when the ACPI_DBG_TRACK_ALLOCATIONS is not set. | ||
47 | * | ||
48 | * Each memory allocation is tracked via a doubly linked list. Each | ||
49 | * element contains the caller's component, module name, function name, and | ||
50 | * line number. acpi_ut_allocate and acpi_ut_allocate_zeroed call | ||
51 | * acpi_ut_track_allocation to add an element to the list; deletion | ||
52 | * occurs in the body of acpi_ut_free. | ||
53 | */ | ||
54 | |||
55 | #include <acpi/acpi.h> | ||
56 | #include "accommon.h" | ||
57 | |||
58 | #ifdef ACPI_DBG_TRACK_ALLOCATIONS | ||
59 | |||
60 | #define _COMPONENT ACPI_UTILITIES | ||
61 | ACPI_MODULE_NAME("uttrack") | ||
62 | |||
63 | /* Local prototypes */ | ||
64 | static struct acpi_debug_mem_block *acpi_ut_find_allocation(struct | ||
65 | acpi_debug_mem_block | ||
66 | *allocation); | ||
67 | |||
68 | static acpi_status | ||
69 | acpi_ut_track_allocation(struct acpi_debug_mem_block *address, | ||
70 | acpi_size size, | ||
71 | u8 alloc_type, | ||
72 | u32 component, const char *module, u32 line); | ||
73 | |||
74 | static acpi_status | ||
75 | acpi_ut_remove_allocation(struct acpi_debug_mem_block *address, | ||
76 | u32 component, const char *module, u32 line); | ||
77 | |||
78 | /******************************************************************************* | ||
79 | * | ||
80 | * FUNCTION: acpi_ut_create_list | ||
81 | * | ||
82 | * PARAMETERS: cache_name - Ascii name for the cache | ||
83 | * object_size - Size of each cached object | ||
84 | * return_cache - Where the new cache object is returned | ||
85 | * | ||
86 | * RETURN: Status | ||
87 | * | ||
88 | * DESCRIPTION: Create a local memory list for tracking purposes | ||
89 | * | ||
90 | ******************************************************************************/ | ||
91 | |||
92 | acpi_status | ||
93 | acpi_ut_create_list(char *list_name, | ||
94 | u16 object_size, struct acpi_memory_list **return_cache) | ||
95 | { | ||
96 | struct acpi_memory_list *cache; | ||
97 | |||
98 | cache = acpi_os_allocate(sizeof(struct acpi_memory_list)); | ||
99 | if (!cache) { | ||
100 | return (AE_NO_MEMORY); | ||
101 | } | ||
102 | |||
103 | ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list)); | ||
104 | |||
105 | cache->list_name = list_name; | ||
106 | cache->object_size = object_size; | ||
107 | |||
108 | *return_cache = cache; | ||
109 | return (AE_OK); | ||
110 | } | ||
111 | |||
112 | /******************************************************************************* | ||
113 | * | ||
114 | * FUNCTION: acpi_ut_allocate_and_track | ||
115 | * | ||
116 | * PARAMETERS: size - Size of the allocation | ||
117 | * component - Component type of caller | ||
118 | * module - Source file name of caller | ||
119 | * line - Line number of caller | ||
120 | * | ||
121 | * RETURN: Address of the allocated memory on success, NULL on failure. | ||
122 | * | ||
123 | * DESCRIPTION: The subsystem's equivalent of malloc. | ||
124 | * | ||
125 | ******************************************************************************/ | ||
126 | |||
127 | void *acpi_ut_allocate_and_track(acpi_size size, | ||
128 | u32 component, const char *module, u32 line) | ||
129 | { | ||
130 | struct acpi_debug_mem_block *allocation; | ||
131 | acpi_status status; | ||
132 | |||
133 | allocation = | ||
134 | acpi_ut_allocate(size + sizeof(struct acpi_debug_mem_header), | ||
135 | component, module, line); | ||
136 | if (!allocation) { | ||
137 | return (NULL); | ||
138 | } | ||
139 | |||
140 | status = acpi_ut_track_allocation(allocation, size, | ||
141 | ACPI_MEM_MALLOC, component, module, | ||
142 | line); | ||
143 | if (ACPI_FAILURE(status)) { | ||
144 | acpi_os_free(allocation); | ||
145 | return (NULL); | ||
146 | } | ||
147 | |||
148 | acpi_gbl_global_list->total_allocated++; | ||
149 | acpi_gbl_global_list->total_size += (u32)size; | ||
150 | acpi_gbl_global_list->current_total_size += (u32)size; | ||
151 | if (acpi_gbl_global_list->current_total_size > | ||
152 | acpi_gbl_global_list->max_occupied) { | ||
153 | acpi_gbl_global_list->max_occupied = | ||
154 | acpi_gbl_global_list->current_total_size; | ||
155 | } | ||
156 | |||
157 | return ((void *)&allocation->user_space); | ||
158 | } | ||
159 | |||
160 | /******************************************************************************* | ||
161 | * | ||
162 | * FUNCTION: acpi_ut_allocate_zeroed_and_track | ||
163 | * | ||
164 | * PARAMETERS: size - Size of the allocation | ||
165 | * component - Component type of caller | ||
166 | * module - Source file name of caller | ||
167 | * line - Line number of caller | ||
168 | * | ||
169 | * RETURN: Address of the allocated memory on success, NULL on failure. | ||
170 | * | ||
171 | * DESCRIPTION: Subsystem equivalent of calloc. | ||
172 | * | ||
173 | ******************************************************************************/ | ||
174 | |||
175 | void *acpi_ut_allocate_zeroed_and_track(acpi_size size, | ||
176 | u32 component, | ||
177 | const char *module, u32 line) | ||
178 | { | ||
179 | struct acpi_debug_mem_block *allocation; | ||
180 | acpi_status status; | ||
181 | |||
182 | allocation = | ||
183 | acpi_ut_allocate_zeroed(size + sizeof(struct acpi_debug_mem_header), | ||
184 | component, module, line); | ||
185 | if (!allocation) { | ||
186 | |||
187 | /* Report allocation error */ | ||
188 | |||
189 | ACPI_ERROR((module, line, | ||
190 | "Could not allocate size %u", (u32)size)); | ||
191 | return (NULL); | ||
192 | } | ||
193 | |||
194 | status = acpi_ut_track_allocation(allocation, size, | ||
195 | ACPI_MEM_CALLOC, component, module, | ||
196 | line); | ||
197 | if (ACPI_FAILURE(status)) { | ||
198 | acpi_os_free(allocation); | ||
199 | return (NULL); | ||
200 | } | ||
201 | |||
202 | acpi_gbl_global_list->total_allocated++; | ||
203 | acpi_gbl_global_list->total_size += (u32)size; | ||
204 | acpi_gbl_global_list->current_total_size += (u32)size; | ||
205 | if (acpi_gbl_global_list->current_total_size > | ||
206 | acpi_gbl_global_list->max_occupied) { | ||
207 | acpi_gbl_global_list->max_occupied = | ||
208 | acpi_gbl_global_list->current_total_size; | ||
209 | } | ||
210 | |||
211 | return ((void *)&allocation->user_space); | ||
212 | } | ||
213 | |||
214 | /******************************************************************************* | ||
215 | * | ||
216 | * FUNCTION: acpi_ut_free_and_track | ||
217 | * | ||
218 | * PARAMETERS: allocation - Address of the memory to deallocate | ||
219 | * component - Component type of caller | ||
220 | * module - Source file name of caller | ||
221 | * line - Line number of caller | ||
222 | * | ||
223 | * RETURN: None | ||
224 | * | ||
225 | * DESCRIPTION: Frees the memory at Allocation | ||
226 | * | ||
227 | ******************************************************************************/ | ||
228 | |||
229 | void | ||
230 | acpi_ut_free_and_track(void *allocation, | ||
231 | u32 component, const char *module, u32 line) | ||
232 | { | ||
233 | struct acpi_debug_mem_block *debug_block; | ||
234 | acpi_status status; | ||
235 | |||
236 | ACPI_FUNCTION_TRACE_PTR(ut_free, allocation); | ||
237 | |||
238 | if (NULL == allocation) { | ||
239 | ACPI_ERROR((module, line, "Attempt to delete a NULL address")); | ||
240 | |||
241 | return_VOID; | ||
242 | } | ||
243 | |||
244 | debug_block = ACPI_CAST_PTR(struct acpi_debug_mem_block, | ||
245 | (((char *)allocation) - | ||
246 | sizeof(struct acpi_debug_mem_header))); | ||
247 | |||
248 | acpi_gbl_global_list->total_freed++; | ||
249 | acpi_gbl_global_list->current_total_size -= debug_block->size; | ||
250 | |||
251 | status = acpi_ut_remove_allocation(debug_block, | ||
252 | component, module, line); | ||
253 | if (ACPI_FAILURE(status)) { | ||
254 | ACPI_EXCEPTION((AE_INFO, status, "Could not free memory")); | ||
255 | } | ||
256 | |||
257 | acpi_os_free(debug_block); | ||
258 | ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p freed\n", allocation)); | ||
259 | return_VOID; | ||
260 | } | ||
261 | |||
262 | /******************************************************************************* | ||
263 | * | ||
264 | * FUNCTION: acpi_ut_find_allocation | ||
265 | * | ||
266 | * PARAMETERS: allocation - Address of allocated memory | ||
267 | * | ||
268 | * RETURN: Three cases: | ||
269 | * 1) List is empty, NULL is returned. | ||
270 | * 2) Element was found. Returns Allocation parameter. | ||
271 | * 3) Element was not found. Returns position where it should be | ||
272 | * inserted into the list. | ||
273 | * | ||
274 | * DESCRIPTION: Searches for an element in the global allocation tracking list. | ||
275 | * If the element is not found, returns the location within the | ||
276 | * list where the element should be inserted. | ||
277 | * | ||
278 | * Note: The list is ordered by larger-to-smaller addresses. | ||
279 | * | ||
280 | * This global list is used to detect memory leaks in ACPICA as | ||
281 | * well as other issues such as an attempt to release the same | ||
282 | * internal object more than once. Although expensive as far | ||
283 | * as cpu time, this list is much more helpful for finding these | ||
284 | * types of issues than using memory leak detectors outside of | ||
285 | * the ACPICA code. | ||
286 | * | ||
287 | ******************************************************************************/ | ||
288 | |||
289 | static struct acpi_debug_mem_block *acpi_ut_find_allocation(struct | ||
290 | acpi_debug_mem_block | ||
291 | *allocation) | ||
292 | { | ||
293 | struct acpi_debug_mem_block *element; | ||
294 | |||
295 | element = acpi_gbl_global_list->list_head; | ||
296 | if (!element) { | ||
297 | return (NULL); | ||
298 | } | ||
299 | |||
300 | /* | ||
301 | * Search for the address. | ||
302 | * | ||
303 | * Note: List is ordered by larger-to-smaller addresses, on the | ||
304 | * assumption that a new allocation usually has a larger address | ||
305 | * than previous allocations. | ||
306 | */ | ||
307 | while (element > allocation) { | ||
308 | |||
309 | /* Check for end-of-list */ | ||
310 | |||
311 | if (!element->next) { | ||
312 | return (element); | ||
313 | } | ||
314 | |||
315 | element = element->next; | ||
316 | } | ||
317 | |||
318 | if (element == allocation) { | ||
319 | return (element); | ||
320 | } | ||
321 | |||
322 | return (element->previous); | ||
323 | } | ||
324 | |||
325 | /******************************************************************************* | ||
326 | * | ||
327 | * FUNCTION: acpi_ut_track_allocation | ||
328 | * | ||
329 | * PARAMETERS: allocation - Address of allocated memory | ||
330 | * size - Size of the allocation | ||
331 | * alloc_type - MEM_MALLOC or MEM_CALLOC | ||
332 | * component - Component type of caller | ||
333 | * module - Source file name of caller | ||
334 | * line - Line number of caller | ||
335 | * | ||
336 | * RETURN: Status | ||
337 | * | ||
338 | * DESCRIPTION: Inserts an element into the global allocation tracking list. | ||
339 | * | ||
340 | ******************************************************************************/ | ||
341 | |||
342 | static acpi_status | ||
343 | acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation, | ||
344 | acpi_size size, | ||
345 | u8 alloc_type, | ||
346 | u32 component, const char *module, u32 line) | ||
347 | { | ||
348 | struct acpi_memory_list *mem_list; | ||
349 | struct acpi_debug_mem_block *element; | ||
350 | acpi_status status = AE_OK; | ||
351 | |||
352 | ACPI_FUNCTION_TRACE_PTR(ut_track_allocation, allocation); | ||
353 | |||
354 | if (acpi_gbl_disable_mem_tracking) { | ||
355 | return_ACPI_STATUS(AE_OK); | ||
356 | } | ||
357 | |||
358 | mem_list = acpi_gbl_global_list; | ||
359 | status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY); | ||
360 | if (ACPI_FAILURE(status)) { | ||
361 | return_ACPI_STATUS(status); | ||
362 | } | ||
363 | |||
364 | /* | ||
365 | * Search the global list for this address to make sure it is not | ||
366 | * already present. This will catch several kinds of problems. | ||
367 | */ | ||
368 | element = acpi_ut_find_allocation(allocation); | ||
369 | if (element == allocation) { | ||
370 | ACPI_ERROR((AE_INFO, | ||
371 | "UtTrackAllocation: Allocation (%p) already present in global list!", | ||
372 | allocation)); | ||
373 | goto unlock_and_exit; | ||
374 | } | ||
375 | |||
376 | /* Fill in the instance data */ | ||
377 | |||
378 | allocation->size = (u32)size; | ||
379 | allocation->alloc_type = alloc_type; | ||
380 | allocation->component = component; | ||
381 | allocation->line = line; | ||
382 | |||
383 | ACPI_STRNCPY(allocation->module, module, ACPI_MAX_MODULE_NAME); | ||
384 | allocation->module[ACPI_MAX_MODULE_NAME - 1] = 0; | ||
385 | |||
386 | if (!element) { | ||
387 | |||
388 | /* Insert at list head */ | ||
389 | |||
390 | if (mem_list->list_head) { | ||
391 | ((struct acpi_debug_mem_block *)(mem_list->list_head))-> | ||
392 | previous = allocation; | ||
393 | } | ||
394 | |||
395 | allocation->next = mem_list->list_head; | ||
396 | allocation->previous = NULL; | ||
397 | |||
398 | mem_list->list_head = allocation; | ||
399 | } else { | ||
400 | /* Insert after element */ | ||
401 | |||
402 | allocation->next = element->next; | ||
403 | allocation->previous = element; | ||
404 | |||
405 | if (element->next) { | ||
406 | (element->next)->previous = allocation; | ||
407 | } | ||
408 | |||
409 | element->next = allocation; | ||
410 | } | ||
411 | |||
412 | unlock_and_exit: | ||
413 | status = acpi_ut_release_mutex(ACPI_MTX_MEMORY); | ||
414 | return_ACPI_STATUS(status); | ||
415 | } | ||
416 | |||
417 | /******************************************************************************* | ||
418 | * | ||
419 | * FUNCTION: acpi_ut_remove_allocation | ||
420 | * | ||
421 | * PARAMETERS: allocation - Address of allocated memory | ||
422 | * component - Component type of caller | ||
423 | * module - Source file name of caller | ||
424 | * line - Line number of caller | ||
425 | * | ||
426 | * RETURN: Status | ||
427 | * | ||
428 | * DESCRIPTION: Deletes an element from the global allocation tracking list. | ||
429 | * | ||
430 | ******************************************************************************/ | ||
431 | |||
432 | static acpi_status | ||
433 | acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation, | ||
434 | u32 component, const char *module, u32 line) | ||
435 | { | ||
436 | struct acpi_memory_list *mem_list; | ||
437 | acpi_status status; | ||
438 | |||
439 | ACPI_FUNCTION_TRACE(ut_remove_allocation); | ||
440 | |||
441 | if (acpi_gbl_disable_mem_tracking) { | ||
442 | return_ACPI_STATUS(AE_OK); | ||
443 | } | ||
444 | |||
445 | mem_list = acpi_gbl_global_list; | ||
446 | if (NULL == mem_list->list_head) { | ||
447 | |||
448 | /* No allocations! */ | ||
449 | |||
450 | ACPI_ERROR((module, line, | ||
451 | "Empty allocation list, nothing to free!")); | ||
452 | |||
453 | return_ACPI_STATUS(AE_OK); | ||
454 | } | ||
455 | |||
456 | status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY); | ||
457 | if (ACPI_FAILURE(status)) { | ||
458 | return_ACPI_STATUS(status); | ||
459 | } | ||
460 | |||
461 | /* Unlink */ | ||
462 | |||
463 | if (allocation->previous) { | ||
464 | (allocation->previous)->next = allocation->next; | ||
465 | } else { | ||
466 | mem_list->list_head = allocation->next; | ||
467 | } | ||
468 | |||
469 | if (allocation->next) { | ||
470 | (allocation->next)->previous = allocation->previous; | ||
471 | } | ||
472 | |||
473 | /* Mark the segment as deleted */ | ||
474 | |||
475 | ACPI_MEMSET(&allocation->user_space, 0xEA, allocation->size); | ||
476 | |||
477 | ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Freeing size 0%X\n", | ||
478 | allocation->size)); | ||
479 | |||
480 | status = acpi_ut_release_mutex(ACPI_MTX_MEMORY); | ||
481 | return_ACPI_STATUS(status); | ||
482 | } | ||
483 | |||
484 | /******************************************************************************* | ||
485 | * | ||
486 | * FUNCTION: acpi_ut_dump_allocation_info | ||
487 | * | ||
488 | * PARAMETERS: None | ||
489 | * | ||
490 | * RETURN: None | ||
491 | * | ||
492 | * DESCRIPTION: Print some info about the outstanding allocations. | ||
493 | * | ||
494 | ******************************************************************************/ | ||
495 | |||
496 | void acpi_ut_dump_allocation_info(void) | ||
497 | { | ||
498 | /* | ||
499 | struct acpi_memory_list *mem_list; | ||
500 | */ | ||
501 | |||
502 | ACPI_FUNCTION_TRACE(ut_dump_allocation_info); | ||
503 | |||
504 | /* | ||
505 | ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, | ||
506 | ("%30s: %4d (%3d Kb)\n", "Current allocations", | ||
507 | mem_list->current_count, | ||
508 | ROUND_UP_TO_1K (mem_list->current_size))); | ||
509 | |||
510 | ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, | ||
511 | ("%30s: %4d (%3d Kb)\n", "Max concurrent allocations", | ||
512 | mem_list->max_concurrent_count, | ||
513 | ROUND_UP_TO_1K (mem_list->max_concurrent_size))); | ||
514 | |||
515 | ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, | ||
516 | ("%30s: %4d (%3d Kb)\n", "Total (all) internal objects", | ||
517 | running_object_count, | ||
518 | ROUND_UP_TO_1K (running_object_size))); | ||
519 | |||
520 | ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, | ||
521 | ("%30s: %4d (%3d Kb)\n", "Total (all) allocations", | ||
522 | running_alloc_count, | ||
523 | ROUND_UP_TO_1K (running_alloc_size))); | ||
524 | |||
525 | ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, | ||
526 | ("%30s: %4d (%3d Kb)\n", "Current Nodes", | ||
527 | acpi_gbl_current_node_count, | ||
528 | ROUND_UP_TO_1K (acpi_gbl_current_node_size))); | ||
529 | |||
530 | ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, | ||
531 | ("%30s: %4d (%3d Kb)\n", "Max Nodes", | ||
532 | acpi_gbl_max_concurrent_node_count, | ||
533 | ROUND_UP_TO_1K ((acpi_gbl_max_concurrent_node_count * | ||
534 | sizeof (struct acpi_namespace_node))))); | ||
535 | */ | ||
536 | return_VOID; | ||
537 | } | ||
538 | |||
539 | /******************************************************************************* | ||
540 | * | ||
541 | * FUNCTION: acpi_ut_dump_allocations | ||
542 | * | ||
543 | * PARAMETERS: component - Component(s) to dump info for. | ||
544 | * module - Module to dump info for. NULL means all. | ||
545 | * | ||
546 | * RETURN: None | ||
547 | * | ||
548 | * DESCRIPTION: Print a list of all outstanding allocations. | ||
549 | * | ||
550 | ******************************************************************************/ | ||
551 | |||
552 | void acpi_ut_dump_allocations(u32 component, const char *module) | ||
553 | { | ||
554 | struct acpi_debug_mem_block *element; | ||
555 | union acpi_descriptor *descriptor; | ||
556 | u32 num_outstanding = 0; | ||
557 | u8 descriptor_type; | ||
558 | |||
559 | ACPI_FUNCTION_TRACE(ut_dump_allocations); | ||
560 | |||
561 | if (acpi_gbl_disable_mem_tracking) { | ||
562 | return_VOID; | ||
563 | } | ||
564 | |||
565 | /* | ||
566 | * Walk the allocation list. | ||
567 | */ | ||
568 | if (ACPI_FAILURE(acpi_ut_acquire_mutex(ACPI_MTX_MEMORY))) { | ||
569 | return_VOID; | ||
570 | } | ||
571 | |||
572 | element = acpi_gbl_global_list->list_head; | ||
573 | while (element) { | ||
574 | if ((element->component & component) && | ||
575 | ((module == NULL) | ||
576 | || (0 == ACPI_STRCMP(module, element->module)))) { | ||
577 | descriptor = | ||
578 | ACPI_CAST_PTR(union acpi_descriptor, | ||
579 | &element->user_space); | ||
580 | |||
581 | if (element->size < | ||
582 | sizeof(struct acpi_common_descriptor)) { | ||
583 | acpi_os_printf("%p Length 0x%04X %9.9s-%u " | ||
584 | "[Not a Descriptor - too small]\n", | ||
585 | descriptor, element->size, | ||
586 | element->module, element->line); | ||
587 | } else { | ||
588 | /* Ignore allocated objects that are in a cache */ | ||
589 | |||
590 | if (ACPI_GET_DESCRIPTOR_TYPE(descriptor) != | ||
591 | ACPI_DESC_TYPE_CACHED) { | ||
592 | acpi_os_printf | ||
593 | ("%p Length 0x%04X %9.9s-%u [%s] ", | ||
594 | descriptor, element->size, | ||
595 | element->module, element->line, | ||
596 | acpi_ut_get_descriptor_name | ||
597 | (descriptor)); | ||
598 | |||
599 | /* Validate the descriptor type using Type field and length */ | ||
600 | |||
601 | descriptor_type = 0; /* Not a valid descriptor type */ | ||
602 | |||
603 | switch (ACPI_GET_DESCRIPTOR_TYPE | ||
604 | (descriptor)) { | ||
605 | case ACPI_DESC_TYPE_OPERAND: | ||
606 | if (element->size == | ||
607 | sizeof(union | ||
608 | acpi_operand_object)) | ||
609 | { | ||
610 | descriptor_type = | ||
611 | ACPI_DESC_TYPE_OPERAND; | ||
612 | } | ||
613 | break; | ||
614 | |||
615 | case ACPI_DESC_TYPE_PARSER: | ||
616 | if (element->size == | ||
617 | sizeof(union | ||
618 | acpi_parse_object)) { | ||
619 | descriptor_type = | ||
620 | ACPI_DESC_TYPE_PARSER; | ||
621 | } | ||
622 | break; | ||
623 | |||
624 | case ACPI_DESC_TYPE_NAMED: | ||
625 | if (element->size == | ||
626 | sizeof(struct | ||
627 | acpi_namespace_node)) | ||
628 | { | ||
629 | descriptor_type = | ||
630 | ACPI_DESC_TYPE_NAMED; | ||
631 | } | ||
632 | break; | ||
633 | |||
634 | default: | ||
635 | break; | ||
636 | } | ||
637 | |||
638 | /* Display additional info for the major descriptor types */ | ||
639 | |||
640 | switch (descriptor_type) { | ||
641 | case ACPI_DESC_TYPE_OPERAND: | ||
642 | acpi_os_printf | ||
643 | ("%12.12s RefCount 0x%04X\n", | ||
644 | acpi_ut_get_type_name | ||
645 | (descriptor->object.common. | ||
646 | type), | ||
647 | descriptor->object.common. | ||
648 | reference_count); | ||
649 | break; | ||
650 | |||
651 | case ACPI_DESC_TYPE_PARSER: | ||
652 | acpi_os_printf | ||
653 | ("AmlOpcode 0x%04hX\n", | ||
654 | descriptor->op.asl. | ||
655 | aml_opcode); | ||
656 | break; | ||
657 | |||
658 | case ACPI_DESC_TYPE_NAMED: | ||
659 | acpi_os_printf("%4.4s\n", | ||
660 | acpi_ut_get_node_name | ||
661 | (&descriptor-> | ||
662 | node)); | ||
663 | break; | ||
664 | |||
665 | default: | ||
666 | acpi_os_printf("\n"); | ||
667 | break; | ||
668 | } | ||
669 | } | ||
670 | } | ||
671 | |||
672 | num_outstanding++; | ||
673 | } | ||
674 | |||
675 | element = element->next; | ||
676 | } | ||
677 | |||
678 | (void)acpi_ut_release_mutex(ACPI_MTX_MEMORY); | ||
679 | |||
680 | /* Print summary */ | ||
681 | |||
682 | if (!num_outstanding) { | ||
683 | ACPI_INFO((AE_INFO, "No outstanding allocations")); | ||
684 | } else { | ||
685 | ACPI_ERROR((AE_INFO, "%u(0x%X) Outstanding allocations", | ||
686 | num_outstanding, num_outstanding)); | ||
687 | } | ||
688 | |||
689 | return_VOID; | ||
690 | } | ||
691 | |||
692 | #endif /* ACPI_DBG_TRACK_ALLOCATIONS */ | ||
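To summarize the layout the new uttrack.c relies on: every tracked allocation is prefixed with a debug header, the pointer handed back to the caller is the user_space area just past that header, acpi_ut_track_allocation() links the header into a global address-ordered doubly linked list, and acpi_ut_free_and_track() steps back from the user pointer to recover the header before freeing. A self-contained sketch of the header/user-space arrangement (structure and function names are illustrative, not the ACPICA definitions):

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

struct dbg_header {
	struct dbg_header *next;
	struct dbg_header *previous;
	size_t size;
	const char *module;
	unsigned int line;
};

struct dbg_block {
	struct dbg_header header;
	char user_space[];		/* caller's memory starts here */
};

static void *tracked_alloc(size_t size, const char *module, unsigned int line)
{
	struct dbg_block *block = malloc(sizeof(*block) + size);

	if (!block)
		return NULL;

	memset(&block->header, 0, sizeof(block->header));
	block->header.size = size;
	block->header.module = module;
	block->header.line = line;
	/* A real tracker would now insert the header into a global list,
	 * as acpi_ut_track_allocation() does above. */
	return block->user_space;
}

static void tracked_free(void *user_ptr)
{
	/* Step back from the user pointer to the debug block, mirroring
	 * the ACPI_CAST_PTR arithmetic in acpi_ut_free_and_track(). */
	struct dbg_block *block = (struct dbg_block *)
	    ((char *)user_ptr - offsetof(struct dbg_block, user_space));

	free(block);
}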
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c index b09632b4f5b3..390db0ca5e2e 100644 --- a/drivers/acpi/acpica/utxface.c +++ b/drivers/acpi/acpica/utxface.c | |||
@@ -147,7 +147,7 @@ ACPI_EXPORT_SYMBOL(acpi_subsystem_status) | |||
147 | * RETURN: status - the status of the call | 147 | * RETURN: status - the status of the call |
148 | * | 148 | * |
149 | * DESCRIPTION: This function is called to get information about the current | 149 | * DESCRIPTION: This function is called to get information about the current |
150 | * state of the ACPI subsystem. It will return system information | 150 | * state of the ACPI subsystem. It will return system information |
151 | * in the out_buffer. | 151 | * in the out_buffer. |
152 | * | 152 | * |
153 | * If the function fails an appropriate status will be returned | 153 | * If the function fails an appropriate status will be returned |
@@ -238,7 +238,7 @@ acpi_install_initialization_handler(acpi_init_handler handler, u32 function) | |||
238 | } | 238 | } |
239 | 239 | ||
240 | acpi_gbl_init_handler = handler; | 240 | acpi_gbl_init_handler = handler; |
241 | return AE_OK; | 241 | return (AE_OK); |
242 | } | 242 | } |
243 | 243 | ||
244 | ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler) | 244 | ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler) |
@@ -263,6 +263,7 @@ acpi_status acpi_purge_cached_objects(void) | |||
263 | (void)acpi_os_purge_cache(acpi_gbl_operand_cache); | 263 | (void)acpi_os_purge_cache(acpi_gbl_operand_cache); |
264 | (void)acpi_os_purge_cache(acpi_gbl_ps_node_cache); | 264 | (void)acpi_os_purge_cache(acpi_gbl_ps_node_cache); |
265 | (void)acpi_os_purge_cache(acpi_gbl_ps_node_ext_cache); | 265 | (void)acpi_os_purge_cache(acpi_gbl_ps_node_ext_cache); |
266 | |||
266 | return_ACPI_STATUS(AE_OK); | 267 | return_ACPI_STATUS(AE_OK); |
267 | } | 268 | } |
268 | 269 | ||
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c index 6d63cc39b9ae..d4d3826140d8 100644 --- a/drivers/acpi/acpica/utxferror.c +++ b/drivers/acpi/acpica/utxferror.c | |||
@@ -408,7 +408,7 @@ acpi_ut_namespace_error(const char *module_name, | |||
408 | 408 | ||
409 | ACPI_MOVE_32_TO_32(&bad_name, | 409 | ACPI_MOVE_32_TO_32(&bad_name, |
410 | ACPI_CAST_PTR(u32, internal_name)); | 410 | ACPI_CAST_PTR(u32, internal_name)); |
411 | acpi_os_printf("[0x%4.4X] (NON-ASCII)", bad_name); | 411 | acpi_os_printf("[0x%.8X] (NON-ASCII)", bad_name); |
412 | } else { | 412 | } else { |
413 | /* Convert path to external format */ | 413 | /* Convert path to external format */ |
414 | 414 | ||
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index d59175efc428..1f0d457ecbcf 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
@@ -257,7 +257,15 @@ static int __acpi_bus_get_power(struct acpi_device *device, int *state) | |||
257 | } | 257 | } |
258 | 258 | ||
259 | 259 | ||
260 | static int __acpi_bus_set_power(struct acpi_device *device, int state) | 260 | /** |
261 | * acpi_device_set_power - Set power state of an ACPI device. | ||
262 | * @device: Device to set the power state of. | ||
263 | * @state: New power state to set. | ||
264 | * | ||
265 | * Callers must ensure that the device is power manageable before using this | ||
266 | * function. | ||
267 | */ | ||
268 | int acpi_device_set_power(struct acpi_device *device, int state) | ||
261 | { | 269 | { |
262 | int result = 0; | 270 | int result = 0; |
263 | acpi_status status = AE_OK; | 271 | acpi_status status = AE_OK; |
@@ -298,6 +306,12 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state) | |||
298 | * a lower-powered state. | 306 | * a lower-powered state. |
299 | */ | 307 | */ |
300 | if (state < device->power.state) { | 308 | if (state < device->power.state) { |
309 | if (device->power.state >= ACPI_STATE_D3_HOT && | ||
310 | state != ACPI_STATE_D0) { | ||
311 | printk(KERN_WARNING PREFIX | ||
312 | "Cannot transition to non-D0 state from D3\n"); | ||
313 | return -ENODEV; | ||
314 | } | ||
301 | if (device->power.flags.power_resources) { | 315 | if (device->power.flags.power_resources) { |
302 | result = acpi_power_transition(device, state); | 316 | result = acpi_power_transition(device, state); |
303 | if (result) | 317 | if (result) |
@@ -341,6 +355,7 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state) | |||
341 | 355 | ||
342 | return result; | 356 | return result; |
343 | } | 357 | } |
358 | EXPORT_SYMBOL(acpi_device_set_power); | ||
344 | 359 | ||
345 | 360 | ||
346 | int acpi_bus_set_power(acpi_handle handle, int state) | 361 | int acpi_bus_set_power(acpi_handle handle, int state) |
@@ -359,7 +374,7 @@ int acpi_bus_set_power(acpi_handle handle, int state) | |||
359 | return -ENODEV; | 374 | return -ENODEV; |
360 | } | 375 | } |
361 | 376 | ||
362 | return __acpi_bus_set_power(device, state); | 377 | return acpi_device_set_power(device, state); |
363 | } | 378 | } |
364 | EXPORT_SYMBOL(acpi_bus_set_power); | 379 | EXPORT_SYMBOL(acpi_bus_set_power); |
365 | 380 | ||
@@ -402,7 +417,7 @@ int acpi_bus_update_power(acpi_handle handle, int *state_p) | |||
402 | if (result) | 417 | if (result) |
403 | return result; | 418 | return result; |
404 | 419 | ||
405 | result = __acpi_bus_set_power(device, state); | 420 | result = acpi_device_set_power(device, state); |
406 | if (!result && state_p) | 421 | if (!result && state_p) |
407 | *state_p = state; | 422 | *state_p = state; |
408 | 423 | ||
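The bus.c change above exports acpi_device_set_power() and routes acpi_bus_set_power() through it, adding a guard so that a device already in D3 may only be transitioned back to D0. A hypothetical caller, shown only to illustrate the direction of these interfaces (the handle variable and the wrapper function are assumptions, not code from the patch):

#include <linux/acpi.h>

/* Power-cycle a device through its ACPI handle: D3, then back to D0. */
static int example_power_cycle(acpi_handle my_dev_handle)
{
	int ret;

	ret = acpi_bus_set_power(my_dev_handle, ACPI_STATE_D3);
	if (ret)
		return ret;

	/* With the new check, a device sitting in D3 may only go to D0. */
	return acpi_bus_set_power(my_dev_handle, ACPI_STATE_D0);
}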
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c new file mode 100644 index 000000000000..f09dc987cf17 --- /dev/null +++ b/drivers/acpi/device_pm.c | |||
@@ -0,0 +1,668 @@ | |||
1 | /* | ||
2 | * drivers/acpi/device_pm.c - ACPI device power management routines. | ||
3 | * | ||
4 | * Copyright (C) 2012, Intel Corp. | ||
5 | * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> | ||
6 | * | ||
7 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as published | ||
11 | * by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
21 | * | ||
22 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
23 | */ | ||
24 | |||
25 | #include <linux/device.h> | ||
26 | #include <linux/export.h> | ||
27 | #include <linux/mutex.h> | ||
28 | #include <linux/pm_qos.h> | ||
29 | #include <linux/pm_runtime.h> | ||
30 | |||
31 | #include <acpi/acpi.h> | ||
32 | #include <acpi/acpi_bus.h> | ||
33 | |||
34 | static DEFINE_MUTEX(acpi_pm_notifier_lock); | ||
35 | |||
36 | /** | ||
37 | * acpi_add_pm_notifier - Register PM notifier for given ACPI device. | ||
38 | * @adev: ACPI device to add the notifier for. | ||
39 | * @context: Context information to pass to the notifier routine. | ||
40 | * | ||
41 | * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of | ||
42 | * PM wakeup events. For example, wakeup events may be generated for bridges | ||
43 | * if one of the devices below the bridge is signaling wakeup, even if the | ||
44 | * bridge itself doesn't have a wakeup GPE associated with it. | ||
45 | */ | ||
46 | acpi_status acpi_add_pm_notifier(struct acpi_device *adev, | ||
47 | acpi_notify_handler handler, void *context) | ||
48 | { | ||
49 | acpi_status status = AE_ALREADY_EXISTS; | ||
50 | |||
51 | mutex_lock(&acpi_pm_notifier_lock); | ||
52 | |||
53 | if (adev->wakeup.flags.notifier_present) | ||
54 | goto out; | ||
55 | |||
56 | status = acpi_install_notify_handler(adev->handle, | ||
57 | ACPI_SYSTEM_NOTIFY, | ||
58 | handler, context); | ||
59 | if (ACPI_FAILURE(status)) | ||
60 | goto out; | ||
61 | |||
62 | adev->wakeup.flags.notifier_present = true; | ||
63 | |||
64 | out: | ||
65 | mutex_unlock(&acpi_pm_notifier_lock); | ||
66 | return status; | ||
67 | } | ||
68 | |||
69 | /** | ||
70 | * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device. | ||
71 | * @adev: ACPI device to remove the notifier from. | ||
72 | */ | ||
73 | acpi_status acpi_remove_pm_notifier(struct acpi_device *adev, | ||
74 | acpi_notify_handler handler) | ||
75 | { | ||
76 | acpi_status status = AE_BAD_PARAMETER; | ||
77 | |||
78 | mutex_lock(&acpi_pm_notifier_lock); | ||
79 | |||
80 | if (!adev->wakeup.flags.notifier_present) | ||
81 | goto out; | ||
82 | |||
83 | status = acpi_remove_notify_handler(adev->handle, | ||
84 | ACPI_SYSTEM_NOTIFY, | ||
85 | handler); | ||
86 | if (ACPI_FAILURE(status)) | ||
87 | goto out; | ||
88 | |||
89 | adev->wakeup.flags.notifier_present = false; | ||
90 | |||
91 | out: | ||
92 | mutex_unlock(&acpi_pm_notifier_lock); | ||
93 | return status; | ||
94 | } | ||
95 | |||
96 | /** | ||
97 | * acpi_device_power_state - Get preferred power state of ACPI device. | ||
98 | * @dev: Device whose preferred target power state to return. | ||
99 | * @adev: ACPI device node corresponding to @dev. | ||
100 | * @target_state: System state to match the resultant device state. | ||
101 | * @d_max_in: Deepest low-power state to take into consideration. | ||
102 | * @d_min_p: Location to store the upper limit of the allowed states range. | ||
103 | * Return value: Preferred power state of the device on success, -ENODEV | ||
104 | * (if there's no 'struct acpi_device' for @dev) or -EINVAL on failure | ||
105 | * | ||
106 | * Find the lowest power (highest number) ACPI device power state that the | ||
107 | * device can be in while the system is in the state represented by | ||
108 | * @target_state. If @d_min_p is set, the highest power (lowest number) device | ||
109 | * power state that @dev can be in for the given system sleep state is stored | ||
110 | * at the location pointed to by it. | ||
111 | * | ||
112 | * Callers must ensure that @dev and @adev are valid pointers and that @adev | ||
113 | * actually corresponds to @dev before using this function. | ||
114 | */ | ||
115 | int acpi_device_power_state(struct device *dev, struct acpi_device *adev, | ||
116 | u32 target_state, int d_max_in, int *d_min_p) | ||
117 | { | ||
118 | char acpi_method[] = "_SxD"; | ||
119 | unsigned long long d_min, d_max; | ||
120 | bool wakeup = false; | ||
121 | |||
122 | if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3) | ||
123 | return -EINVAL; | ||
124 | |||
125 | if (d_max_in > ACPI_STATE_D3_HOT) { | ||
126 | enum pm_qos_flags_status stat; | ||
127 | |||
128 | stat = dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF); | ||
129 | if (stat == PM_QOS_FLAGS_ALL) | ||
130 | d_max_in = ACPI_STATE_D3_HOT; | ||
131 | } | ||
132 | |||
133 | acpi_method[2] = '0' + target_state; | ||
134 | /* | ||
135 | * If the sleep state is S0, the lowest limit from ACPI is D3, | ||
136 | * but if the device has _S0W, we will use the value from _S0W | ||
137 | * as the lowest limit from ACPI. Finally, we will constrain | ||
138 | * the lowest limit with the specified one. | ||
139 | */ | ||
140 | d_min = ACPI_STATE_D0; | ||
141 | d_max = ACPI_STATE_D3; | ||
142 | |||
143 | /* | ||
144 | * If present, _SxD methods return the minimum D-state (highest power | ||
145 | * state) we can use for the corresponding S-states. Otherwise, the | ||
146 | * minimum D-state is D0 (ACPI 3.x). | ||
147 | * | ||
148 | * NOTE: We rely on acpi_evaluate_integer() not clobbering the integer | ||
149 | * provided -- that's our fault recovery, we ignore retval. | ||
150 | */ | ||
151 | if (target_state > ACPI_STATE_S0) { | ||
152 | acpi_evaluate_integer(adev->handle, acpi_method, NULL, &d_min); | ||
153 | wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid | ||
154 | && adev->wakeup.sleep_state >= target_state; | ||
155 | } else if (dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) != | ||
156 | PM_QOS_FLAGS_NONE) { | ||
157 | wakeup = adev->wakeup.flags.valid; | ||
158 | } | ||
159 | |||
160 | /* | ||
161 | * If _PRW says we can wake up the system from the target sleep state, | ||
162 | * the D-state returned by _SxD is sufficient for that (we assume a | ||
163 | * wakeup-aware driver if wake is set). Still, if _SxW exists | ||
164 | * (ACPI 3.x), it should return the maximum (lowest power) D-state that | ||
165 | * can wake the system. _S0W may be valid, too. | ||
166 | */ | ||
167 | if (wakeup) { | ||
168 | acpi_status status; | ||
169 | |||
170 | acpi_method[3] = 'W'; | ||
171 | status = acpi_evaluate_integer(adev->handle, acpi_method, NULL, | ||
172 | &d_max); | ||
173 | if (ACPI_FAILURE(status)) { | ||
174 | if (target_state != ACPI_STATE_S0 || | ||
175 | status != AE_NOT_FOUND) | ||
176 | d_max = d_min; | ||
177 | } else if (d_max < d_min) { | ||
178 | /* Warn the user of the broken DSDT */ | ||
179 | printk(KERN_WARNING "ACPI: Wrong value from %s\n", | ||
180 | acpi_method); | ||
181 | /* Sanitize it */ | ||
182 | d_min = d_max; | ||
183 | } | ||
184 | } | ||
185 | |||
186 | if (d_max_in < d_min) | ||
187 | return -EINVAL; | ||
188 | if (d_min_p) | ||
189 | *d_min_p = d_min; | ||
190 | /* constrain d_max with specified lowest limit (max number) */ | ||
191 | if (d_max > d_max_in) { | ||
192 | for (d_max = d_max_in; d_max > d_min; d_max--) { | ||
193 | if (adev->power.states[d_max].flags.valid) | ||
194 | break; | ||
195 | } | ||
196 | } | ||
197 | return d_max; | ||
198 | } | ||
199 | EXPORT_SYMBOL_GPL(acpi_device_power_state); | ||
200 | |||
201 | /** | ||
202 | * acpi_pm_device_sleep_state - Get preferred power state of ACPI device. | ||
203 | * @dev: Device whose preferred target power state to return. | ||
204 | * @d_min_p: Location to store the upper limit of the allowed states range. | ||
205 | * @d_max_in: Deepest low-power state to take into consideration. | ||
206 | * Return value: Preferred power state of the device on success, -ENODEV | ||
207 | * (if there's no 'struct acpi_device' for @dev) or -EINVAL on failure | ||
208 | * | ||
209 | * The caller must ensure that @dev is valid before using this function. | ||
210 | */ | ||
211 | int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in) | ||
212 | { | ||
213 | acpi_handle handle = DEVICE_ACPI_HANDLE(dev); | ||
214 | struct acpi_device *adev; | ||
215 | |||
216 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { | ||
217 | dev_dbg(dev, "ACPI handle without context in %s!\n", __func__); | ||
218 | return -ENODEV; | ||
219 | } | ||
220 | |||
221 | return acpi_device_power_state(dev, adev, acpi_target_system_state(), | ||
222 | d_max_in, d_min_p); | ||
223 | } | ||
224 | EXPORT_SYMBOL(acpi_pm_device_sleep_state); | ||
225 | |||
226 | #ifdef CONFIG_PM_RUNTIME | ||
227 | /** | ||
228 | * acpi_wakeup_device - Wakeup notification handler for ACPI devices. | ||
229 | * @handle: ACPI handle of the device the notification is for. | ||
230 | * @event: Type of the signaled event. | ||
231 | * @context: Device corresponding to @handle. | ||
232 | */ | ||
233 | static void acpi_wakeup_device(acpi_handle handle, u32 event, void *context) | ||
234 | { | ||
235 | struct device *dev = context; | ||
236 | |||
237 | if (event == ACPI_NOTIFY_DEVICE_WAKE && dev) { | ||
238 | pm_wakeup_event(dev, 0); | ||
239 | pm_runtime_resume(dev); | ||
240 | } | ||
241 | } | ||
242 | |||
243 | /** | ||
244 | * __acpi_device_run_wake - Enable/disable runtime remote wakeup for device. | ||
245 | * @adev: ACPI device to enable/disable the remote wakeup for. | ||
246 | * @enable: Whether to enable or disable the wakeup functionality. | ||
247 | * | ||
248 | * Enable/disable the GPE associated with @adev so that it can generate | ||
249 | * wakeup signals for the device in response to external (remote) events and | ||
250 | * enable/disable device wakeup power. | ||
251 | * | ||
252 | * Callers must ensure that @adev is a valid ACPI device node before executing | ||
253 | * this function. | ||
254 | */ | ||
255 | int __acpi_device_run_wake(struct acpi_device *adev, bool enable) | ||
256 | { | ||
257 | struct acpi_device_wakeup *wakeup = &adev->wakeup; | ||
258 | |||
259 | if (enable) { | ||
260 | acpi_status res; | ||
261 | int error; | ||
262 | |||
263 | error = acpi_enable_wakeup_device_power(adev, ACPI_STATE_S0); | ||
264 | if (error) | ||
265 | return error; | ||
266 | |||
267 | res = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number); | ||
268 | if (ACPI_FAILURE(res)) { | ||
269 | acpi_disable_wakeup_device_power(adev); | ||
270 | return -EIO; | ||
271 | } | ||
272 | } else { | ||
273 | acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number); | ||
274 | acpi_disable_wakeup_device_power(adev); | ||
275 | } | ||
276 | return 0; | ||
277 | } | ||
278 | |||
279 | /** | ||
280 | * acpi_pm_device_run_wake - Enable/disable remote wakeup for given device. | ||
281 | * @dev: Device to enable/disable the platform to wake up. | ||
282 | * @enable: Whether to enable or disable the wakeup functionality. | ||
283 | */ | ||
284 | int acpi_pm_device_run_wake(struct device *phys_dev, bool enable) | ||
285 | { | ||
286 | struct acpi_device *adev; | ||
287 | acpi_handle handle; | ||
288 | |||
289 | if (!device_run_wake(phys_dev)) | ||
290 | return -EINVAL; | ||
291 | |||
292 | handle = DEVICE_ACPI_HANDLE(phys_dev); | ||
293 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { | ||
294 | dev_dbg(phys_dev, "ACPI handle without context in %s!\n", | ||
295 | __func__); | ||
296 | return -ENODEV; | ||
297 | } | ||
298 | |||
299 | return __acpi_device_run_wake(adev, enable); | ||
300 | } | ||
301 | EXPORT_SYMBOL(acpi_pm_device_run_wake); | ||
302 | #else | ||
303 | static inline void acpi_wakeup_device(acpi_handle handle, u32 event, | ||
304 | void *context) {} | ||
305 | #endif /* CONFIG_PM_RUNTIME */ | ||
306 | |||
307 | #ifdef CONFIG_PM_SLEEP | ||
308 | /** | ||
309 | * __acpi_device_sleep_wake - Enable or disable device to wake up the system. | ||
310 | * @dev: Device to enable/disable to wake up the system. | ||
311 | * @target_state: System state the device is supposed to wake up from. | ||
312 | * @enable: Whether to enable or disable @dev to wake up the system. | ||
313 | */ | ||
314 | int __acpi_device_sleep_wake(struct acpi_device *adev, u32 target_state, | ||
315 | bool enable) | ||
316 | { | ||
317 | return enable ? | ||
318 | acpi_enable_wakeup_device_power(adev, target_state) : | ||
319 | acpi_disable_wakeup_device_power(adev); | ||
320 | } | ||
321 | |||
322 | /** | ||
323 | * acpi_pm_device_sleep_wake - Enable or disable device to wake up the system. | ||
324 | * @dev: Device to enable/disable to wake up the system from sleep states. | ||
325 | * @enable: Whether to enable or disable @dev to wake up the system. | ||
326 | */ | ||
327 | int acpi_pm_device_sleep_wake(struct device *dev, bool enable) | ||
328 | { | ||
329 | acpi_handle handle; | ||
330 | struct acpi_device *adev; | ||
331 | int error; | ||
332 | |||
333 | if (!device_can_wakeup(dev)) | ||
334 | return -EINVAL; | ||
335 | |||
336 | handle = DEVICE_ACPI_HANDLE(dev); | ||
337 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { | ||
338 | dev_dbg(dev, "ACPI handle without context in %s!\n", __func__); | ||
339 | return -ENODEV; | ||
340 | } | ||
341 | |||
342 | error = __acpi_device_sleep_wake(adev, acpi_target_system_state(), | ||
343 | enable); | ||
344 | if (!error) | ||
345 | dev_info(dev, "System wakeup %s by ACPI\n", | ||
346 | enable ? "enabled" : "disabled"); | ||
347 | |||
348 | return error; | ||
349 | } | ||
350 | #endif /* CONFIG_PM_SLEEP */ | ||
351 | |||
352 | /** | ||
353 | * acpi_dev_pm_get_node - Get ACPI device node for the given physical device. | ||
354 | * @dev: Device to get the ACPI node for. | ||
355 | */ | ||
356 | static struct acpi_device *acpi_dev_pm_get_node(struct device *dev) | ||
357 | { | ||
358 | acpi_handle handle = DEVICE_ACPI_HANDLE(dev); | ||
359 | struct acpi_device *adev; | ||
360 | |||
361 | return handle && ACPI_SUCCESS(acpi_bus_get_device(handle, &adev)) ? | ||
362 | adev : NULL; | ||
363 | } | ||
364 | |||
365 | /** | ||
366 | * acpi_dev_pm_low_power - Put ACPI device into a low-power state. | ||
367 | * @dev: Device to put into a low-power state. | ||
368 | * @adev: ACPI device node corresponding to @dev. | ||
369 | * @system_state: System state to choose the device state for. | ||
370 | */ | ||
371 | static int acpi_dev_pm_low_power(struct device *dev, struct acpi_device *adev, | ||
372 | u32 system_state) | ||
373 | { | ||
374 | int power_state; | ||
375 | |||
376 | if (!acpi_device_power_manageable(adev)) | ||
377 | return 0; | ||
378 | |||
379 | power_state = acpi_device_power_state(dev, adev, system_state, | ||
380 | ACPI_STATE_D3, NULL); | ||
381 | if (power_state < ACPI_STATE_D0 || power_state > ACPI_STATE_D3) | ||
382 | return -EIO; | ||
383 | |||
384 | return acpi_device_set_power(adev, power_state); | ||
385 | } | ||
386 | |||
387 | /** | ||
388 | * acpi_dev_pm_full_power - Put ACPI device into the full-power state. | ||
389 | * @adev: ACPI device node to put into the full-power state. | ||
390 | */ | ||
391 | static int acpi_dev_pm_full_power(struct acpi_device *adev) | ||
392 | { | ||
393 | return acpi_device_power_manageable(adev) ? | ||
394 | acpi_device_set_power(adev, ACPI_STATE_D0) : 0; | ||
395 | } | ||
396 | |||
397 | #ifdef CONFIG_PM_RUNTIME | ||
398 | /** | ||
399 | * acpi_dev_runtime_suspend - Put device into a low-power state using ACPI. | ||
400 | * @dev: Device to put into a low-power state. | ||
401 | * | ||
402 | * Put the given device into a runtime low-power state using the standard ACPI | ||
403 | * mechanism. Set up remote wakeup if desired, choose the state to put the | ||
404 | * device into (this checks if remote wakeup is expected to work too), and set | ||
405 | * the power state of the device. | ||
406 | */ | ||
407 | int acpi_dev_runtime_suspend(struct device *dev) | ||
408 | { | ||
409 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); | ||
410 | bool remote_wakeup; | ||
411 | int error; | ||
412 | |||
413 | if (!adev) | ||
414 | return 0; | ||
415 | |||
416 | remote_wakeup = dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) > | ||
417 | PM_QOS_FLAGS_NONE; | ||
418 | error = __acpi_device_run_wake(adev, remote_wakeup); | ||
419 | if (remote_wakeup && error) | ||
420 | return -EAGAIN; | ||
421 | |||
422 | error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0); | ||
423 | if (error) | ||
424 | __acpi_device_run_wake(adev, false); | ||
425 | |||
426 | return error; | ||
427 | } | ||
428 | EXPORT_SYMBOL_GPL(acpi_dev_runtime_suspend); | ||
429 | |||
430 | /** | ||
431 | * acpi_dev_runtime_resume - Put device into the full-power state using ACPI. | ||
432 | * @dev: Device to put into the full-power state. | ||
433 | * | ||
434 | * Put the given device into the full-power state using the standard ACPI | ||
435 | * mechanism at run time. Set the power state of the device to ACPI D0 and | ||
436 | * disable remote wakeup. | ||
437 | */ | ||
438 | int acpi_dev_runtime_resume(struct device *dev) | ||
439 | { | ||
440 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); | ||
441 | int error; | ||
442 | |||
443 | if (!adev) | ||
444 | return 0; | ||
445 | |||
446 | error = acpi_dev_pm_full_power(adev); | ||
447 | __acpi_device_run_wake(adev, false); | ||
448 | return error; | ||
449 | } | ||
450 | EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume); | ||
451 | |||
452 | /** | ||
453 | * acpi_subsys_runtime_suspend - Suspend device using ACPI. | ||
454 | * @dev: Device to suspend. | ||
455 | * | ||
456 | * Carry out the generic runtime suspend procedure for @dev and use ACPI to put | ||
457 | * it into a runtime low-power state. | ||
458 | */ | ||
459 | int acpi_subsys_runtime_suspend(struct device *dev) | ||
460 | { | ||
461 | int ret = pm_generic_runtime_suspend(dev); | ||
462 | return ret ? ret : acpi_dev_runtime_suspend(dev); | ||
463 | } | ||
464 | EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend); | ||
465 | |||
466 | /** | ||
467 | * acpi_subsys_runtime_resume - Resume device using ACPI. | ||
468 | * @dev: Device to Resume. | ||
469 | * | ||
470 | * Use ACPI to put the given device into the full-power state and carry out the | ||
471 | * generic runtime resume procedure for it. | ||
472 | */ | ||
473 | int acpi_subsys_runtime_resume(struct device *dev) | ||
474 | { | ||
475 | int ret = acpi_dev_runtime_resume(dev); | ||
476 | return ret ? ret : pm_generic_runtime_resume(dev); | ||
477 | } | ||
478 | EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume); | ||
479 | #endif /* CONFIG_PM_RUNTIME */ | ||
480 | |||
481 | #ifdef CONFIG_PM_SLEEP | ||
482 | /** | ||
483 | * acpi_dev_suspend_late - Put device into a low-power state using ACPI. | ||
484 | * @dev: Device to put into a low-power state. | ||
485 | * | ||
486 | * Put the given device into a low-power state during system transition to a | ||
487 | * sleep state using the standard ACPI mechanism. Set up system wakeup if | ||
488 | * desired, choose the state to put the device into (this checks if system | ||
489 | * wakeup is expected to work too), and set the power state of the device. | ||
490 | */ | ||
491 | int acpi_dev_suspend_late(struct device *dev) | ||
492 | { | ||
493 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); | ||
494 | u32 target_state; | ||
495 | bool wakeup; | ||
496 | int error; | ||
497 | |||
498 | if (!adev) | ||
499 | return 0; | ||
500 | |||
501 | target_state = acpi_target_system_state(); | ||
502 | wakeup = device_may_wakeup(dev); | ||
503 | error = __acpi_device_sleep_wake(adev, target_state, wakeup); | ||
504 | if (wakeup && error) | ||
505 | return error; | ||
506 | |||
507 | error = acpi_dev_pm_low_power(dev, adev, target_state); | ||
508 | if (error) | ||
509 | __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false); | ||
510 | |||
511 | return error; | ||
512 | } | ||
513 | EXPORT_SYMBOL_GPL(acpi_dev_suspend_late); | ||
514 | |||
515 | /** | ||
516 | * acpi_dev_resume_early - Put device into the full-power state using ACPI. | ||
517 | * @dev: Device to put into the full-power state. | ||
518 | * | ||
519 | * Put the given device into the full-power state using the standard ACPI | ||
520 | * mechanism during system transition to the working state. Set the power | ||
521 | * state of the device to ACPI D0 and disable remote wakeup. | ||
522 | */ | ||
523 | int acpi_dev_resume_early(struct device *dev) | ||
524 | { | ||
525 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); | ||
526 | int error; | ||
527 | |||
528 | if (!adev) | ||
529 | return 0; | ||
530 | |||
531 | error = acpi_dev_pm_full_power(adev); | ||
532 | __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false); | ||
533 | return error; | ||
534 | } | ||
535 | EXPORT_SYMBOL_GPL(acpi_dev_resume_early); | ||
536 | |||
537 | /** | ||
538 | * acpi_subsys_prepare - Prepare device for system transition to a sleep state. | ||
539 | * @dev: Device to prepare. | ||
540 | */ | ||
541 | int acpi_subsys_prepare(struct device *dev) | ||
542 | { | ||
543 | /* | ||
544 | * Follow PCI and resume devices suspended at run time before running | ||
545 | * their system suspend callbacks. | ||
546 | */ | ||
547 | pm_runtime_resume(dev); | ||
548 | return pm_generic_prepare(dev); | ||
549 | } | ||
550 | EXPORT_SYMBOL_GPL(acpi_subsys_prepare); | ||
551 | |||
552 | /** | ||
553 | * acpi_subsys_suspend_late - Suspend device using ACPI. | ||
554 | * @dev: Device to suspend. | ||
555 | * | ||
556 | * Carry out the generic late suspend procedure for @dev and use ACPI to put | ||
557 | * it into a low-power state during system transition into a sleep state. | ||
558 | */ | ||
559 | int acpi_subsys_suspend_late(struct device *dev) | ||
560 | { | ||
561 | int ret = pm_generic_suspend_late(dev); | ||
562 | return ret ? ret : acpi_dev_suspend_late(dev); | ||
563 | } | ||
564 | EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late); | ||
565 | |||
566 | /** | ||
567 | * acpi_subsys_resume_early - Resume device using ACPI. | ||
568 | * @dev: Device to resume. | ||
569 | * | ||
570 | * Use ACPI to put the given device into the full-power state and carry out the | ||
571 | * generic early resume procedure for it during system transition into the | ||
572 | * working state. | ||
573 | */ | ||
574 | int acpi_subsys_resume_early(struct device *dev) | ||
575 | { | ||
576 | int ret = acpi_dev_resume_early(dev); | ||
577 | return ret ? ret : pm_generic_resume_early(dev); | ||
578 | } | ||
579 | EXPORT_SYMBOL_GPL(acpi_subsys_resume_early); | ||
580 | #endif /* CONFIG_PM_SLEEP */ | ||
581 | |||
582 | static struct dev_pm_domain acpi_general_pm_domain = { | ||
583 | .ops = { | ||
584 | #ifdef CONFIG_PM_RUNTIME | ||
585 | .runtime_suspend = acpi_subsys_runtime_suspend, | ||
586 | .runtime_resume = acpi_subsys_runtime_resume, | ||
587 | .runtime_idle = pm_generic_runtime_idle, | ||
588 | #endif | ||
589 | #ifdef CONFIG_PM_SLEEP | ||
590 | .prepare = acpi_subsys_prepare, | ||
591 | .suspend_late = acpi_subsys_suspend_late, | ||
592 | .resume_early = acpi_subsys_resume_early, | ||
593 | .poweroff_late = acpi_subsys_suspend_late, | ||
594 | .restore_early = acpi_subsys_resume_early, | ||
595 | #endif | ||
596 | }, | ||
597 | }; | ||
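Editorial aside: each acpi_subsys_* callback in the domain above wraps a pm_generic_* helper, so a driver whose device joins this domain only needs ordinary dev_pm_ops; the domain invokes them through the generic helpers and adds the ACPI D-state handling around them. A minimal, hedged sketch with made-up "bar_" names:

/* Plain driver callbacks; the ACPI power state handling is supplied by
 * acpi_general_pm_domain once acpi_dev_pm_attach() has been called. */
static int bar_suspend_late(struct device *dev)
{
	/* Device-specific context save only; the domain lowers the
	 * D-state afterwards in acpi_subsys_suspend_late(). */
	return 0;
}

static int bar_resume_early(struct device *dev)
{
	/* The device is already back in D0 here, courtesy of
	 * acpi_subsys_resume_early(). */
	return 0;
}

static const struct dev_pm_ops bar_pm_ops = {
	.suspend_late = bar_suspend_late,
	.resume_early = bar_resume_early,
};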
598 | |||
599 | /** | ||
600 | * acpi_dev_pm_attach - Prepare device for ACPI power management. | ||
601 | * @dev: Device to prepare. | ||
602 | * @power_on: Whether or not to power on the device. | ||
603 | * | ||
604 | * If @dev has a valid ACPI handle that has a valid struct acpi_device object | ||
605 | * attached to it, install a wakeup notification handler for the device and | ||
606 | * add it to the general ACPI PM domain. If @power_on is set, the device will | ||
607 | * be put into the ACPI D0 state before the function returns. | ||
608 | * | ||
609 | * This assumes that the @dev's bus type uses generic power management callbacks | ||
610 | * (or doesn't use any power management callbacks at all). | ||
611 | * | ||
612 | * Callers must ensure proper synchronization of this function with power | ||
613 | * management callbacks. | ||
614 | */ | ||
615 | int acpi_dev_pm_attach(struct device *dev, bool power_on) | ||
616 | { | ||
617 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); | ||
618 | |||
619 | if (!adev) | ||
620 | return -ENODEV; | ||
621 | |||
622 | if (dev->pm_domain) | ||
623 | return -EEXIST; | ||
624 | |||
625 | acpi_add_pm_notifier(adev, acpi_wakeup_device, dev); | ||
626 | dev->pm_domain = &acpi_general_pm_domain; | ||
627 | if (power_on) { | ||
628 | acpi_dev_pm_full_power(adev); | ||
629 | __acpi_device_run_wake(adev, false); | ||
630 | } | ||
631 | return 0; | ||
632 | } | ||
633 | EXPORT_SYMBOL_GPL(acpi_dev_pm_attach); | ||
634 | |||
635 | /** | ||
636 | * acpi_dev_pm_detach - Remove ACPI power management from the device. | ||
637 | * @dev: Device to take care of. | ||
638 | * @power_off: Whether or not to try to remove power from the device. | ||
639 | * | ||
640 | * Remove the device from the general ACPI PM domain and remove its wakeup | ||
641 | * notifier. If @power_off is set, additionally remove power from the device if | ||
642 | * possible. | ||
643 | * | ||
644 | * Callers must ensure proper synchronization of this function with power | ||
645 | * management callbacks. | ||
646 | */ | ||
647 | void acpi_dev_pm_detach(struct device *dev, bool power_off) | ||
648 | { | ||
649 | struct acpi_device *adev = acpi_dev_pm_get_node(dev); | ||
650 | |||
651 | if (adev && dev->pm_domain == &acpi_general_pm_domain) { | ||
652 | dev->pm_domain = NULL; | ||
653 | acpi_remove_pm_notifier(adev, acpi_wakeup_device); | ||
654 | if (power_off) { | ||
655 | /* | ||
656 | * If the device's PM QoS resume latency limit or flags | ||
657 | * have been exposed to user space, they have to be | ||
658 | * hidden at this point, so that they don't affect the | ||
659 | * choice of the low-power state to put the device into. | ||
660 | */ | ||
661 | dev_pm_qos_hide_latency_limit(dev); | ||
662 | dev_pm_qos_hide_flags(dev); | ||
663 | __acpi_device_run_wake(adev, false); | ||
664 | acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0); | ||
665 | } | ||
666 | } | ||
667 | } | ||
668 | EXPORT_SYMBOL_GPL(acpi_dev_pm_detach); | ||
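Usage sketch (editorial, not from the patch): a platform driver for an ACPI-enumerated device could opt into the general ACPI PM domain from probe and drop out of it on removal. The driver name and all "foo_" identifiers are hypothetical.

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* Install the wakeup notifier, join acpi_general_pm_domain and
	 * make sure the device is in D0 before it is touched.  A driver
	 * that also supports non-ACPI enumeration would treat -ENODEV
	 * as non-fatal. */
	ret = acpi_dev_pm_attach(&pdev->dev, true);
	if (ret)
		return ret;

	/* ... device-specific initialization would go here ... */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	/* Leave the PM domain and (optionally) remove power from the device. */
	acpi_dev_pm_detach(&pdev->dev, true);
	return 0;
}

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.owner = THIS_MODULE,
	},
	.probe = foo_probe,
	.remove = foo_remove,
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");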
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 08373086cd7e..01551840d236 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c | |||
@@ -130,46 +130,59 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle) | |||
130 | { | 130 | { |
131 | struct acpi_device *acpi_dev; | 131 | struct acpi_device *acpi_dev; |
132 | acpi_status status; | 132 | acpi_status status; |
133 | struct acpi_device_physical_node *physical_node; | 133 | struct acpi_device_physical_node *physical_node, *pn; |
134 | char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2]; | 134 | char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2]; |
135 | int retval = -EINVAL; | 135 | int retval = -EINVAL; |
136 | 136 | ||
137 | if (dev->archdata.acpi_handle) { | 137 | if (ACPI_HANDLE(dev)) { |
138 | dev_warn(dev, "Drivers changed 'acpi_handle'\n"); | 138 | if (handle) { |
139 | return -EINVAL; | 139 | dev_warn(dev, "ACPI handle is already set\n"); |
140 | return -EINVAL; | ||
141 | } else { | ||
142 | handle = ACPI_HANDLE(dev); | ||
143 | } | ||
140 | } | 144 | } |
145 | if (!handle) | ||
146 | return -EINVAL; | ||
141 | 147 | ||
142 | get_device(dev); | 148 | get_device(dev); |
143 | status = acpi_bus_get_device(handle, &acpi_dev); | 149 | status = acpi_bus_get_device(handle, &acpi_dev); |
144 | if (ACPI_FAILURE(status)) | 150 | if (ACPI_FAILURE(status)) |
145 | goto err; | 151 | goto err; |
146 | 152 | ||
147 | physical_node = kzalloc(sizeof(struct acpi_device_physical_node), | 153 | physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL); |
148 | GFP_KERNEL); | ||
149 | if (!physical_node) { | 154 | if (!physical_node) { |
150 | retval = -ENOMEM; | 155 | retval = -ENOMEM; |
151 | goto err; | 156 | goto err; |
152 | } | 157 | } |
153 | 158 | ||
154 | mutex_lock(&acpi_dev->physical_node_lock); | 159 | mutex_lock(&acpi_dev->physical_node_lock); |
160 | |||
161 | /* Sanity check. */ | ||
162 | list_for_each_entry(pn, &acpi_dev->physical_node_list, node) | ||
163 | if (pn->dev == dev) { | ||
164 | dev_warn(dev, "Already associated with ACPI node\n"); | ||
165 | goto err_free; | ||
166 | } | ||
167 | |||
155 | /* allocate physical node id according to physical_node_id_bitmap */ | 168 | /* allocate physical node id according to physical_node_id_bitmap */ |
156 | physical_node->node_id = | 169 | physical_node->node_id = |
157 | find_first_zero_bit(acpi_dev->physical_node_id_bitmap, | 170 | find_first_zero_bit(acpi_dev->physical_node_id_bitmap, |
158 | ACPI_MAX_PHYSICAL_NODE); | 171 | ACPI_MAX_PHYSICAL_NODE); |
159 | if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) { | 172 | if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) { |
160 | retval = -ENOSPC; | 173 | retval = -ENOSPC; |
161 | mutex_unlock(&acpi_dev->physical_node_lock); | 174 | goto err_free; |
162 | kfree(physical_node); | ||
163 | goto err; | ||
164 | } | 175 | } |
165 | 176 | ||
166 | set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap); | 177 | set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap); |
167 | physical_node->dev = dev; | 178 | physical_node->dev = dev; |
168 | list_add_tail(&physical_node->node, &acpi_dev->physical_node_list); | 179 | list_add_tail(&physical_node->node, &acpi_dev->physical_node_list); |
169 | acpi_dev->physical_node_count++; | 180 | acpi_dev->physical_node_count++; |
181 | |||
170 | mutex_unlock(&acpi_dev->physical_node_lock); | 182 | mutex_unlock(&acpi_dev->physical_node_lock); |
171 | 183 | ||
172 | dev->archdata.acpi_handle = handle; | 184 | if (!ACPI_HANDLE(dev)) |
185 | ACPI_HANDLE_SET(dev, acpi_dev->handle); | ||
173 | 186 | ||
174 | if (!physical_node->node_id) | 187 | if (!physical_node->node_id) |
175 | strcpy(physical_node_name, PHYSICAL_NODE_STRING); | 188 | strcpy(physical_node_name, PHYSICAL_NODE_STRING); |
@@ -187,8 +200,14 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle) | |||
187 | return 0; | 200 | return 0; |
188 | 201 | ||
189 | err: | 202 | err: |
203 | ACPI_HANDLE_SET(dev, NULL); | ||
190 | put_device(dev); | 204 | put_device(dev); |
191 | return retval; | 205 | return retval; |
206 | |||
207 | err_free: | ||
208 | mutex_unlock(&acpi_dev->physical_node_lock); | ||
209 | kfree(physical_node); | ||
210 | goto err; | ||
192 | } | 211 | } |
193 | 212 | ||
194 | static int acpi_unbind_one(struct device *dev) | 213 | static int acpi_unbind_one(struct device *dev) |
@@ -198,11 +217,10 @@ static int acpi_unbind_one(struct device *dev) | |||
198 | acpi_status status; | 217 | acpi_status status; |
199 | struct list_head *node, *next; | 218 | struct list_head *node, *next; |
200 | 219 | ||
201 | if (!dev->archdata.acpi_handle) | 220 | if (!ACPI_HANDLE(dev)) |
202 | return 0; | 221 | return 0; |
203 | 222 | ||
204 | status = acpi_bus_get_device(dev->archdata.acpi_handle, | 223 | status = acpi_bus_get_device(ACPI_HANDLE(dev), &acpi_dev); |
205 | &acpi_dev); | ||
206 | if (ACPI_FAILURE(status)) | 224 | if (ACPI_FAILURE(status)) |
207 | goto err; | 225 | goto err; |
208 | 226 | ||
@@ -228,7 +246,7 @@ static int acpi_unbind_one(struct device *dev) | |||
228 | 246 | ||
229 | sysfs_remove_link(&acpi_dev->dev.kobj, physical_node_name); | 247 | sysfs_remove_link(&acpi_dev->dev.kobj, physical_node_name); |
230 | sysfs_remove_link(&dev->kobj, "firmware_node"); | 248 | sysfs_remove_link(&dev->kobj, "firmware_node"); |
231 | dev->archdata.acpi_handle = NULL; | 249 | ACPI_HANDLE_SET(dev, NULL); |
232 | /* acpi_bind_one increase refcnt by one */ | 250 | /* acpi_bind_one increase refcnt by one */ |
233 | put_device(dev); | 251 | put_device(dev); |
234 | kfree(entry); | 252 | kfree(entry); |
@@ -248,6 +266,10 @@ static int acpi_platform_notify(struct device *dev) | |||
248 | acpi_handle handle; | 266 | acpi_handle handle; |
249 | int ret = -EINVAL; | 267 | int ret = -EINVAL; |
250 | 268 | ||
269 | ret = acpi_bind_one(dev, NULL); | ||
270 | if (!ret) | ||
271 | goto out; | ||
272 | |||
251 | if (!dev->bus || !dev->parent) { | 273 | if (!dev->bus || !dev->parent) { |
252 | /* bridge devices genernally haven't bus or parent */ | 274 | /* bridge devices genernally haven't bus or parent */ |
253 | ret = acpi_find_bridge_device(dev, &handle); | 275 | ret = acpi_find_bridge_device(dev, &handle); |
@@ -261,16 +283,16 @@ static int acpi_platform_notify(struct device *dev) | |||
261 | } | 283 | } |
262 | if ((ret = type->find_device(dev, &handle)) != 0) | 284 | if ((ret = type->find_device(dev, &handle)) != 0) |
263 | DBG("Can't get handler for %s\n", dev_name(dev)); | 285 | DBG("Can't get handler for %s\n", dev_name(dev)); |
264 | end: | 286 | end: |
265 | if (!ret) | 287 | if (!ret) |
266 | acpi_bind_one(dev, handle); | 288 | acpi_bind_one(dev, handle); |
267 | 289 | ||
290 | out: | ||
268 | #if ACPI_GLUE_DEBUG | 291 | #if ACPI_GLUE_DEBUG |
269 | if (!ret) { | 292 | if (!ret) { |
270 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 293 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
271 | 294 | ||
272 | acpi_get_name(dev->archdata.acpi_handle, | 295 | acpi_get_name(dev->acpi_handle, ACPI_FULL_PATHNAME, &buffer); |
273 | ACPI_FULL_PATHNAME, &buffer); | ||
274 | DBG("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer); | 296 | DBG("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer); |
275 | kfree(buffer.pointer); | 297 | kfree(buffer.pointer); |
276 | } else | 298 | } else |
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 509dcaa17555..3c407cdc1ec1 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h | |||
@@ -93,4 +93,11 @@ static inline int suspend_nvs_save(void) { return 0; } | |||
93 | static inline void suspend_nvs_restore(void) {} | 93 | static inline void suspend_nvs_restore(void) {} |
94 | #endif | 94 | #endif |
95 | 95 | ||
96 | /*-------------------------------------------------------------------------- | ||
97 | Platform bus support | ||
98 | -------------------------------------------------------------------------- */ | ||
99 | struct platform_device; | ||
100 | |||
101 | struct platform_device *acpi_create_platform_device(struct acpi_device *adev); | ||
102 | |||
96 | #endif /* _ACPI_INTERNAL_H_ */ | 103 | #endif /* _ACPI_INTERNAL_H_ */ |
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index f2c3d74af23e..23a032490130 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c | |||
@@ -495,11 +495,6 @@ int acpi_pci_irq_enable(struct pci_dev *dev) | |||
495 | return 0; | 495 | return 0; |
496 | } | 496 | } |
497 | 497 | ||
498 | /* FIXME: implement x86/x86_64 version */ | ||
499 | void __attribute__ ((weak)) acpi_unregister_gsi(u32 i) | ||
500 | { | ||
501 | } | ||
502 | |||
503 | void acpi_pci_irq_disable(struct pci_dev *dev) | 498 | void acpi_pci_irq_disable(struct pci_dev *dev) |
504 | { | 499 | { |
505 | struct acpi_prt_entry *entry; | 500 | struct acpi_prt_entry *entry; |
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index e8086c725305..f1a5da44591d 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
@@ -735,31 +735,18 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) | |||
735 | static int acpi_idle_enter_c1(struct cpuidle_device *dev, | 735 | static int acpi_idle_enter_c1(struct cpuidle_device *dev, |
736 | struct cpuidle_driver *drv, int index) | 736 | struct cpuidle_driver *drv, int index) |
737 | { | 737 | { |
738 | ktime_t kt1, kt2; | ||
739 | s64 idle_time; | ||
740 | struct acpi_processor *pr; | 738 | struct acpi_processor *pr; |
741 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | 739 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
742 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); | 740 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); |
743 | 741 | ||
744 | pr = __this_cpu_read(processors); | 742 | pr = __this_cpu_read(processors); |
745 | dev->last_residency = 0; | ||
746 | 743 | ||
747 | if (unlikely(!pr)) | 744 | if (unlikely(!pr)) |
748 | return -EINVAL; | 745 | return -EINVAL; |
749 | 746 | ||
750 | local_irq_disable(); | ||
751 | |||
752 | |||
753 | lapic_timer_state_broadcast(pr, cx, 1); | 747 | lapic_timer_state_broadcast(pr, cx, 1); |
754 | kt1 = ktime_get_real(); | ||
755 | acpi_idle_do_entry(cx); | 748 | acpi_idle_do_entry(cx); |
756 | kt2 = ktime_get_real(); | ||
757 | idle_time = ktime_to_us(ktime_sub(kt2, kt1)); | ||
758 | |||
759 | /* Update device last_residency*/ | ||
760 | dev->last_residency = (int)idle_time; | ||
761 | 749 | ||
762 | local_irq_enable(); | ||
763 | lapic_timer_state_broadcast(pr, cx, 0); | 750 | lapic_timer_state_broadcast(pr, cx, 0); |
764 | 751 | ||
765 | return index; | 752 | return index; |
@@ -806,19 +793,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
806 | struct acpi_processor *pr; | 793 | struct acpi_processor *pr; |
807 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | 794 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
808 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); | 795 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); |
809 | ktime_t kt1, kt2; | ||
810 | s64 idle_time_ns; | ||
811 | s64 idle_time; | ||
812 | 796 | ||
813 | pr = __this_cpu_read(processors); | 797 | pr = __this_cpu_read(processors); |
814 | dev->last_residency = 0; | ||
815 | 798 | ||
816 | if (unlikely(!pr)) | 799 | if (unlikely(!pr)) |
817 | return -EINVAL; | 800 | return -EINVAL; |
818 | 801 | ||
819 | local_irq_disable(); | ||
820 | |||
821 | |||
822 | if (cx->entry_method != ACPI_CSTATE_FFH) { | 802 | if (cx->entry_method != ACPI_CSTATE_FFH) { |
823 | current_thread_info()->status &= ~TS_POLLING; | 803 | current_thread_info()->status &= ~TS_POLLING; |
824 | /* | 804 | /* |
@@ -829,7 +809,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
829 | 809 | ||
830 | if (unlikely(need_resched())) { | 810 | if (unlikely(need_resched())) { |
831 | current_thread_info()->status |= TS_POLLING; | 811 | current_thread_info()->status |= TS_POLLING; |
832 | local_irq_enable(); | ||
833 | return -EINVAL; | 812 | return -EINVAL; |
834 | } | 813 | } |
835 | } | 814 | } |
@@ -843,22 +822,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
843 | if (cx->type == ACPI_STATE_C3) | 822 | if (cx->type == ACPI_STATE_C3) |
844 | ACPI_FLUSH_CPU_CACHE(); | 823 | ACPI_FLUSH_CPU_CACHE(); |
845 | 824 | ||
846 | kt1 = ktime_get_real(); | ||
847 | /* Tell the scheduler that we are going deep-idle: */ | 825 | /* Tell the scheduler that we are going deep-idle: */ |
848 | sched_clock_idle_sleep_event(); | 826 | sched_clock_idle_sleep_event(); |
849 | acpi_idle_do_entry(cx); | 827 | acpi_idle_do_entry(cx); |
850 | kt2 = ktime_get_real(); | ||
851 | idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1)); | ||
852 | idle_time = idle_time_ns; | ||
853 | do_div(idle_time, NSEC_PER_USEC); | ||
854 | 828 | ||
855 | /* Update device last_residency*/ | 829 | sched_clock_idle_wakeup_event(0); |
856 | dev->last_residency = (int)idle_time; | ||
857 | 830 | ||
858 | /* Tell the scheduler how much we idled: */ | ||
859 | sched_clock_idle_wakeup_event(idle_time_ns); | ||
860 | |||
861 | local_irq_enable(); | ||
862 | if (cx->entry_method != ACPI_CSTATE_FFH) | 831 | if (cx->entry_method != ACPI_CSTATE_FFH) |
863 | current_thread_info()->status |= TS_POLLING; | 832 | current_thread_info()->status |= TS_POLLING; |
864 | 833 | ||
@@ -883,13 +852,8 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
883 | struct acpi_processor *pr; | 852 | struct acpi_processor *pr; |
884 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | 853 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
885 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); | 854 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); |
886 | ktime_t kt1, kt2; | ||
887 | s64 idle_time_ns; | ||
888 | s64 idle_time; | ||
889 | |||
890 | 855 | ||
891 | pr = __this_cpu_read(processors); | 856 | pr = __this_cpu_read(processors); |
892 | dev->last_residency = 0; | ||
893 | 857 | ||
894 | if (unlikely(!pr)) | 858 | if (unlikely(!pr)) |
895 | return -EINVAL; | 859 | return -EINVAL; |
@@ -899,16 +863,11 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
899 | return drv->states[drv->safe_state_index].enter(dev, | 863 | return drv->states[drv->safe_state_index].enter(dev, |
900 | drv, drv->safe_state_index); | 864 | drv, drv->safe_state_index); |
901 | } else { | 865 | } else { |
902 | local_irq_disable(); | ||
903 | acpi_safe_halt(); | 866 | acpi_safe_halt(); |
904 | local_irq_enable(); | ||
905 | return -EBUSY; | 867 | return -EBUSY; |
906 | } | 868 | } |
907 | } | 869 | } |
908 | 870 | ||
909 | local_irq_disable(); | ||
910 | |||
911 | |||
912 | if (cx->entry_method != ACPI_CSTATE_FFH) { | 871 | if (cx->entry_method != ACPI_CSTATE_FFH) { |
913 | current_thread_info()->status &= ~TS_POLLING; | 872 | current_thread_info()->status &= ~TS_POLLING; |
914 | /* | 873 | /* |
@@ -919,7 +878,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
919 | 878 | ||
920 | if (unlikely(need_resched())) { | 879 | if (unlikely(need_resched())) { |
921 | current_thread_info()->status |= TS_POLLING; | 880 | current_thread_info()->status |= TS_POLLING; |
922 | local_irq_enable(); | ||
923 | return -EINVAL; | 881 | return -EINVAL; |
924 | } | 882 | } |
925 | } | 883 | } |
@@ -934,7 +892,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
934 | */ | 892 | */ |
935 | lapic_timer_state_broadcast(pr, cx, 1); | 893 | lapic_timer_state_broadcast(pr, cx, 1); |
936 | 894 | ||
937 | kt1 = ktime_get_real(); | ||
938 | /* | 895 | /* |
939 | * disable bus master | 896 | * disable bus master |
940 | * bm_check implies we need ARB_DIS | 897 | * bm_check implies we need ARB_DIS |
@@ -965,18 +922,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
965 | c3_cpu_count--; | 922 | c3_cpu_count--; |
966 | raw_spin_unlock(&c3_lock); | 923 | raw_spin_unlock(&c3_lock); |
967 | } | 924 | } |
968 | kt2 = ktime_get_real(); | ||
969 | idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1)); | ||
970 | idle_time = idle_time_ns; | ||
971 | do_div(idle_time, NSEC_PER_USEC); | ||
972 | |||
973 | /* Update device last_residency*/ | ||
974 | dev->last_residency = (int)idle_time; | ||
975 | 925 | ||
976 | /* Tell the scheduler how much we idled: */ | 926 | sched_clock_idle_wakeup_event(0); |
977 | sched_clock_idle_wakeup_event(idle_time_ns); | ||
978 | 927 | ||
979 | local_irq_enable(); | ||
980 | if (cx->entry_method != ACPI_CSTATE_FFH) | 928 | if (cx->entry_method != ACPI_CSTATE_FFH) |
981 | current_thread_info()->status |= TS_POLLING; | 929 | current_thread_info()->status |= TS_POLLING; |
982 | 930 | ||
@@ -987,6 +935,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
987 | struct cpuidle_driver acpi_idle_driver = { | 935 | struct cpuidle_driver acpi_idle_driver = { |
988 | .name = "acpi_idle", | 936 | .name = "acpi_idle", |
989 | .owner = THIS_MODULE, | 937 | .owner = THIS_MODULE, |
938 | .en_core_tk_irqen = 1, | ||
990 | }; | 939 | }; |
991 | 940 | ||
992 | /** | 941 | /** |
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c new file mode 100644 index 000000000000..a3868f6c222a --- /dev/null +++ b/drivers/acpi/resource.c | |||
@@ -0,0 +1,526 @@ | |||
1 | /* | ||
2 | * drivers/acpi/resource.c - ACPI device resources interpretation. | ||
3 | * | ||
4 | * Copyright (C) 2012, Intel Corp. | ||
5 | * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> | ||
6 | * | ||
7 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as published | ||
11 | * by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
21 | * | ||
22 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
23 | */ | ||
24 | |||
25 | #include <linux/acpi.h> | ||
26 | #include <linux/device.h> | ||
27 | #include <linux/export.h> | ||
28 | #include <linux/ioport.h> | ||
29 | #include <linux/slab.h> | ||
30 | |||
31 | #ifdef CONFIG_X86 | ||
32 | #define valid_IRQ(i) (((i) != 0) && ((i) != 2)) | ||
33 | #else | ||
34 | #define valid_IRQ(i) (true) | ||
35 | #endif | ||
36 | |||
37 | static unsigned long acpi_dev_memresource_flags(u64 len, u8 write_protect, | ||
38 | bool window) | ||
39 | { | ||
40 | unsigned long flags = IORESOURCE_MEM; | ||
41 | |||
42 | if (len == 0) | ||
43 | flags |= IORESOURCE_DISABLED; | ||
44 | |||
45 | if (write_protect == ACPI_READ_WRITE_MEMORY) | ||
46 | flags |= IORESOURCE_MEM_WRITEABLE; | ||
47 | |||
48 | if (window) | ||
49 | flags |= IORESOURCE_WINDOW; | ||
50 | |||
51 | return flags; | ||
52 | } | ||
53 | |||
54 | static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len, | ||
55 | u8 write_protect) | ||
56 | { | ||
57 | res->start = start; | ||
58 | res->end = start + len - 1; | ||
59 | res->flags = acpi_dev_memresource_flags(len, write_protect, false); | ||
60 | } | ||
61 | |||
62 | /** | ||
63 | * acpi_dev_resource_memory - Extract ACPI memory resource information. | ||
64 | * @ares: Input ACPI resource object. | ||
65 | * @res: Output generic resource object. | ||
66 | * | ||
67 | * Check if the given ACPI resource object represents a memory resource and | ||
68 | * if that's the case, use the information in it to populate the generic | ||
69 | * resource object pointed to by @res. | ||
70 | */ | ||
71 | bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) | ||
72 | { | ||
73 | struct acpi_resource_memory24 *memory24; | ||
74 | struct acpi_resource_memory32 *memory32; | ||
75 | struct acpi_resource_fixed_memory32 *fixed_memory32; | ||
76 | |||
77 | switch (ares->type) { | ||
78 | case ACPI_RESOURCE_TYPE_MEMORY24: | ||
79 | memory24 = &ares->data.memory24; | ||
80 | acpi_dev_get_memresource(res, memory24->minimum, | ||
81 | memory24->address_length, | ||
82 | memory24->write_protect); | ||
83 | break; | ||
84 | case ACPI_RESOURCE_TYPE_MEMORY32: | ||
85 | memory32 = &ares->data.memory32; | ||
86 | acpi_dev_get_memresource(res, memory32->minimum, | ||
87 | memory32->address_length, | ||
88 | memory32->write_protect); | ||
89 | break; | ||
90 | case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: | ||
91 | fixed_memory32 = &ares->data.fixed_memory32; | ||
92 | acpi_dev_get_memresource(res, fixed_memory32->address, | ||
93 | fixed_memory32->address_length, | ||
94 | fixed_memory32->write_protect); | ||
95 | break; | ||
96 | default: | ||
97 | return false; | ||
98 | } | ||
99 | return true; | ||
100 | } | ||
101 | EXPORT_SYMBOL_GPL(acpi_dev_resource_memory); | ||
102 | |||
103 | static unsigned int acpi_dev_ioresource_flags(u64 start, u64 end, u8 io_decode, | ||
104 | bool window) | ||
105 | { | ||
106 | int flags = IORESOURCE_IO; | ||
107 | |||
108 | if (io_decode == ACPI_DECODE_16) | ||
109 | flags |= IORESOURCE_IO_16BIT_ADDR; | ||
110 | |||
111 | if (start > end || end >= 0x10003) | ||
112 | flags |= IORESOURCE_DISABLED; | ||
113 | |||
114 | if (window) | ||
115 | flags |= IORESOURCE_WINDOW; | ||
116 | |||
117 | return flags; | ||
118 | } | ||
119 | |||
120 | static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len, | ||
121 | u8 io_decode) | ||
122 | { | ||
123 | u64 end = start + len - 1; | ||
124 | |||
125 | res->start = start; | ||
126 | res->end = end; | ||
127 | res->flags = acpi_dev_ioresource_flags(start, end, io_decode, false); | ||
128 | } | ||
129 | |||
130 | /** | ||
131 | * acpi_dev_resource_io - Extract ACPI I/O resource information. | ||
132 | * @ares: Input ACPI resource object. | ||
133 | * @res: Output generic resource object. | ||
134 | * | ||
135 | * Check if the given ACPI resource object represents an I/O resource and | ||
136 | * if that's the case, use the information in it to populate the generic | ||
137 | * resource object pointed to by @res. | ||
138 | */ | ||
139 | bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res) | ||
140 | { | ||
141 | struct acpi_resource_io *io; | ||
142 | struct acpi_resource_fixed_io *fixed_io; | ||
143 | |||
144 | switch (ares->type) { | ||
145 | case ACPI_RESOURCE_TYPE_IO: | ||
146 | io = &ares->data.io; | ||
147 | acpi_dev_get_ioresource(res, io->minimum, | ||
148 | io->address_length, | ||
149 | io->io_decode); | ||
150 | break; | ||
151 | case ACPI_RESOURCE_TYPE_FIXED_IO: | ||
152 | fixed_io = &ares->data.fixed_io; | ||
153 | acpi_dev_get_ioresource(res, fixed_io->address, | ||
154 | fixed_io->address_length, | ||
155 | ACPI_DECODE_10); | ||
156 | break; | ||
157 | default: | ||
158 | return false; | ||
159 | } | ||
160 | return true; | ||
161 | } | ||
162 | EXPORT_SYMBOL_GPL(acpi_dev_resource_io); | ||
163 | |||
164 | /** | ||
165 | * acpi_dev_resource_address_space - Extract ACPI address space information. | ||
166 | * @ares: Input ACPI resource object. | ||
167 | * @res: Output generic resource object. | ||
168 | * | ||
169 | * Check if the given ACPI resource object represents an address space resource | ||
170 | * and if that's the case, use the information in it to populate the generic | ||
171 | * resource object pointed to by @res. | ||
172 | */ | ||
173 | bool acpi_dev_resource_address_space(struct acpi_resource *ares, | ||
174 | struct resource *res) | ||
175 | { | ||
176 | acpi_status status; | ||
177 | struct acpi_resource_address64 addr; | ||
178 | bool window; | ||
179 | u64 len; | ||
180 | u8 io_decode; | ||
181 | |||
182 | switch (ares->type) { | ||
183 | case ACPI_RESOURCE_TYPE_ADDRESS16: | ||
184 | case ACPI_RESOURCE_TYPE_ADDRESS32: | ||
185 | case ACPI_RESOURCE_TYPE_ADDRESS64: | ||
186 | break; | ||
187 | default: | ||
188 | return false; | ||
189 | } | ||
190 | |||
191 | status = acpi_resource_to_address64(ares, &addr); | ||
192 | if (ACPI_FAILURE(status)) | ||
193 | return true; | ||
194 | |||
195 | res->start = addr.minimum; | ||
196 | res->end = addr.maximum; | ||
197 | window = addr.producer_consumer == ACPI_PRODUCER; | ||
198 | |||
199 | switch(addr.resource_type) { | ||
200 | case ACPI_MEMORY_RANGE: | ||
201 | len = addr.maximum - addr.minimum + 1; | ||
202 | res->flags = acpi_dev_memresource_flags(len, | ||
203 | addr.info.mem.write_protect, | ||
204 | window); | ||
205 | break; | ||
206 | case ACPI_IO_RANGE: | ||
207 | io_decode = addr.granularity == 0xfff ? | ||
208 | ACPI_DECODE_10 : ACPI_DECODE_16; | ||
209 | res->flags = acpi_dev_ioresource_flags(addr.minimum, | ||
210 | addr.maximum, | ||
211 | io_decode, window); | ||
212 | break; | ||
213 | case ACPI_BUS_NUMBER_RANGE: | ||
214 | res->flags = IORESOURCE_BUS; | ||
215 | break; | ||
216 | default: | ||
217 | res->flags = 0; | ||
218 | } | ||
219 | |||
220 | return true; | ||
221 | } | ||
222 | EXPORT_SYMBOL_GPL(acpi_dev_resource_address_space); | ||
223 | |||
224 | /** | ||
225 | * acpi_dev_resource_ext_address_space - Extract ACPI address space information. | ||
226 | * @ares: Input ACPI resource object. | ||
227 | * @res: Output generic resource object. | ||
228 | * | ||
229 | * Check if the given ACPI resource object represents an extended address space | ||
230 | * resource and if that's the case, use the information in it to populate the | ||
231 | * generic resource object pointed to by @res. | ||
232 | */ | ||
233 | bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, | ||
234 | struct resource *res) | ||
235 | { | ||
236 | struct acpi_resource_extended_address64 *ext_addr; | ||
237 | bool window; | ||
238 | u64 len; | ||
239 | u8 io_decode; | ||
240 | |||
241 | if (ares->type != ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64) | ||
242 | return false; | ||
243 | |||
244 | ext_addr = &ares->data.ext_address64; | ||
245 | |||
246 | res->start = ext_addr->minimum; | ||
247 | res->end = ext_addr->maximum; | ||
248 | window = ext_addr->producer_consumer == ACPI_PRODUCER; | ||
249 | |||
250 | switch(ext_addr->resource_type) { | ||
251 | case ACPI_MEMORY_RANGE: | ||
252 | len = ext_addr->maximum - ext_addr->minimum + 1; | ||
253 | res->flags = acpi_dev_memresource_flags(len, | ||
254 | ext_addr->info.mem.write_protect, | ||
255 | window); | ||
256 | break; | ||
257 | case ACPI_IO_RANGE: | ||
258 | io_decode = ext_addr->granularity == 0xfff ? | ||
259 | ACPI_DECODE_10 : ACPI_DECODE_16; | ||
260 | res->flags = acpi_dev_ioresource_flags(ext_addr->minimum, | ||
261 | ext_addr->maximum, | ||
262 | io_decode, window); | ||
263 | break; | ||
264 | case ACPI_BUS_NUMBER_RANGE: | ||
265 | res->flags = IORESOURCE_BUS; | ||
266 | break; | ||
267 | default: | ||
268 | res->flags = 0; | ||
269 | } | ||
270 | |||
271 | return true; | ||
272 | } | ||
273 | EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space); | ||
274 | |||
275 | /** | ||
276 | * acpi_dev_irq_flags - Determine IRQ resource flags. | ||
277 | * @triggering: Triggering type as provided by ACPI. | ||
278 | * @polarity: Interrupt polarity as provided by ACPI. | ||
279 | * @shareable: Whether or not the interrupt is shareable. | ||
280 | */ | ||
281 | unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable) | ||
282 | { | ||
283 | unsigned long flags; | ||
284 | |||
285 | if (triggering == ACPI_LEVEL_SENSITIVE) | ||
286 | flags = polarity == ACPI_ACTIVE_LOW ? | ||
287 | IORESOURCE_IRQ_LOWLEVEL : IORESOURCE_IRQ_HIGHLEVEL; | ||
288 | else | ||
289 | flags = polarity == ACPI_ACTIVE_LOW ? | ||
290 | IORESOURCE_IRQ_LOWEDGE : IORESOURCE_IRQ_HIGHEDGE; | ||
291 | |||
292 | if (shareable == ACPI_SHARED) | ||
293 | flags |= IORESOURCE_IRQ_SHAREABLE; | ||
294 | |||
295 | return flags | IORESOURCE_IRQ; | ||
296 | } | ||
297 | EXPORT_SYMBOL_GPL(acpi_dev_irq_flags); | ||
298 | |||
299 | static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi) | ||
300 | { | ||
301 | res->start = gsi; | ||
302 | res->end = gsi; | ||
303 | res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED; | ||
304 | } | ||
305 | |||
306 | static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, | ||
307 | u8 triggering, u8 polarity, u8 shareable) | ||
308 | { | ||
309 | int irq, p, t; | ||
310 | |||
311 | if (!valid_IRQ(gsi)) { | ||
312 | acpi_dev_irqresource_disabled(res, gsi); | ||
313 | return; | ||
314 | } | ||
315 | |||
316 | /* | ||
317 | * In IO-APIC mode, use the overridden attributes. Two reasons: | ||
318 | * 1. BIOS bug in DSDT | ||
319 | * 2. BIOS uses IO-APIC mode Interrupt Source Override | ||
320 | */ | ||
321 | if (!acpi_get_override_irq(gsi, &t, &p)) { | ||
322 | u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; | ||
323 | u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; | ||
324 | |||
325 | if (triggering != trig || polarity != pol) { | ||
326 | pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi, | ||
327 | t ? "edge" : "level", p ? "low" : "high"); | ||
328 | triggering = trig; | ||
329 | polarity = pol; | ||
330 | } | ||
331 | } | ||
332 | |||
333 | res->flags = acpi_dev_irq_flags(triggering, polarity, shareable); | ||
334 | irq = acpi_register_gsi(NULL, gsi, triggering, polarity); | ||
335 | if (irq >= 0) { | ||
336 | res->start = irq; | ||
337 | res->end = irq; | ||
338 | } else { | ||
339 | acpi_dev_irqresource_disabled(res, gsi); | ||
340 | } | ||
341 | } | ||
342 | |||
343 | /** | ||
344 | * acpi_dev_resource_interrupt - Extract ACPI interrupt resource information. | ||
345 | * @ares: Input ACPI resource object. | ||
346 | * @index: Index into the array of GSIs represented by the resource. | ||
347 | * @res: Output generic resource object. | ||
348 | * | ||
349 | * Check if the given ACPI resource object represents an interrupt resource | ||
350 | * and @index does not exceed the resource's interrupt count (true is returned | ||
351 | * in that case regardless of the results of the other checks). If that's the | ||
352 | * case, register the GSI corresponding to @index from the array of interrupts | ||
353 | * represented by the resource and populate the generic resource object pointed | ||
354 | * to by @res accordingly. If the registration of the GSI is not successful, | ||
355 | * IORESOURCE_DISABLED will be set in that object's flags. | ||
356 | */ | ||
357 | bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, | ||
358 | struct resource *res) | ||
359 | { | ||
360 | struct acpi_resource_irq *irq; | ||
361 | struct acpi_resource_extended_irq *ext_irq; | ||
362 | |||
363 | switch (ares->type) { | ||
364 | case ACPI_RESOURCE_TYPE_IRQ: | ||
365 | /* | ||
366 | * Per spec, only one interrupt per descriptor is allowed in | ||
367 | * _CRS, but some firmware violates this, so parse them all. | ||
368 | */ | ||
369 | irq = &ares->data.irq; | ||
370 | if (index >= irq->interrupt_count) { | ||
371 | acpi_dev_irqresource_disabled(res, 0); | ||
372 | return false; | ||
373 | } | ||
374 | acpi_dev_get_irqresource(res, irq->interrupts[index], | ||
375 | irq->triggering, irq->polarity, | ||
376 | irq->sharable); | ||
377 | break; | ||
378 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: | ||
379 | ext_irq = &ares->data.extended_irq; | ||
380 | if (index >= ext_irq->interrupt_count) { | ||
381 | acpi_dev_irqresource_disabled(res, 0); | ||
382 | return false; | ||
383 | } | ||
384 | acpi_dev_get_irqresource(res, ext_irq->interrupts[index], | ||
385 | ext_irq->triggering, ext_irq->polarity, | ||
386 | ext_irq->sharable); | ||
387 | break; | ||
388 | default: | ||
389 | return false; | ||
390 | } | ||
391 | |||
392 | return true; | ||
393 | } | ||
394 | EXPORT_SYMBOL_GPL(acpi_dev_resource_interrupt); | ||
395 | |||
396 | /** | ||
397 | * acpi_dev_free_resource_list - Free resources from %acpi_dev_get_resources(). | ||
398 | * @list: The head of the resource list to free. | ||
399 | */ | ||
400 | void acpi_dev_free_resource_list(struct list_head *list) | ||
401 | { | ||
402 | struct resource_list_entry *rentry, *re; | ||
403 | |||
404 | list_for_each_entry_safe(rentry, re, list, node) { | ||
405 | list_del(&rentry->node); | ||
406 | kfree(rentry); | ||
407 | } | ||
408 | } | ||
409 | EXPORT_SYMBOL_GPL(acpi_dev_free_resource_list); | ||
410 | |||
411 | struct res_proc_context { | ||
412 | struct list_head *list; | ||
413 | int (*preproc)(struct acpi_resource *, void *); | ||
414 | void *preproc_data; | ||
415 | int count; | ||
416 | int error; | ||
417 | }; | ||
418 | |||
419 | static acpi_status acpi_dev_new_resource_entry(struct resource *r, | ||
420 | struct res_proc_context *c) | ||
421 | { | ||
422 | struct resource_list_entry *rentry; | ||
423 | |||
424 | rentry = kmalloc(sizeof(*rentry), GFP_KERNEL); | ||
425 | if (!rentry) { | ||
426 | c->error = -ENOMEM; | ||
427 | return AE_NO_MEMORY; | ||
428 | } | ||
429 | rentry->res = *r; | ||
430 | list_add_tail(&rentry->node, c->list); | ||
431 | c->count++; | ||
432 | return AE_OK; | ||
433 | } | ||
434 | |||
435 | static acpi_status acpi_dev_process_resource(struct acpi_resource *ares, | ||
436 | void *context) | ||
437 | { | ||
438 | struct res_proc_context *c = context; | ||
439 | struct resource r; | ||
440 | int i; | ||
441 | |||
442 | if (c->preproc) { | ||
443 | int ret; | ||
444 | |||
445 | ret = c->preproc(ares, c->preproc_data); | ||
446 | if (ret < 0) { | ||
447 | c->error = ret; | ||
448 | return AE_CTRL_TERMINATE; | ||
449 | } else if (ret > 0) { | ||
450 | return AE_OK; | ||
451 | } | ||
452 | } | ||
453 | |||
454 | memset(&r, 0, sizeof(r)); | ||
455 | |||
456 | if (acpi_dev_resource_memory(ares, &r) | ||
457 | || acpi_dev_resource_io(ares, &r) | ||
458 | || acpi_dev_resource_address_space(ares, &r) | ||
459 | || acpi_dev_resource_ext_address_space(ares, &r)) | ||
460 | return acpi_dev_new_resource_entry(&r, c); | ||
461 | |||
462 | for (i = 0; acpi_dev_resource_interrupt(ares, i, &r); i++) { | ||
463 | acpi_status status; | ||
464 | |||
465 | status = acpi_dev_new_resource_entry(&r, c); | ||
466 | if (ACPI_FAILURE(status)) | ||
467 | return status; | ||
468 | } | ||
469 | |||
470 | return AE_OK; | ||
471 | } | ||
472 | |||
473 | /** | ||
474 | * acpi_dev_get_resources - Get current resources of a device. | ||
475 | * @adev: ACPI device node to get the resources for. | ||
476 | * @list: Head of the resultant list of resources (must be empty). | ||
477 | * @preproc: The caller's preprocessing routine. | ||
478 | * @preproc_data: Pointer passed to the caller's preprocessing routine. | ||
479 | * | ||
480 | * Evaluate the _CRS method for the given device node and process its output by | ||
481 | * (1) executing the @preproc() routine provided by the caller, passing the | ||
482 | * resource pointer and @preproc_data to it as arguments, for each ACPI resource | ||
483 | * returned and (2) converting all of the returned ACPI resources into struct | ||
484 | * resource objects if possible. If the return value of @preproc() in step (1) | ||
485 | * is different from 0, step (2) is not applied to the given ACPI resource and | ||
486 | * if that value is negative, the whole processing is aborted and that value is | ||
487 | * returned as the final error code. | ||
488 | * | ||
489 | * The resultant struct resource objects are put on the list pointed to by | ||
490 | * @list, that must be empty initially, as members of struct resource_list_entry | ||
491 | * objects. Callers of this routine should use %acpi_dev_free_resource_list() to | ||
492 | * free that list. | ||
493 | * | ||
494 | * The number of resources in the output list is returned on success, an error | ||
495 | * code reflecting the error condition is returned otherwise. | ||
496 | */ | ||
497 | int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, | ||
498 | int (*preproc)(struct acpi_resource *, void *), | ||
499 | void *preproc_data) | ||
500 | { | ||
501 | struct res_proc_context c; | ||
502 | acpi_handle not_used; | ||
503 | acpi_status status; | ||
504 | |||
505 | if (!adev || !adev->handle || !list_empty(list)) | ||
506 | return -EINVAL; | ||
507 | |||
508 | status = acpi_get_handle(adev->handle, METHOD_NAME__CRS, ¬_used); | ||
509 | if (ACPI_FAILURE(status)) | ||
510 | return 0; | ||
511 | |||
512 | c.list = list; | ||
513 | c.preproc = preproc; | ||
514 | c.preproc_data = preproc_data; | ||
515 | c.count = 0; | ||
516 | c.error = 0; | ||
517 | status = acpi_walk_resources(adev->handle, METHOD_NAME__CRS, | ||
518 | acpi_dev_process_resource, &c); | ||
519 | if (ACPI_FAILURE(status)) { | ||
520 | acpi_dev_free_resource_list(list); | ||
521 | return c.error ? c.error : -EIO; | ||
522 | } | ||
523 | |||
524 | return c.count; | ||
525 | } | ||
526 | EXPORT_SYMBOL_GPL(acpi_dev_get_resources); | ||
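For illustration only (not part of the patch), a driver holding a struct acpi_device pointer could build and consume such a resource list as shown below; the dump helper, the lack of a preprocessing routine and the %pR printout are arbitrary choices for the sketch.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/list.h>

static void foo_dump_resources(struct acpi_device *adev)
{
	struct resource_list_entry *rentry;
	LIST_HEAD(res_list);
	int count;

	/* No preprocessing: convert every _CRS entry that the helpers
	 * above understand into a struct resource. */
	count = acpi_dev_get_resources(adev, &res_list, NULL, NULL);
	if (count < 0)
		return;

	list_for_each_entry(rentry, &res_list, node)
		dev_info(&adev->dev, "resource %pR\n", &rentry->res);

	/* The list entries are allocated by acpi_dev_get_resources()
	 * and must be freed by the caller. */
	acpi_dev_free_resource_list(&res_list);
}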
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index bd523bfbaad1..53502d1bbf26 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -29,6 +29,27 @@ extern struct acpi_device *acpi_root; | |||
29 | 29 | ||
30 | static const char *dummy_hid = "device"; | 30 | static const char *dummy_hid = "device"; |
31 | 31 | ||
32 | /* | ||
33 | * The following ACPI IDs are known to be suitable for representing as | ||
34 | * platform devices. | ||
35 | */ | ||
36 | static const struct acpi_device_id acpi_platform_device_ids[] = { | ||
37 | |||
38 | { "PNP0D40" }, | ||
39 | |||
40 | /* Haswell LPSS devices */ | ||
41 | { "INT33C0", 0 }, | ||
42 | { "INT33C1", 0 }, | ||
43 | { "INT33C2", 0 }, | ||
44 | { "INT33C3", 0 }, | ||
45 | { "INT33C4", 0 }, | ||
46 | { "INT33C5", 0 }, | ||
47 | { "INT33C6", 0 }, | ||
48 | { "INT33C7", 0 }, | ||
49 | |||
50 | { } | ||
51 | }; | ||
52 | |||
32 | static LIST_HEAD(acpi_device_list); | 53 | static LIST_HEAD(acpi_device_list); |
33 | static LIST_HEAD(acpi_bus_id_list); | 54 | static LIST_HEAD(acpi_bus_id_list); |
34 | DEFINE_MUTEX(acpi_device_lock); | 55 | DEFINE_MUTEX(acpi_device_lock); |
@@ -397,8 +418,8 @@ static void acpi_device_remove_files(struct acpi_device *dev) | |||
397 | ACPI Bus operations | 418 | ACPI Bus operations |
398 | -------------------------------------------------------------------------- */ | 419 | -------------------------------------------------------------------------- */ |
399 | 420 | ||
400 | int acpi_match_device_ids(struct acpi_device *device, | 421 | static const struct acpi_device_id *__acpi_match_device( |
401 | const struct acpi_device_id *ids) | 422 | struct acpi_device *device, const struct acpi_device_id *ids) |
402 | { | 423 | { |
403 | const struct acpi_device_id *id; | 424 | const struct acpi_device_id *id; |
404 | struct acpi_hardware_id *hwid; | 425 | struct acpi_hardware_id *hwid; |
@@ -408,14 +429,44 @@ int acpi_match_device_ids(struct acpi_device *device, | |||
408 | * driver for it. | 429 | * driver for it. |
409 | */ | 430 | */ |
410 | if (!device->status.present) | 431 | if (!device->status.present) |
411 | return -ENODEV; | 432 | return NULL; |
412 | 433 | ||
413 | for (id = ids; id->id[0]; id++) | 434 | for (id = ids; id->id[0]; id++) |
414 | list_for_each_entry(hwid, &device->pnp.ids, list) | 435 | list_for_each_entry(hwid, &device->pnp.ids, list) |
415 | if (!strcmp((char *) id->id, hwid->id)) | 436 | if (!strcmp((char *) id->id, hwid->id)) |
416 | return 0; | 437 | return id; |
417 | 438 | ||
418 | return -ENOENT; | 439 | return NULL; |
440 | } | ||
441 | |||
442 | /** | ||
443 | * acpi_match_device - Match a struct device against a given list of ACPI IDs | ||
444 | * @ids: Array of struct acpi_device_id object to match against. | ||
445 | * @dev: The device structure to match. | ||
446 | * | ||
447 | * Check if @dev has a valid ACPI handle and if there is a struct acpi_device | ||
448 | * object for that handle and use that object to match against a given list of | ||
449 | * device IDs. | ||
450 | * | ||
451 | * Return a pointer to the first matching ID on success or %NULL on failure. | ||
452 | */ | ||
453 | const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, | ||
454 | const struct device *dev) | ||
455 | { | ||
456 | struct acpi_device *adev; | ||
457 | |||
458 | if (!ids || !ACPI_HANDLE(dev) | ||
459 | || ACPI_FAILURE(acpi_bus_get_device(ACPI_HANDLE(dev), &adev))) | ||
460 | return NULL; | ||
461 | |||
462 | return __acpi_match_device(adev, ids); | ||
463 | } | ||
464 | EXPORT_SYMBOL_GPL(acpi_match_device); | ||
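An illustrative caller (hypothetical driver and IDs, not from the patch): a driver that serves several ACPI IDs can use the driver_data of the entry returned by acpi_match_device() to select per-device parameters.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/platform_device.h>

static const struct acpi_device_id baz_acpi_ids[] = {
	{ "ABCD0001", 0 },	/* hypothetical device IDs */
	{ "ABCD0002", 1 },
	{ }
};

static int baz_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *id;

	id = acpi_match_device(baz_acpi_ids, &pdev->dev);
	if (!id)
		return -ENODEV;

	/* id->driver_data selects a hypothetical per-ID variant. */
	dev_info(&pdev->dev, "probed variant %lu\n", id->driver_data);
	return 0;
}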
465 | |||
466 | int acpi_match_device_ids(struct acpi_device *device, | ||
467 | const struct acpi_device_id *ids) | ||
468 | { | ||
469 | return __acpi_match_device(device, ids) ? 0 : -ENOENT; | ||
419 | } | 470 | } |
420 | EXPORT_SYMBOL(acpi_match_device_ids); | 471 | EXPORT_SYMBOL(acpi_match_device_ids); |
421 | 472 | ||
@@ -1028,8 +1079,10 @@ static int acpi_bus_get_power_flags(struct acpi_device *device) | |||
1028 | * D3hot is only valid if _PR3 present. | 1079 | * D3hot is only valid if _PR3 present. |
1029 | */ | 1080 | */ |
1030 | if (ps->resources.count || | 1081 | if (ps->resources.count || |
1031 | (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT)) | 1082 | (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT)) { |
1032 | ps->flags.valid = 1; | 1083 | ps->flags.valid = 1; |
1084 | ps->flags.os_accessible = 1; | ||
1085 | } | ||
1033 | 1086 | ||
1034 | ps->power = -1; /* Unknown - driver assigned */ | 1087 | ps->power = -1; /* Unknown - driver assigned */ |
1035 | ps->latency = -1; /* Unknown - driver assigned */ | 1088 | ps->latency = -1; /* Unknown - driver assigned */ |
@@ -1045,6 +1098,11 @@ static int acpi_bus_get_power_flags(struct acpi_device *device) | |||
1045 | if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set) | 1098 | if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set) |
1046 | device->power.states[ACPI_STATE_D3_COLD].flags.explicit_set = 1; | 1099 | device->power.states[ACPI_STATE_D3_COLD].flags.explicit_set = 1; |
1047 | 1100 | ||
1101 | /* Presence of _PS3 or _PRx means we can put the device into D3 cold */ | ||
1102 | if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set || | ||
1103 | device->power.flags.power_resources) | ||
1104 | device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1; | ||
1105 | |||
1048 | acpi_bus_init_power(device); | 1106 | acpi_bus_init_power(device); |
1049 | 1107 | ||
1050 | return 0; | 1108 | return 0; |
@@ -1243,7 +1301,7 @@ static void acpi_device_set_id(struct acpi_device *device) | |||
1243 | { | 1301 | { |
1244 | acpi_status status; | 1302 | acpi_status status; |
1245 | struct acpi_device_info *info; | 1303 | struct acpi_device_info *info; |
1246 | struct acpica_device_id_list *cid_list; | 1304 | struct acpi_pnp_device_id_list *cid_list; |
1247 | int i; | 1305 | int i; |
1248 | 1306 | ||
1249 | switch (device->device_type) { | 1307 | switch (device->device_type) { |
@@ -1544,8 +1602,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl, | |||
1544 | */ | 1602 | */ |
1545 | device = NULL; | 1603 | device = NULL; |
1546 | acpi_bus_get_device(handle, &device); | 1604 | acpi_bus_get_device(handle, &device); |
1547 | if (ops->acpi_op_add && !device) | 1605 | if (ops->acpi_op_add && !device) { |
1548 | acpi_add_single_object(&device, handle, type, sta, ops); | 1606 | acpi_add_single_object(&device, handle, type, sta, ops); |
1607 | /* Is the device a known good platform device? */ | ||
1608 | if (device | ||
1609 | && !acpi_match_device_ids(device, acpi_platform_device_ids)) | ||
1610 | acpi_create_platform_device(device); | ||
1611 | } | ||
1549 | 1612 | ||
1550 | if (!device) | 1613 | if (!device) |
1551 | return AE_CTRL_DEPTH; | 1614 | return AE_CTRL_DEPTH; |
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 6efef87b405c..2fcc67d34b11 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/reboot.h> | 18 | #include <linux/reboot.h> |
19 | #include <linux/acpi.h> | 19 | #include <linux/acpi.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/pm_runtime.h> | ||
22 | 21 | ||
23 | #include <asm/io.h> | 22 | #include <asm/io.h> |
24 | 23 | ||
@@ -81,6 +80,12 @@ static int acpi_sleep_prepare(u32 acpi_state) | |||
81 | 80 | ||
82 | #ifdef CONFIG_ACPI_SLEEP | 81 | #ifdef CONFIG_ACPI_SLEEP |
83 | static u32 acpi_target_sleep_state = ACPI_STATE_S0; | 82 | static u32 acpi_target_sleep_state = ACPI_STATE_S0; |
83 | |||
84 | u32 acpi_target_system_state(void) | ||
85 | { | ||
86 | return acpi_target_sleep_state; | ||
87 | } | ||
88 | |||
84 | static bool pwr_btn_event_pending; | 89 | static bool pwr_btn_event_pending; |
85 | 90 | ||
86 | /* | 91 | /* |
@@ -710,177 +715,6 @@ int acpi_suspend(u32 acpi_state) | |||
710 | return -EINVAL; | 715 | return -EINVAL; |
711 | } | 716 | } |
712 | 717 | ||
713 | #ifdef CONFIG_PM | ||
714 | /** | ||
715 | * acpi_pm_device_sleep_state - return preferred power state of ACPI device | ||
716 | * in the system sleep state given by %acpi_target_sleep_state | ||
717 | * @dev: device to examine; its driver model wakeup flags control | ||
718 | * whether it should be able to wake up the system | ||
719 | * @d_min_p: used to store the upper limit of allowed states range | ||
720 | * @d_max_in: specify the lowest allowed states | ||
721 | * Return value: preferred power state of the device on success, -ENODEV | ||
722 | * (ie. if there's no 'struct acpi_device' for @dev) or -EINVAL on failure | ||
723 | * | ||
724 | * Find the lowest power (highest number) ACPI device power state that | ||
725 | * device @dev can be in while the system is in the sleep state represented | ||
726 | * by %acpi_target_sleep_state. If @wake is nonzero, the device should be | ||
727 | * able to wake up the system from this sleep state. If @d_min_p is set, | ||
728 | * the highest power (lowest number) device power state of @dev allowed | ||
729 | * in this system sleep state is stored at the location pointed to by it. | ||
730 | * | ||
731 | * The caller must ensure that @dev is valid before using this function. | ||
732 | * The caller is also responsible for figuring out if the device is | ||
733 | * supposed to be able to wake up the system and passing this information | ||
734 | * via @wake. | ||
735 | */ | ||
736 | |||
737 | int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in) | ||
738 | { | ||
739 | acpi_handle handle = DEVICE_ACPI_HANDLE(dev); | ||
740 | struct acpi_device *adev; | ||
741 | char acpi_method[] = "_SxD"; | ||
742 | unsigned long long d_min, d_max; | ||
743 | |||
744 | if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3) | ||
745 | return -EINVAL; | ||
746 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { | ||
747 | printk(KERN_DEBUG "ACPI handle has no context!\n"); | ||
748 | return -ENODEV; | ||
749 | } | ||
750 | |||
751 | acpi_method[2] = '0' + acpi_target_sleep_state; | ||
752 | /* | ||
753 | * If the sleep state is S0, the lowest limit from ACPI is D3, | ||
754 | * but if the device has _S0W, we will use the value from _S0W | ||
755 | * as the lowest limit from ACPI. Finally, we will constrain | ||
756 | * the lowest limit with the specified one. | ||
757 | */ | ||
758 | d_min = ACPI_STATE_D0; | ||
759 | d_max = ACPI_STATE_D3; | ||
760 | |||
761 | /* | ||
762 | * If present, _SxD methods return the minimum D-state (highest power | ||
763 | * state) we can use for the corresponding S-states. Otherwise, the | ||
764 | * minimum D-state is D0 (ACPI 3.x). | ||
765 | * | ||
766 | * NOTE: We rely on acpi_evaluate_integer() not clobbering the integer | ||
767 | * provided -- that's our fault recovery, we ignore retval. | ||
768 | */ | ||
769 | if (acpi_target_sleep_state > ACPI_STATE_S0) | ||
770 | acpi_evaluate_integer(handle, acpi_method, NULL, &d_min); | ||
771 | |||
772 | /* | ||
773 | * If _PRW says we can wake up the system from the target sleep state, | ||
774 | * the D-state returned by _SxD is sufficient for that (we assume a | ||
775 | * wakeup-aware driver if wake is set). Still, if _SxW exists | ||
776 | * (ACPI 3.x), it should return the maximum (lowest power) D-state that | ||
777 | * can wake the system. _S0W may be valid, too. | ||
778 | */ | ||
779 | if (acpi_target_sleep_state == ACPI_STATE_S0 || | ||
780 | (device_may_wakeup(dev) && adev->wakeup.flags.valid && | ||
781 | adev->wakeup.sleep_state >= acpi_target_sleep_state)) { | ||
782 | acpi_status status; | ||
783 | |||
784 | acpi_method[3] = 'W'; | ||
785 | status = acpi_evaluate_integer(handle, acpi_method, NULL, | ||
786 | &d_max); | ||
787 | if (ACPI_FAILURE(status)) { | ||
788 | if (acpi_target_sleep_state != ACPI_STATE_S0 || | ||
789 | status != AE_NOT_FOUND) | ||
790 | d_max = d_min; | ||
791 | } else if (d_max < d_min) { | ||
792 | /* Warn the user of the broken DSDT */ | ||
793 | printk(KERN_WARNING "ACPI: Wrong value from %s\n", | ||
794 | acpi_method); | ||
795 | /* Sanitize it */ | ||
796 | d_min = d_max; | ||
797 | } | ||
798 | } | ||
799 | |||
800 | if (d_max_in < d_min) | ||
801 | return -EINVAL; | ||
802 | if (d_min_p) | ||
803 | *d_min_p = d_min; | ||
804 | /* constrain d_max with specified lowest limit (max number) */ | ||
805 | if (d_max > d_max_in) { | ||
806 | for (d_max = d_max_in; d_max > d_min; d_max--) { | ||
807 | if (adev->power.states[d_max].flags.valid) | ||
808 | break; | ||
809 | } | ||
810 | } | ||
811 | return d_max; | ||
812 | } | ||
813 | EXPORT_SYMBOL(acpi_pm_device_sleep_state); | ||
814 | #endif /* CONFIG_PM */ | ||
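For illustration, a minimal sketch of how a driver's system-suspend path might call acpi_pm_device_sleep_state() as documented above; the helper name example_choose_dstate() is hypothetical and not part of this patch.

static int example_choose_dstate(struct device *dev)
{
	int d_min, d_max;

	d_max = acpi_pm_device_sleep_state(dev, &d_min, ACPI_STATE_D3);
	if (d_max < 0)
		return d_max;	/* no ACPI handle, or the limit was invalid */

	/* Any state in [d_min, d_max] honours _SxD/_SxW; take the deepest. */
	return d_max;
}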
815 | |||
816 | #ifdef CONFIG_PM_SLEEP | ||
817 | /** | ||
818 | * acpi_pm_device_run_wake - Enable/disable wake-up for given device. | ||
819 | * @phys_dev: Device to enable/disable the platform to wake-up the system for. | ||
820 | * @enable: Whether to enable or disable the wake-up functionality. | ||
821 | * | ||
822 | * Find the ACPI device object corresponding to @phys_dev and try to | ||
823 | * enable/disable the GPE associated with it. | ||
824 | */ | ||
825 | int acpi_pm_device_run_wake(struct device *phys_dev, bool enable) | ||
826 | { | ||
827 | struct acpi_device *dev; | ||
828 | acpi_handle handle; | ||
829 | |||
830 | if (!device_run_wake(phys_dev)) | ||
831 | return -EINVAL; | ||
832 | |||
833 | handle = DEVICE_ACPI_HANDLE(phys_dev); | ||
834 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) { | ||
835 | dev_dbg(phys_dev, "ACPI handle has no context in %s!\n", | ||
836 | __func__); | ||
837 | return -ENODEV; | ||
838 | } | ||
839 | |||
840 | if (enable) { | ||
841 | acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0); | ||
842 | acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number); | ||
843 | } else { | ||
844 | acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number); | ||
845 | acpi_disable_wakeup_device_power(dev); | ||
846 | } | ||
847 | |||
848 | return 0; | ||
849 | } | ||
850 | EXPORT_SYMBOL(acpi_pm_device_run_wake); | ||
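A hedged sketch of calling acpi_pm_device_run_wake() from a driver's runtime-suspend path; example_runtime_suspend() is a hypothetical name, and the call only succeeds when device_run_wake() is true for the device.

static int example_runtime_suspend(struct device *dev)
{
	int error;

	/* Arm the GPE so the platform can wake the idle device. */
	error = acpi_pm_device_run_wake(dev, true);
	if (error)
		return error;

	/* ...put the device into its low-power state here... */
	return 0;
}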
851 | |||
852 | /** | ||
853 | * acpi_pm_device_sleep_wake - enable or disable the system wake-up | ||
854 | * capability of a given device | ||
855 | * @dev: device to handle | ||
856 | * @enable: 'true' - enable, 'false' - disable the wake-up capability | ||
857 | */ | ||
858 | int acpi_pm_device_sleep_wake(struct device *dev, bool enable) | ||
859 | { | ||
860 | acpi_handle handle; | ||
861 | struct acpi_device *adev; | ||
862 | int error; | ||
863 | |||
864 | if (!device_can_wakeup(dev)) | ||
865 | return -EINVAL; | ||
866 | |||
867 | handle = DEVICE_ACPI_HANDLE(dev); | ||
868 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { | ||
869 | dev_dbg(dev, "ACPI handle has no context in %s!\n", __func__); | ||
870 | return -ENODEV; | ||
871 | } | ||
872 | |||
873 | error = enable ? | ||
874 | acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) : | ||
875 | acpi_disable_wakeup_device_power(adev); | ||
876 | if (!error) | ||
877 | dev_info(dev, "wake-up capability %s by ACPI\n", | ||
878 | enable ? "enabled" : "disabled"); | ||
879 | |||
880 | return error; | ||
881 | } | ||
882 | #endif /* CONFIG_PM_SLEEP */ | ||
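As a sketch, acpi_pm_device_sleep_wake() is typically driven by the user-visible wakeup setting; example_suspend() below is hypothetical and assumes a wakeup-capable device.

static int example_suspend(struct device *dev)
{
	/* Arm or disarm ACPI wakeup according to the current policy. */
	return acpi_pm_device_sleep_wake(dev, device_may_wakeup(dev));
}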
883 | |||
884 | static void acpi_power_off_prepare(void) | 718 | static void acpi_power_off_prepare(void) |
885 | { | 719 | { |
886 | /* Prepare to power off the system */ | 720 | /* Prepare to power off the system */ |
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 7c3f98ba4afe..ea61ca9129cd 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c | |||
@@ -476,7 +476,7 @@ static void fixed_event_count(u32 event_number) | |||
476 | return; | 476 | return; |
477 | } | 477 | } |
478 | 478 | ||
479 | static void acpi_gbl_event_handler(u32 event_type, acpi_handle device, | 479 | static void acpi_global_event_handler(u32 event_type, acpi_handle device, |
480 | u32 event_number, void *context) | 480 | u32 event_number, void *context) |
481 | { | 481 | { |
482 | if (event_type == ACPI_EVENT_TYPE_GPE) | 482 | if (event_type == ACPI_EVENT_TYPE_GPE) |
@@ -638,7 +638,7 @@ void acpi_irq_stats_init(void) | |||
638 | if (all_counters == NULL) | 638 | if (all_counters == NULL) |
639 | goto fail; | 639 | goto fail; |
640 | 640 | ||
641 | status = acpi_install_global_event_handler(acpi_gbl_event_handler, NULL); | 641 | status = acpi_install_global_event_handler(acpi_global_event_handler, NULL); |
642 | if (ACPI_FAILURE(status)) | 642 | if (ACPI_FAILURE(status)) |
643 | goto fail; | 643 | goto fail; |
644 | 644 | ||
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c index b1ae48054dc5..b7078afddb74 100644 --- a/drivers/ata/ahci_platform.c +++ b/drivers/ata/ahci_platform.c | |||
@@ -238,7 +238,7 @@ static int __devexit ahci_remove(struct platform_device *pdev) | |||
238 | return 0; | 238 | return 0; |
239 | } | 239 | } |
240 | 240 | ||
241 | #ifdef CONFIG_PM | 241 | #ifdef CONFIG_PM_SLEEP |
242 | static int ahci_suspend(struct device *dev) | 242 | static int ahci_suspend(struct device *dev) |
243 | { | 243 | { |
244 | struct ahci_platform_data *pdata = dev_get_platdata(dev); | 244 | struct ahci_platform_data *pdata = dev_get_platdata(dev); |
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index fd9ecf74e631..5b0ba3f20edc 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c | |||
@@ -1105,10 +1105,15 @@ static int ata_acpi_bind_device(struct ata_port *ap, struct scsi_device *sdev, | |||
1105 | struct acpi_device *acpi_dev; | 1105 | struct acpi_device *acpi_dev; |
1106 | struct acpi_device_power_state *states; | 1106 | struct acpi_device_power_state *states; |
1107 | 1107 | ||
1108 | if (ap->flags & ATA_FLAG_ACPI_SATA) | 1108 | if (ap->flags & ATA_FLAG_ACPI_SATA) { |
1109 | ata_dev = &ap->link.device[sdev->channel]; | 1109 | if (!sata_pmp_attached(ap)) |
1110 | else | 1110 | ata_dev = &ap->link.device[sdev->id]; |
1111 | else | ||
1112 | ata_dev = &ap->pmp_link[sdev->channel].device[sdev->id]; | ||
1113 | } | ||
1114 | else { | ||
1111 | ata_dev = &ap->link.device[sdev->id]; | 1115 | ata_dev = &ap->link.device[sdev->id]; |
1116 | } | ||
1112 | 1117 | ||
1113 | *handle = ata_dev_acpi_handle(ata_dev); | 1118 | *handle = ata_dev_acpi_handle(ata_dev); |
1114 | 1119 | ||
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 3cc7096cfda7..f46fbd3bd3fb 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -2942,6 +2942,10 @@ const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) | |||
2942 | 2942 | ||
2943 | if (xfer_mode == t->mode) | 2943 | if (xfer_mode == t->mode) |
2944 | return t; | 2944 | return t; |
2945 | |||
2946 | WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n", | ||
2947 | __func__, xfer_mode); | ||
2948 | |||
2945 | return NULL; | 2949 | return NULL; |
2946 | } | 2950 | } |
2947 | 2951 | ||
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index e3bda074fa12..a6df6a351d6e 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -1052,6 +1052,8 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev) | |||
1052 | { | 1052 | { |
1053 | sdev->use_10_for_rw = 1; | 1053 | sdev->use_10_for_rw = 1; |
1054 | sdev->use_10_for_ms = 1; | 1054 | sdev->use_10_for_ms = 1; |
1055 | sdev->no_report_opcodes = 1; | ||
1056 | sdev->no_write_same = 1; | ||
1055 | 1057 | ||
1056 | /* Schedule policy is determined by ->qc_defer() callback and | 1058 | /* Schedule policy is determined by ->qc_defer() callback and |
1057 | * it needs to see every deferred qc. Set dev_blocked to 1 to | 1059 | * it needs to see every deferred qc. Set dev_blocked to 1 to |
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index 26201ebef3ca..371fd2c698b7 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c | |||
@@ -317,6 +317,12 @@ static int cf_init(struct arasan_cf_dev *acdev) | |||
317 | return ret; | 317 | return ret; |
318 | } | 318 | } |
319 | 319 | ||
320 | ret = clk_set_rate(acdev->clk, 166000000); | ||
321 | if (ret) { | ||
322 | dev_warn(acdev->host->dev, "clock set rate failed"); | ||
323 | return ret; | ||
324 | } | ||
325 | |||
320 | spin_lock_irqsave(&acdev->host->lock, flags); | 326 | spin_lock_irqsave(&acdev->host->lock, flags); |
321 | /* configure CF interface clock */ | 327 | /* configure CF interface clock */ |
322 | writel((pdata->cf_if_clk <= CF_IF_CLK_200M) ? pdata->cf_if_clk : | 328 | writel((pdata->cf_if_clk <= CF_IF_CLK_200M) ? pdata->cf_if_clk : |
@@ -908,7 +914,7 @@ static int __devexit arasan_cf_remove(struct platform_device *pdev) | |||
908 | return 0; | 914 | return 0; |
909 | } | 915 | } |
910 | 916 | ||
911 | #ifdef CONFIG_PM | 917 | #ifdef CONFIG_PM_SLEEP |
912 | static int arasan_cf_suspend(struct device *dev) | 918 | static int arasan_cf_suspend(struct device *dev) |
913 | { | 919 | { |
914 | struct ata_host *host = dev_get_drvdata(dev); | 920 | struct ata_host *host = dev_get_drvdata(dev); |
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index 0d7c4c2cd26f..400bf1c3e982 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c | |||
@@ -260,7 +260,7 @@ static const struct of_device_id ahci_of_match[] = { | |||
260 | }; | 260 | }; |
261 | MODULE_DEVICE_TABLE(of, ahci_of_match); | 261 | MODULE_DEVICE_TABLE(of, ahci_of_match); |
262 | 262 | ||
263 | static int __init ahci_highbank_probe(struct platform_device *pdev) | 263 | static int __devinit ahci_highbank_probe(struct platform_device *pdev) |
264 | { | 264 | { |
265 | struct device *dev = &pdev->dev; | 265 | struct device *dev = &pdev->dev; |
266 | struct ahci_host_priv *hpriv; | 266 | struct ahci_host_priv *hpriv; |
@@ -378,7 +378,7 @@ static int __devexit ahci_highbank_remove(struct platform_device *pdev) | |||
378 | return 0; | 378 | return 0; |
379 | } | 379 | } |
380 | 380 | ||
381 | #ifdef CONFIG_PM | 381 | #ifdef CONFIG_PM_SLEEP |
382 | static int ahci_highbank_suspend(struct device *dev) | 382 | static int ahci_highbank_suspend(struct device *dev) |
383 | { | 383 | { |
384 | struct ata_host *host = dev_get_drvdata(dev); | 384 | struct ata_host *host = dev_get_drvdata(dev); |
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index 44a4256533e1..08608de87e4e 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c | |||
@@ -142,6 +142,39 @@ static int k2_sata_scr_write(struct ata_link *link, | |||
142 | return 0; | 142 | return 0; |
143 | } | 143 | } |
144 | 144 | ||
145 | static int k2_sata_softreset(struct ata_link *link, | ||
146 | unsigned int *class, unsigned long deadline) | ||
147 | { | ||
148 | u8 dmactl; | ||
149 | void __iomem *mmio = link->ap->ioaddr.bmdma_addr; | ||
150 | |||
151 | dmactl = readb(mmio + ATA_DMA_CMD); | ||
152 | |||
153 | /* Clear the start bit */ | ||
154 | if (dmactl & ATA_DMA_START) { | ||
155 | dmactl &= ~ATA_DMA_START; | ||
156 | writeb(dmactl, mmio + ATA_DMA_CMD); | ||
157 | } | ||
158 | |||
159 | return ata_sff_softreset(link, class, deadline); | ||
160 | } | ||
161 | |||
162 | static int k2_sata_hardreset(struct ata_link *link, | ||
163 | unsigned int *class, unsigned long deadline) | ||
164 | { | ||
165 | u8 dmactl; | ||
166 | void __iomem *mmio = link->ap->ioaddr.bmdma_addr; | ||
167 | |||
168 | dmactl = readb(mmio + ATA_DMA_CMD); | ||
169 | |||
170 | /* Clear the start bit */ | ||
171 | if (dmactl & ATA_DMA_START) { | ||
172 | dmactl &= ~ATA_DMA_START; | ||
173 | writeb(dmactl, mmio + ATA_DMA_CMD); | ||
174 | } | ||
175 | |||
176 | return sata_sff_hardreset(link, class, deadline); | ||
177 | } | ||
145 | 178 | ||
146 | static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) | 179 | static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) |
147 | { | 180 | { |
@@ -346,6 +379,8 @@ static struct scsi_host_template k2_sata_sht = { | |||
346 | 379 | ||
347 | static struct ata_port_operations k2_sata_ops = { | 380 | static struct ata_port_operations k2_sata_ops = { |
348 | .inherits = &ata_bmdma_port_ops, | 381 | .inherits = &ata_bmdma_port_ops, |
382 | .softreset = k2_sata_softreset, | ||
383 | .hardreset = k2_sata_hardreset, | ||
349 | .sff_tf_load = k2_sata_tf_load, | 384 | .sff_tf_load = k2_sata_tf_load, |
350 | .sff_tf_read = k2_sata_tf_read, | 385 | .sff_tf_read = k2_sata_tf_read, |
351 | .sff_check_status = k2_stat_check_status, | 386 | .sff_check_status = k2_stat_check_status, |
diff --git a/drivers/base/core.c b/drivers/base/core.c index abea76c36a4b..150a41580fad 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c | |||
@@ -1180,7 +1180,6 @@ void device_del(struct device *dev) | |||
1180 | if (dev->bus) | 1180 | if (dev->bus) |
1181 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, | 1181 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, |
1182 | BUS_NOTIFY_DEL_DEVICE, dev); | 1182 | BUS_NOTIFY_DEL_DEVICE, dev); |
1183 | device_pm_remove(dev); | ||
1184 | dpm_sysfs_remove(dev); | 1183 | dpm_sysfs_remove(dev); |
1185 | if (parent) | 1184 | if (parent) |
1186 | klist_del(&dev->p->knode_parent); | 1185 | klist_del(&dev->p->knode_parent); |
@@ -1205,6 +1204,7 @@ void device_del(struct device *dev) | |||
1205 | device_remove_file(dev, &uevent_attr); | 1204 | device_remove_file(dev, &uevent_attr); |
1206 | device_remove_attrs(dev); | 1205 | device_remove_attrs(dev); |
1207 | bus_remove_device(dev); | 1206 | bus_remove_device(dev); |
1207 | device_pm_remove(dev); | ||
1208 | driver_deferred_probe_del(dev); | 1208 | driver_deferred_probe_del(dev); |
1209 | 1209 | ||
1210 | /* Notify the platform of the removal, in case they | 1210 | /* Notify the platform of the removal, in case they |
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 72c776f2a1f5..b2ee3bcd5a41 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | #include <linux/pm_runtime.h> | 22 | #include <linux/pm_runtime.h> |
23 | #include <linux/idr.h> | 23 | #include <linux/idr.h> |
24 | #include <linux/acpi.h> | ||
24 | 25 | ||
25 | #include "base.h" | 26 | #include "base.h" |
26 | #include "power/power.h" | 27 | #include "power/power.h" |
@@ -436,6 +437,7 @@ struct platform_device *platform_device_register_full( | |||
436 | goto err_alloc; | 437 | goto err_alloc; |
437 | 438 | ||
438 | pdev->dev.parent = pdevinfo->parent; | 439 | pdev->dev.parent = pdevinfo->parent; |
440 | ACPI_HANDLE_SET(&pdev->dev, pdevinfo->acpi_node.handle); | ||
439 | 441 | ||
440 | if (pdevinfo->dma_mask) { | 442 | if (pdevinfo->dma_mask) { |
441 | /* | 443 | /* |
@@ -466,6 +468,7 @@ struct platform_device *platform_device_register_full( | |||
466 | ret = platform_device_add(pdev); | 468 | ret = platform_device_add(pdev); |
467 | if (ret) { | 469 | if (ret) { |
468 | err: | 470 | err: |
471 | ACPI_HANDLE_SET(&pdev->dev, NULL); | ||
469 | kfree(pdev->dev.dma_mask); | 472 | kfree(pdev->dev.dma_mask); |
470 | 473 | ||
471 | err_alloc: | 474 | err_alloc: |
@@ -481,8 +484,16 @@ static int platform_drv_probe(struct device *_dev) | |||
481 | { | 484 | { |
482 | struct platform_driver *drv = to_platform_driver(_dev->driver); | 485 | struct platform_driver *drv = to_platform_driver(_dev->driver); |
483 | struct platform_device *dev = to_platform_device(_dev); | 486 | struct platform_device *dev = to_platform_device(_dev); |
487 | int ret; | ||
484 | 488 | ||
485 | return drv->probe(dev); | 489 | if (ACPI_HANDLE(_dev)) |
490 | acpi_dev_pm_attach(_dev, true); | ||
491 | |||
492 | ret = drv->probe(dev); | ||
493 | if (ret && ACPI_HANDLE(_dev)) | ||
494 | acpi_dev_pm_detach(_dev, true); | ||
495 | |||
496 | return ret; | ||
486 | } | 497 | } |
487 | 498 | ||
488 | static int platform_drv_probe_fail(struct device *_dev) | 499 | static int platform_drv_probe_fail(struct device *_dev) |
@@ -494,8 +505,13 @@ static int platform_drv_remove(struct device *_dev) | |||
494 | { | 505 | { |
495 | struct platform_driver *drv = to_platform_driver(_dev->driver); | 506 | struct platform_driver *drv = to_platform_driver(_dev->driver); |
496 | struct platform_device *dev = to_platform_device(_dev); | 507 | struct platform_device *dev = to_platform_device(_dev); |
508 | int ret; | ||
509 | |||
510 | ret = drv->remove(dev); | ||
511 | if (ACPI_HANDLE(_dev)) | ||
512 | acpi_dev_pm_detach(_dev, true); | ||
497 | 513 | ||
498 | return drv->remove(dev); | 514 | return ret; |
499 | } | 515 | } |
500 | 516 | ||
501 | static void platform_drv_shutdown(struct device *_dev) | 517 | static void platform_drv_shutdown(struct device *_dev) |
@@ -504,6 +520,8 @@ static void platform_drv_shutdown(struct device *_dev) | |||
504 | struct platform_device *dev = to_platform_device(_dev); | 520 | struct platform_device *dev = to_platform_device(_dev); |
505 | 521 | ||
506 | drv->shutdown(dev); | 522 | drv->shutdown(dev); |
523 | if (ACPI_HANDLE(_dev)) | ||
524 | acpi_dev_pm_detach(_dev, true); | ||
507 | } | 525 | } |
508 | 526 | ||
509 | /** | 527 | /** |
@@ -709,6 +727,10 @@ static int platform_match(struct device *dev, struct device_driver *drv) | |||
709 | if (of_driver_match_device(dev, drv)) | 727 | if (of_driver_match_device(dev, drv)) |
710 | return 1; | 728 | return 1; |
711 | 729 | ||
730 | /* Then try ACPI style match */ | ||
731 | if (acpi_driver_match_device(dev, drv)) | ||
732 | return 1; | ||
733 | |||
712 | /* Then try to match against the id table */ | 734 | /* Then try to match against the id table */ |
713 | if (pdrv->id_table) | 735 | if (pdrv->id_table) |
714 | return platform_match_id(pdrv->id_table, pdev) != NULL; | 736 | return platform_match_id(pdrv->id_table, pdev) != NULL; |
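A hedged sketch of how the new acpi_node handling above might be exercised when registering a platform device; example_register() is hypothetical, and the acpi_node field relies on the companion platform_device_info change that is not shown in this hunk.

static struct platform_device *example_register(acpi_handle handle)
{
	struct platform_device_info pdevinfo = {
		.name	= "example-dev",
		.id	= -1,
	};

	/* The handle is propagated to the device via ACPI_HANDLE_SET() above. */
	pdevinfo.acpi_node.handle = handle;
	return platform_device_register_full(&pdevinfo);
}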
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index eb78e9640c4a..9d8fde709390 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c | |||
@@ -99,7 +99,7 @@ static void __pm_clk_remove(struct pm_clock_entry *ce) | |||
99 | 99 | ||
100 | if (ce->status < PCE_STATUS_ERROR) { | 100 | if (ce->status < PCE_STATUS_ERROR) { |
101 | if (ce->status == PCE_STATUS_ENABLED) | 101 | if (ce->status == PCE_STATUS_ENABLED) |
102 | clk_disable(ce->clk); | 102 | clk_disable_unprepare(ce->clk); |
103 | 103 | ||
104 | if (ce->status >= PCE_STATUS_ACQUIRED) | 104 | if (ce->status >= PCE_STATUS_ACQUIRED) |
105 | clk_put(ce->clk); | 105 | clk_put(ce->clk); |
@@ -396,7 +396,7 @@ static void enable_clock(struct device *dev, const char *con_id) | |||
396 | 396 | ||
397 | clk = clk_get(dev, con_id); | 397 | clk = clk_get(dev, con_id); |
398 | if (!IS_ERR(clk)) { | 398 | if (!IS_ERR(clk)) { |
399 | clk_enable(clk); | 399 | clk_prepare_enable(clk); |
400 | clk_put(clk); | 400 | clk_put(clk); |
401 | dev_info(dev, "Runtime PM disabled, clock forced on.\n"); | 401 | dev_info(dev, "Runtime PM disabled, clock forced on.\n"); |
402 | } | 402 | } |
@@ -413,7 +413,7 @@ static void disable_clock(struct device *dev, const char *con_id) | |||
413 | 413 | ||
414 | clk = clk_get(dev, con_id); | 414 | clk = clk_get(dev, con_id); |
415 | if (!IS_ERR(clk)) { | 415 | if (!IS_ERR(clk)) { |
416 | clk_disable(clk); | 416 | clk_disable_unprepare(clk); |
417 | clk_put(clk); | 417 | clk_put(clk); |
418 | dev_info(dev, "Runtime PM disabled, clock forced off.\n"); | 418 | dev_info(dev, "Runtime PM disabled, clock forced off.\n"); |
419 | } | 419 | } |
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 96b71b6536d6..acc3a8ded29d 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c | |||
@@ -470,10 +470,19 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | |||
470 | return -EBUSY; | 470 | return -EBUSY; |
471 | 471 | ||
472 | not_suspended = 0; | 472 | not_suspended = 0; |
473 | list_for_each_entry(pdd, &genpd->dev_list, list_node) | 473 | list_for_each_entry(pdd, &genpd->dev_list, list_node) { |
474 | enum pm_qos_flags_status stat; | ||
475 | |||
476 | stat = dev_pm_qos_flags(pdd->dev, | ||
477 | PM_QOS_FLAG_NO_POWER_OFF | ||
478 | | PM_QOS_FLAG_REMOTE_WAKEUP); | ||
479 | if (stat > PM_QOS_FLAGS_NONE) | ||
480 | return -EBUSY; | ||
481 | |||
474 | if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) | 482 | if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) |
475 | || pdd->dev->power.irq_safe)) | 483 | || pdd->dev->power.irq_safe)) |
476 | not_suspended++; | 484 | not_suspended++; |
485 | } | ||
477 | 486 | ||
478 | if (not_suspended > genpd->in_progress) | 487 | if (not_suspended > genpd->in_progress) |
479 | return -EBUSY; | 488 | return -EBUSY; |
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index d9468642fc41..50b2831e027d 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/rcupdate.h> | 23 | #include <linux/rcupdate.h> |
24 | #include <linux/opp.h> | 24 | #include <linux/opp.h> |
25 | #include <linux/of.h> | 25 | #include <linux/of.h> |
26 | #include <linux/export.h> | ||
26 | 27 | ||
27 | /* | 28 | /* |
28 | * Internal data structure organization with the OPP layer library is as | 29 | * Internal data structure organization with the OPP layer library is as |
@@ -65,6 +66,7 @@ struct opp { | |||
65 | unsigned long u_volt; | 66 | unsigned long u_volt; |
66 | 67 | ||
67 | struct device_opp *dev_opp; | 68 | struct device_opp *dev_opp; |
69 | struct rcu_head head; | ||
68 | }; | 70 | }; |
69 | 71 | ||
70 | /** | 72 | /** |
@@ -160,6 +162,7 @@ unsigned long opp_get_voltage(struct opp *opp) | |||
160 | 162 | ||
161 | return v; | 163 | return v; |
162 | } | 164 | } |
165 | EXPORT_SYMBOL(opp_get_voltage); | ||
163 | 166 | ||
164 | /** | 167 | /** |
165 | * opp_get_freq() - Gets the frequency corresponding to an available opp | 168 | * opp_get_freq() - Gets the frequency corresponding to an available opp |
@@ -189,6 +192,7 @@ unsigned long opp_get_freq(struct opp *opp) | |||
189 | 192 | ||
190 | return f; | 193 | return f; |
191 | } | 194 | } |
195 | EXPORT_SYMBOL(opp_get_freq); | ||
192 | 196 | ||
193 | /** | 197 | /** |
194 | * opp_get_opp_count() - Get number of opps available in the opp list | 198 | * opp_get_opp_count() - Get number of opps available in the opp list |
@@ -221,6 +225,7 @@ int opp_get_opp_count(struct device *dev) | |||
221 | 225 | ||
222 | return count; | 226 | return count; |
223 | } | 227 | } |
228 | EXPORT_SYMBOL(opp_get_opp_count); | ||
224 | 229 | ||
225 | /** | 230 | /** |
226 | * opp_find_freq_exact() - search for an exact frequency | 231 | * opp_find_freq_exact() - search for an exact frequency |
@@ -230,7 +235,10 @@ int opp_get_opp_count(struct device *dev) | |||
230 | * | 235 | * |
231 | * Searches for exact match in the opp list and returns pointer to the matching | 236 | * Searches for exact match in the opp list and returns pointer to the matching |
232 | * opp if found, else returns ERR_PTR in case of error and should be handled | 237 | * opp if found, else returns ERR_PTR in case of error and should be handled |
233 | * using IS_ERR. | 238 | * using IS_ERR. Error return values can be: |
239 | * EINVAL: for bad pointer | ||
240 | * ERANGE: no match found for search | ||
241 | * ENODEV: if device not found in list of registered devices | ||
234 | * | 242 | * |
235 | * Note: available is a modifier for the search. if available=true, then the | 243 | * Note: available is a modifier for the search. if available=true, then the |
236 | * match is for exact matching frequency and is available in the stored OPP | 244 | * match is for exact matching frequency and is available in the stored OPP |
@@ -249,7 +257,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, | |||
249 | bool available) | 257 | bool available) |
250 | { | 258 | { |
251 | struct device_opp *dev_opp; | 259 | struct device_opp *dev_opp; |
252 | struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); | 260 | struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
253 | 261 | ||
254 | dev_opp = find_device_opp(dev); | 262 | dev_opp = find_device_opp(dev); |
255 | if (IS_ERR(dev_opp)) { | 263 | if (IS_ERR(dev_opp)) { |
@@ -268,6 +276,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, | |||
268 | 276 | ||
269 | return opp; | 277 | return opp; |
270 | } | 278 | } |
279 | EXPORT_SYMBOL(opp_find_freq_exact); | ||
271 | 280 | ||
272 | /** | 281 | /** |
273 | * opp_find_freq_ceil() - Search for an rounded ceil freq | 282 | * opp_find_freq_ceil() - Search for an rounded ceil freq |
@@ -278,7 +287,11 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, | |||
278 | * for a device. | 287 | * for a device. |
279 | * | 288 | * |
280 | * Returns matching *opp and refreshes *freq accordingly, else returns | 289 | * Returns matching *opp and refreshes *freq accordingly, else returns |
281 | * ERR_PTR in case of error and should be handled using IS_ERR. | 290 | * ERR_PTR in case of error and should be handled using IS_ERR. Error return |
291 | * values can be: | ||
292 | * EINVAL: for bad pointer | ||
293 | * ERANGE: no match found for search | ||
294 | * ENODEV: if device not found in list of registered devices | ||
282 | * | 295 | * |
283 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | 296 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu |
284 | * protected pointer. The reason for the same is that the opp pointer which is | 297 | * protected pointer. The reason for the same is that the opp pointer which is |
@@ -289,7 +302,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, | |||
289 | struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) | 302 | struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) |
290 | { | 303 | { |
291 | struct device_opp *dev_opp; | 304 | struct device_opp *dev_opp; |
292 | struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); | 305 | struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
293 | 306 | ||
294 | if (!dev || !freq) { | 307 | if (!dev || !freq) { |
295 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | 308 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); |
@@ -298,7 +311,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) | |||
298 | 311 | ||
299 | dev_opp = find_device_opp(dev); | 312 | dev_opp = find_device_opp(dev); |
300 | if (IS_ERR(dev_opp)) | 313 | if (IS_ERR(dev_opp)) |
301 | return opp; | 314 | return ERR_CAST(dev_opp); |
302 | 315 | ||
303 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { | 316 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { |
304 | if (temp_opp->available && temp_opp->rate >= *freq) { | 317 | if (temp_opp->available && temp_opp->rate >= *freq) { |
@@ -310,6 +323,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) | |||
310 | 323 | ||
311 | return opp; | 324 | return opp; |
312 | } | 325 | } |
326 | EXPORT_SYMBOL(opp_find_freq_ceil); | ||
313 | 327 | ||
314 | /** | 328 | /** |
315 | * opp_find_freq_floor() - Search for a rounded floor freq | 329 | * opp_find_freq_floor() - Search for a rounded floor freq |
@@ -320,7 +334,11 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) | |||
320 | * for a device. | 334 | * for a device. |
321 | * | 335 | * |
322 | * Returns matching *opp and refreshes *freq accordingly, else returns | 336 | * Returns matching *opp and refreshes *freq accordingly, else returns |
323 | * ERR_PTR in case of error and should be handled using IS_ERR. | 337 | * ERR_PTR in case of error and should be handled using IS_ERR. Error return |
338 | * values can be: | ||
339 | * EINVAL: for bad pointer | ||
340 | * ERANGE: no match found for search | ||
341 | * ENODEV: if device not found in list of registered devices | ||
324 | * | 342 | * |
325 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | 343 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu |
326 | * protected pointer. The reason for the same is that the opp pointer which is | 344 | * protected pointer. The reason for the same is that the opp pointer which is |
@@ -331,7 +349,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) | |||
331 | struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) | 349 | struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) |
332 | { | 350 | { |
333 | struct device_opp *dev_opp; | 351 | struct device_opp *dev_opp; |
334 | struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); | 352 | struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
335 | 353 | ||
336 | if (!dev || !freq) { | 354 | if (!dev || !freq) { |
337 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | 355 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); |
@@ -340,7 +358,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) | |||
340 | 358 | ||
341 | dev_opp = find_device_opp(dev); | 359 | dev_opp = find_device_opp(dev); |
342 | if (IS_ERR(dev_opp)) | 360 | if (IS_ERR(dev_opp)) |
343 | return opp; | 361 | return ERR_CAST(dev_opp); |
344 | 362 | ||
345 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { | 363 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { |
346 | if (temp_opp->available) { | 364 | if (temp_opp->available) { |
@@ -356,6 +374,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) | |||
356 | 374 | ||
357 | return opp; | 375 | return opp; |
358 | } | 376 | } |
377 | EXPORT_SYMBOL(opp_find_freq_floor); | ||
359 | 378 | ||
360 | /** | 379 | /** |
361 | * opp_add() - Add an OPP table from a table definitions | 380 | * opp_add() - Add an OPP table from a table definitions |
@@ -512,7 +531,7 @@ static int opp_set_availability(struct device *dev, unsigned long freq, | |||
512 | 531 | ||
513 | list_replace_rcu(&opp->node, &new_opp->node); | 532 | list_replace_rcu(&opp->node, &new_opp->node); |
514 | mutex_unlock(&dev_opp_list_lock); | 533 | mutex_unlock(&dev_opp_list_lock); |
515 | synchronize_rcu(); | 534 | kfree_rcu(opp, head); |
516 | 535 | ||
517 | /* Notify the change of the OPP availability */ | 536 | /* Notify the change of the OPP availability */ |
518 | if (availability_req) | 537 | if (availability_req) |
@@ -522,13 +541,10 @@ static int opp_set_availability(struct device *dev, unsigned long freq, | |||
522 | srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE, | 541 | srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE, |
523 | new_opp); | 542 | new_opp); |
524 | 543 | ||
525 | /* clean up old opp */ | 544 | return 0; |
526 | new_opp = opp; | ||
527 | goto out; | ||
528 | 545 | ||
529 | unlock: | 546 | unlock: |
530 | mutex_unlock(&dev_opp_list_lock); | 547 | mutex_unlock(&dev_opp_list_lock); |
531 | out: | ||
532 | kfree(new_opp); | 548 | kfree(new_opp); |
533 | return r; | 549 | return r; |
534 | } | 550 | } |
@@ -552,6 +568,7 @@ int opp_enable(struct device *dev, unsigned long freq) | |||
552 | { | 568 | { |
553 | return opp_set_availability(dev, freq, true); | 569 | return opp_set_availability(dev, freq, true); |
554 | } | 570 | } |
571 | EXPORT_SYMBOL(opp_enable); | ||
555 | 572 | ||
556 | /** | 573 | /** |
557 | * opp_disable() - Disable a specific OPP | 574 | * opp_disable() - Disable a specific OPP |
@@ -573,6 +590,7 @@ int opp_disable(struct device *dev, unsigned long freq) | |||
573 | { | 590 | { |
574 | return opp_set_availability(dev, freq, false); | 591 | return opp_set_availability(dev, freq, false); |
575 | } | 592 | } |
593 | EXPORT_SYMBOL(opp_disable); | ||
576 | 594 | ||
577 | #ifdef CONFIG_CPU_FREQ | 595 | #ifdef CONFIG_CPU_FREQ |
578 | /** | 596 | /** |
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index 0dbfdf4419af..b16686a0a5a2 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h | |||
@@ -93,8 +93,10 @@ extern void dpm_sysfs_remove(struct device *dev); | |||
93 | extern void rpm_sysfs_remove(struct device *dev); | 93 | extern void rpm_sysfs_remove(struct device *dev); |
94 | extern int wakeup_sysfs_add(struct device *dev); | 94 | extern int wakeup_sysfs_add(struct device *dev); |
95 | extern void wakeup_sysfs_remove(struct device *dev); | 95 | extern void wakeup_sysfs_remove(struct device *dev); |
96 | extern int pm_qos_sysfs_add(struct device *dev); | 96 | extern int pm_qos_sysfs_add_latency(struct device *dev); |
97 | extern void pm_qos_sysfs_remove(struct device *dev); | 97 | extern void pm_qos_sysfs_remove_latency(struct device *dev); |
98 | extern int pm_qos_sysfs_add_flags(struct device *dev); | ||
99 | extern void pm_qos_sysfs_remove_flags(struct device *dev); | ||
98 | 100 | ||
99 | #else /* CONFIG_PM */ | 101 | #else /* CONFIG_PM */ |
100 | 102 | ||
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 74a67e0019a2..ff46387f5308 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/device.h> | 40 | #include <linux/device.h> |
41 | #include <linux/mutex.h> | 41 | #include <linux/mutex.h> |
42 | #include <linux/export.h> | 42 | #include <linux/export.h> |
43 | #include <linux/pm_runtime.h> | ||
43 | 44 | ||
44 | #include "power.h" | 45 | #include "power.h" |
45 | 46 | ||
@@ -48,6 +49,50 @@ static DEFINE_MUTEX(dev_pm_qos_mtx); | |||
48 | static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); | 49 | static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); |
49 | 50 | ||
50 | /** | 51 | /** |
52 | * __dev_pm_qos_flags - Check PM QoS flags for a given device. | ||
53 | * @dev: Device to check the PM QoS flags for. | ||
54 | * @mask: Flags to check against. | ||
55 | * | ||
56 | * This routine must be called with dev->power.lock held. | ||
57 | */ | ||
58 | enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask) | ||
59 | { | ||
60 | struct dev_pm_qos *qos = dev->power.qos; | ||
61 | struct pm_qos_flags *pqf; | ||
62 | s32 val; | ||
63 | |||
64 | if (!qos) | ||
65 | return PM_QOS_FLAGS_UNDEFINED; | ||
66 | |||
67 | pqf = &qos->flags; | ||
68 | if (list_empty(&pqf->list)) | ||
69 | return PM_QOS_FLAGS_UNDEFINED; | ||
70 | |||
71 | val = pqf->effective_flags & mask; | ||
72 | if (val) | ||
73 | return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME; | ||
74 | |||
75 | return PM_QOS_FLAGS_NONE; | ||
76 | } | ||
77 | |||
78 | /** | ||
79 | * dev_pm_qos_flags - Check PM QoS flags for a given device (locked). | ||
80 | * @dev: Device to check the PM QoS flags for. | ||
81 | * @mask: Flags to check against. | ||
82 | */ | ||
83 | enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask) | ||
84 | { | ||
85 | unsigned long irqflags; | ||
86 | enum pm_qos_flags_status ret; | ||
87 | |||
88 | spin_lock_irqsave(&dev->power.lock, irqflags); | ||
89 | ret = __dev_pm_qos_flags(dev, mask); | ||
90 | spin_unlock_irqrestore(&dev->power.lock, irqflags); | ||
91 | |||
92 | return ret; | ||
93 | } | ||
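A sketch of how a bus or power-domain layer might consult the new flags before cutting power, mirroring the pm_genpd_poweroff() check in the domain.c hunk above; example_may_power_off() is a hypothetical helper.

static bool example_may_power_off(struct device *dev)
{
	enum pm_qos_flags_status stat;

	stat = dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF);
	return stat != PM_QOS_FLAGS_SOME && stat != PM_QOS_FLAGS_ALL;
}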
94 | |||
95 | /** | ||
51 | * __dev_pm_qos_read_value - Get PM QoS constraint for a given device. | 96 | * __dev_pm_qos_read_value - Get PM QoS constraint for a given device. |
52 | * @dev: Device to get the PM QoS constraint value for. | 97 | * @dev: Device to get the PM QoS constraint value for. |
53 | * | 98 | * |
@@ -55,9 +100,7 @@ static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); | |||
55 | */ | 100 | */ |
56 | s32 __dev_pm_qos_read_value(struct device *dev) | 101 | s32 __dev_pm_qos_read_value(struct device *dev) |
57 | { | 102 | { |
58 | struct pm_qos_constraints *c = dev->power.constraints; | 103 | return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0; |
59 | |||
60 | return c ? pm_qos_read_value(c) : 0; | ||
61 | } | 104 | } |
62 | 105 | ||
63 | /** | 106 | /** |
@@ -76,30 +119,39 @@ s32 dev_pm_qos_read_value(struct device *dev) | |||
76 | return ret; | 119 | return ret; |
77 | } | 120 | } |
78 | 121 | ||
79 | /* | 122 | /** |
80 | * apply_constraint | 123 | * apply_constraint - Add/modify/remove device PM QoS request. |
81 | * @req: constraint request to apply | 124 | * @req: Constraint request to apply |
82 | * @action: action to perform add/update/remove, of type enum pm_qos_req_action | 125 | * @action: Action to perform (add/update/remove). |
83 | * @value: defines the qos request | 126 | * @value: Value to assign to the QoS request. |
84 | * | 127 | * |
85 | * Internal function to update the constraints list using the PM QoS core | 128 | * Internal function to update the constraints list using the PM QoS core |
86 | * code and if needed call the per-device and the global notification | 129 | * code and if needed call the per-device and the global notification |
87 | * callbacks | 130 | * callbacks |
88 | */ | 131 | */ |
89 | static int apply_constraint(struct dev_pm_qos_request *req, | 132 | static int apply_constraint(struct dev_pm_qos_request *req, |
90 | enum pm_qos_req_action action, int value) | 133 | enum pm_qos_req_action action, s32 value) |
91 | { | 134 | { |
92 | int ret, curr_value; | 135 | struct dev_pm_qos *qos = req->dev->power.qos; |
93 | 136 | int ret; | |
94 | ret = pm_qos_update_target(req->dev->power.constraints, | ||
95 | &req->node, action, value); | ||
96 | 137 | ||
97 | if (ret) { | 138 | switch(req->type) { |
98 | /* Call the global callbacks if needed */ | 139 | case DEV_PM_QOS_LATENCY: |
99 | curr_value = pm_qos_read_value(req->dev->power.constraints); | 140 | ret = pm_qos_update_target(&qos->latency, &req->data.pnode, |
100 | blocking_notifier_call_chain(&dev_pm_notifiers, | 141 | action, value); |
101 | (unsigned long)curr_value, | 142 | if (ret) { |
102 | req); | 143 | value = pm_qos_read_value(&qos->latency); |
144 | blocking_notifier_call_chain(&dev_pm_notifiers, | ||
145 | (unsigned long)value, | ||
146 | req); | ||
147 | } | ||
148 | break; | ||
149 | case DEV_PM_QOS_FLAGS: | ||
150 | ret = pm_qos_update_flags(&qos->flags, &req->data.flr, | ||
151 | action, value); | ||
152 | break; | ||
153 | default: | ||
154 | ret = -EINVAL; | ||
103 | } | 155 | } |
104 | 156 | ||
105 | return ret; | 157 | return ret; |
@@ -114,28 +166,32 @@ static int apply_constraint(struct dev_pm_qos_request *req, | |||
114 | */ | 166 | */ |
115 | static int dev_pm_qos_constraints_allocate(struct device *dev) | 167 | static int dev_pm_qos_constraints_allocate(struct device *dev) |
116 | { | 168 | { |
169 | struct dev_pm_qos *qos; | ||
117 | struct pm_qos_constraints *c; | 170 | struct pm_qos_constraints *c; |
118 | struct blocking_notifier_head *n; | 171 | struct blocking_notifier_head *n; |
119 | 172 | ||
120 | c = kzalloc(sizeof(*c), GFP_KERNEL); | 173 | qos = kzalloc(sizeof(*qos), GFP_KERNEL); |
121 | if (!c) | 174 | if (!qos) |
122 | return -ENOMEM; | 175 | return -ENOMEM; |
123 | 176 | ||
124 | n = kzalloc(sizeof(*n), GFP_KERNEL); | 177 | n = kzalloc(sizeof(*n), GFP_KERNEL); |
125 | if (!n) { | 178 | if (!n) { |
126 | kfree(c); | 179 | kfree(qos); |
127 | return -ENOMEM; | 180 | return -ENOMEM; |
128 | } | 181 | } |
129 | BLOCKING_INIT_NOTIFIER_HEAD(n); | 182 | BLOCKING_INIT_NOTIFIER_HEAD(n); |
130 | 183 | ||
184 | c = &qos->latency; | ||
131 | plist_head_init(&c->list); | 185 | plist_head_init(&c->list); |
132 | c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; | 186 | c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; |
133 | c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; | 187 | c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; |
134 | c->type = PM_QOS_MIN; | 188 | c->type = PM_QOS_MIN; |
135 | c->notifiers = n; | 189 | c->notifiers = n; |
136 | 190 | ||
191 | INIT_LIST_HEAD(&qos->flags.list); | ||
192 | |||
137 | spin_lock_irq(&dev->power.lock); | 193 | spin_lock_irq(&dev->power.lock); |
138 | dev->power.constraints = c; | 194 | dev->power.qos = qos; |
139 | spin_unlock_irq(&dev->power.lock); | 195 | spin_unlock_irq(&dev->power.lock); |
140 | 196 | ||
141 | return 0; | 197 | return 0; |
@@ -151,7 +207,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) | |||
151 | void dev_pm_qos_constraints_init(struct device *dev) | 207 | void dev_pm_qos_constraints_init(struct device *dev) |
152 | { | 208 | { |
153 | mutex_lock(&dev_pm_qos_mtx); | 209 | mutex_lock(&dev_pm_qos_mtx); |
154 | dev->power.constraints = NULL; | 210 | dev->power.qos = NULL; |
155 | dev->power.power_state = PMSG_ON; | 211 | dev->power.power_state = PMSG_ON; |
156 | mutex_unlock(&dev_pm_qos_mtx); | 212 | mutex_unlock(&dev_pm_qos_mtx); |
157 | } | 213 | } |
@@ -164,24 +220,28 @@ void dev_pm_qos_constraints_init(struct device *dev) | |||
164 | */ | 220 | */ |
165 | void dev_pm_qos_constraints_destroy(struct device *dev) | 221 | void dev_pm_qos_constraints_destroy(struct device *dev) |
166 | { | 222 | { |
223 | struct dev_pm_qos *qos; | ||
167 | struct dev_pm_qos_request *req, *tmp; | 224 | struct dev_pm_qos_request *req, *tmp; |
168 | struct pm_qos_constraints *c; | 225 | struct pm_qos_constraints *c; |
226 | struct pm_qos_flags *f; | ||
169 | 227 | ||
170 | /* | 228 | /* |
171 | * If the device's PM QoS resume latency limit has been exposed to user | 229 | * If the device's PM QoS resume latency limit or PM QoS flags have been |
172 | * space, it has to be hidden at this point. | 230 | * exposed to user space, they have to be hidden at this point. |
173 | */ | 231 | */ |
174 | dev_pm_qos_hide_latency_limit(dev); | 232 | dev_pm_qos_hide_latency_limit(dev); |
233 | dev_pm_qos_hide_flags(dev); | ||
175 | 234 | ||
176 | mutex_lock(&dev_pm_qos_mtx); | 235 | mutex_lock(&dev_pm_qos_mtx); |
177 | 236 | ||
178 | dev->power.power_state = PMSG_INVALID; | 237 | dev->power.power_state = PMSG_INVALID; |
179 | c = dev->power.constraints; | 238 | qos = dev->power.qos; |
180 | if (!c) | 239 | if (!qos) |
181 | goto out; | 240 | goto out; |
182 | 241 | ||
183 | /* Flush the constraints list for the device */ | 242 | /* Flush the constraints lists for the device. */ |
184 | plist_for_each_entry_safe(req, tmp, &c->list, node) { | 243 | c = &qos->latency; |
244 | plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { | ||
185 | /* | 245 | /* |
186 | * Update constraints list and call the notification | 246 | * Update constraints list and call the notification |
187 | * callbacks if needed | 247 | * callbacks if needed |
@@ -189,13 +249,18 @@ void dev_pm_qos_constraints_destroy(struct device *dev) | |||
189 | apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); | 249 | apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); |
190 | memset(req, 0, sizeof(*req)); | 250 | memset(req, 0, sizeof(*req)); |
191 | } | 251 | } |
252 | f = &qos->flags; | ||
253 | list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) { | ||
254 | apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); | ||
255 | memset(req, 0, sizeof(*req)); | ||
256 | } | ||
192 | 257 | ||
193 | spin_lock_irq(&dev->power.lock); | 258 | spin_lock_irq(&dev->power.lock); |
194 | dev->power.constraints = NULL; | 259 | dev->power.qos = NULL; |
195 | spin_unlock_irq(&dev->power.lock); | 260 | spin_unlock_irq(&dev->power.lock); |
196 | 261 | ||
197 | kfree(c->notifiers); | 262 | kfree(c->notifiers); |
198 | kfree(c); | 263 | kfree(qos); |
199 | 264 | ||
200 | out: | 265 | out: |
201 | mutex_unlock(&dev_pm_qos_mtx); | 266 | mutex_unlock(&dev_pm_qos_mtx); |
@@ -205,6 +270,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) | |||
205 | * dev_pm_qos_add_request - inserts new qos request into the list | 270 | * dev_pm_qos_add_request - inserts new qos request into the list |
206 | * @dev: target device for the constraint | 271 | * @dev: target device for the constraint |
207 | * @req: pointer to a preallocated handle | 272 | * @req: pointer to a preallocated handle |
273 | * @type: type of the request | ||
208 | * @value: defines the qos request | 274 | * @value: defines the qos request |
209 | * | 275 | * |
210 | * This function inserts a new entry in the device constraints list of | 276 | * This function inserts a new entry in the device constraints list of |
@@ -218,9 +284,12 @@ void dev_pm_qos_constraints_destroy(struct device *dev) | |||
218 | * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory | 284 | * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory |
219 | * to allocate for data structures, -ENODEV if the device has just been removed | 285 | * to allocate for data structures, -ENODEV if the device has just been removed |
220 | * from the system. | 286 | * from the system. |
287 | * | ||
288 | * Callers should ensure that the target device is not RPM_SUSPENDED before | ||
289 | * using this function for requests of type DEV_PM_QOS_FLAGS. | ||
221 | */ | 290 | */ |
222 | int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, | 291 | int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, |
223 | s32 value) | 292 | enum dev_pm_qos_req_type type, s32 value) |
224 | { | 293 | { |
225 | int ret = 0; | 294 | int ret = 0; |
226 | 295 | ||
@@ -235,7 +304,7 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, | |||
235 | 304 | ||
236 | mutex_lock(&dev_pm_qos_mtx); | 305 | mutex_lock(&dev_pm_qos_mtx); |
237 | 306 | ||
238 | if (!dev->power.constraints) { | 307 | if (!dev->power.qos) { |
239 | if (dev->power.power_state.event == PM_EVENT_INVALID) { | 308 | if (dev->power.power_state.event == PM_EVENT_INVALID) { |
240 | /* The device has been removed from the system. */ | 309 | /* The device has been removed from the system. */ |
241 | req->dev = NULL; | 310 | req->dev = NULL; |
@@ -251,8 +320,10 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, | |||
251 | } | 320 | } |
252 | } | 321 | } |
253 | 322 | ||
254 | if (!ret) | 323 | if (!ret) { |
324 | req->type = type; | ||
255 | ret = apply_constraint(req, PM_QOS_ADD_REQ, value); | 325 | ret = apply_constraint(req, PM_QOS_ADD_REQ, value); |
326 | } | ||
256 | 327 | ||
257 | out: | 328 | out: |
258 | mutex_unlock(&dev_pm_qos_mtx); | 329 | mutex_unlock(&dev_pm_qos_mtx); |
@@ -262,6 +333,37 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, | |||
262 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); | 333 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); |
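A minimal sketch of the updated call signature with an explicit request type; example_req and example_add_latency_request() are hypothetical names, and the 100 us value is arbitrary.

static struct dev_pm_qos_request example_req;

static int example_add_latency_request(struct device *dev)
{
	/* Request no more than 100 us of device resume latency. */
	return dev_pm_qos_add_request(dev, &example_req,
				      DEV_PM_QOS_LATENCY, 100);
}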
263 | 334 | ||
264 | /** | 335 | /** |
336 | * __dev_pm_qos_update_request - Modify an existing device PM QoS request. | ||
337 | * @req : PM QoS request to modify. | ||
338 | * @new_value: New value to request. | ||
339 | */ | ||
340 | static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, | ||
341 | s32 new_value) | ||
342 | { | ||
343 | s32 curr_value; | ||
344 | int ret = 0; | ||
345 | |||
346 | if (!req->dev->power.qos) | ||
347 | return -ENODEV; | ||
348 | |||
349 | switch(req->type) { | ||
350 | case DEV_PM_QOS_LATENCY: | ||
351 | curr_value = req->data.pnode.prio; | ||
352 | break; | ||
353 | case DEV_PM_QOS_FLAGS: | ||
354 | curr_value = req->data.flr.flags; | ||
355 | break; | ||
356 | default: | ||
357 | return -EINVAL; | ||
358 | } | ||
359 | |||
360 | if (curr_value != new_value) | ||
361 | ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value); | ||
362 | |||
363 | return ret; | ||
364 | } | ||
365 | |||
366 | /** | ||
265 | * dev_pm_qos_update_request - modifies an existing qos request | 367 | * dev_pm_qos_update_request - modifies an existing qos request |
266 | * @req : handle to list element holding a dev_pm_qos request to use | 368 | * @req : handle to list element holding a dev_pm_qos request to use |
267 | * @new_value: defines the qos request | 369 | * @new_value: defines the qos request |
@@ -275,11 +377,13 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); | |||
275 | * 0 if the aggregated constraint value has not changed, | 377 | * 0 if the aggregated constraint value has not changed, |
276 | * -EINVAL in case of wrong parameters, -ENODEV if the device has been | 378 | * -EINVAL in case of wrong parameters, -ENODEV if the device has been |
277 | * removed from the system | 379 | * removed from the system |
380 | * | ||
381 | * Callers should ensure that the target device is not RPM_SUSPENDED before | ||
382 | * using this function for requests of type DEV_PM_QOS_FLAGS. | ||
278 | */ | 383 | */ |
279 | int dev_pm_qos_update_request(struct dev_pm_qos_request *req, | 384 | int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value) |
280 | s32 new_value) | ||
281 | { | 385 | { |
282 | int ret = 0; | 386 | int ret; |
283 | 387 | ||
284 | if (!req) /*guard against callers passing in null */ | 388 | if (!req) /*guard against callers passing in null */ |
285 | return -EINVAL; | 389 | return -EINVAL; |
@@ -289,17 +393,9 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, | |||
289 | return -EINVAL; | 393 | return -EINVAL; |
290 | 394 | ||
291 | mutex_lock(&dev_pm_qos_mtx); | 395 | mutex_lock(&dev_pm_qos_mtx); |
292 | 396 | ret = __dev_pm_qos_update_request(req, new_value); | |
293 | if (req->dev->power.constraints) { | ||
294 | if (new_value != req->node.prio) | ||
295 | ret = apply_constraint(req, PM_QOS_UPDATE_REQ, | ||
296 | new_value); | ||
297 | } else { | ||
298 | /* Return if the device has been removed */ | ||
299 | ret = -ENODEV; | ||
300 | } | ||
301 | |||
302 | mutex_unlock(&dev_pm_qos_mtx); | 397 | mutex_unlock(&dev_pm_qos_mtx); |
398 | |||
303 | return ret; | 399 | return ret; |
304 | } | 400 | } |
305 | EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); | 401 | EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); |
@@ -315,6 +411,9 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); | |||
315 | * 0 if the aggregated constraint value has not changed, | 411 | * 0 if the aggregated constraint value has not changed, |
316 | * -EINVAL in case of wrong parameters, -ENODEV if the device has been | 412 | * -EINVAL in case of wrong parameters, -ENODEV if the device has been |
317 | * removed from the system | 413 | * removed from the system |
414 | * | ||
415 | * Callers should ensure that the target device is not RPM_SUSPENDED before | ||
416 | * using this function for requests of type DEV_PM_QOS_FLAGS. | ||
318 | */ | 417 | */ |
319 | int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) | 418 | int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) |
320 | { | 419 | { |
@@ -329,7 +428,7 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) | |||
329 | 428 | ||
330 | mutex_lock(&dev_pm_qos_mtx); | 429 | mutex_lock(&dev_pm_qos_mtx); |
331 | 430 | ||
332 | if (req->dev->power.constraints) { | 431 | if (req->dev->power.qos) { |
333 | ret = apply_constraint(req, PM_QOS_REMOVE_REQ, | 432 | ret = apply_constraint(req, PM_QOS_REMOVE_REQ, |
334 | PM_QOS_DEFAULT_VALUE); | 433 | PM_QOS_DEFAULT_VALUE); |
335 | memset(req, 0, sizeof(*req)); | 434 | memset(req, 0, sizeof(*req)); |
@@ -362,13 +461,13 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) | |||
362 | 461 | ||
363 | mutex_lock(&dev_pm_qos_mtx); | 462 | mutex_lock(&dev_pm_qos_mtx); |
364 | 463 | ||
365 | if (!dev->power.constraints) | 464 | if (!dev->power.qos) |
366 | ret = dev->power.power_state.event != PM_EVENT_INVALID ? | 465 | ret = dev->power.power_state.event != PM_EVENT_INVALID ? |
367 | dev_pm_qos_constraints_allocate(dev) : -ENODEV; | 466 | dev_pm_qos_constraints_allocate(dev) : -ENODEV; |
368 | 467 | ||
369 | if (!ret) | 468 | if (!ret) |
370 | ret = blocking_notifier_chain_register( | 469 | ret = blocking_notifier_chain_register( |
371 | dev->power.constraints->notifiers, notifier); | 470 | dev->power.qos->latency.notifiers, notifier); |
372 | 471 | ||
373 | mutex_unlock(&dev_pm_qos_mtx); | 472 | mutex_unlock(&dev_pm_qos_mtx); |
374 | return ret; | 473 | return ret; |
@@ -393,9 +492,9 @@ int dev_pm_qos_remove_notifier(struct device *dev, | |||
393 | mutex_lock(&dev_pm_qos_mtx); | 492 | mutex_lock(&dev_pm_qos_mtx); |
394 | 493 | ||
395 | /* Silently return if the constraints object is not present. */ | 494 | /* Silently return if the constraints object is not present. */ |
396 | if (dev->power.constraints) | 495 | if (dev->power.qos) |
397 | retval = blocking_notifier_chain_unregister( | 496 | retval = blocking_notifier_chain_unregister( |
398 | dev->power.constraints->notifiers, | 497 | dev->power.qos->latency.notifiers, |
399 | notifier); | 498 | notifier); |
400 | 499 | ||
401 | mutex_unlock(&dev_pm_qos_mtx); | 500 | mutex_unlock(&dev_pm_qos_mtx); |
@@ -449,9 +548,10 @@ int dev_pm_qos_add_ancestor_request(struct device *dev, | |||
449 | ancestor = ancestor->parent; | 548 | ancestor = ancestor->parent; |
450 | 549 | ||
451 | if (ancestor) | 550 | if (ancestor) |
452 | error = dev_pm_qos_add_request(ancestor, req, value); | 551 | error = dev_pm_qos_add_request(ancestor, req, |
552 | DEV_PM_QOS_LATENCY, value); | ||
453 | 553 | ||
454 | if (error) | 554 | if (error < 0) |
455 | req->dev = NULL; | 555 | req->dev = NULL; |
456 | 556 | ||
457 | return error; | 557 | return error; |
@@ -459,10 +559,19 @@ int dev_pm_qos_add_ancestor_request(struct device *dev, | |||
459 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); | 559 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); |
460 | 560 | ||
461 | #ifdef CONFIG_PM_RUNTIME | 561 | #ifdef CONFIG_PM_RUNTIME |
462 | static void __dev_pm_qos_drop_user_request(struct device *dev) | 562 | static void __dev_pm_qos_drop_user_request(struct device *dev, |
563 | enum dev_pm_qos_req_type type) | ||
463 | { | 564 | { |
464 | dev_pm_qos_remove_request(dev->power.pq_req); | 565 | switch(type) { |
465 | dev->power.pq_req = NULL; | 566 | case DEV_PM_QOS_LATENCY: |
567 | dev_pm_qos_remove_request(dev->power.qos->latency_req); | ||
568 | dev->power.qos->latency_req = NULL; | ||
569 | break; | ||
570 | case DEV_PM_QOS_FLAGS: | ||
571 | dev_pm_qos_remove_request(dev->power.qos->flags_req); | ||
572 | dev->power.qos->flags_req = NULL; | ||
573 | break; | ||
574 | } | ||
466 | } | 575 | } |
467 | 576 | ||
468 | /** | 577 | /** |
@@ -478,21 +587,21 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) | |||
478 | if (!device_is_registered(dev) || value < 0) | 587 | if (!device_is_registered(dev) || value < 0) |
479 | return -EINVAL; | 588 | return -EINVAL; |
480 | 589 | ||
481 | if (dev->power.pq_req) | 590 | if (dev->power.qos && dev->power.qos->latency_req) |
482 | return -EEXIST; | 591 | return -EEXIST; |
483 | 592 | ||
484 | req = kzalloc(sizeof(*req), GFP_KERNEL); | 593 | req = kzalloc(sizeof(*req), GFP_KERNEL); |
485 | if (!req) | 594 | if (!req) |
486 | return -ENOMEM; | 595 | return -ENOMEM; |
487 | 596 | ||
488 | ret = dev_pm_qos_add_request(dev, req, value); | 597 | ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value); |
489 | if (ret < 0) | 598 | if (ret < 0) |
490 | return ret; | 599 | return ret; |
491 | 600 | ||
492 | dev->power.pq_req = req; | 601 | dev->power.qos->latency_req = req; |
493 | ret = pm_qos_sysfs_add(dev); | 602 | ret = pm_qos_sysfs_add_latency(dev); |
494 | if (ret) | 603 | if (ret) |
495 | __dev_pm_qos_drop_user_request(dev); | 604 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); |
496 | 605 | ||
497 | return ret; | 606 | return ret; |
498 | } | 607 | } |
@@ -504,10 +613,92 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); | |||
504 | */ | 613 | */ |
505 | void dev_pm_qos_hide_latency_limit(struct device *dev) | 614 | void dev_pm_qos_hide_latency_limit(struct device *dev) |
506 | { | 615 | { |
507 | if (dev->power.pq_req) { | 616 | if (dev->power.qos && dev->power.qos->latency_req) { |
508 | pm_qos_sysfs_remove(dev); | 617 | pm_qos_sysfs_remove_latency(dev); |
509 | __dev_pm_qos_drop_user_request(dev); | 618 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); |
510 | } | 619 | } |
511 | } | 620 | } |
512 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); | 621 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); |
622 | |||
623 | /** | ||
624 | * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space. | ||
625 | * @dev: Device whose PM QoS flags are to be exposed to user space. | ||
626 | * @val: Initial values of the flags. | ||
627 | */ | ||
628 | int dev_pm_qos_expose_flags(struct device *dev, s32 val) | ||
629 | { | ||
630 | struct dev_pm_qos_request *req; | ||
631 | int ret; | ||
632 | |||
633 | if (!device_is_registered(dev)) | ||
634 | return -EINVAL; | ||
635 | |||
636 | if (dev->power.qos && dev->power.qos->flags_req) | ||
637 | return -EEXIST; | ||
638 | |||
639 | req = kzalloc(sizeof(*req), GFP_KERNEL); | ||
640 | if (!req) | ||
641 | return -ENOMEM; | ||
642 | |||
643 | pm_runtime_get_sync(dev); | ||
644 | ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val); | ||
645 | if (ret < 0) | ||
646 | goto fail; | ||
647 | |||
648 | dev->power.qos->flags_req = req; | ||
649 | ret = pm_qos_sysfs_add_flags(dev); | ||
650 | if (ret) | ||
651 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); | ||
652 | |||
653 | fail: | ||
654 | pm_runtime_put(dev); | ||
655 | return ret; | ||
656 | } | ||
657 | EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); | ||
658 | |||
659 | /** | ||
660 | * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space. | ||
661 | * @dev: Device whose PM QoS flags are to be hidden from user space. | ||
662 | */ | ||
663 | void dev_pm_qos_hide_flags(struct device *dev) | ||
664 | { | ||
665 | if (dev->power.qos && dev->power.qos->flags_req) { | ||
666 | pm_qos_sysfs_remove_flags(dev); | ||
667 | pm_runtime_get_sync(dev); | ||
668 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); | ||
669 | pm_runtime_put(dev); | ||
670 | } | ||
671 | } | ||
672 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); | ||
673 | |||
674 | /** | ||
675 | * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space. | ||
676 | * @dev: Device to update the PM QoS flags request for. | ||
677 | * @mask: Flags to set/clear. | ||
678 | * @set: Whether to set or clear the flags (true means set). | ||
679 | */ | ||
680 | int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set) | ||
681 | { | ||
682 | s32 value; | ||
683 | int ret; | ||
684 | |||
685 | if (!dev->power.qos || !dev->power.qos->flags_req) | ||
686 | return -EINVAL; | ||
687 | |||
688 | pm_runtime_get_sync(dev); | ||
689 | mutex_lock(&dev_pm_qos_mtx); | ||
690 | |||
691 | value = dev_pm_qos_requested_flags(dev); | ||
692 | if (set) | ||
693 | value |= mask; | ||
694 | else | ||
695 | value &= ~mask; | ||
696 | |||
697 | ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value); | ||
698 | |||
699 | mutex_unlock(&dev_pm_qos_mtx); | ||
700 | pm_runtime_put(dev); | ||
701 | |||
702 | return ret; | ||
703 | } | ||
513 | #endif /* CONFIG_PM_RUNTIME */ | 704 | #endif /* CONFIG_PM_RUNTIME */ |
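For orientation, here is a minimal sketch of how a driver might use the flags interface added in this hunk. Only dev_pm_qos_expose_flags(), dev_pm_qos_update_flags(), dev_pm_qos_hide_flags() and PM_QOS_FLAG_NO_POWER_OFF come from this series; the probe/remove hooks and their names are invented for illustration.

#include <linux/device.h>
#include <linux/pm_qos.h>

/* Hypothetical consumer of the new interface; only the dev_pm_qos_*()
 * calls and PM_QOS_FLAG_NO_POWER_OFF are defined by this series. */
static int example_probe(struct device *dev)
{
        int ret;

        /* Create the flags request and its sysfs files, all flags clear. */
        ret = dev_pm_qos_expose_flags(dev, 0);
        if (ret)
                return ret;

        /* Ask the platform code not to power the device off at run time. */
        return dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, true);
}

static void example_remove(struct device *dev)
{
        /* Drops the user-space request and removes the sysfs files again. */
        dev_pm_qos_hide_flags(dev);
}

Note the runtime PM get/put bracketing in the hunk itself: the device is resumed while its flags request is created, updated or dropped, so the flag change is applied against a known power state.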
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index b91dc6f1e914..50d16e3cb0a9 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c | |||
@@ -221,7 +221,7 @@ static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, | |||
221 | static ssize_t pm_qos_latency_show(struct device *dev, | 221 | static ssize_t pm_qos_latency_show(struct device *dev, |
222 | struct device_attribute *attr, char *buf) | 222 | struct device_attribute *attr, char *buf) |
223 | { | 223 | { |
224 | return sprintf(buf, "%d\n", dev->power.pq_req->node.prio); | 224 | return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev)); |
225 | } | 225 | } |
226 | 226 | ||
227 | static ssize_t pm_qos_latency_store(struct device *dev, | 227 | static ssize_t pm_qos_latency_store(struct device *dev, |
@@ -237,12 +237,66 @@ static ssize_t pm_qos_latency_store(struct device *dev, | |||
237 | if (value < 0) | 237 | if (value < 0) |
238 | return -EINVAL; | 238 | return -EINVAL; |
239 | 239 | ||
240 | ret = dev_pm_qos_update_request(dev->power.pq_req, value); | 240 | ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value); |
241 | return ret < 0 ? ret : n; | 241 | return ret < 0 ? ret : n; |
242 | } | 242 | } |
243 | 243 | ||
244 | static DEVICE_ATTR(pm_qos_resume_latency_us, 0644, | 244 | static DEVICE_ATTR(pm_qos_resume_latency_us, 0644, |
245 | pm_qos_latency_show, pm_qos_latency_store); | 245 | pm_qos_latency_show, pm_qos_latency_store); |
246 | |||
247 | static ssize_t pm_qos_no_power_off_show(struct device *dev, | ||
248 | struct device_attribute *attr, | ||
249 | char *buf) | ||
250 | { | ||
251 | return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev) | ||
252 | & PM_QOS_FLAG_NO_POWER_OFF)); | ||
253 | } | ||
254 | |||
255 | static ssize_t pm_qos_no_power_off_store(struct device *dev, | ||
256 | struct device_attribute *attr, | ||
257 | const char *buf, size_t n) | ||
258 | { | ||
259 | int ret; | ||
260 | |||
261 | if (kstrtoint(buf, 0, &ret)) | ||
262 | return -EINVAL; | ||
263 | |||
264 | if (ret != 0 && ret != 1) | ||
265 | return -EINVAL; | ||
266 | |||
267 | ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret); | ||
268 | return ret < 0 ? ret : n; | ||
269 | } | ||
270 | |||
271 | static DEVICE_ATTR(pm_qos_no_power_off, 0644, | ||
272 | pm_qos_no_power_off_show, pm_qos_no_power_off_store); | ||
273 | |||
274 | static ssize_t pm_qos_remote_wakeup_show(struct device *dev, | ||
275 | struct device_attribute *attr, | ||
276 | char *buf) | ||
277 | { | ||
278 | return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev) | ||
279 | & PM_QOS_FLAG_REMOTE_WAKEUP)); | ||
280 | } | ||
281 | |||
282 | static ssize_t pm_qos_remote_wakeup_store(struct device *dev, | ||
283 | struct device_attribute *attr, | ||
284 | const char *buf, size_t n) | ||
285 | { | ||
286 | int ret; | ||
287 | |||
288 | if (kstrtoint(buf, 0, &ret)) | ||
289 | return -EINVAL; | ||
290 | |||
291 | if (ret != 0 && ret != 1) | ||
292 | return -EINVAL; | ||
293 | |||
294 | ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP, ret); | ||
295 | return ret < 0 ? ret : n; | ||
296 | } | ||
297 | |||
298 | static DEVICE_ATTR(pm_qos_remote_wakeup, 0644, | ||
299 | pm_qos_remote_wakeup_show, pm_qos_remote_wakeup_store); | ||
246 | #endif /* CONFIG_PM_RUNTIME */ | 300 | #endif /* CONFIG_PM_RUNTIME */ |
247 | 301 | ||
248 | #ifdef CONFIG_PM_SLEEP | 302 | #ifdef CONFIG_PM_SLEEP |
@@ -564,15 +618,27 @@ static struct attribute_group pm_runtime_attr_group = { | |||
564 | .attrs = runtime_attrs, | 618 | .attrs = runtime_attrs, |
565 | }; | 619 | }; |
566 | 620 | ||
567 | static struct attribute *pm_qos_attrs[] = { | 621 | static struct attribute *pm_qos_latency_attrs[] = { |
568 | #ifdef CONFIG_PM_RUNTIME | 622 | #ifdef CONFIG_PM_RUNTIME |
569 | &dev_attr_pm_qos_resume_latency_us.attr, | 623 | &dev_attr_pm_qos_resume_latency_us.attr, |
570 | #endif /* CONFIG_PM_RUNTIME */ | 624 | #endif /* CONFIG_PM_RUNTIME */ |
571 | NULL, | 625 | NULL, |
572 | }; | 626 | }; |
573 | static struct attribute_group pm_qos_attr_group = { | 627 | static struct attribute_group pm_qos_latency_attr_group = { |
574 | .name = power_group_name, | 628 | .name = power_group_name, |
575 | .attrs = pm_qos_attrs, | 629 | .attrs = pm_qos_latency_attrs, |
630 | }; | ||
631 | |||
632 | static struct attribute *pm_qos_flags_attrs[] = { | ||
633 | #ifdef CONFIG_PM_RUNTIME | ||
634 | &dev_attr_pm_qos_no_power_off.attr, | ||
635 | &dev_attr_pm_qos_remote_wakeup.attr, | ||
636 | #endif /* CONFIG_PM_RUNTIME */ | ||
637 | NULL, | ||
638 | }; | ||
639 | static struct attribute_group pm_qos_flags_attr_group = { | ||
640 | .name = power_group_name, | ||
641 | .attrs = pm_qos_flags_attrs, | ||
576 | }; | 642 | }; |
577 | 643 | ||
578 | int dpm_sysfs_add(struct device *dev) | 644 | int dpm_sysfs_add(struct device *dev) |
@@ -615,14 +681,24 @@ void wakeup_sysfs_remove(struct device *dev) | |||
615 | sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); | 681 | sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); |
616 | } | 682 | } |
617 | 683 | ||
618 | int pm_qos_sysfs_add(struct device *dev) | 684 | int pm_qos_sysfs_add_latency(struct device *dev) |
685 | { | ||
686 | return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group); | ||
687 | } | ||
688 | |||
689 | void pm_qos_sysfs_remove_latency(struct device *dev) | ||
690 | { | ||
691 | sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group); | ||
692 | } | ||
693 | |||
694 | int pm_qos_sysfs_add_flags(struct device *dev) | ||
619 | { | 695 | { |
620 | return sysfs_merge_group(&dev->kobj, &pm_qos_attr_group); | 696 | return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group); |
621 | } | 697 | } |
622 | 698 | ||
623 | void pm_qos_sysfs_remove(struct device *dev) | 699 | void pm_qos_sysfs_remove_flags(struct device *dev) |
624 | { | 700 | { |
625 | sysfs_unmerge_group(&dev->kobj, &pm_qos_attr_group); | 701 | sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group); |
626 | } | 702 | } |
627 | 703 | ||
628 | void rpm_sysfs_remove(struct device *dev) | 704 | void rpm_sysfs_remove(struct device *dev) |
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 3804a0af3ef1..9fe4f1865558 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c | |||
@@ -935,7 +935,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail) | |||
935 | 935 | ||
936 | /* cf. http://lkml.org/lkml/2006/10/31/28 */ | 936 | /* cf. http://lkml.org/lkml/2006/10/31/28 */ |
937 | if (!fastfail) | 937 | if (!fastfail) |
938 | q->request_fn(q); | 938 | __blk_run_queue(q); |
939 | } | 939 | } |
940 | 940 | ||
941 | static void | 941 | static void |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 1c49d7173966..2ddd64a9ffde 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -4330,6 +4330,7 @@ out_unreg_region: | |||
4330 | out_unreg_blkdev: | 4330 | out_unreg_blkdev: |
4331 | unregister_blkdev(FLOPPY_MAJOR, "fd"); | 4331 | unregister_blkdev(FLOPPY_MAJOR, "fd"); |
4332 | out_put_disk: | 4332 | out_put_disk: |
4333 | destroy_workqueue(floppy_wq); | ||
4333 | for (drive = 0; drive < N_DRIVE; drive++) { | 4334 | for (drive = 0; drive < N_DRIVE; drive++) { |
4334 | if (!disks[drive]) | 4335 | if (!disks[drive]) |
4335 | break; | 4336 | break; |
@@ -4340,7 +4341,6 @@ out_put_disk: | |||
4340 | } | 4341 | } |
4341 | put_disk(disks[drive]); | 4342 | put_disk(disks[drive]); |
4342 | } | 4343 | } |
4343 | destroy_workqueue(floppy_wq); | ||
4344 | return err; | 4344 | return err; |
4345 | } | 4345 | } |
4346 | 4346 | ||
@@ -4555,6 +4555,8 @@ static void __exit floppy_module_exit(void) | |||
4555 | unregister_blkdev(FLOPPY_MAJOR, "fd"); | 4555 | unregister_blkdev(FLOPPY_MAJOR, "fd"); |
4556 | platform_driver_unregister(&floppy_driver); | 4556 | platform_driver_unregister(&floppy_driver); |
4557 | 4557 | ||
4558 | destroy_workqueue(floppy_wq); | ||
4559 | |||
4558 | for (drive = 0; drive < N_DRIVE; drive++) { | 4560 | for (drive = 0; drive < N_DRIVE; drive++) { |
4559 | del_timer_sync(&motor_off_timer[drive]); | 4561 | del_timer_sync(&motor_off_timer[drive]); |
4560 | 4562 | ||
@@ -4578,7 +4580,6 @@ static void __exit floppy_module_exit(void) | |||
4578 | 4580 | ||
4579 | cancel_delayed_work_sync(&fd_timeout); | 4581 | cancel_delayed_work_sync(&fd_timeout); |
4580 | cancel_delayed_work_sync(&fd_timer); | 4582 | cancel_delayed_work_sync(&fd_timer); |
4581 | destroy_workqueue(floppy_wq); | ||
4582 | 4583 | ||
4583 | if (atomic_read(&usage_count)) | 4584 | if (atomic_read(&usage_count)) |
4584 | floppy_release_irq_and_dma(); | 4585 | floppy_release_irq_and_dma(); |
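The floppy hunks only move destroy_workqueue(floppy_wq) earlier in the error and exit paths, so that queued work is flushed before the disks it may touch are released. The ordering rule they follow, as an illustrative sketch; struct example_dev and the function name are made up, the helpers are the usual kernel ones.

/* Illustrative teardown ordering for a driver-private workqueue whose
 * work items dereference per-disk state; names are hypothetical. */
struct example_dev {
        struct timer_list       poll_timer;
        struct workqueue_struct *wq;
        struct gendisk          *disk;
};

static void example_teardown(struct example_dev *edev)
{
        /* 1. Stop anything that could queue new work. */
        del_timer_sync(&edev->poll_timer);

        /* 2. Flush and destroy the workqueue while the objects its
         *    work items use are still alive. */
        destroy_workqueue(edev->wq);

        /* 3. Only now drop the references those work items relied on. */
        put_disk(edev->disk);
}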
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index adc6f36564cf..9694dd99bbbc 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c | |||
@@ -559,7 +559,7 @@ static void mtip_timeout_function(unsigned long int data) | |||
559 | struct mtip_cmd *command; | 559 | struct mtip_cmd *command; |
560 | int tag, cmdto_cnt = 0; | 560 | int tag, cmdto_cnt = 0; |
561 | unsigned int bit, group; | 561 | unsigned int bit, group; |
562 | unsigned int num_command_slots = port->dd->slot_groups * 32; | 562 | unsigned int num_command_slots; |
563 | unsigned long to, tagaccum[SLOTBITS_IN_LONGS]; | 563 | unsigned long to, tagaccum[SLOTBITS_IN_LONGS]; |
564 | 564 | ||
565 | if (unlikely(!port)) | 565 | if (unlikely(!port)) |
@@ -572,6 +572,7 @@ static void mtip_timeout_function(unsigned long int data) | |||
572 | } | 572 | } |
573 | /* clear the tag accumulator */ | 573 | /* clear the tag accumulator */ |
574 | memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long)); | 574 | memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long)); |
575 | num_command_slots = port->dd->slot_groups * 32; | ||
575 | 576 | ||
576 | for (tag = 0; tag < num_command_slots; tag++) { | 577 | for (tag = 0; tag < num_command_slots; tag++) { |
577 | /* | 578 | /* |
@@ -2218,8 +2219,8 @@ static int exec_drive_taskfile(struct driver_data *dd, | |||
2218 | fis.device); | 2219 | fis.device); |
2219 | 2220 | ||
2220 | /* check for erase mode support during secure erase.*/ | 2221 | /* check for erase mode support during secure erase.*/ |
2221 | if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) | 2222 | if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf && |
2222 | && (outbuf[0] & MTIP_SEC_ERASE_MODE)) { | 2223 | (outbuf[0] & MTIP_SEC_ERASE_MODE)) { |
2223 | erasemode = 1; | 2224 | erasemode = 1; |
2224 | } | 2225 | } |
2225 | 2226 | ||
@@ -2439,7 +2440,7 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd, | |||
2439 | * return value | 2440 | * return value |
2440 | * None | 2441 | * None |
2441 | */ | 2442 | */ |
2442 | static void mtip_hw_submit_io(struct driver_data *dd, sector_t start, | 2443 | static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector, |
2443 | int nsect, int nents, int tag, void *callback, | 2444 | int nsect, int nents, int tag, void *callback, |
2444 | void *data, int dir) | 2445 | void *data, int dir) |
2445 | { | 2446 | { |
@@ -2447,6 +2448,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start, | |||
2447 | struct mtip_port *port = dd->port; | 2448 | struct mtip_port *port = dd->port; |
2448 | struct mtip_cmd *command = &port->commands[tag]; | 2449 | struct mtip_cmd *command = &port->commands[tag]; |
2449 | int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; | 2450 | int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; |
2451 | u64 start = sector; | ||
2450 | 2452 | ||
2451 | /* Map the scatter list for DMA access */ | 2453 | /* Map the scatter list for DMA access */ |
2452 | nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir); | 2454 | nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir); |
@@ -2465,8 +2467,12 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start, | |||
2465 | fis->opts = 1 << 7; | 2467 | fis->opts = 1 << 7; |
2466 | fis->command = | 2468 | fis->command = |
2467 | (dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE); | 2469 | (dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE); |
2468 | *((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF); | 2470 | fis->lba_low = start & 0xFF; |
2469 | *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF); | 2471 | fis->lba_mid = (start >> 8) & 0xFF; |
2472 | fis->lba_hi = (start >> 16) & 0xFF; | ||
2473 | fis->lba_low_ex = (start >> 24) & 0xFF; | ||
2474 | fis->lba_mid_ex = (start >> 32) & 0xFF; | ||
2475 | fis->lba_hi_ex = (start >> 40) & 0xFF; | ||
2470 | fis->device = 1 << 6; | 2476 | fis->device = 1 << 6; |
2471 | fis->features = nsect & 0xFF; | 2477 | fis->features = nsect & 0xFF; |
2472 | fis->features_ex = (nsect >> 8) & 0xFF; | 2478 | fis->features_ex = (nsect >> 8) & 0xFF; |
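The last mtip32xx hunk replaces two unaligned 32-bit stores into the command FIS with explicit byte assignments of the 48-bit start sector. The same split, pulled out into a hypothetical helper; the field names are those of struct host_to_dev_fis as used in the hunk.

/* Hypothetical helper: spread a 48-bit LBA across the six FIS byte fields. */
static void mtip_fis_set_lba(struct host_to_dev_fis *fis, u64 start)
{
        fis->lba_low    = start & 0xFF;                 /* bits  7:0  */
        fis->lba_mid    = (start >> 8) & 0xFF;          /* bits 15:8  */
        fis->lba_hi     = (start >> 16) & 0xFF;         /* bits 23:16 */
        fis->lba_low_ex = (start >> 24) & 0xFF;         /* bits 31:24 */
        fis->lba_mid_ex = (start >> 32) & 0xFF;         /* bits 39:32 */
        fis->lba_hi_ex  = (start >> 40) & 0xFF;         /* bits 47:40 */
}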
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index 5f4a917bd8bb..b1742640556a 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h | |||
@@ -34,7 +34,7 @@ | |||
34 | #define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48 | 34 | #define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48 |
35 | 35 | ||
36 | /* check for erase mode support during secure erase */ | 36 | /* check for erase mode support during secure erase */ |
37 | #define MTIP_SEC_ERASE_MODE 0x3 | 37 | #define MTIP_SEC_ERASE_MODE 0x2 |
38 | 38 | ||
39 | /* # of times to retry timed out/failed IOs */ | 39 | /* # of times to retry timed out/failed IOs */ |
40 | #define MTIP_MAX_RETRIES 2 | 40 | #define MTIP_MAX_RETRIES 2 |
@@ -155,14 +155,14 @@ enum { | |||
155 | MTIP_DDF_REBUILD_FAILED_BIT = 8, | 155 | MTIP_DDF_REBUILD_FAILED_BIT = 8, |
156 | }; | 156 | }; |
157 | 157 | ||
158 | __packed struct smart_attr{ | 158 | struct smart_attr { |
159 | u8 attr_id; | 159 | u8 attr_id; |
160 | u16 flags; | 160 | u16 flags; |
161 | u8 cur; | 161 | u8 cur; |
162 | u8 worst; | 162 | u8 worst; |
163 | u32 data; | 163 | u32 data; |
164 | u8 res[3]; | 164 | u8 res[3]; |
165 | }; | 165 | } __packed; |
166 | 166 | ||
167 | /* Register Frame Information Structure (FIS), host to device. */ | 167 | /* Register Frame Information Structure (FIS), host to device. */ |
168 | struct host_to_dev_fis { | 168 | struct host_to_dev_fis { |
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index fc2de5528dcc..b00000e8aef6 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c | |||
@@ -67,6 +67,7 @@ static struct usb_device_id ath3k_table[] = { | |||
67 | { USB_DEVICE(0x13d3, 0x3304) }, | 67 | { USB_DEVICE(0x13d3, 0x3304) }, |
68 | { USB_DEVICE(0x0930, 0x0215) }, | 68 | { USB_DEVICE(0x0930, 0x0215) }, |
69 | { USB_DEVICE(0x0489, 0xE03D) }, | 69 | { USB_DEVICE(0x0489, 0xE03D) }, |
70 | { USB_DEVICE(0x0489, 0xE027) }, | ||
70 | 71 | ||
71 | /* Atheros AR9285 Malbec with sflash firmware */ | 72 | /* Atheros AR9285 Malbec with sflash firmware */ |
72 | { USB_DEVICE(0x03F0, 0x311D) }, | 73 | { USB_DEVICE(0x03F0, 0x311D) }, |
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index debda27df9b0..ee82f2fb65f0 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -124,6 +124,7 @@ static struct usb_device_id blacklist_table[] = { | |||
124 | { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, | 124 | { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, |
125 | { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, | 125 | { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, |
126 | { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, | 126 | { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, |
127 | { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE }, | ||
127 | 128 | ||
128 | /* Atheros AR9285 Malbec with sflash firmware */ | 129 | /* Atheros AR9285 Malbec with sflash firmware */ |
129 | { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, | 130 | { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, |
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c index ff63560b8467..0c48b0e05ed6 100644 --- a/drivers/bus/omap-ocp2scp.c +++ b/drivers/bus/omap-ocp2scp.c | |||
@@ -22,6 +22,26 @@ | |||
22 | #include <linux/pm_runtime.h> | 22 | #include <linux/pm_runtime.h> |
23 | #include <linux/of.h> | 23 | #include <linux/of.h> |
24 | #include <linux/of_platform.h> | 24 | #include <linux/of_platform.h> |
25 | #include <linux/platform_data/omap_ocp2scp.h> | ||
26 | |||
27 | /** | ||
28 | * _count_resources - count for the number of resources | ||
29 | * @res: struct resource * | ||
30 | * | ||
31 | * Count and return the number of resources populated for the device that is | ||
32 | * connected to ocp2scp. | ||
33 | */ | ||
34 | static unsigned _count_resources(struct resource *res) | ||
35 | { | ||
36 | int cnt = 0; | ||
37 | |||
38 | while (res->start != res->end) { | ||
39 | cnt++; | ||
40 | res++; | ||
41 | } | ||
42 | |||
43 | return cnt; | ||
44 | } | ||
25 | 45 | ||
26 | static int ocp2scp_remove_devices(struct device *dev, void *c) | 46 | static int ocp2scp_remove_devices(struct device *dev, void *c) |
27 | { | 47 | { |
@@ -34,20 +54,62 @@ static int ocp2scp_remove_devices(struct device *dev, void *c) | |||
34 | 54 | ||
35 | static int __devinit omap_ocp2scp_probe(struct platform_device *pdev) | 55 | static int __devinit omap_ocp2scp_probe(struct platform_device *pdev) |
36 | { | 56 | { |
37 | int ret; | 57 | int ret; |
38 | struct device_node *np = pdev->dev.of_node; | 58 | unsigned res_cnt, i; |
59 | struct device_node *np = pdev->dev.of_node; | ||
60 | struct platform_device *pdev_child; | ||
61 | struct omap_ocp2scp_platform_data *pdata = pdev->dev.platform_data; | ||
62 | struct omap_ocp2scp_dev *dev; | ||
39 | 63 | ||
40 | if (np) { | 64 | if (np) { |
41 | ret = of_platform_populate(np, NULL, NULL, &pdev->dev); | 65 | ret = of_platform_populate(np, NULL, NULL, &pdev->dev); |
42 | if (ret) { | 66 | if (ret) { |
43 | dev_err(&pdev->dev, "failed to add resources for ocp2scp child\n"); | 67 | dev_err(&pdev->dev, |
68 | "failed to add resources for ocp2scp child\n"); | ||
44 | goto err0; | 69 | goto err0; |
45 | } | 70 | } |
71 | } else if (pdata) { | ||
72 | for (i = 0, dev = *pdata->devices; i < pdata->dev_cnt; i++, | ||
73 | dev++) { | ||
74 | res_cnt = _count_resources(dev->res); | ||
75 | |||
76 | pdev_child = platform_device_alloc(dev->drv_name, | ||
77 | PLATFORM_DEVID_AUTO); | ||
78 | if (!pdev_child) { | ||
79 | dev_err(&pdev->dev, | ||
80 | "failed to allocate mem for ocp2scp child\n"); | ||
81 | goto err0; | ||
82 | } | ||
83 | |||
84 | ret = platform_device_add_resources(pdev_child, | ||
85 | dev->res, res_cnt); | ||
86 | if (ret) { | ||
87 | dev_err(&pdev->dev, | ||
88 | "failed to add resources for ocp2scp child\n"); | ||
89 | goto err1; | ||
90 | } | ||
91 | |||
92 | pdev_child->dev.parent = &pdev->dev; | ||
93 | |||
94 | ret = platform_device_add(pdev_child); | ||
95 | if (ret) { | ||
96 | dev_err(&pdev->dev, | ||
97 | "failed to register ocp2scp child device\n"); | ||
98 | goto err1; | ||
99 | } | ||
100 | } | ||
101 | } else { | ||
102 | dev_err(&pdev->dev, "OCP2SCP initialized without plat data\n"); | ||
103 | return -EINVAL; | ||
46 | } | 104 | } |
105 | |||
47 | pm_runtime_enable(&pdev->dev); | 106 | pm_runtime_enable(&pdev->dev); |
48 | 107 | ||
49 | return 0; | 108 | return 0; |
50 | 109 | ||
110 | err1: | ||
111 | platform_device_put(pdev_child); | ||
112 | |||
51 | err0: | 113 | err0: |
52 | device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices); | 114 | device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices); |
53 | 115 | ||
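The non-DT path added above expects platform data whose per-child resource arrays end in a sentinel entry (start == end), which is what _count_resources() keys on. A rough sketch of matching board data follows; the addresses and the "example-phy" name are invented, while the structure fields (drv_name, res, devices, dev_cnt) are the ones the probe code dereferences.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_data/omap_ocp2scp.h>

/* Invented register window for an imaginary child device. */
static struct resource example_phy_res[] = {
        {
                .start  = 0x4a0ad080,
                .end    = 0x4a0ad0ff,
                .flags  = IORESOURCE_MEM,
        },
        { },    /* sentinel: start == end stops _count_resources() */
};

static struct omap_ocp2scp_dev example_ocp2scp_devs[] = {
        {
                .drv_name       = "example-phy",
                .res            = example_phy_res,
        },
};

static struct omap_ocp2scp_dev *example_ocp2scp_dev_ptr = example_ocp2scp_devs;

static struct omap_ocp2scp_platform_data example_ocp2scp_pdata = {
        .dev_cnt        = ARRAY_SIZE(example_ocp2scp_devs),
        .devices        = &example_ocp2scp_dev_ptr,
};

Note that the probe loop walks the struct array contiguously starting at *pdata->devices, which is why .devices points at a pointer to the first array element.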
diff --git a/drivers/clk/ux500/u8500_clk.c b/drivers/clk/ux500/u8500_clk.c index ca4a25ed844c..e2c17d187d98 100644 --- a/drivers/clk/ux500/u8500_clk.c +++ b/drivers/clk/ux500/u8500_clk.c | |||
@@ -40,7 +40,7 @@ void u8500_clk_init(void) | |||
40 | CLK_IS_ROOT|CLK_IGNORE_UNUSED, | 40 | CLK_IS_ROOT|CLK_IGNORE_UNUSED, |
41 | 32768); | 41 | 32768); |
42 | clk_register_clkdev(clk, "clk32k", NULL); | 42 | clk_register_clkdev(clk, "clk32k", NULL); |
43 | clk_register_clkdev(clk, NULL, "rtc-pl031"); | 43 | clk_register_clkdev(clk, "apb_pclk", "rtc-pl031"); |
44 | 44 | ||
45 | /* PRCMU clocks */ | 45 | /* PRCMU clocks */ |
46 | fw_version = prcmu_get_fw_version(); | 46 | fw_version = prcmu_get_fw_version(); |
@@ -228,10 +228,17 @@ void u8500_clk_init(void) | |||
228 | 228 | ||
229 | clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", U8500_CLKRST1_BASE, | 229 | clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", U8500_CLKRST1_BASE, |
230 | BIT(2), 0); | 230 | BIT(2), 0); |
231 | clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.1"); | ||
232 | |||
231 | clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", U8500_CLKRST1_BASE, | 233 | clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", U8500_CLKRST1_BASE, |
232 | BIT(3), 0); | 234 | BIT(3), 0); |
235 | clk_register_clkdev(clk, "apb_pclk", "msp0"); | ||
236 | clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.0"); | ||
237 | |||
233 | clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", U8500_CLKRST1_BASE, | 238 | clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", U8500_CLKRST1_BASE, |
234 | BIT(4), 0); | 239 | BIT(4), 0); |
240 | clk_register_clkdev(clk, "apb_pclk", "msp1"); | ||
241 | clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.1"); | ||
235 | 242 | ||
236 | clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", U8500_CLKRST1_BASE, | 243 | clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", U8500_CLKRST1_BASE, |
237 | BIT(5), 0); | 244 | BIT(5), 0); |
@@ -239,6 +246,7 @@ void u8500_clk_init(void) | |||
239 | 246 | ||
240 | clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", U8500_CLKRST1_BASE, | 247 | clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", U8500_CLKRST1_BASE, |
241 | BIT(6), 0); | 248 | BIT(6), 0); |
249 | clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.2"); | ||
242 | 250 | ||
243 | clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", U8500_CLKRST1_BASE, | 251 | clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", U8500_CLKRST1_BASE, |
244 | BIT(7), 0); | 252 | BIT(7), 0); |
@@ -246,6 +254,7 @@ void u8500_clk_init(void) | |||
246 | 254 | ||
247 | clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", U8500_CLKRST1_BASE, | 255 | clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", U8500_CLKRST1_BASE, |
248 | BIT(8), 0); | 256 | BIT(8), 0); |
257 | clk_register_clkdev(clk, "apb_pclk", "slimbus0"); | ||
249 | 258 | ||
250 | clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", U8500_CLKRST1_BASE, | 259 | clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", U8500_CLKRST1_BASE, |
251 | BIT(9), 0); | 260 | BIT(9), 0); |
@@ -255,11 +264,16 @@ void u8500_clk_init(void) | |||
255 | 264 | ||
256 | clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", U8500_CLKRST1_BASE, | 265 | clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", U8500_CLKRST1_BASE, |
257 | BIT(10), 0); | 266 | BIT(10), 0); |
267 | clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.4"); | ||
268 | |||
258 | clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", U8500_CLKRST1_BASE, | 269 | clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", U8500_CLKRST1_BASE, |
259 | BIT(11), 0); | 270 | BIT(11), 0); |
271 | clk_register_clkdev(clk, "apb_pclk", "msp3"); | ||
272 | clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.3"); | ||
260 | 273 | ||
261 | clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", U8500_CLKRST2_BASE, | 274 | clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", U8500_CLKRST2_BASE, |
262 | BIT(0), 0); | 275 | BIT(0), 0); |
276 | clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.3"); | ||
263 | 277 | ||
264 | clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", U8500_CLKRST2_BASE, | 278 | clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", U8500_CLKRST2_BASE, |
265 | BIT(1), 0); | 279 | BIT(1), 0); |
@@ -279,12 +293,13 @@ void u8500_clk_init(void) | |||
279 | 293 | ||
280 | clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", U8500_CLKRST2_BASE, | 294 | clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", U8500_CLKRST2_BASE, |
281 | BIT(5), 0); | 295 | BIT(5), 0); |
296 | clk_register_clkdev(clk, "apb_pclk", "msp2"); | ||
297 | clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.2"); | ||
282 | 298 | ||
283 | clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", U8500_CLKRST2_BASE, | 299 | clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", U8500_CLKRST2_BASE, |
284 | BIT(6), 0); | 300 | BIT(6), 0); |
285 | clk_register_clkdev(clk, "apb_pclk", "sdi1"); | 301 | clk_register_clkdev(clk, "apb_pclk", "sdi1"); |
286 | 302 | ||
287 | |||
288 | clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", U8500_CLKRST2_BASE, | 303 | clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", U8500_CLKRST2_BASE, |
289 | BIT(7), 0); | 304 | BIT(7), 0); |
290 | clk_register_clkdev(clk, "apb_pclk", "sdi3"); | 305 | clk_register_clkdev(clk, "apb_pclk", "sdi3"); |
@@ -316,10 +331,15 @@ void u8500_clk_init(void) | |||
316 | 331 | ||
317 | clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", U8500_CLKRST3_BASE, | 332 | clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", U8500_CLKRST3_BASE, |
318 | BIT(1), 0); | 333 | BIT(1), 0); |
334 | clk_register_clkdev(clk, "apb_pclk", "ssp0"); | ||
335 | |||
319 | clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", U8500_CLKRST3_BASE, | 336 | clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", U8500_CLKRST3_BASE, |
320 | BIT(2), 0); | 337 | BIT(2), 0); |
338 | clk_register_clkdev(clk, "apb_pclk", "ssp1"); | ||
339 | |||
321 | clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", U8500_CLKRST3_BASE, | 340 | clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", U8500_CLKRST3_BASE, |
322 | BIT(3), 0); | 341 | BIT(3), 0); |
342 | clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.0"); | ||
323 | 343 | ||
324 | clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", U8500_CLKRST3_BASE, | 344 | clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", U8500_CLKRST3_BASE, |
325 | BIT(4), 0); | 345 | BIT(4), 0); |
@@ -401,10 +421,17 @@ void u8500_clk_init(void) | |||
401 | 421 | ||
402 | clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk", | 422 | clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk", |
403 | U8500_CLKRST1_BASE, BIT(2), CLK_SET_RATE_GATE); | 423 | U8500_CLKRST1_BASE, BIT(2), CLK_SET_RATE_GATE); |
424 | clk_register_clkdev(clk, NULL, "nmk-i2c.1"); | ||
425 | |||
404 | clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk", | 426 | clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk", |
405 | U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE); | 427 | U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE); |
428 | clk_register_clkdev(clk, NULL, "msp0"); | ||
429 | clk_register_clkdev(clk, NULL, "ux500-msp-i2s.0"); | ||
430 | |||
406 | clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk", | 431 | clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk", |
407 | U8500_CLKRST1_BASE, BIT(4), CLK_SET_RATE_GATE); | 432 | U8500_CLKRST1_BASE, BIT(4), CLK_SET_RATE_GATE); |
433 | clk_register_clkdev(clk, NULL, "msp1"); | ||
434 | clk_register_clkdev(clk, NULL, "ux500-msp-i2s.1"); | ||
408 | 435 | ||
409 | clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmcclk", | 436 | clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmcclk", |
410 | U8500_CLKRST1_BASE, BIT(5), CLK_SET_RATE_GATE); | 437 | U8500_CLKRST1_BASE, BIT(5), CLK_SET_RATE_GATE); |
@@ -412,17 +439,25 @@ void u8500_clk_init(void) | |||
412 | 439 | ||
413 | clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk", | 440 | clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk", |
414 | U8500_CLKRST1_BASE, BIT(6), CLK_SET_RATE_GATE); | 441 | U8500_CLKRST1_BASE, BIT(6), CLK_SET_RATE_GATE); |
442 | clk_register_clkdev(clk, NULL, "nmk-i2c.2"); | ||
443 | |||
415 | clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk", | 444 | clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk", |
416 | U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE); | 445 | U8500_CLKRST1_BASE, BIT(8), CLK_SET_RATE_GATE); |
417 | /* FIXME: Redefinition of BIT(3). */ | 446 | clk_register_clkdev(clk, NULL, "slimbus0"); |
447 | |||
418 | clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk", | 448 | clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk", |
419 | U8500_CLKRST1_BASE, BIT(9), CLK_SET_RATE_GATE); | 449 | U8500_CLKRST1_BASE, BIT(9), CLK_SET_RATE_GATE); |
450 | clk_register_clkdev(clk, NULL, "nmk-i2c.4"); | ||
451 | |||
420 | clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk", | 452 | clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk", |
421 | U8500_CLKRST1_BASE, BIT(10), CLK_SET_RATE_GATE); | 453 | U8500_CLKRST1_BASE, BIT(10), CLK_SET_RATE_GATE); |
454 | clk_register_clkdev(clk, NULL, "msp3"); | ||
455 | clk_register_clkdev(clk, NULL, "ux500-msp-i2s.3"); | ||
422 | 456 | ||
423 | /* Periph2 */ | 457 | /* Periph2 */ |
424 | clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk", | 458 | clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk", |
425 | U8500_CLKRST2_BASE, BIT(0), CLK_SET_RATE_GATE); | 459 | U8500_CLKRST2_BASE, BIT(0), CLK_SET_RATE_GATE); |
460 | clk_register_clkdev(clk, NULL, "nmk-i2c.3"); | ||
426 | 461 | ||
427 | clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmcclk", | 462 | clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmcclk", |
428 | U8500_CLKRST2_BASE, BIT(2), CLK_SET_RATE_GATE); | 463 | U8500_CLKRST2_BASE, BIT(2), CLK_SET_RATE_GATE); |
@@ -430,6 +465,8 @@ void u8500_clk_init(void) | |||
430 | 465 | ||
431 | clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk", | 466 | clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk", |
432 | U8500_CLKRST2_BASE, BIT(3), CLK_SET_RATE_GATE); | 467 | U8500_CLKRST2_BASE, BIT(3), CLK_SET_RATE_GATE); |
468 | clk_register_clkdev(clk, NULL, "msp2"); | ||
469 | clk_register_clkdev(clk, NULL, "ux500-msp-i2s.2"); | ||
433 | 470 | ||
434 | clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmcclk", | 471 | clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmcclk", |
435 | U8500_CLKRST2_BASE, BIT(4), CLK_SET_RATE_GATE); | 472 | U8500_CLKRST2_BASE, BIT(4), CLK_SET_RATE_GATE); |
@@ -450,10 +487,15 @@ void u8500_clk_init(void) | |||
450 | /* Periph3 */ | 487 | /* Periph3 */ |
451 | clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk", | 488 | clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk", |
452 | U8500_CLKRST3_BASE, BIT(1), CLK_SET_RATE_GATE); | 489 | U8500_CLKRST3_BASE, BIT(1), CLK_SET_RATE_GATE); |
490 | clk_register_clkdev(clk, NULL, "ssp0"); | ||
491 | |||
453 | clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk", | 492 | clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk", |
454 | U8500_CLKRST3_BASE, BIT(2), CLK_SET_RATE_GATE); | 493 | U8500_CLKRST3_BASE, BIT(2), CLK_SET_RATE_GATE); |
494 | clk_register_clkdev(clk, NULL, "ssp1"); | ||
495 | |||
455 | clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk", | 496 | clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk", |
456 | U8500_CLKRST3_BASE, BIT(3), CLK_SET_RATE_GATE); | 497 | U8500_CLKRST3_BASE, BIT(3), CLK_SET_RATE_GATE); |
498 | clk_register_clkdev(clk, NULL, "nmk-i2c.0"); | ||
457 | 499 | ||
458 | clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmcclk", | 500 | clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmcclk", |
459 | U8500_CLKRST3_BASE, BIT(4), CLK_SET_RATE_GATE); | 501 | U8500_CLKRST3_BASE, BIT(4), CLK_SET_RATE_GATE); |
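Most of the u8500 hunk adds clk_register_clkdev() lookups so that consumer drivers can find their bus clock under the "apb_pclk" con_id and their function clock under a NULL con_id. For orientation, the consumer side as a sketch; the probe function is illustrative, "apb_pclk" and the NULL con_id match the lookups registered above.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_consumer_probe(struct platform_device *pdev)
{
        struct clk *pclk, *kclk;

        /* Bus clock, resolved through the "apb_pclk" con_id added above. */
        pclk = devm_clk_get(&pdev->dev, "apb_pclk");
        if (IS_ERR(pclk))
                return PTR_ERR(pclk);

        /* Function/kernel clock, resolved through the NULL-con_id lookup. */
        kclk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(kclk))
                return PTR_ERR(kclk);

        /* Return values ignored here for brevity. */
        clk_prepare_enable(pclk);
        clk_prepare_enable(kclk);
        return 0;
}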
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 5961e6415f08..a0b3661d90b0 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm | |||
@@ -76,3 +76,10 @@ config ARM_EXYNOS5250_CPUFREQ | |||
76 | help | 76 | help |
77 | This adds the CPUFreq driver for Samsung EXYNOS5250 | 77 | This adds the CPUFreq driver for Samsung EXYNOS5250 |
78 | SoC. | 78 | SoC. |
79 | |||
80 | config ARM_SPEAR_CPUFREQ | ||
81 | bool "SPEAr CPUFreq support" | ||
82 | depends on PLAT_SPEAR | ||
83 | default y | ||
84 | help | ||
85 | This adds the CPUFreq driver support for SPEAr SOCs. | ||
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 1bc90e1306d8..1f254ec087c1 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile | |||
@@ -7,8 +7,8 @@ obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o | |||
7 | obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o | 7 | obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o |
8 | obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o | 8 | obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o |
9 | obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o | 9 | obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o |
10 | obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o | 10 | obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o cpufreq_governor.o |
11 | obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o | 11 | obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o cpufreq_governor.o |
12 | 12 | ||
13 | # CPUfreq cross-arch helpers | 13 | # CPUfreq cross-arch helpers |
14 | obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o | 14 | obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o |
@@ -50,6 +50,7 @@ obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o | |||
50 | obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o | 50 | obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o |
51 | obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o | 51 | obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o |
52 | obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o | 52 | obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o |
53 | obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o | ||
53 | 54 | ||
54 | ################################################################################## | 55 | ################################################################################## |
55 | # PowerPC platform drivers | 56 | # PowerPC platform drivers |
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index e9158278c71d..52bf36d599f5 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c | |||
@@ -174,7 +174,7 @@ static struct cpufreq_driver cpu0_cpufreq_driver = { | |||
174 | .attr = cpu0_cpufreq_attr, | 174 | .attr = cpu0_cpufreq_attr, |
175 | }; | 175 | }; |
176 | 176 | ||
177 | static int __devinit cpu0_cpufreq_driver_init(void) | 177 | static int cpu0_cpufreq_driver_init(void) |
178 | { | 178 | { |
179 | struct device_node *np; | 179 | struct device_node *np; |
180 | int ret; | 180 | int ret; |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index fb8a5279c5d8..1f93dbd72355 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -15,6 +15,8 @@ | |||
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
19 | |||
18 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
19 | #include <linux/module.h> | 21 | #include <linux/module.h> |
20 | #include <linux/init.h> | 22 | #include <linux/init.h> |
@@ -127,7 +129,7 @@ static int __init init_cpufreq_transition_notifier_list(void) | |||
127 | pure_initcall(init_cpufreq_transition_notifier_list); | 129 | pure_initcall(init_cpufreq_transition_notifier_list); |
128 | 130 | ||
129 | static int off __read_mostly; | 131 | static int off __read_mostly; |
130 | int cpufreq_disabled(void) | 132 | static int cpufreq_disabled(void) |
131 | { | 133 | { |
132 | return off; | 134 | return off; |
133 | } | 135 | } |
@@ -402,7 +404,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, | |||
402 | static ssize_t store_##file_name \ | 404 | static ssize_t store_##file_name \ |
403 | (struct cpufreq_policy *policy, const char *buf, size_t count) \ | 405 | (struct cpufreq_policy *policy, const char *buf, size_t count) \ |
404 | { \ | 406 | { \ |
405 | unsigned int ret = -EINVAL; \ | 407 | unsigned int ret; \ |
406 | struct cpufreq_policy new_policy; \ | 408 | struct cpufreq_policy new_policy; \ |
407 | \ | 409 | \ |
408 | ret = cpufreq_get_policy(&new_policy, policy->cpu); \ | 410 | ret = cpufreq_get_policy(&new_policy, policy->cpu); \ |
@@ -445,7 +447,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) | |||
445 | else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) | 447 | else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) |
446 | return sprintf(buf, "performance\n"); | 448 | return sprintf(buf, "performance\n"); |
447 | else if (policy->governor) | 449 | else if (policy->governor) |
448 | return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", | 450 | return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", |
449 | policy->governor->name); | 451 | policy->governor->name); |
450 | return -EINVAL; | 452 | return -EINVAL; |
451 | } | 453 | } |
@@ -457,7 +459,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) | |||
457 | static ssize_t store_scaling_governor(struct cpufreq_policy *policy, | 459 | static ssize_t store_scaling_governor(struct cpufreq_policy *policy, |
458 | const char *buf, size_t count) | 460 | const char *buf, size_t count) |
459 | { | 461 | { |
460 | unsigned int ret = -EINVAL; | 462 | unsigned int ret; |
461 | char str_governor[16]; | 463 | char str_governor[16]; |
462 | struct cpufreq_policy new_policy; | 464 | struct cpufreq_policy new_policy; |
463 | 465 | ||
@@ -491,7 +493,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy, | |||
491 | */ | 493 | */ |
492 | static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf) | 494 | static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf) |
493 | { | 495 | { |
494 | return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name); | 496 | return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name); |
495 | } | 497 | } |
496 | 498 | ||
497 | /** | 499 | /** |
@@ -512,7 +514,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, | |||
512 | if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) | 514 | if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) |
513 | - (CPUFREQ_NAME_LEN + 2))) | 515 | - (CPUFREQ_NAME_LEN + 2))) |
514 | goto out; | 516 | goto out; |
515 | i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name); | 517 | i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name); |
516 | } | 518 | } |
517 | out: | 519 | out: |
518 | i += sprintf(&buf[i], "\n"); | 520 | i += sprintf(&buf[i], "\n"); |
@@ -581,7 +583,7 @@ static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) | |||
581 | } | 583 | } |
582 | 584 | ||
583 | /** | 585 | /** |
584 | * show_scaling_driver - show the current cpufreq HW/BIOS limitation | 586 | * show_bios_limit - show the current cpufreq HW/BIOS limitation |
585 | */ | 587 | */ |
586 | static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) | 588 | static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) |
587 | { | 589 | { |
@@ -1468,12 +1470,23 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, | |||
1468 | unsigned int relation) | 1470 | unsigned int relation) |
1469 | { | 1471 | { |
1470 | int retval = -EINVAL; | 1472 | int retval = -EINVAL; |
1473 | unsigned int old_target_freq = target_freq; | ||
1471 | 1474 | ||
1472 | if (cpufreq_disabled()) | 1475 | if (cpufreq_disabled()) |
1473 | return -ENODEV; | 1476 | return -ENODEV; |
1474 | 1477 | ||
1475 | pr_debug("target for CPU %u: %u kHz, relation %u\n", policy->cpu, | 1478 | /* Make sure that target_freq is within supported range */ |
1476 | target_freq, relation); | 1479 | if (target_freq > policy->max) |
1480 | target_freq = policy->max; | ||
1481 | if (target_freq < policy->min) | ||
1482 | target_freq = policy->min; | ||
1483 | |||
1484 | pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", | ||
1485 | policy->cpu, target_freq, relation, old_target_freq); | ||
1486 | |||
1487 | if (target_freq == policy->cur) | ||
1488 | return 0; | ||
1489 | |||
1477 | if (cpu_online(policy->cpu) && cpufreq_driver->target) | 1490 | if (cpu_online(policy->cpu) && cpufreq_driver->target) |
1478 | retval = cpufreq_driver->target(policy, target_freq, relation); | 1491 | retval = cpufreq_driver->target(policy, target_freq, relation); |
1479 | 1492 | ||
@@ -1509,12 +1522,14 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu) | |||
1509 | { | 1522 | { |
1510 | int ret = 0; | 1523 | int ret = 0; |
1511 | 1524 | ||
1525 | if (!(cpu_online(cpu) && cpufreq_driver->getavg)) | ||
1526 | return 0; | ||
1527 | |||
1512 | policy = cpufreq_cpu_get(policy->cpu); | 1528 | policy = cpufreq_cpu_get(policy->cpu); |
1513 | if (!policy) | 1529 | if (!policy) |
1514 | return -EINVAL; | 1530 | return -EINVAL; |
1515 | 1531 | ||
1516 | if (cpu_online(cpu) && cpufreq_driver->getavg) | 1532 | ret = cpufreq_driver->getavg(policy, cpu); |
1517 | ret = cpufreq_driver->getavg(policy, cpu); | ||
1518 | 1533 | ||
1519 | cpufreq_cpu_put(policy); | 1534 | cpufreq_cpu_put(policy); |
1520 | return ret; | 1535 | return ret; |
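The __cpufreq_driver_target() hunk clamps the requested frequency into the policy limits and bails out early when the clamped target already equals the current frequency. The clamp on its own, as a standalone sketch; the helper name is made up, the logic is the one added above.

/* Hypothetical helper mirroring the clamp added to __cpufreq_driver_target(). */
static unsigned int clamp_target_freq(struct cpufreq_policy *policy,
                                      unsigned int target_freq)
{
        if (target_freq > policy->max)
                target_freq = policy->max;
        if (target_freq < policy->min)
                target_freq = policy->min;

        /* Callers skip the driver call when this equals policy->cur. */
        return target_freq;
}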
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index a152af7e1991..64ef737e7e72 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c | |||
@@ -11,83 +11,30 @@ | |||
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/cpufreq.h> | 14 | #include <linux/cpufreq.h> |
18 | #include <linux/cpu.h> | 15 | #include <linux/init.h> |
19 | #include <linux/jiffies.h> | 16 | #include <linux/kernel.h> |
20 | #include <linux/kernel_stat.h> | 17 | #include <linux/kernel_stat.h> |
18 | #include <linux/kobject.h> | ||
19 | #include <linux/module.h> | ||
21 | #include <linux/mutex.h> | 20 | #include <linux/mutex.h> |
22 | #include <linux/hrtimer.h> | 21 | #include <linux/notifier.h> |
23 | #include <linux/tick.h> | 22 | #include <linux/percpu-defs.h> |
24 | #include <linux/ktime.h> | 23 | #include <linux/sysfs.h> |
25 | #include <linux/sched.h> | 24 | #include <linux/types.h> |
26 | 25 | ||
27 | /* | 26 | #include "cpufreq_governor.h" |
28 | * dbs is used in this file as a shortform for demandbased switching | ||
29 | * It helps to keep variable names smaller, simpler | ||
30 | */ | ||
31 | 27 | ||
28 | /* Conservative governor macros */ | ||
32 | #define DEF_FREQUENCY_UP_THRESHOLD (80) | 29 | #define DEF_FREQUENCY_UP_THRESHOLD (80) |
33 | #define DEF_FREQUENCY_DOWN_THRESHOLD (20) | 30 | #define DEF_FREQUENCY_DOWN_THRESHOLD (20) |
34 | |||
35 | /* | ||
36 | * The polling frequency of this governor depends on the capability of | ||
37 | * the processor. Default polling frequency is 1000 times the transition | ||
38 | * latency of the processor. The governor will work on any processor with | ||
39 | * transition latency <= 10mS, using appropriate sampling | ||
40 | * rate. | ||
41 | * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) | ||
42 | * this governor will not work. | ||
43 | * All times here are in uS. | ||
44 | */ | ||
45 | #define MIN_SAMPLING_RATE_RATIO (2) | ||
46 | |||
47 | static unsigned int min_sampling_rate; | ||
48 | |||
49 | #define LATENCY_MULTIPLIER (1000) | ||
50 | #define MIN_LATENCY_MULTIPLIER (100) | ||
51 | #define DEF_SAMPLING_DOWN_FACTOR (1) | 31 | #define DEF_SAMPLING_DOWN_FACTOR (1) |
52 | #define MAX_SAMPLING_DOWN_FACTOR (10) | 32 | #define MAX_SAMPLING_DOWN_FACTOR (10) |
53 | #define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) | ||
54 | |||
55 | static void do_dbs_timer(struct work_struct *work); | ||
56 | |||
57 | struct cpu_dbs_info_s { | ||
58 | cputime64_t prev_cpu_idle; | ||
59 | cputime64_t prev_cpu_wall; | ||
60 | cputime64_t prev_cpu_nice; | ||
61 | struct cpufreq_policy *cur_policy; | ||
62 | struct delayed_work work; | ||
63 | unsigned int down_skip; | ||
64 | unsigned int requested_freq; | ||
65 | int cpu; | ||
66 | unsigned int enable:1; | ||
67 | /* | ||
68 | * percpu mutex that serializes governor limit change with | ||
69 | * do_dbs_timer invocation. We do not want do_dbs_timer to run | ||
70 | * when user is changing the governor or limits. | ||
71 | */ | ||
72 | struct mutex timer_mutex; | ||
73 | }; | ||
74 | static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info); | ||
75 | 33 | ||
76 | static unsigned int dbs_enable; /* number of CPUs using this policy */ | 34 | static struct dbs_data cs_dbs_data; |
35 | static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info); | ||
77 | 36 | ||
78 | /* | 37 | static struct cs_dbs_tuners cs_tuners = { |
79 | * dbs_mutex protects dbs_enable in governor start/stop. | ||
80 | */ | ||
81 | static DEFINE_MUTEX(dbs_mutex); | ||
82 | |||
83 | static struct dbs_tuners { | ||
84 | unsigned int sampling_rate; | ||
85 | unsigned int sampling_down_factor; | ||
86 | unsigned int up_threshold; | ||
87 | unsigned int down_threshold; | ||
88 | unsigned int ignore_nice; | ||
89 | unsigned int freq_step; | ||
90 | } dbs_tuners_ins = { | ||
91 | .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, | 38 | .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, |
92 | .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, | 39 | .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, |
93 | .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, | 40 | .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, |
@@ -95,95 +42,121 @@ static struct dbs_tuners { | |||
95 | .freq_step = 5, | 42 | .freq_step = 5, |
96 | }; | 43 | }; |
97 | 44 | ||
98 | static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) | 45 | /* |
46 | * Every sampling_rate, we check, if current idle time is less than 20% | ||
47 | * (default), then we try to increase frequency. Every sampling_rate * | ||
48 | * sampling_down_factor, we check, if current idle time is more than 80%, then | ||
49 | * we try to decrease frequency | ||
50 | * | ||
51 | * Any frequency increase takes it to the maximum frequency. Frequency reduction | ||
52 | * happens at minimum steps of 5% (default) of maximum frequency | ||
53 | */ | ||
54 | static void cs_check_cpu(int cpu, unsigned int load) | ||
99 | { | 55 | { |
100 | u64 idle_time; | 56 | struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); |
101 | u64 cur_wall_time; | 57 | struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; |
102 | u64 busy_time; | 58 | unsigned int freq_target; |
59 | |||
60 | /* | ||
61 | * break out if we 'cannot' reduce the speed as the user might | ||
62 | * want freq_step to be zero | ||
63 | */ | ||
64 | if (cs_tuners.freq_step == 0) | ||
65 | return; | ||
103 | 66 | ||
104 | cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); | 67 | /* Check for frequency increase */ |
68 | if (load > cs_tuners.up_threshold) { | ||
69 | dbs_info->down_skip = 0; | ||
105 | 70 | ||
106 | busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; | 71 | /* if we are already at full speed then break out early */ |
107 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; | 72 | if (dbs_info->requested_freq == policy->max) |
108 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; | 73 | return; |
109 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; | ||
110 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; | ||
111 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; | ||
112 | 74 | ||
113 | idle_time = cur_wall_time - busy_time; | 75 | freq_target = (cs_tuners.freq_step * policy->max) / 100; |
114 | if (wall) | ||
115 | *wall = jiffies_to_usecs(cur_wall_time); | ||
116 | 76 | ||
117 | return jiffies_to_usecs(idle_time); | 77 | /* max freq cannot be less than 100. But who knows.... */ |
78 | if (unlikely(freq_target == 0)) | ||
79 | freq_target = 5; | ||
80 | |||
81 | dbs_info->requested_freq += freq_target; | ||
82 | if (dbs_info->requested_freq > policy->max) | ||
83 | dbs_info->requested_freq = policy->max; | ||
84 | |||
85 | __cpufreq_driver_target(policy, dbs_info->requested_freq, | ||
86 | CPUFREQ_RELATION_H); | ||
87 | return; | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * The optimal frequency is the frequency that is the lowest that can | ||
92 | * support the current CPU usage without triggering the up policy. To be | ||
93 | * safe, we stay 10 points under the threshold. | ||
94 | */ | ||
95 | if (load < (cs_tuners.down_threshold - 10)) { | ||
96 | freq_target = (cs_tuners.freq_step * policy->max) / 100; | ||
97 | |||
98 | dbs_info->requested_freq -= freq_target; | ||
99 | if (dbs_info->requested_freq < policy->min) | ||
100 | dbs_info->requested_freq = policy->min; | ||
101 | |||
102 | /* | ||
103 | * if we cannot reduce the frequency anymore, break out early | ||
104 | */ | ||
105 | if (policy->cur == policy->min) | ||
106 | return; | ||
107 | |||
108 | __cpufreq_driver_target(policy, dbs_info->requested_freq, | ||
109 | CPUFREQ_RELATION_H); | ||
110 | return; | ||
111 | } | ||
118 | } | 112 | } |
119 | 113 | ||
120 | static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) | 114 | static void cs_dbs_timer(struct work_struct *work) |
121 | { | 115 | { |
122 | u64 idle_time = get_cpu_idle_time_us(cpu, NULL); | 116 | struct cs_cpu_dbs_info_s *dbs_info = container_of(work, |
117 | struct cs_cpu_dbs_info_s, cdbs.work.work); | ||
118 | unsigned int cpu = dbs_info->cdbs.cpu; | ||
119 | int delay = delay_for_sampling_rate(cs_tuners.sampling_rate); | ||
123 | 120 | ||
124 | if (idle_time == -1ULL) | 121 | mutex_lock(&dbs_info->cdbs.timer_mutex); |
125 | return get_cpu_idle_time_jiffy(cpu, wall); | ||
126 | else | ||
127 | idle_time += get_cpu_iowait_time_us(cpu, wall); | ||
128 | 122 | ||
129 | return idle_time; | 123 | dbs_check_cpu(&cs_dbs_data, cpu); |
124 | |||
125 | schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay); | ||
126 | mutex_unlock(&dbs_info->cdbs.timer_mutex); | ||
130 | } | 127 | } |
131 | 128 | ||
132 | /* keep track of frequency transitions */ | 129 | static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, |
133 | static int | 130 | void *data) |
134 | dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, | ||
135 | void *data) | ||
136 | { | 131 | { |
137 | struct cpufreq_freqs *freq = data; | 132 | struct cpufreq_freqs *freq = data; |
138 | struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info, | 133 | struct cs_cpu_dbs_info_s *dbs_info = |
139 | freq->cpu); | 134 | &per_cpu(cs_cpu_dbs_info, freq->cpu); |
140 | |||
141 | struct cpufreq_policy *policy; | 135 | struct cpufreq_policy *policy; |
142 | 136 | ||
143 | if (!this_dbs_info->enable) | 137 | if (!dbs_info->enable) |
144 | return 0; | 138 | return 0; |
145 | 139 | ||
146 | policy = this_dbs_info->cur_policy; | 140 | policy = dbs_info->cdbs.cur_policy; |
147 | 141 | ||
148 | /* | 142 | /* |
149 | * we only care if our internally tracked freq moves outside | 143 | * we only care if our internally tracked freq moves outside the 'valid' |
150 | * the 'valid' ranges of freqency available to us otherwise | 144 | * ranges of freqency available to us otherwise we do not change it |
151 | * we do not change it | ||
152 | */ | 145 | */ |
153 | if (this_dbs_info->requested_freq > policy->max | 146 | if (dbs_info->requested_freq > policy->max |
154 | || this_dbs_info->requested_freq < policy->min) | 147 | || dbs_info->requested_freq < policy->min) |
155 | this_dbs_info->requested_freq = freq->new; | 148 | dbs_info->requested_freq = freq->new; |
156 | 149 | ||
157 | return 0; | 150 | return 0; |
158 | } | 151 | } |
159 | 152 | ||
160 | static struct notifier_block dbs_cpufreq_notifier_block = { | ||
161 | .notifier_call = dbs_cpufreq_notifier | ||
162 | }; | ||
163 | |||
164 | /************************** sysfs interface ************************/ | 153 | /************************** sysfs interface ************************/ |
165 | static ssize_t show_sampling_rate_min(struct kobject *kobj, | 154 | static ssize_t show_sampling_rate_min(struct kobject *kobj, |
166 | struct attribute *attr, char *buf) | 155 | struct attribute *attr, char *buf) |
167 | { | 156 | { |
168 | return sprintf(buf, "%u\n", min_sampling_rate); | 157 | return sprintf(buf, "%u\n", cs_dbs_data.min_sampling_rate); |
169 | } | 158 | } |
170 | 159 | ||
171 | define_one_global_ro(sampling_rate_min); | ||
172 | |||
173 | /* cpufreq_conservative Governor Tunables */ | ||
174 | #define show_one(file_name, object) \ | ||
175 | static ssize_t show_##file_name \ | ||
176 | (struct kobject *kobj, struct attribute *attr, char *buf) \ | ||
177 | { \ | ||
178 | return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ | ||
179 | } | ||
180 | show_one(sampling_rate, sampling_rate); | ||
181 | show_one(sampling_down_factor, sampling_down_factor); | ||
182 | show_one(up_threshold, up_threshold); | ||
183 | show_one(down_threshold, down_threshold); | ||
184 | show_one(ignore_nice_load, ignore_nice); | ||
185 | show_one(freq_step, freq_step); | ||
186 | |||
187 | static ssize_t store_sampling_down_factor(struct kobject *a, | 160 | static ssize_t store_sampling_down_factor(struct kobject *a, |
188 | struct attribute *b, | 161 | struct attribute *b, |
189 | const char *buf, size_t count) | 162 | const char *buf, size_t count) |
@@ -195,7 +168,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a, | |||
195 | if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) | 168 | if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) |
196 | return -EINVAL; | 169 | return -EINVAL; |
197 | 170 | ||
198 | dbs_tuners_ins.sampling_down_factor = input; | 171 | cs_tuners.sampling_down_factor = input; |
199 | return count; | 172 | return count; |
200 | } | 173 | } |
201 | 174 | ||
@@ -209,7 +182,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, | |||
209 | if (ret != 1) | 182 | if (ret != 1) |
210 | return -EINVAL; | 183 | return -EINVAL; |
211 | 184 | ||
212 | dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); | 185 | cs_tuners.sampling_rate = max(input, cs_dbs_data.min_sampling_rate); |
213 | return count; | 186 | return count; |
214 | } | 187 | } |
215 | 188 | ||
@@ -220,11 +193,10 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, | |||
220 | int ret; | 193 | int ret; |
221 | ret = sscanf(buf, "%u", &input); | 194 | ret = sscanf(buf, "%u", &input); |
222 | 195 | ||
223 | if (ret != 1 || input > 100 || | 196 | if (ret != 1 || input > 100 || input <= cs_tuners.down_threshold) |
224 | input <= dbs_tuners_ins.down_threshold) | ||
225 | return -EINVAL; | 197 | return -EINVAL; |
226 | 198 | ||
227 | dbs_tuners_ins.up_threshold = input; | 199 | cs_tuners.up_threshold = input; |
228 | return count; | 200 | return count; |
229 | } | 201 | } |
230 | 202 | ||
@@ -237,21 +209,19 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b, | |||
237 | 209 | ||
238 | /* cannot be lower than 11 otherwise freq will not fall */ | 210 | /* cannot be lower than 11 otherwise freq will not fall */ |
239 | if (ret != 1 || input < 11 || input > 100 || | 211 | if (ret != 1 || input < 11 || input > 100 || |
240 | input >= dbs_tuners_ins.up_threshold) | 212 | input >= cs_tuners.up_threshold) |
241 | return -EINVAL; | 213 | return -EINVAL; |
242 | 214 | ||
243 | dbs_tuners_ins.down_threshold = input; | 215 | cs_tuners.down_threshold = input; |
244 | return count; | 216 | return count; |
245 | } | 217 | } |
246 | 218 | ||
247 | static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, | 219 | static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, |
248 | const char *buf, size_t count) | 220 | const char *buf, size_t count) |
249 | { | 221 | { |
250 | unsigned int input; | 222 | unsigned int input, j; |
251 | int ret; | 223 | int ret; |
252 | 224 | ||
253 | unsigned int j; | ||
254 | |||
255 | ret = sscanf(buf, "%u", &input); | 225 | ret = sscanf(buf, "%u", &input); |
256 | if (ret != 1) | 226 | if (ret != 1) |
257 | return -EINVAL; | 227 | return -EINVAL; |
@@ -259,19 +229,20 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, | |||
259 | if (input > 1) | 229 | if (input > 1) |
260 | input = 1; | 230 | input = 1; |
261 | 231 | ||
262 | if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */ | 232 | if (input == cs_tuners.ignore_nice) /* nothing to do */ |
263 | return count; | 233 | return count; |
264 | 234 | ||
265 | dbs_tuners_ins.ignore_nice = input; | 235 | cs_tuners.ignore_nice = input; |
266 | 236 | ||
267 | /* we need to re-evaluate prev_cpu_idle */ | 237 | /* we need to re-evaluate prev_cpu_idle */ |
268 | for_each_online_cpu(j) { | 238 | for_each_online_cpu(j) { |
269 | struct cpu_dbs_info_s *dbs_info; | 239 | struct cs_cpu_dbs_info_s *dbs_info; |
270 | dbs_info = &per_cpu(cs_cpu_dbs_info, j); | 240 | dbs_info = &per_cpu(cs_cpu_dbs_info, j); |
271 | dbs_info->prev_cpu_idle = get_cpu_idle_time(j, | 241 | dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, |
272 | &dbs_info->prev_cpu_wall); | 242 | &dbs_info->cdbs.prev_cpu_wall); |
273 | if (dbs_tuners_ins.ignore_nice) | 243 | if (cs_tuners.ignore_nice) |
274 | dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | 244 | dbs_info->cdbs.prev_cpu_nice = |
245 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | ||
275 | } | 246 | } |
276 | return count; | 247 | return count; |
277 | } | 248 | } |
@@ -289,18 +260,28 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b, | |||
289 | if (input > 100) | 260 | if (input > 100) |
290 | input = 100; | 261 | input = 100; |
291 | 262 | ||
292 | /* no need to test here if freq_step is zero as the user might actually | 263 | /* |
293 | * want this, they would be crazy though :) */ | 264 | * no need to test here if freq_step is zero as the user might actually |
294 | dbs_tuners_ins.freq_step = input; | 265 | * want this, they would be crazy though :) |
266 | */ | ||
267 | cs_tuners.freq_step = input; | ||
295 | return count; | 268 | return count; |
296 | } | 269 | } |
297 | 270 | ||
271 | show_one(cs, sampling_rate, sampling_rate); | ||
272 | show_one(cs, sampling_down_factor, sampling_down_factor); | ||
273 | show_one(cs, up_threshold, up_threshold); | ||
274 | show_one(cs, down_threshold, down_threshold); | ||
275 | show_one(cs, ignore_nice_load, ignore_nice); | ||
276 | show_one(cs, freq_step, freq_step); | ||
277 | |||
298 | define_one_global_rw(sampling_rate); | 278 | define_one_global_rw(sampling_rate); |
299 | define_one_global_rw(sampling_down_factor); | 279 | define_one_global_rw(sampling_down_factor); |
300 | define_one_global_rw(up_threshold); | 280 | define_one_global_rw(up_threshold); |
301 | define_one_global_rw(down_threshold); | 281 | define_one_global_rw(down_threshold); |
302 | define_one_global_rw(ignore_nice_load); | 282 | define_one_global_rw(ignore_nice_load); |
303 | define_one_global_rw(freq_step); | 283 | define_one_global_rw(freq_step); |
284 | define_one_global_ro(sampling_rate_min); | ||
304 | 285 | ||
305 | static struct attribute *dbs_attributes[] = { | 286 | static struct attribute *dbs_attributes[] = { |
306 | &sampling_rate_min.attr, | 287 | &sampling_rate_min.attr, |
@@ -313,283 +294,38 @@ static struct attribute *dbs_attributes[] = { | |||
313 | NULL | 294 | NULL |
314 | }; | 295 | }; |
315 | 296 | ||
316 | static struct attribute_group dbs_attr_group = { | 297 | static struct attribute_group cs_attr_group = { |
317 | .attrs = dbs_attributes, | 298 | .attrs = dbs_attributes, |
318 | .name = "conservative", | 299 | .name = "conservative", |
319 | }; | 300 | }; |
320 | 301 | ||
321 | /************************** sysfs end ************************/ | 302 | /************************** sysfs end ************************/ |
322 | 303 | ||
323 | static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | 304 | define_get_cpu_dbs_routines(cs_cpu_dbs_info); |
324 | { | ||
325 | unsigned int load = 0; | ||
326 | unsigned int max_load = 0; | ||
327 | unsigned int freq_target; | ||
328 | |||
329 | struct cpufreq_policy *policy; | ||
330 | unsigned int j; | ||
331 | |||
332 | policy = this_dbs_info->cur_policy; | ||
333 | |||
334 | /* | ||
335 | * Every sampling_rate, we check, if current idle time is less | ||
336 | * than 20% (default), then we try to increase frequency | ||
337 | * Every sampling_rate*sampling_down_factor, we check, if current | ||
338 | * idle time is more than 80%, then we try to decrease frequency | ||
339 | * | ||
340 | * Any frequency increase takes it to the maximum frequency. | ||
341 | * Frequency reduction happens at minimum steps of | ||
342 | * 5% (default) of maximum frequency | ||
343 | */ | ||
344 | |||
345 | /* Get Absolute Load */ | ||
346 | for_each_cpu(j, policy->cpus) { | ||
347 | struct cpu_dbs_info_s *j_dbs_info; | ||
348 | cputime64_t cur_wall_time, cur_idle_time; | ||
349 | unsigned int idle_time, wall_time; | ||
350 | |||
351 | j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); | ||
352 | |||
353 | cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); | ||
354 | |||
355 | wall_time = (unsigned int) | ||
356 | (cur_wall_time - j_dbs_info->prev_cpu_wall); | ||
357 | j_dbs_info->prev_cpu_wall = cur_wall_time; | ||
358 | |||
359 | idle_time = (unsigned int) | ||
360 | (cur_idle_time - j_dbs_info->prev_cpu_idle); | ||
361 | j_dbs_info->prev_cpu_idle = cur_idle_time; | ||
362 | |||
363 | if (dbs_tuners_ins.ignore_nice) { | ||
364 | u64 cur_nice; | ||
365 | unsigned long cur_nice_jiffies; | ||
366 | |||
367 | cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - | ||
368 | j_dbs_info->prev_cpu_nice; | ||
369 | /* | ||
370 | * Assumption: nice time between sampling periods will | ||
371 | * be less than 2^32 jiffies for 32 bit sys | ||
372 | */ | ||
373 | cur_nice_jiffies = (unsigned long) | ||
374 | cputime64_to_jiffies64(cur_nice); | ||
375 | |||
376 | j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | ||
377 | idle_time += jiffies_to_usecs(cur_nice_jiffies); | ||
378 | } | ||
379 | 305 | ||
380 | if (unlikely(!wall_time || wall_time < idle_time)) | 306 | static struct notifier_block cs_cpufreq_notifier_block = { |
381 | continue; | 307 | .notifier_call = dbs_cpufreq_notifier, |
382 | 308 | }; | |
383 | load = 100 * (wall_time - idle_time) / wall_time; | ||
384 | |||
385 | if (load > max_load) | ||
386 | max_load = load; | ||
387 | } | ||
388 | |||
389 | /* | ||
390 | * break out if we 'cannot' reduce the speed as the user might | ||
391 | * want freq_step to be zero | ||
392 | */ | ||
393 | if (dbs_tuners_ins.freq_step == 0) | ||
394 | return; | ||
395 | |||
396 | /* Check for frequency increase */ | ||
397 | if (max_load > dbs_tuners_ins.up_threshold) { | ||
398 | this_dbs_info->down_skip = 0; | ||
399 | |||
400 | /* if we are already at full speed then break out early */ | ||
401 | if (this_dbs_info->requested_freq == policy->max) | ||
402 | return; | ||
403 | |||
404 | freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; | ||
405 | |||
406 | /* max freq cannot be less than 100. But who knows.... */ | ||
407 | if (unlikely(freq_target == 0)) | ||
408 | freq_target = 5; | ||
409 | |||
410 | this_dbs_info->requested_freq += freq_target; | ||
411 | if (this_dbs_info->requested_freq > policy->max) | ||
412 | this_dbs_info->requested_freq = policy->max; | ||
413 | |||
414 | __cpufreq_driver_target(policy, this_dbs_info->requested_freq, | ||
415 | CPUFREQ_RELATION_H); | ||
416 | return; | ||
417 | } | ||
418 | |||
419 | /* | ||
420 | * The optimal frequency is the frequency that is the lowest that | ||
421 | * can support the current CPU usage without triggering the up | ||
422 | * policy. To be safe, we focus 10 points under the threshold. | ||
423 | */ | ||
424 | if (max_load < (dbs_tuners_ins.down_threshold - 10)) { | ||
425 | freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; | ||
426 | |||
427 | this_dbs_info->requested_freq -= freq_target; | ||
428 | if (this_dbs_info->requested_freq < policy->min) | ||
429 | this_dbs_info->requested_freq = policy->min; | ||
430 | |||
431 | /* | ||
432 | * if we cannot reduce the frequency anymore, break out early | ||
433 | */ | ||
434 | if (policy->cur == policy->min) | ||
435 | return; | ||
436 | |||
437 | __cpufreq_driver_target(policy, this_dbs_info->requested_freq, | ||
438 | CPUFREQ_RELATION_H); | ||
439 | return; | ||
440 | } | ||
441 | } | ||
442 | |||
443 | static void do_dbs_timer(struct work_struct *work) | ||
444 | { | ||
445 | struct cpu_dbs_info_s *dbs_info = | ||
446 | container_of(work, struct cpu_dbs_info_s, work.work); | ||
447 | unsigned int cpu = dbs_info->cpu; | ||
448 | |||
449 | /* We want all CPUs to do sampling nearly on same jiffy */ | ||
450 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); | ||
451 | |||
452 | delay -= jiffies % delay; | ||
453 | |||
454 | mutex_lock(&dbs_info->timer_mutex); | ||
455 | |||
456 | dbs_check_cpu(dbs_info); | ||
457 | |||
458 | schedule_delayed_work_on(cpu, &dbs_info->work, delay); | ||
459 | mutex_unlock(&dbs_info->timer_mutex); | ||
460 | } | ||
461 | |||
462 | static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) | ||
463 | { | ||
464 | /* We want all CPUs to do sampling nearly on same jiffy */ | ||
465 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); | ||
466 | delay -= jiffies % delay; | ||
467 | 309 | ||
468 | dbs_info->enable = 1; | 310 | static struct cs_ops cs_ops = { |
469 | INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer); | 311 | .notifier_block = &cs_cpufreq_notifier_block, |
470 | schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); | 312 | }; |
471 | } | ||
472 | 313 | ||
473 | static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) | 314 | static struct dbs_data cs_dbs_data = { |
474 | { | 315 | .governor = GOV_CONSERVATIVE, |
475 | dbs_info->enable = 0; | 316 | .attr_group = &cs_attr_group, |
476 | cancel_delayed_work_sync(&dbs_info->work); | 317 | .tuners = &cs_tuners, |
477 | } | 318 | .get_cpu_cdbs = get_cpu_cdbs, |
319 | .get_cpu_dbs_info_s = get_cpu_dbs_info_s, | ||
320 | .gov_dbs_timer = cs_dbs_timer, | ||
321 | .gov_check_cpu = cs_check_cpu, | ||
322 | .gov_ops = &cs_ops, | ||
323 | }; | ||
478 | 324 | ||
479 | static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | 325 | static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy, |
480 | unsigned int event) | 326 | unsigned int event) |
481 | { | 327 | { |
482 | unsigned int cpu = policy->cpu; | 328 | return cpufreq_governor_dbs(&cs_dbs_data, policy, event); |
483 | struct cpu_dbs_info_s *this_dbs_info; | ||
484 | unsigned int j; | ||
485 | int rc; | ||
486 | |||
487 | this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); | ||
488 | |||
489 | switch (event) { | ||
490 | case CPUFREQ_GOV_START: | ||
491 | if ((!cpu_online(cpu)) || (!policy->cur)) | ||
492 | return -EINVAL; | ||
493 | |||
494 | mutex_lock(&dbs_mutex); | ||
495 | |||
496 | for_each_cpu(j, policy->cpus) { | ||
497 | struct cpu_dbs_info_s *j_dbs_info; | ||
498 | j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); | ||
499 | j_dbs_info->cur_policy = policy; | ||
500 | |||
501 | j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, | ||
502 | &j_dbs_info->prev_cpu_wall); | ||
503 | if (dbs_tuners_ins.ignore_nice) | ||
504 | j_dbs_info->prev_cpu_nice = | ||
505 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | ||
506 | } | ||
507 | this_dbs_info->cpu = cpu; | ||
508 | this_dbs_info->down_skip = 0; | ||
509 | this_dbs_info->requested_freq = policy->cur; | ||
510 | |||
511 | mutex_init(&this_dbs_info->timer_mutex); | ||
512 | dbs_enable++; | ||
513 | /* | ||
514 | * Start the timerschedule work, when this governor | ||
515 | * is used for first time | ||
516 | */ | ||
517 | if (dbs_enable == 1) { | ||
518 | unsigned int latency; | ||
519 | /* policy latency is in nS. Convert it to uS first */ | ||
520 | latency = policy->cpuinfo.transition_latency / 1000; | ||
521 | if (latency == 0) | ||
522 | latency = 1; | ||
523 | |||
524 | rc = sysfs_create_group(cpufreq_global_kobject, | ||
525 | &dbs_attr_group); | ||
526 | if (rc) { | ||
527 | mutex_unlock(&dbs_mutex); | ||
528 | return rc; | ||
529 | } | ||
530 | |||
531 | /* | ||
532 | * conservative does not implement micro like ondemand | ||
533 | * governor, thus we are bound to jiffes/HZ | ||
534 | */ | ||
535 | min_sampling_rate = | ||
536 | MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); | ||
537 | /* Bring kernel and HW constraints together */ | ||
538 | min_sampling_rate = max(min_sampling_rate, | ||
539 | MIN_LATENCY_MULTIPLIER * latency); | ||
540 | dbs_tuners_ins.sampling_rate = | ||
541 | max(min_sampling_rate, | ||
542 | latency * LATENCY_MULTIPLIER); | ||
543 | |||
544 | cpufreq_register_notifier( | ||
545 | &dbs_cpufreq_notifier_block, | ||
546 | CPUFREQ_TRANSITION_NOTIFIER); | ||
547 | } | ||
548 | mutex_unlock(&dbs_mutex); | ||
549 | |||
550 | dbs_timer_init(this_dbs_info); | ||
551 | |||
552 | break; | ||
553 | |||
554 | case CPUFREQ_GOV_STOP: | ||
555 | dbs_timer_exit(this_dbs_info); | ||
556 | |||
557 | mutex_lock(&dbs_mutex); | ||
558 | dbs_enable--; | ||
559 | mutex_destroy(&this_dbs_info->timer_mutex); | ||
560 | |||
561 | /* | ||
562 | * Stop the timerschedule work, when this governor | ||
563 | * is used for first time | ||
564 | */ | ||
565 | if (dbs_enable == 0) | ||
566 | cpufreq_unregister_notifier( | ||
567 | &dbs_cpufreq_notifier_block, | ||
568 | CPUFREQ_TRANSITION_NOTIFIER); | ||
569 | |||
570 | mutex_unlock(&dbs_mutex); | ||
571 | if (!dbs_enable) | ||
572 | sysfs_remove_group(cpufreq_global_kobject, | ||
573 | &dbs_attr_group); | ||
574 | |||
575 | break; | ||
576 | |||
577 | case CPUFREQ_GOV_LIMITS: | ||
578 | mutex_lock(&this_dbs_info->timer_mutex); | ||
579 | if (policy->max < this_dbs_info->cur_policy->cur) | ||
580 | __cpufreq_driver_target( | ||
581 | this_dbs_info->cur_policy, | ||
582 | policy->max, CPUFREQ_RELATION_H); | ||
583 | else if (policy->min > this_dbs_info->cur_policy->cur) | ||
584 | __cpufreq_driver_target( | ||
585 | this_dbs_info->cur_policy, | ||
586 | policy->min, CPUFREQ_RELATION_L); | ||
587 | dbs_check_cpu(this_dbs_info); | ||
588 | mutex_unlock(&this_dbs_info->timer_mutex); | ||
589 | |||
590 | break; | ||
591 | } | ||
592 | return 0; | ||
593 | } | 329 | } |
594 | 330 | ||
595 | #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE | 331 | #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE |
@@ -597,13 +333,14 @@ static | |||
597 | #endif | 333 | #endif |
598 | struct cpufreq_governor cpufreq_gov_conservative = { | 334 | struct cpufreq_governor cpufreq_gov_conservative = { |
599 | .name = "conservative", | 335 | .name = "conservative", |
600 | .governor = cpufreq_governor_dbs, | 336 | .governor = cs_cpufreq_governor_dbs, |
601 | .max_transition_latency = TRANSITION_LATENCY_LIMIT, | 337 | .max_transition_latency = TRANSITION_LATENCY_LIMIT, |
602 | .owner = THIS_MODULE, | 338 | .owner = THIS_MODULE, |
603 | }; | 339 | }; |
604 | 340 | ||
605 | static int __init cpufreq_gov_dbs_init(void) | 341 | static int __init cpufreq_gov_dbs_init(void) |
606 | { | 342 | { |
343 | mutex_init(&cs_dbs_data.mutex); | ||
607 | return cpufreq_register_governor(&cpufreq_gov_conservative); | 344 | return cpufreq_register_governor(&cpufreq_gov_conservative); |
608 | } | 345 | } |
609 | 346 | ||
@@ -612,7 +349,6 @@ static void __exit cpufreq_gov_dbs_exit(void) | |||
612 | cpufreq_unregister_governor(&cpufreq_gov_conservative); | 349 | cpufreq_unregister_governor(&cpufreq_gov_conservative); |
613 | } | 350 | } |
614 | 351 | ||
615 | |||
616 | MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>"); | 352 | MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>"); |
617 | MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for " | 353 | MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for " |
618 | "Low Latency Frequency Transition capable processors " | 354 | "Low Latency Frequency Transition capable processors " |
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c new file mode 100644 index 000000000000..6c5f1d383cdc --- /dev/null +++ b/drivers/cpufreq/cpufreq_governor.c | |||
@@ -0,0 +1,318 @@ | |||
1 | /* | ||
2 | * drivers/cpufreq/cpufreq_governor.c | ||
3 | * | ||
4 | * CPUFREQ governors common code | ||
5 | * | ||
6 | * Copyright (C) 2001 Russell King | ||
7 | * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>. | ||
8 | * (C) 2003 Jun Nakajima <jun.nakajima@intel.com> | ||
9 | * (C) 2009 Alexander Clouter <alex@digriz.org.uk> | ||
10 | * (c) 2012 Viresh Kumar <viresh.kumar@linaro.org> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | */ | ||
16 | |||
17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
18 | |||
19 | #include <asm/cputime.h> | ||
20 | #include <linux/cpufreq.h> | ||
21 | #include <linux/cpumask.h> | ||
22 | #include <linux/export.h> | ||
23 | #include <linux/kernel_stat.h> | ||
24 | #include <linux/mutex.h> | ||
25 | #include <linux/tick.h> | ||
26 | #include <linux/types.h> | ||
27 | #include <linux/workqueue.h> | ||
28 | |||
29 | #include "cpufreq_governor.h" | ||
30 | |||
31 | static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) | ||
32 | { | ||
33 | u64 idle_time; | ||
34 | u64 cur_wall_time; | ||
35 | u64 busy_time; | ||
36 | |||
37 | cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); | ||
38 | |||
39 | busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; | ||
40 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; | ||
41 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; | ||
42 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; | ||
43 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; | ||
44 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; | ||
45 | |||
46 | idle_time = cur_wall_time - busy_time; | ||
47 | if (wall) | ||
48 | *wall = cputime_to_usecs(cur_wall_time); | ||
49 | |||
50 | return cputime_to_usecs(idle_time); | ||
51 | } | ||
52 | |||
53 | u64 get_cpu_idle_time(unsigned int cpu, u64 *wall) | ||
54 | { | ||
55 | u64 idle_time = get_cpu_idle_time_us(cpu, NULL); | ||
56 | |||
57 | if (idle_time == -1ULL) | ||
58 | return get_cpu_idle_time_jiffy(cpu, wall); | ||
59 | else | ||
60 | idle_time += get_cpu_iowait_time_us(cpu, wall); | ||
61 | |||
62 | return idle_time; | ||
63 | } | ||
64 | EXPORT_SYMBOL_GPL(get_cpu_idle_time); | ||
65 | |||
66 | void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) | ||
67 | { | ||
68 | struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu); | ||
69 | struct od_dbs_tuners *od_tuners = dbs_data->tuners; | ||
70 | struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; | ||
71 | struct cpufreq_policy *policy; | ||
72 | unsigned int max_load = 0; | ||
73 | unsigned int ignore_nice; | ||
74 | unsigned int j; | ||
75 | |||
76 | if (dbs_data->governor == GOV_ONDEMAND) | ||
77 | ignore_nice = od_tuners->ignore_nice; | ||
78 | else | ||
79 | ignore_nice = cs_tuners->ignore_nice; | ||
80 | |||
81 | policy = cdbs->cur_policy; | ||
82 | |||
83 | /* Get Absolute Load (in terms of freq for ondemand gov) */ | ||
84 | for_each_cpu(j, policy->cpus) { | ||
85 | struct cpu_dbs_common_info *j_cdbs; | ||
86 | u64 cur_wall_time, cur_idle_time, cur_iowait_time; | ||
87 | unsigned int idle_time, wall_time, iowait_time; | ||
88 | unsigned int load; | ||
89 | |||
90 | j_cdbs = dbs_data->get_cpu_cdbs(j); | ||
91 | |||
92 | cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); | ||
93 | |||
94 | wall_time = (unsigned int) | ||
95 | (cur_wall_time - j_cdbs->prev_cpu_wall); | ||
96 | j_cdbs->prev_cpu_wall = cur_wall_time; | ||
97 | |||
98 | idle_time = (unsigned int) | ||
99 | (cur_idle_time - j_cdbs->prev_cpu_idle); | ||
100 | j_cdbs->prev_cpu_idle = cur_idle_time; | ||
101 | |||
102 | if (ignore_nice) { | ||
103 | u64 cur_nice; | ||
104 | unsigned long cur_nice_jiffies; | ||
105 | |||
106 | cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - | ||
107 | cdbs->prev_cpu_nice; | ||
108 | /* | ||
109 | * Assumption: nice time between sampling periods will | ||
110 | * be less than 2^32 jiffies for 32 bit sys | ||
111 | */ | ||
112 | cur_nice_jiffies = (unsigned long) | ||
113 | cputime64_to_jiffies64(cur_nice); | ||
114 | |||
115 | cdbs->prev_cpu_nice = | ||
116 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | ||
117 | idle_time += jiffies_to_usecs(cur_nice_jiffies); | ||
118 | } | ||
119 | |||
120 | if (dbs_data->governor == GOV_ONDEMAND) { | ||
121 | struct od_cpu_dbs_info_s *od_j_dbs_info = | ||
122 | dbs_data->get_cpu_dbs_info_s(cpu); | ||
123 | |||
124 | cur_iowait_time = get_cpu_iowait_time_us(j, | ||
125 | &cur_wall_time); | ||
126 | if (cur_iowait_time == -1ULL) | ||
127 | cur_iowait_time = 0; | ||
128 | |||
129 | iowait_time = (unsigned int) (cur_iowait_time - | ||
130 | od_j_dbs_info->prev_cpu_iowait); | ||
131 | od_j_dbs_info->prev_cpu_iowait = cur_iowait_time; | ||
132 | |||
133 | /* | ||
134 | * For the purpose of ondemand, waiting for disk IO is | ||
135 | * an indication that you're performance critical, and | ||
136 | * not that the system is actually idle. So subtract the | ||
137 | * iowait time from the cpu idle time. | ||
138 | */ | ||
139 | if (od_tuners->io_is_busy && idle_time >= iowait_time) | ||
140 | idle_time -= iowait_time; | ||
141 | } | ||
142 | |||
143 | if (unlikely(!wall_time || wall_time < idle_time)) | ||
144 | continue; | ||
145 | |||
146 | load = 100 * (wall_time - idle_time) / wall_time; | ||
147 | |||
148 | if (dbs_data->governor == GOV_ONDEMAND) { | ||
149 | int freq_avg = __cpufreq_driver_getavg(policy, j); | ||
150 | if (freq_avg <= 0) | ||
151 | freq_avg = policy->cur; | ||
152 | |||
153 | load *= freq_avg; | ||
154 | } | ||
155 | |||
156 | if (load > max_load) | ||
157 | max_load = load; | ||
158 | } | ||
159 | |||
160 | dbs_data->gov_check_cpu(cpu, max_load); | ||
161 | } | ||
162 | EXPORT_SYMBOL_GPL(dbs_check_cpu); | ||
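
The per-CPU load computed above boils down to deltas of two counters between samples; a minimal userspace sketch of the same arithmetic, with made-up sample values (the freq_avg weighting applies only to the ondemand path):

    #include <stdio.h>

    /* Sketch of the load math used by dbs_check_cpu(), with hypothetical
     * sample values; times are in microseconds as in the governor code. */
    int main(void)
    {
        unsigned long long prev_wall = 1000000, prev_idle = 800000;
        unsigned long long cur_wall  = 1100000, cur_idle  = 850000;

        unsigned int wall_time = (unsigned int)(cur_wall - prev_wall); /* 100000 us */
        unsigned int idle_time = (unsigned int)(cur_idle - prev_idle); /*  50000 us */

        /* busy percentage over the last sampling period */
        unsigned int load = 100 * (wall_time - idle_time) / wall_time; /* 50 */

        /* ondemand additionally weights the load by the average frequency,
         * so a 50% load at 2 GHz outranks a 50% load at 1 GHz */
        unsigned int freq_avg = 2000000;            /* kHz, hypothetical */
        unsigned int load_freq = load * freq_avg;

        printf("load=%u%% load_freq=%u\n", load, load_freq);
        return 0;
    }
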
163 | |||
164 | static inline void dbs_timer_init(struct dbs_data *dbs_data, | ||
165 | struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate) | ||
166 | { | ||
167 | int delay = delay_for_sampling_rate(sampling_rate); | ||
168 | |||
169 | INIT_DEFERRABLE_WORK(&cdbs->work, dbs_data->gov_dbs_timer); | ||
170 | schedule_delayed_work_on(cdbs->cpu, &cdbs->work, delay); | ||
171 | } | ||
172 | |||
173 | static inline void dbs_timer_exit(struct cpu_dbs_common_info *cdbs) | ||
174 | { | ||
175 | cancel_delayed_work_sync(&cdbs->work); | ||
176 | } | ||
177 | |||
178 | int cpufreq_governor_dbs(struct dbs_data *dbs_data, | ||
179 | struct cpufreq_policy *policy, unsigned int event) | ||
180 | { | ||
181 | struct od_cpu_dbs_info_s *od_dbs_info = NULL; | ||
182 | struct cs_cpu_dbs_info_s *cs_dbs_info = NULL; | ||
183 | struct od_dbs_tuners *od_tuners = dbs_data->tuners; | ||
184 | struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; | ||
185 | struct cpu_dbs_common_info *cpu_cdbs; | ||
186 | unsigned int *sampling_rate, latency, ignore_nice, j, cpu = policy->cpu; | ||
187 | int rc; | ||
188 | |||
189 | cpu_cdbs = dbs_data->get_cpu_cdbs(cpu); | ||
190 | |||
191 | if (dbs_data->governor == GOV_CONSERVATIVE) { | ||
192 | cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu); | ||
193 | sampling_rate = &cs_tuners->sampling_rate; | ||
194 | ignore_nice = cs_tuners->ignore_nice; | ||
195 | } else { | ||
196 | od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu); | ||
197 | sampling_rate = &od_tuners->sampling_rate; | ||
198 | ignore_nice = od_tuners->ignore_nice; | ||
199 | } | ||
200 | |||
201 | switch (event) { | ||
202 | case CPUFREQ_GOV_START: | ||
203 | if ((!cpu_online(cpu)) || (!policy->cur)) | ||
204 | return -EINVAL; | ||
205 | |||
206 | mutex_lock(&dbs_data->mutex); | ||
207 | |||
208 | dbs_data->enable++; | ||
209 | cpu_cdbs->cpu = cpu; | ||
210 | for_each_cpu(j, policy->cpus) { | ||
211 | struct cpu_dbs_common_info *j_cdbs; | ||
212 | j_cdbs = dbs_data->get_cpu_cdbs(j); | ||
213 | |||
214 | j_cdbs->cur_policy = policy; | ||
215 | j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, | ||
216 | &j_cdbs->prev_cpu_wall); | ||
217 | if (ignore_nice) | ||
218 | j_cdbs->prev_cpu_nice = | ||
219 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | ||
220 | } | ||
221 | |||
222 | /* | ||
223 | * Start the timer schedule work when this governor is used for | ||
224 | * the first time | ||
225 | */ | ||
226 | if (dbs_data->enable != 1) | ||
227 | goto second_time; | ||
228 | |||
229 | rc = sysfs_create_group(cpufreq_global_kobject, | ||
230 | dbs_data->attr_group); | ||
231 | if (rc) { | ||
232 | mutex_unlock(&dbs_data->mutex); | ||
233 | return rc; | ||
234 | } | ||
235 | |||
236 | /* policy latency is in nS. Convert it to uS first */ | ||
237 | latency = policy->cpuinfo.transition_latency / 1000; | ||
238 | if (latency == 0) | ||
239 | latency = 1; | ||
240 | |||
241 | /* | ||
242 | * conservative does not implement micro-accounting like the ondemand | ||
243 | * governor, thus we are bound to jiffies/HZ | ||
244 | */ | ||
245 | if (dbs_data->governor == GOV_CONSERVATIVE) { | ||
246 | struct cs_ops *ops = dbs_data->gov_ops; | ||
247 | |||
248 | cpufreq_register_notifier(ops->notifier_block, | ||
249 | CPUFREQ_TRANSITION_NOTIFIER); | ||
250 | |||
251 | dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * | ||
252 | jiffies_to_usecs(10); | ||
253 | } else { | ||
254 | struct od_ops *ops = dbs_data->gov_ops; | ||
255 | |||
256 | od_tuners->io_is_busy = ops->io_busy(); | ||
257 | } | ||
258 | |||
259 | /* Bring kernel and HW constraints together */ | ||
260 | dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate, | ||
261 | MIN_LATENCY_MULTIPLIER * latency); | ||
262 | *sampling_rate = max(dbs_data->min_sampling_rate, latency * | ||
263 | LATENCY_MULTIPLIER); | ||
264 | |||
265 | second_time: | ||
266 | if (dbs_data->governor == GOV_CONSERVATIVE) { | ||
267 | cs_dbs_info->down_skip = 0; | ||
268 | cs_dbs_info->enable = 1; | ||
269 | cs_dbs_info->requested_freq = policy->cur; | ||
270 | } else { | ||
271 | struct od_ops *ops = dbs_data->gov_ops; | ||
272 | od_dbs_info->rate_mult = 1; | ||
273 | od_dbs_info->sample_type = OD_NORMAL_SAMPLE; | ||
274 | ops->powersave_bias_init_cpu(cpu); | ||
275 | } | ||
276 | mutex_unlock(&dbs_data->mutex); | ||
277 | |||
278 | mutex_init(&cpu_cdbs->timer_mutex); | ||
279 | dbs_timer_init(dbs_data, cpu_cdbs, *sampling_rate); | ||
280 | break; | ||
281 | |||
282 | case CPUFREQ_GOV_STOP: | ||
283 | if (dbs_data->governor == GOV_CONSERVATIVE) | ||
284 | cs_dbs_info->enable = 0; | ||
285 | |||
286 | dbs_timer_exit(cpu_cdbs); | ||
287 | |||
288 | mutex_lock(&dbs_data->mutex); | ||
289 | mutex_destroy(&cpu_cdbs->timer_mutex); | ||
290 | dbs_data->enable--; | ||
291 | if (!dbs_data->enable) { | ||
292 | struct cs_ops *ops = dbs_data->gov_ops; | ||
293 | |||
294 | sysfs_remove_group(cpufreq_global_kobject, | ||
295 | dbs_data->attr_group); | ||
296 | if (dbs_data->governor == GOV_CONSERVATIVE) | ||
297 | cpufreq_unregister_notifier(ops->notifier_block, | ||
298 | CPUFREQ_TRANSITION_NOTIFIER); | ||
299 | } | ||
300 | mutex_unlock(&dbs_data->mutex); | ||
301 | |||
302 | break; | ||
303 | |||
304 | case CPUFREQ_GOV_LIMITS: | ||
305 | mutex_lock(&cpu_cdbs->timer_mutex); | ||
306 | if (policy->max < cpu_cdbs->cur_policy->cur) | ||
307 | __cpufreq_driver_target(cpu_cdbs->cur_policy, | ||
308 | policy->max, CPUFREQ_RELATION_H); | ||
309 | else if (policy->min > cpu_cdbs->cur_policy->cur) | ||
310 | __cpufreq_driver_target(cpu_cdbs->cur_policy, | ||
311 | policy->min, CPUFREQ_RELATION_L); | ||
312 | dbs_check_cpu(dbs_data, cpu); | ||
313 | mutex_unlock(&cpu_cdbs->timer_mutex); | ||
314 | break; | ||
315 | } | ||
316 | return 0; | ||
317 | } | ||
318 | EXPORT_SYMBOL_GPL(cpufreq_governor_dbs); | ||
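
Seen from a governor's side, the contract with this common core is a filled-in struct dbs_data plus a thin ->governor callback. A hedged sketch of that wiring follows; the foo_* names are invented for illustration, and the field set simply mirrors cs_dbs_data in cpufreq_conservative.c above:

    /* Hypothetical example only - mirrors the conservative wiring above. */
    static struct dbs_data foo_dbs_data = {
        .governor           = GOV_CONSERVATIVE,
        .attr_group         = &foo_attr_group,      /* sysfs tunables */
        .tuners             = &foo_tuners,          /* struct cs_dbs_tuners */
        .get_cpu_cdbs       = get_cpu_cdbs,         /* from define_get_cpu_dbs_routines() */
        .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
        .gov_dbs_timer      = foo_dbs_timer,        /* deferred work handler */
        .gov_check_cpu      = foo_check_cpu,        /* frequency decision */
        .gov_ops            = &foo_cs_ops,          /* notifier block for conservative */
    };

    /* The governor callback just forwards to the common core; the mutex is
     * expected to be set up in module init via mutex_init(&foo_dbs_data.mutex). */
    static int foo_governor(struct cpufreq_policy *policy, unsigned int event)
    {
        return cpufreq_governor_dbs(&foo_dbs_data, policy, event);
    }
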
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h new file mode 100644 index 000000000000..f6616540c53d --- /dev/null +++ b/drivers/cpufreq/cpufreq_governor.h | |||
@@ -0,0 +1,176 @@ | |||
1 | /* | ||
2 | * drivers/cpufreq/cpufreq_governor.h | ||
3 | * | ||
4 | * Header file for CPUFreq governors common code | ||
5 | * | ||
6 | * Copyright (C) 2001 Russell King | ||
7 | * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>. | ||
8 | * (C) 2003 Jun Nakajima <jun.nakajima@intel.com> | ||
9 | * (C) 2009 Alexander Clouter <alex@digriz.org.uk> | ||
10 | * (c) 2012 Viresh Kumar <viresh.kumar@linaro.org> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | */ | ||
16 | |||
17 | #ifndef _CPUFREQ_GOVERNER_H | ||
18 | #define _CPUFREQ_GOVERNER_H | ||
19 | |||
20 | #include <linux/cpufreq.h> | ||
21 | #include <linux/kobject.h> | ||
22 | #include <linux/mutex.h> | ||
23 | #include <linux/workqueue.h> | ||
24 | #include <linux/sysfs.h> | ||
25 | |||
26 | /* | ||
27 | * The polling frequency depends on the capability of the processor. Default | ||
28 | * polling frequency is 1000 times the transition latency of the processor. The | ||
29 | * governor will work on any processor with transition latency <= 10mS, using | ||
30 | * appropriate sampling rate. | ||
31 | * | ||
32 | * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) | ||
33 | * this governor will not work. All times here are in uS. | ||
34 | */ | ||
35 | #define MIN_SAMPLING_RATE_RATIO (2) | ||
36 | #define LATENCY_MULTIPLIER (1000) | ||
37 | #define MIN_LATENCY_MULTIPLIER (100) | ||
38 | #define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) | ||
39 | |||
40 | /* Ondemand Sampling types */ | ||
41 | enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE}; | ||
42 | |||
43 | /* Macro creating sysfs show routines */ | ||
44 | #define show_one(_gov, file_name, object) \ | ||
45 | static ssize_t show_##file_name \ | ||
46 | (struct kobject *kobj, struct attribute *attr, char *buf) \ | ||
47 | { \ | ||
48 | return sprintf(buf, "%u\n", _gov##_tuners.object); \ | ||
49 | } | ||
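
For readers unpacking the token pasting: show_one(cs, up_threshold, up_threshold), for instance, should expand to roughly the following, assuming a cs_tuners instance as in cpufreq_conservative.c:

    static ssize_t show_up_threshold
    (struct kobject *kobj, struct attribute *attr, char *buf)
    {
        return sprintf(buf, "%u\n", cs_tuners.up_threshold);
    }
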
50 | |||
51 | #define define_get_cpu_dbs_routines(_dbs_info) \ | ||
52 | static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu) \ | ||
53 | { \ | ||
54 | return &per_cpu(_dbs_info, cpu).cdbs; \ | ||
55 | } \ | ||
56 | \ | ||
57 | static void *get_cpu_dbs_info_s(int cpu) \ | ||
58 | { \ | ||
59 | return &per_cpu(_dbs_info, cpu); \ | ||
60 | } | ||
61 | |||
62 | /* | ||
63 | * Abbreviations: | ||
64 | * dbs: used as a short form for demand-based switching. It helps to keep | ||
65 | * variable names smaller and simpler | ||
66 | * cdbs: common dbs | ||
67 | * od_*: On-demand governor | ||
68 | * cs_*: Conservative governor | ||
69 | */ | ||
70 | |||
71 | /* Per cpu structures */ | ||
72 | struct cpu_dbs_common_info { | ||
73 | int cpu; | ||
74 | u64 prev_cpu_idle; | ||
75 | u64 prev_cpu_wall; | ||
76 | u64 prev_cpu_nice; | ||
77 | struct cpufreq_policy *cur_policy; | ||
78 | struct delayed_work work; | ||
79 | /* | ||
80 | * percpu mutex that serializes governor limit change with gov_dbs_timer | ||
81 | * invocation. We do not want gov_dbs_timer to run when user is changing | ||
82 | * the governor or limits. | ||
83 | */ | ||
84 | struct mutex timer_mutex; | ||
85 | }; | ||
86 | |||
87 | struct od_cpu_dbs_info_s { | ||
88 | struct cpu_dbs_common_info cdbs; | ||
89 | u64 prev_cpu_iowait; | ||
90 | struct cpufreq_frequency_table *freq_table; | ||
91 | unsigned int freq_lo; | ||
92 | unsigned int freq_lo_jiffies; | ||
93 | unsigned int freq_hi_jiffies; | ||
94 | unsigned int rate_mult; | ||
95 | unsigned int sample_type:1; | ||
96 | }; | ||
97 | |||
98 | struct cs_cpu_dbs_info_s { | ||
99 | struct cpu_dbs_common_info cdbs; | ||
100 | unsigned int down_skip; | ||
101 | unsigned int requested_freq; | ||
102 | unsigned int enable:1; | ||
103 | }; | ||
104 | |||
105 | /* Governors' sysfs tunables */ | ||
106 | struct od_dbs_tuners { | ||
107 | unsigned int ignore_nice; | ||
108 | unsigned int sampling_rate; | ||
109 | unsigned int sampling_down_factor; | ||
110 | unsigned int up_threshold; | ||
111 | unsigned int down_differential; | ||
112 | unsigned int powersave_bias; | ||
113 | unsigned int io_is_busy; | ||
114 | }; | ||
115 | |||
116 | struct cs_dbs_tuners { | ||
117 | unsigned int ignore_nice; | ||
118 | unsigned int sampling_rate; | ||
119 | unsigned int sampling_down_factor; | ||
120 | unsigned int up_threshold; | ||
121 | unsigned int down_threshold; | ||
122 | unsigned int freq_step; | ||
123 | }; | ||
124 | |||
125 | /* Per-governor data */ | ||
126 | struct dbs_data { | ||
127 | /* Common across governors */ | ||
128 | #define GOV_ONDEMAND 0 | ||
129 | #define GOV_CONSERVATIVE 1 | ||
130 | int governor; | ||
131 | unsigned int min_sampling_rate; | ||
132 | unsigned int enable; /* number of CPUs using this policy */ | ||
133 | struct attribute_group *attr_group; | ||
134 | void *tuners; | ||
135 | |||
136 | /* dbs_mutex protects dbs_enable in governor start/stop */ | ||
137 | struct mutex mutex; | ||
138 | |||
139 | struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu); | ||
140 | void *(*get_cpu_dbs_info_s)(int cpu); | ||
141 | void (*gov_dbs_timer)(struct work_struct *work); | ||
142 | void (*gov_check_cpu)(int cpu, unsigned int load); | ||
143 | |||
144 | /* Governor specific ops, see below */ | ||
145 | void *gov_ops; | ||
146 | }; | ||
147 | |||
148 | /* Governor specific ops, will be passed to dbs_data->gov_ops */ | ||
149 | struct od_ops { | ||
150 | int (*io_busy)(void); | ||
151 | void (*powersave_bias_init_cpu)(int cpu); | ||
152 | unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy, | ||
153 | unsigned int freq_next, unsigned int relation); | ||
154 | void (*freq_increase)(struct cpufreq_policy *p, unsigned int freq); | ||
155 | }; | ||
156 | |||
157 | struct cs_ops { | ||
158 | struct notifier_block *notifier_block; | ||
159 | }; | ||
160 | |||
161 | static inline int delay_for_sampling_rate(unsigned int sampling_rate) | ||
162 | { | ||
163 | int delay = usecs_to_jiffies(sampling_rate); | ||
164 | |||
165 | /* We want all CPUs to do sampling nearly on same jiffy */ | ||
166 | if (num_online_cpus() > 1) | ||
167 | delay -= jiffies % delay; | ||
168 | |||
169 | return delay; | ||
170 | } | ||
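
A worked example of the jiffy alignment, assuming HZ = 100 (one jiffy = 10000 us) purely for illustration:

    /* Standalone sketch of delay_for_sampling_rate() with hypothetical values. */
    static unsigned long sketch_delay(unsigned long jiffies_now,
                                      unsigned int sampling_rate_us)
    {
        unsigned long delay = sampling_rate_us / 10000; /* usecs_to_jiffies() at HZ=100 */

        /* e.g. sampling_rate_us = 50000 -> delay = 5 jiffies; if the current
         * jiffy count is 1003, 1003 % 5 = 3 is subtracted, so every CPU's
         * work lands on a multiple of 5 jiffies (here 1005). */
        delay -= jiffies_now % delay;
        return delay;
    }
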
171 | |||
172 | u64 get_cpu_idle_time(unsigned int cpu, u64 *wall); | ||
173 | void dbs_check_cpu(struct dbs_data *dbs_data, int cpu); | ||
174 | int cpufreq_governor_dbs(struct dbs_data *dbs_data, | ||
175 | struct cpufreq_policy *policy, unsigned int event); | ||
176 | #endif /* _CPUFREQ_GOVERNER_H */ | ||
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 396322f2a83f..7731f7c7e79a 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c | |||
@@ -10,24 +10,23 @@ | |||
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/kernel.h> | 13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
14 | #include <linux/module.h> | 14 | |
15 | #include <linux/init.h> | ||
16 | #include <linux/cpufreq.h> | 15 | #include <linux/cpufreq.h> |
17 | #include <linux/cpu.h> | 16 | #include <linux/init.h> |
18 | #include <linux/jiffies.h> | 17 | #include <linux/kernel.h> |
19 | #include <linux/kernel_stat.h> | 18 | #include <linux/kernel_stat.h> |
19 | #include <linux/kobject.h> | ||
20 | #include <linux/module.h> | ||
20 | #include <linux/mutex.h> | 21 | #include <linux/mutex.h> |
21 | #include <linux/hrtimer.h> | 22 | #include <linux/percpu-defs.h> |
23 | #include <linux/sysfs.h> | ||
22 | #include <linux/tick.h> | 24 | #include <linux/tick.h> |
23 | #include <linux/ktime.h> | 25 | #include <linux/types.h> |
24 | #include <linux/sched.h> | ||
25 | 26 | ||
26 | /* | 27 | #include "cpufreq_governor.h" |
27 | * dbs is used in this file as a shortform for demandbased switching | ||
28 | * It helps to keep variable names smaller, simpler | ||
29 | */ | ||
30 | 28 | ||
29 | /* On-demand governor macros */ | ||
31 | #define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) | 30 | #define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) |
32 | #define DEF_FREQUENCY_UP_THRESHOLD (80) | 31 | #define DEF_FREQUENCY_UP_THRESHOLD (80) |
33 | #define DEF_SAMPLING_DOWN_FACTOR (1) | 32 | #define DEF_SAMPLING_DOWN_FACTOR (1) |
@@ -38,80 +37,14 @@ | |||
38 | #define MIN_FREQUENCY_UP_THRESHOLD (11) | 37 | #define MIN_FREQUENCY_UP_THRESHOLD (11) |
39 | #define MAX_FREQUENCY_UP_THRESHOLD (100) | 38 | #define MAX_FREQUENCY_UP_THRESHOLD (100) |
40 | 39 | ||
41 | /* | 40 | static struct dbs_data od_dbs_data; |
42 | * The polling frequency of this governor depends on the capability of | 41 | static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info); |
43 | * the processor. Default polling frequency is 1000 times the transition | ||
44 | * latency of the processor. The governor will work on any processor with | ||
45 | * transition latency <= 10mS, using appropriate sampling | ||
46 | * rate. | ||
47 | * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) | ||
48 | * this governor will not work. | ||
49 | * All times here are in uS. | ||
50 | */ | ||
51 | #define MIN_SAMPLING_RATE_RATIO (2) | ||
52 | |||
53 | static unsigned int min_sampling_rate; | ||
54 | |||
55 | #define LATENCY_MULTIPLIER (1000) | ||
56 | #define MIN_LATENCY_MULTIPLIER (100) | ||
57 | #define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) | ||
58 | |||
59 | static void do_dbs_timer(struct work_struct *work); | ||
60 | static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | ||
61 | unsigned int event); | ||
62 | 42 | ||
63 | #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND | 43 | #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND |
64 | static | 44 | static struct cpufreq_governor cpufreq_gov_ondemand; |
65 | #endif | 45 | #endif |
66 | struct cpufreq_governor cpufreq_gov_ondemand = { | ||
67 | .name = "ondemand", | ||
68 | .governor = cpufreq_governor_dbs, | ||
69 | .max_transition_latency = TRANSITION_LATENCY_LIMIT, | ||
70 | .owner = THIS_MODULE, | ||
71 | }; | ||
72 | 46 | ||
73 | /* Sampling types */ | 47 | static struct od_dbs_tuners od_tuners = { |
74 | enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; | ||
75 | |||
76 | struct cpu_dbs_info_s { | ||
77 | cputime64_t prev_cpu_idle; | ||
78 | cputime64_t prev_cpu_iowait; | ||
79 | cputime64_t prev_cpu_wall; | ||
80 | cputime64_t prev_cpu_nice; | ||
81 | struct cpufreq_policy *cur_policy; | ||
82 | struct delayed_work work; | ||
83 | struct cpufreq_frequency_table *freq_table; | ||
84 | unsigned int freq_lo; | ||
85 | unsigned int freq_lo_jiffies; | ||
86 | unsigned int freq_hi_jiffies; | ||
87 | unsigned int rate_mult; | ||
88 | int cpu; | ||
89 | unsigned int sample_type:1; | ||
90 | /* | ||
91 | * percpu mutex that serializes governor limit change with | ||
92 | * do_dbs_timer invocation. We do not want do_dbs_timer to run | ||
93 | * when user is changing the governor or limits. | ||
94 | */ | ||
95 | struct mutex timer_mutex; | ||
96 | }; | ||
97 | static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); | ||
98 | |||
99 | static unsigned int dbs_enable; /* number of CPUs using this policy */ | ||
100 | |||
101 | /* | ||
102 | * dbs_mutex protects dbs_enable in governor start/stop. | ||
103 | */ | ||
104 | static DEFINE_MUTEX(dbs_mutex); | ||
105 | |||
106 | static struct dbs_tuners { | ||
107 | unsigned int sampling_rate; | ||
108 | unsigned int up_threshold; | ||
109 | unsigned int down_differential; | ||
110 | unsigned int ignore_nice; | ||
111 | unsigned int sampling_down_factor; | ||
112 | unsigned int powersave_bias; | ||
113 | unsigned int io_is_busy; | ||
114 | } dbs_tuners_ins = { | ||
115 | .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, | 48 | .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, |
116 | .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, | 49 | .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, |
117 | .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, | 50 | .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, |
@@ -119,48 +52,35 @@ static struct dbs_tuners { | |||
119 | .powersave_bias = 0, | 52 | .powersave_bias = 0, |
120 | }; | 53 | }; |
121 | 54 | ||
122 | static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) | 55 | static void ondemand_powersave_bias_init_cpu(int cpu) |
123 | { | ||
124 | u64 idle_time; | ||
125 | u64 cur_wall_time; | ||
126 | u64 busy_time; | ||
127 | |||
128 | cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); | ||
129 | |||
130 | busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; | ||
131 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; | ||
132 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; | ||
133 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; | ||
134 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; | ||
135 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; | ||
136 | |||
137 | idle_time = cur_wall_time - busy_time; | ||
138 | if (wall) | ||
139 | *wall = jiffies_to_usecs(cur_wall_time); | ||
140 | |||
141 | return jiffies_to_usecs(idle_time); | ||
142 | } | ||
143 | |||
144 | static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) | ||
145 | { | 56 | { |
146 | u64 idle_time = get_cpu_idle_time_us(cpu, NULL); | 57 | struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); |
147 | |||
148 | if (idle_time == -1ULL) | ||
149 | return get_cpu_idle_time_jiffy(cpu, wall); | ||
150 | else | ||
151 | idle_time += get_cpu_iowait_time_us(cpu, wall); | ||
152 | 58 | ||
153 | return idle_time; | 59 | dbs_info->freq_table = cpufreq_frequency_get_table(cpu); |
60 | dbs_info->freq_lo = 0; | ||
154 | } | 61 | } |
155 | 62 | ||
156 | static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) | 63 | /* |
64 | * Not all CPUs want IO time to be accounted as busy; this depends on how | ||
65 | * efficient idling at a higher frequency/voltage is. | ||
66 | * Pavel Machek says this is not so for various generations of AMD and old | ||
67 | * Intel systems. | ||
68 | * Mike Chan (android.com) claims this is also not true for ARM. | ||
69 | * Because of this, whitelist specific known series of CPUs by default, and | ||
70 | * leave all others up to the user. | ||
71 | */ | ||
72 | static int should_io_be_busy(void) | ||
157 | { | 73 | { |
158 | u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); | 74 | #if defined(CONFIG_X86) |
159 | 75 | /* | |
160 | if (iowait_time == -1ULL) | 76 | * For Intel, Core 2 (model 15) and later have an efficient idle. |
161 | return 0; | 77 | */ |
162 | 78 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | |
163 | return iowait_time; | 79 | boot_cpu_data.x86 == 6 && |
80 | boot_cpu_data.x86_model >= 15) | ||
81 | return 1; | ||
82 | #endif | ||
83 | return 0; | ||
164 | } | 84 | } |
165 | 85 | ||
166 | /* | 86 | /* |
@@ -169,14 +89,13 @@ static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wal | |||
169 | * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. | 89 | * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. |
170 | */ | 90 | */ |
171 | static unsigned int powersave_bias_target(struct cpufreq_policy *policy, | 91 | static unsigned int powersave_bias_target(struct cpufreq_policy *policy, |
172 | unsigned int freq_next, | 92 | unsigned int freq_next, unsigned int relation) |
173 | unsigned int relation) | ||
174 | { | 93 | { |
175 | unsigned int freq_req, freq_reduc, freq_avg; | 94 | unsigned int freq_req, freq_reduc, freq_avg; |
176 | unsigned int freq_hi, freq_lo; | 95 | unsigned int freq_hi, freq_lo; |
177 | unsigned int index = 0; | 96 | unsigned int index = 0; |
178 | unsigned int jiffies_total, jiffies_hi, jiffies_lo; | 97 | unsigned int jiffies_total, jiffies_hi, jiffies_lo; |
179 | struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, | 98 | struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, |
180 | policy->cpu); | 99 | policy->cpu); |
181 | 100 | ||
182 | if (!dbs_info->freq_table) { | 101 | if (!dbs_info->freq_table) { |
@@ -188,7 +107,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, | |||
188 | cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, | 107 | cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, |
189 | relation, &index); | 108 | relation, &index); |
190 | freq_req = dbs_info->freq_table[index].frequency; | 109 | freq_req = dbs_info->freq_table[index].frequency; |
191 | freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000; | 110 | freq_reduc = freq_req * od_tuners.powersave_bias / 1000; |
192 | freq_avg = freq_req - freq_reduc; | 111 | freq_avg = freq_req - freq_reduc; |
193 | 112 | ||
194 | /* Find freq bounds for freq_avg in freq_table */ | 113 | /* Find freq bounds for freq_avg in freq_table */ |
@@ -207,7 +126,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, | |||
207 | dbs_info->freq_lo_jiffies = 0; | 126 | dbs_info->freq_lo_jiffies = 0; |
208 | return freq_lo; | 127 | return freq_lo; |
209 | } | 128 | } |
210 | jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); | 129 | jiffies_total = usecs_to_jiffies(od_tuners.sampling_rate); |
211 | jiffies_hi = (freq_avg - freq_lo) * jiffies_total; | 130 | jiffies_hi = (freq_avg - freq_lo) * jiffies_total; |
212 | jiffies_hi += ((freq_hi - freq_lo) / 2); | 131 | jiffies_hi += ((freq_hi - freq_lo) / 2); |
213 | jiffies_hi /= (freq_hi - freq_lo); | 132 | jiffies_hi /= (freq_hi - freq_lo); |
@@ -218,13 +137,6 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, | |||
218 | return freq_hi; | 137 | return freq_hi; |
219 | } | 138 | } |
220 | 139 | ||
221 | static void ondemand_powersave_bias_init_cpu(int cpu) | ||
222 | { | ||
223 | struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); | ||
224 | dbs_info->freq_table = cpufreq_frequency_get_table(cpu); | ||
225 | dbs_info->freq_lo = 0; | ||
226 | } | ||
227 | |||
228 | static void ondemand_powersave_bias_init(void) | 140 | static void ondemand_powersave_bias_init(void) |
229 | { | 141 | { |
230 | int i; | 142 | int i; |
@@ -233,83 +145,173 @@ static void ondemand_powersave_bias_init(void) | |||
233 | } | 145 | } |
234 | } | 146 | } |
235 | 147 | ||
236 | /************************** sysfs interface ************************/ | 148 | static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) |
149 | { | ||
150 | if (od_tuners.powersave_bias) | ||
151 | freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); | ||
152 | else if (p->cur == p->max) | ||
153 | return; | ||
237 | 154 | ||
238 | static ssize_t show_sampling_rate_min(struct kobject *kobj, | 155 | __cpufreq_driver_target(p, freq, od_tuners.powersave_bias ? |
239 | struct attribute *attr, char *buf) | 156 | CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); |
157 | } | ||
158 | |||
159 | /* | ||
160 | * Every sampling_rate, we check if the current idle time is less than 20% | ||
161 | * (default); if so, we try to increase the frequency. Every sampling_rate, we | ||
162 | * look for the lowest frequency which can sustain the load while keeping idle | ||
163 | * time over 30%. If such a frequency exists, we try to decrease to it. | ||
164 | * | ||
165 | * Any frequency increase takes it to the maximum frequency. Frequency reduction | ||
166 | * happens at minimum steps of 5% (default) of current frequency | ||
167 | */ | ||
168 | static void od_check_cpu(int cpu, unsigned int load_freq) | ||
240 | { | 169 | { |
241 | return sprintf(buf, "%u\n", min_sampling_rate); | 170 | struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); |
171 | struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; | ||
172 | |||
173 | dbs_info->freq_lo = 0; | ||
174 | |||
175 | /* Check for frequency increase */ | ||
176 | if (load_freq > od_tuners.up_threshold * policy->cur) { | ||
177 | /* If switching to max speed, apply sampling_down_factor */ | ||
178 | if (policy->cur < policy->max) | ||
179 | dbs_info->rate_mult = | ||
180 | od_tuners.sampling_down_factor; | ||
181 | dbs_freq_increase(policy, policy->max); | ||
182 | return; | ||
183 | } | ||
184 | |||
185 | /* Check for frequency decrease */ | ||
186 | /* if we cannot reduce the frequency anymore, break out early */ | ||
187 | if (policy->cur == policy->min) | ||
188 | return; | ||
189 | |||
190 | /* | ||
191 | * The optimal frequency is the lowest frequency that can support the | ||
192 | * current CPU usage without triggering the up policy. To be safe, we | ||
193 | * stay 10 points under the threshold. | ||
194 | */ | ||
195 | if (load_freq < (od_tuners.up_threshold - od_tuners.down_differential) * | ||
196 | policy->cur) { | ||
197 | unsigned int freq_next; | ||
198 | freq_next = load_freq / (od_tuners.up_threshold - | ||
199 | od_tuners.down_differential); | ||
200 | |||
201 | /* No longer fully busy, reset rate_mult */ | ||
202 | dbs_info->rate_mult = 1; | ||
203 | |||
204 | if (freq_next < policy->min) | ||
205 | freq_next = policy->min; | ||
206 | |||
207 | if (!od_tuners.powersave_bias) { | ||
208 | __cpufreq_driver_target(policy, freq_next, | ||
209 | CPUFREQ_RELATION_L); | ||
210 | } else { | ||
211 | int freq = powersave_bias_target(policy, freq_next, | ||
212 | CPUFREQ_RELATION_L); | ||
213 | __cpufreq_driver_target(policy, freq, | ||
214 | CPUFREQ_RELATION_L); | ||
215 | } | ||
216 | } | ||
242 | } | 217 | } |
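
To put numbers on the threshold checks in od_check_cpu(), a small standalone sketch with hypothetical values; note that the load is already frequency-weighted by dbs_check_cpu():

    #include <stdio.h>

    /* Worked example of the ondemand decision, hypothetical numbers. */
    int main(void)
    {
        unsigned int up_threshold = 80, down_differential = 10;
        unsigned int cur = 1000000;             /* kHz */
        unsigned int load = 60;                 /* percent */
        unsigned int load_freq = load * cur;    /* as computed in dbs_check_cpu() */

        if (load_freq > up_threshold * cur) {
            printf("increase to policy->max\n");
        } else if (load_freq < (up_threshold - down_differential) * cur) {
            /* 60000000 / 70 ~= 857142 kHz, later clamped to policy->min and
             * snapped to the frequency table with CPUFREQ_RELATION_L */
            printf("decrease to ~%u kHz\n",
                   load_freq / (up_threshold - down_differential));
        } else {
            printf("stay at current frequency\n");
        }
        return 0;
    }
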
243 | 218 | ||
244 | define_one_global_ro(sampling_rate_min); | 219 | static void od_dbs_timer(struct work_struct *work) |
220 | { | ||
221 | struct od_cpu_dbs_info_s *dbs_info = | ||
222 | container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work); | ||
223 | unsigned int cpu = dbs_info->cdbs.cpu; | ||
224 | int delay, sample_type = dbs_info->sample_type; | ||
225 | |||
226 | mutex_lock(&dbs_info->cdbs.timer_mutex); | ||
227 | |||
228 | /* Common NORMAL_SAMPLE setup */ | ||
229 | dbs_info->sample_type = OD_NORMAL_SAMPLE; | ||
230 | if (sample_type == OD_SUB_SAMPLE) { | ||
231 | delay = dbs_info->freq_lo_jiffies; | ||
232 | __cpufreq_driver_target(dbs_info->cdbs.cur_policy, | ||
233 | dbs_info->freq_lo, CPUFREQ_RELATION_H); | ||
234 | } else { | ||
235 | dbs_check_cpu(&od_dbs_data, cpu); | ||
236 | if (dbs_info->freq_lo) { | ||
237 | /* Setup timer for SUB_SAMPLE */ | ||
238 | dbs_info->sample_type = OD_SUB_SAMPLE; | ||
239 | delay = dbs_info->freq_hi_jiffies; | ||
240 | } else { | ||
241 | delay = delay_for_sampling_rate(od_tuners.sampling_rate | ||
242 | * dbs_info->rate_mult); | ||
243 | } | ||
244 | } | ||
245 | |||
246 | schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay); | ||
247 | mutex_unlock(&dbs_info->cdbs.timer_mutex); | ||
248 | } | ||
249 | |||
250 | /************************** sysfs interface ************************/ | ||
245 | 251 | ||
246 | /* cpufreq_ondemand Governor Tunables */ | 252 | static ssize_t show_sampling_rate_min(struct kobject *kobj, |
247 | #define show_one(file_name, object) \ | 253 | struct attribute *attr, char *buf) |
248 | static ssize_t show_##file_name \ | 254 | { |
249 | (struct kobject *kobj, struct attribute *attr, char *buf) \ | 255 | return sprintf(buf, "%u\n", od_dbs_data.min_sampling_rate); |
250 | { \ | ||
251 | return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ | ||
252 | } | 256 | } |
253 | show_one(sampling_rate, sampling_rate); | ||
254 | show_one(io_is_busy, io_is_busy); | ||
255 | show_one(up_threshold, up_threshold); | ||
256 | show_one(sampling_down_factor, sampling_down_factor); | ||
257 | show_one(ignore_nice_load, ignore_nice); | ||
258 | show_one(powersave_bias, powersave_bias); | ||
259 | 257 | ||
260 | /** | 258 | /** |
261 | * update_sampling_rate - update sampling rate effective immediately if needed. | 259 | * update_sampling_rate - update sampling rate effective immediately if needed. |
262 | * @new_rate: new sampling rate | 260 | * @new_rate: new sampling rate |
263 | * | 261 | * |
264 | * If new rate is smaller than the old, simply updaing | 262 | * If the new rate is smaller than the old, simply updating |
265 | * dbs_tuners_int.sampling_rate might not be appropriate. For example, | 263 | * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the |
266 | * if the original sampling_rate was 1 second and the requested new sampling | 264 | * original sampling_rate was 1 second and the requested new sampling rate is 10 |
267 | * rate is 10 ms because the user needs immediate reaction from ondemand | 265 | * ms because the user needs immediate reaction from ondemand governor, but not |
268 | * governor, but not sure if higher frequency will be required or not, | 266 | * sure if higher frequency will be required or not, then, the governor may |
269 | * then, the governor may change the sampling rate too late; up to 1 second | 267 | * change the sampling rate too late; up to 1 second later. Thus, if we are |
270 | * later. Thus, if we are reducing the sampling rate, we need to make the | 268 | * reducing the sampling rate, we need to make the new value effective |
271 | * new value effective immediately. | 269 | * immediately. |
272 | */ | 270 | */ |
273 | static void update_sampling_rate(unsigned int new_rate) | 271 | static void update_sampling_rate(unsigned int new_rate) |
274 | { | 272 | { |
275 | int cpu; | 273 | int cpu; |
276 | 274 | ||
277 | dbs_tuners_ins.sampling_rate = new_rate | 275 | od_tuners.sampling_rate = new_rate = max(new_rate, |
278 | = max(new_rate, min_sampling_rate); | 276 | od_dbs_data.min_sampling_rate); |
279 | 277 | ||
280 | for_each_online_cpu(cpu) { | 278 | for_each_online_cpu(cpu) { |
281 | struct cpufreq_policy *policy; | 279 | struct cpufreq_policy *policy; |
282 | struct cpu_dbs_info_s *dbs_info; | 280 | struct od_cpu_dbs_info_s *dbs_info; |
283 | unsigned long next_sampling, appointed_at; | 281 | unsigned long next_sampling, appointed_at; |
284 | 282 | ||
285 | policy = cpufreq_cpu_get(cpu); | 283 | policy = cpufreq_cpu_get(cpu); |
286 | if (!policy) | 284 | if (!policy) |
287 | continue; | 285 | continue; |
286 | if (policy->governor != &cpufreq_gov_ondemand) { | ||
287 | cpufreq_cpu_put(policy); | ||
288 | continue; | ||
289 | } | ||
288 | dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu); | 290 | dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu); |
289 | cpufreq_cpu_put(policy); | 291 | cpufreq_cpu_put(policy); |
290 | 292 | ||
291 | mutex_lock(&dbs_info->timer_mutex); | 293 | mutex_lock(&dbs_info->cdbs.timer_mutex); |
292 | 294 | ||
293 | if (!delayed_work_pending(&dbs_info->work)) { | 295 | if (!delayed_work_pending(&dbs_info->cdbs.work)) { |
294 | mutex_unlock(&dbs_info->timer_mutex); | 296 | mutex_unlock(&dbs_info->cdbs.timer_mutex); |
295 | continue; | 297 | continue; |
296 | } | 298 | } |
297 | 299 | ||
298 | next_sampling = jiffies + usecs_to_jiffies(new_rate); | 300 | next_sampling = jiffies + usecs_to_jiffies(new_rate); |
299 | appointed_at = dbs_info->work.timer.expires; | 301 | appointed_at = dbs_info->cdbs.work.timer.expires; |
300 | |||
301 | 302 | ||
302 | if (time_before(next_sampling, appointed_at)) { | 303 | if (time_before(next_sampling, appointed_at)) { |
303 | 304 | ||
304 | mutex_unlock(&dbs_info->timer_mutex); | 305 | mutex_unlock(&dbs_info->cdbs.timer_mutex); |
305 | cancel_delayed_work_sync(&dbs_info->work); | 306 | cancel_delayed_work_sync(&dbs_info->cdbs.work); |
306 | mutex_lock(&dbs_info->timer_mutex); | 307 | mutex_lock(&dbs_info->cdbs.timer_mutex); |
307 | 308 | ||
308 | schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, | 309 | schedule_delayed_work_on(dbs_info->cdbs.cpu, |
309 | usecs_to_jiffies(new_rate)); | 310 | &dbs_info->cdbs.work, |
311 | usecs_to_jiffies(new_rate)); | ||
310 | 312 | ||
311 | } | 313 | } |
312 | mutex_unlock(&dbs_info->timer_mutex); | 314 | mutex_unlock(&dbs_info->cdbs.timer_mutex); |
313 | } | 315 | } |
314 | } | 316 | } |
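
A worked timeline for the rescheduling decision above, with hypothetical numbers (HZ = 1000 is assumed here, so usecs_to_jiffies(x) is roughly x / 1000):

    #include <stdio.h>

    /* Sketch of the reschedule check in update_sampling_rate(). */
    int main(void)
    {
        unsigned long jiffies = 500000;                 /* "now" */
        unsigned long appointed_at = jiffies + 1000;    /* pending work, old 1 s rate */
        unsigned long new_rate_us = 10000;              /* new 10 ms rate */
        unsigned long next_sampling = jiffies + new_rate_us / 1000;

        if (next_sampling < appointed_at)               /* time_before() */
            printf("cancel pending work, fire in %lu jiffies\n",
                   new_rate_us / 1000);
        else
            printf("leave pending work alone\n");
        return 0;
    }
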
315 | 317 | ||
@@ -334,7 +336,7 @@ static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, | |||
334 | ret = sscanf(buf, "%u", &input); | 336 | ret = sscanf(buf, "%u", &input); |
335 | if (ret != 1) | 337 | if (ret != 1) |
336 | return -EINVAL; | 338 | return -EINVAL; |
337 | dbs_tuners_ins.io_is_busy = !!input; | 339 | od_tuners.io_is_busy = !!input; |
338 | return count; | 340 | return count; |
339 | } | 341 | } |
340 | 342 | ||
@@ -349,7 +351,7 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, | |||
349 | input < MIN_FREQUENCY_UP_THRESHOLD) { | 351 | input < MIN_FREQUENCY_UP_THRESHOLD) { |
350 | return -EINVAL; | 352 | return -EINVAL; |
351 | } | 353 | } |
352 | dbs_tuners_ins.up_threshold = input; | 354 | od_tuners.up_threshold = input; |
353 | return count; | 355 | return count; |
354 | } | 356 | } |
355 | 357 | ||
@@ -362,12 +364,12 @@ static ssize_t store_sampling_down_factor(struct kobject *a, | |||
362 | 364 | ||
363 | if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) | 365 | if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) |
364 | return -EINVAL; | 366 | return -EINVAL; |
365 | dbs_tuners_ins.sampling_down_factor = input; | 367 | od_tuners.sampling_down_factor = input; |
366 | 368 | ||
367 | /* Reset down sampling multiplier in case it was active */ | 369 | /* Reset down sampling multiplier in case it was active */ |
368 | for_each_online_cpu(j) { | 370 | for_each_online_cpu(j) { |
369 | struct cpu_dbs_info_s *dbs_info; | 371 | struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, |
370 | dbs_info = &per_cpu(od_cpu_dbs_info, j); | 372 | j); |
371 | dbs_info->rate_mult = 1; | 373 | dbs_info->rate_mult = 1; |
372 | } | 374 | } |
373 | return count; | 375 | return count; |
@@ -388,19 +390,20 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, | |||
388 | if (input > 1) | 390 | if (input > 1) |
389 | input = 1; | 391 | input = 1; |
390 | 392 | ||
391 | if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ | 393 | if (input == od_tuners.ignore_nice) { /* nothing to do */ |
392 | return count; | 394 | return count; |
393 | } | 395 | } |
394 | dbs_tuners_ins.ignore_nice = input; | 396 | od_tuners.ignore_nice = input; |
395 | 397 | ||
396 | /* we need to re-evaluate prev_cpu_idle */ | 398 | /* we need to re-evaluate prev_cpu_idle */ |
397 | for_each_online_cpu(j) { | 399 | for_each_online_cpu(j) { |
398 | struct cpu_dbs_info_s *dbs_info; | 400 | struct od_cpu_dbs_info_s *dbs_info; |
399 | dbs_info = &per_cpu(od_cpu_dbs_info, j); | 401 | dbs_info = &per_cpu(od_cpu_dbs_info, j); |
400 | dbs_info->prev_cpu_idle = get_cpu_idle_time(j, | 402 | dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, |
401 | &dbs_info->prev_cpu_wall); | 403 | &dbs_info->cdbs.prev_cpu_wall); |
402 | if (dbs_tuners_ins.ignore_nice) | 404 | if (od_tuners.ignore_nice) |
403 | dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | 405 | dbs_info->cdbs.prev_cpu_nice = |
406 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | ||
404 | 407 | ||
405 | } | 408 | } |
406 | return count; | 409 | return count; |
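The store_ignore_nice_load() hunk re-snapshots prev_cpu_idle and prev_cpu_wall whenever the tunable flips, because the governor's load figure is a delta between successive counter snapshots; changing what counts as "idle" without a fresh baseline would corrupt the very next sample. A runnable model of that delta computation, mirroring the load = 100 * (wall - idle) / wall formula in the removed code further down (numbers invented):

    #include <stdio.h>

    struct snap { unsigned long long wall, idle; };

    static unsigned int load_between(struct snap prev, struct snap cur)
    {
    	unsigned long long wall = cur.wall - prev.wall;
    	unsigned long long idle = cur.idle - prev.idle;

    	if (!wall || wall < idle)            /* same guard as the governor */
    		return 0;
    	return (unsigned int)(100 * (wall - idle) / wall);
    }

    int main(void)
    {
    	struct snap baseline = { .wall = 1000000, .idle = 600000 };
    	struct snap sample   = { .wall = 1100000, .idle = 640000 };

    	/* 100000 us elapsed, 40000 us idle -> 60% load */
    	printf("load = %u%%\n", load_between(baseline, sample));
    	return 0;
    }
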
@@ -419,17 +422,25 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, | |||
419 | if (input > 1000) | 422 | if (input > 1000) |
420 | input = 1000; | 423 | input = 1000; |
421 | 424 | ||
422 | dbs_tuners_ins.powersave_bias = input; | 425 | od_tuners.powersave_bias = input; |
423 | ondemand_powersave_bias_init(); | 426 | ondemand_powersave_bias_init(); |
424 | return count; | 427 | return count; |
425 | } | 428 | } |
426 | 429 | ||
430 | show_one(od, sampling_rate, sampling_rate); | ||
431 | show_one(od, io_is_busy, io_is_busy); | ||
432 | show_one(od, up_threshold, up_threshold); | ||
433 | show_one(od, sampling_down_factor, sampling_down_factor); | ||
434 | show_one(od, ignore_nice_load, ignore_nice); | ||
435 | show_one(od, powersave_bias, powersave_bias); | ||
436 | |||
427 | define_one_global_rw(sampling_rate); | 437 | define_one_global_rw(sampling_rate); |
428 | define_one_global_rw(io_is_busy); | 438 | define_one_global_rw(io_is_busy); |
429 | define_one_global_rw(up_threshold); | 439 | define_one_global_rw(up_threshold); |
430 | define_one_global_rw(sampling_down_factor); | 440 | define_one_global_rw(sampling_down_factor); |
431 | define_one_global_rw(ignore_nice_load); | 441 | define_one_global_rw(ignore_nice_load); |
432 | define_one_global_rw(powersave_bias); | 442 | define_one_global_rw(powersave_bias); |
443 | define_one_global_ro(sampling_rate_min); | ||
433 | 444 | ||
434 | static struct attribute *dbs_attributes[] = { | 445 | static struct attribute *dbs_attributes[] = { |
435 | &sampling_rate_min.attr, | 446 | &sampling_rate_min.attr, |
@@ -442,354 +453,71 @@ static struct attribute *dbs_attributes[] = { | |||
442 | NULL | 453 | NULL |
443 | }; | 454 | }; |
444 | 455 | ||
445 | static struct attribute_group dbs_attr_group = { | 456 | static struct attribute_group od_attr_group = { |
446 | .attrs = dbs_attributes, | 457 | .attrs = dbs_attributes, |
447 | .name = "ondemand", | 458 | .name = "ondemand", |
448 | }; | 459 | }; |
449 | 460 | ||
450 | /************************** sysfs end ************************/ | 461 | /************************** sysfs end ************************/ |
451 | 462 | ||
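The new show_one(od, ...) lines stamp out one sysfs show handler per tunable from a macro that now lives in the shared governor header. That header is not part of this hunk, so the following is only a userspace model of the pattern, not the kernel macro's real signature:

    #include <stdio.h>

    struct od_dbs_tuners { unsigned int sampling_rate, up_threshold; };
    static struct od_dbs_tuners od_tuners = { 10000, 95 };

    /* One macro generates a trivial "show" accessor per tunable. */
    #define show_one(gov, file_name, object)			\
    static int show_##file_name(char *buf, size_t len)		\
    {								\
    	return snprintf(buf, len, "%u\n", gov##_tuners.object);	\
    }

    show_one(od, sampling_rate, sampling_rate)
    show_one(od, up_threshold, up_threshold)

    int main(void)
    {
    	char buf[32];

    	show_sampling_rate(buf, sizeof(buf));
    	fputs(buf, stdout);
    	show_up_threshold(buf, sizeof(buf));
    	fputs(buf, stdout);
    	return 0;
    }
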
452 | static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) | 463 | define_get_cpu_dbs_routines(od_cpu_dbs_info); |
453 | { | ||
454 | if (dbs_tuners_ins.powersave_bias) | ||
455 | freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); | ||
456 | else if (p->cur == p->max) | ||
457 | return; | ||
458 | |||
459 | __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ? | ||
460 | CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); | ||
461 | } | ||
462 | |||
463 | static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | ||
464 | { | ||
465 | unsigned int max_load_freq; | ||
466 | |||
467 | struct cpufreq_policy *policy; | ||
468 | unsigned int j; | ||
469 | |||
470 | this_dbs_info->freq_lo = 0; | ||
471 | policy = this_dbs_info->cur_policy; | ||
472 | |||
473 | /* | ||
474 | * Every sampling_rate, we check, if current idle time is less | ||
475 | * than 20% (default), then we try to increase frequency | ||
476 | * Every sampling_rate, we look for the lowest | ||
477 | * frequency which can sustain the load while keeping idle time over | ||
478 | * 30%. If such a frequency exists, we try to decrease to this frequency. | ||
479 | * | ||
480 | * Any frequency increase takes it to the maximum frequency. | ||
481 | * Frequency reduction happens at minimum steps of | ||
482 | * 5% (default) of current frequency | ||
483 | */ | ||
484 | |||
485 | /* Get Absolute Load - in terms of freq */ | ||
486 | max_load_freq = 0; | ||
487 | |||
488 | for_each_cpu(j, policy->cpus) { | ||
489 | struct cpu_dbs_info_s *j_dbs_info; | ||
490 | cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; | ||
491 | unsigned int idle_time, wall_time, iowait_time; | ||
492 | unsigned int load, load_freq; | ||
493 | int freq_avg; | ||
494 | |||
495 | j_dbs_info = &per_cpu(od_cpu_dbs_info, j); | ||
496 | |||
497 | cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); | ||
498 | cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); | ||
499 | |||
500 | wall_time = (unsigned int) | ||
501 | (cur_wall_time - j_dbs_info->prev_cpu_wall); | ||
502 | j_dbs_info->prev_cpu_wall = cur_wall_time; | ||
503 | |||
504 | idle_time = (unsigned int) | ||
505 | (cur_idle_time - j_dbs_info->prev_cpu_idle); | ||
506 | j_dbs_info->prev_cpu_idle = cur_idle_time; | ||
507 | |||
508 | iowait_time = (unsigned int) | ||
509 | (cur_iowait_time - j_dbs_info->prev_cpu_iowait); | ||
510 | j_dbs_info->prev_cpu_iowait = cur_iowait_time; | ||
511 | |||
512 | if (dbs_tuners_ins.ignore_nice) { | ||
513 | u64 cur_nice; | ||
514 | unsigned long cur_nice_jiffies; | ||
515 | |||
516 | cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - | ||
517 | j_dbs_info->prev_cpu_nice; | ||
518 | /* | ||
519 | * Assumption: nice time between sampling periods will | ||
520 | * be less than 2^32 jiffies for 32 bit sys | ||
521 | */ | ||
522 | cur_nice_jiffies = (unsigned long) | ||
523 | cputime64_to_jiffies64(cur_nice); | ||
524 | |||
525 | j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | ||
526 | idle_time += jiffies_to_usecs(cur_nice_jiffies); | ||
527 | } | ||
528 | |||
529 | /* | ||
530 | * For the purpose of ondemand, waiting for disk IO is an | ||
531 | * indication that you're performance critical, and not that | ||
532 | * the system is actually idle. So subtract the iowait time | ||
533 | * from the cpu idle time. | ||
534 | */ | ||
535 | |||
536 | if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) | ||
537 | idle_time -= iowait_time; | ||
538 | |||
539 | if (unlikely(!wall_time || wall_time < idle_time)) | ||
540 | continue; | ||
541 | |||
542 | load = 100 * (wall_time - idle_time) / wall_time; | ||
543 | |||
544 | freq_avg = __cpufreq_driver_getavg(policy, j); | ||
545 | if (freq_avg <= 0) | ||
546 | freq_avg = policy->cur; | ||
547 | |||
548 | load_freq = load * freq_avg; | ||
549 | if (load_freq > max_load_freq) | ||
550 | max_load_freq = load_freq; | ||
551 | } | ||
552 | 464 | ||
553 | /* Check for frequency increase */ | 465 | static struct od_ops od_ops = { |
554 | if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) { | 466 | .io_busy = should_io_be_busy, |
555 | /* If switching to max speed, apply sampling_down_factor */ | 467 | .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu, |
556 | if (policy->cur < policy->max) | 468 | .powersave_bias_target = powersave_bias_target, |
557 | this_dbs_info->rate_mult = | 469 | .freq_increase = dbs_freq_increase, |
558 | dbs_tuners_ins.sampling_down_factor; | 470 | }; |
559 | dbs_freq_increase(policy, policy->max); | ||
560 | return; | ||
561 | } | ||
562 | |||
563 | /* Check for frequency decrease */ | ||
564 | /* if we cannot reduce the frequency anymore, break out early */ | ||
565 | if (policy->cur == policy->min) | ||
566 | return; | ||
567 | |||
568 | /* | ||
569 | * The optimal frequency is the frequency that is the lowest that | ||
570 | * can support the current CPU usage without triggering the up | ||
571 | * policy. To be safe, we focus 10 points under the threshold. | ||
572 | */ | ||
573 | if (max_load_freq < | ||
574 | (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * | ||
575 | policy->cur) { | ||
576 | unsigned int freq_next; | ||
577 | freq_next = max_load_freq / | ||
578 | (dbs_tuners_ins.up_threshold - | ||
579 | dbs_tuners_ins.down_differential); | ||
580 | |||
581 | /* No longer fully busy, reset rate_mult */ | ||
582 | this_dbs_info->rate_mult = 1; | ||
583 | |||
584 | if (freq_next < policy->min) | ||
585 | freq_next = policy->min; | ||
586 | |||
587 | if (!dbs_tuners_ins.powersave_bias) { | ||
588 | __cpufreq_driver_target(policy, freq_next, | ||
589 | CPUFREQ_RELATION_L); | ||
590 | } else { | ||
591 | int freq = powersave_bias_target(policy, freq_next, | ||
592 | CPUFREQ_RELATION_L); | ||
593 | __cpufreq_driver_target(policy, freq, | ||
594 | CPUFREQ_RELATION_L); | ||
595 | } | ||
596 | } | ||
597 | } | ||
598 | |||
599 | static void do_dbs_timer(struct work_struct *work) | ||
600 | { | ||
601 | struct cpu_dbs_info_s *dbs_info = | ||
602 | container_of(work, struct cpu_dbs_info_s, work.work); | ||
603 | unsigned int cpu = dbs_info->cpu; | ||
604 | int sample_type = dbs_info->sample_type; | ||
605 | |||
606 | int delay; | ||
607 | |||
608 | mutex_lock(&dbs_info->timer_mutex); | ||
609 | |||
610 | /* Common NORMAL_SAMPLE setup */ | ||
611 | dbs_info->sample_type = DBS_NORMAL_SAMPLE; | ||
612 | if (!dbs_tuners_ins.powersave_bias || | ||
613 | sample_type == DBS_NORMAL_SAMPLE) { | ||
614 | dbs_check_cpu(dbs_info); | ||
615 | if (dbs_info->freq_lo) { | ||
616 | /* Setup timer for SUB_SAMPLE */ | ||
617 | dbs_info->sample_type = DBS_SUB_SAMPLE; | ||
618 | delay = dbs_info->freq_hi_jiffies; | ||
619 | } else { | ||
620 | /* We want all CPUs to do sampling nearly on | ||
621 | * same jiffy | ||
622 | */ | ||
623 | delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate | ||
624 | * dbs_info->rate_mult); | ||
625 | |||
626 | if (num_online_cpus() > 1) | ||
627 | delay -= jiffies % delay; | ||
628 | } | ||
629 | } else { | ||
630 | __cpufreq_driver_target(dbs_info->cur_policy, | ||
631 | dbs_info->freq_lo, CPUFREQ_RELATION_H); | ||
632 | delay = dbs_info->freq_lo_jiffies; | ||
633 | } | ||
634 | schedule_delayed_work_on(cpu, &dbs_info->work, delay); | ||
635 | mutex_unlock(&dbs_info->timer_mutex); | ||
636 | } | ||
637 | |||
638 | static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) | ||
639 | { | ||
640 | /* We want all CPUs to do sampling nearly on same jiffy */ | ||
641 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); | ||
642 | |||
643 | if (num_online_cpus() > 1) | ||
644 | delay -= jiffies % delay; | ||
645 | 471 | ||
646 | dbs_info->sample_type = DBS_NORMAL_SAMPLE; | 472 | static struct dbs_data od_dbs_data = { |
647 | INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer); | 473 | .governor = GOV_ONDEMAND, |
648 | schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); | 474 | .attr_group = &od_attr_group, |
649 | } | 475 | .tuners = &od_tuners, |
476 | .get_cpu_cdbs = get_cpu_cdbs, | ||
477 | .get_cpu_dbs_info_s = get_cpu_dbs_info_s, | ||
478 | .gov_dbs_timer = od_dbs_timer, | ||
479 | .gov_check_cpu = od_check_cpu, | ||
480 | .gov_ops = &od_ops, | ||
481 | }; | ||
650 | 482 | ||
651 | static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) | 483 | static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy, |
484 | unsigned int event) | ||
652 | { | 485 | { |
653 | cancel_delayed_work_sync(&dbs_info->work); | 486 | return cpufreq_governor_dbs(&od_dbs_data, policy, event); |
654 | } | 487 | } |
655 | 488 | ||
656 | /* | 489 | #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND |
657 | * Not all CPUs want IO time to be accounted as busy; this depends on how | 490 | static |
658 | * efficient idling at a higher frequency/voltage is. | ||
659 | * Pavel Machek says this is not so for various generations of AMD and old | ||
660 | * Intel systems. | ||
661 | * Mike Chan (androidlcom) claims this is also not true for ARM. | ||
662 | * Because of this, whitelist specific known (series) of CPUs by default, and | ||
663 | * leave all others up to the user. | ||
664 | */ | ||
665 | static int should_io_be_busy(void) | ||
666 | { | ||
667 | #if defined(CONFIG_X86) | ||
668 | /* | ||
669 | * For Intel, Core 2 (model 15) and later have an efficient idle. | ||
670 | */ | ||
671 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | ||
672 | boot_cpu_data.x86 == 6 && | ||
673 | boot_cpu_data.x86_model >= 15) | ||
674 | return 1; | ||
675 | #endif | 491 | #endif |
676 | return 0; | 492 | struct cpufreq_governor cpufreq_gov_ondemand = { |
677 | } | 493 | .name = "ondemand", |
678 | 494 | .governor = od_cpufreq_governor_dbs, | |
679 | static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | 495 | .max_transition_latency = TRANSITION_LATENCY_LIMIT, |
680 | unsigned int event) | 496 | .owner = THIS_MODULE, |
681 | { | 497 | }; |
682 | unsigned int cpu = policy->cpu; | ||
683 | struct cpu_dbs_info_s *this_dbs_info; | ||
684 | unsigned int j; | ||
685 | int rc; | ||
686 | |||
687 | this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); | ||
688 | |||
689 | switch (event) { | ||
690 | case CPUFREQ_GOV_START: | ||
691 | if ((!cpu_online(cpu)) || (!policy->cur)) | ||
692 | return -EINVAL; | ||
693 | |||
694 | mutex_lock(&dbs_mutex); | ||
695 | |||
696 | dbs_enable++; | ||
697 | for_each_cpu(j, policy->cpus) { | ||
698 | struct cpu_dbs_info_s *j_dbs_info; | ||
699 | j_dbs_info = &per_cpu(od_cpu_dbs_info, j); | ||
700 | j_dbs_info->cur_policy = policy; | ||
701 | |||
702 | j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, | ||
703 | &j_dbs_info->prev_cpu_wall); | ||
704 | if (dbs_tuners_ins.ignore_nice) | ||
705 | j_dbs_info->prev_cpu_nice = | ||
706 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | ||
707 | } | ||
708 | this_dbs_info->cpu = cpu; | ||
709 | this_dbs_info->rate_mult = 1; | ||
710 | ondemand_powersave_bias_init_cpu(cpu); | ||
711 | /* | ||
712 | * Start the timerschedule work, when this governor | ||
713 | * is used for first time | ||
714 | */ | ||
715 | if (dbs_enable == 1) { | ||
716 | unsigned int latency; | ||
717 | |||
718 | rc = sysfs_create_group(cpufreq_global_kobject, | ||
719 | &dbs_attr_group); | ||
720 | if (rc) { | ||
721 | mutex_unlock(&dbs_mutex); | ||
722 | return rc; | ||
723 | } | ||
724 | |||
725 | /* policy latency is in nS. Convert it to uS first */ | ||
726 | latency = policy->cpuinfo.transition_latency / 1000; | ||
727 | if (latency == 0) | ||
728 | latency = 1; | ||
729 | /* Bring kernel and HW constraints together */ | ||
730 | min_sampling_rate = max(min_sampling_rate, | ||
731 | MIN_LATENCY_MULTIPLIER * latency); | ||
732 | dbs_tuners_ins.sampling_rate = | ||
733 | max(min_sampling_rate, | ||
734 | latency * LATENCY_MULTIPLIER); | ||
735 | dbs_tuners_ins.io_is_busy = should_io_be_busy(); | ||
736 | } | ||
737 | mutex_unlock(&dbs_mutex); | ||
738 | |||
739 | mutex_init(&this_dbs_info->timer_mutex); | ||
740 | dbs_timer_init(this_dbs_info); | ||
741 | break; | ||
742 | |||
743 | case CPUFREQ_GOV_STOP: | ||
744 | dbs_timer_exit(this_dbs_info); | ||
745 | |||
746 | mutex_lock(&dbs_mutex); | ||
747 | mutex_destroy(&this_dbs_info->timer_mutex); | ||
748 | dbs_enable--; | ||
749 | mutex_unlock(&dbs_mutex); | ||
750 | if (!dbs_enable) | ||
751 | sysfs_remove_group(cpufreq_global_kobject, | ||
752 | &dbs_attr_group); | ||
753 | |||
754 | break; | ||
755 | |||
756 | case CPUFREQ_GOV_LIMITS: | ||
757 | mutex_lock(&this_dbs_info->timer_mutex); | ||
758 | if (policy->max < this_dbs_info->cur_policy->cur) | ||
759 | __cpufreq_driver_target(this_dbs_info->cur_policy, | ||
760 | policy->max, CPUFREQ_RELATION_H); | ||
761 | else if (policy->min > this_dbs_info->cur_policy->cur) | ||
762 | __cpufreq_driver_target(this_dbs_info->cur_policy, | ||
763 | policy->min, CPUFREQ_RELATION_L); | ||
764 | dbs_check_cpu(this_dbs_info); | ||
765 | mutex_unlock(&this_dbs_info->timer_mutex); | ||
766 | break; | ||
767 | } | ||
768 | return 0; | ||
769 | } | ||
770 | 498 | ||
771 | static int __init cpufreq_gov_dbs_init(void) | 499 | static int __init cpufreq_gov_dbs_init(void) |
772 | { | 500 | { |
773 | u64 idle_time; | 501 | u64 idle_time; |
774 | int cpu = get_cpu(); | 502 | int cpu = get_cpu(); |
775 | 503 | ||
504 | mutex_init(&od_dbs_data.mutex); | ||
776 | idle_time = get_cpu_idle_time_us(cpu, NULL); | 505 | idle_time = get_cpu_idle_time_us(cpu, NULL); |
777 | put_cpu(); | 506 | put_cpu(); |
778 | if (idle_time != -1ULL) { | 507 | if (idle_time != -1ULL) { |
779 | /* Idle micro accounting is supported. Use finer thresholds */ | 508 | /* Idle micro accounting is supported. Use finer thresholds */ |
780 | dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; | 509 | od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; |
781 | dbs_tuners_ins.down_differential = | 510 | od_tuners.down_differential = MICRO_FREQUENCY_DOWN_DIFFERENTIAL; |
782 | MICRO_FREQUENCY_DOWN_DIFFERENTIAL; | ||
783 | /* | 511 | /* |
784 | * In nohz/micro accounting case we set the minimum frequency | 512 | * In nohz/micro accounting case we set the minimum frequency |
785 | * not depending on HZ, but fixed (very low). The deferred | 513 | * not depending on HZ, but fixed (very low). The deferred |
786 | * timer might skip some samples if idle/sleeping as needed. | 514 | * timer might skip some samples if idle/sleeping as needed. |
787 | */ | 515 | */ |
788 | min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; | 516 | od_dbs_data.min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; |
789 | } else { | 517 | } else { |
790 | /* For correct statistics, we need 10 ticks for each measure */ | 518 | /* For correct statistics, we need 10 ticks for each measure */ |
791 | min_sampling_rate = | 519 | od_dbs_data.min_sampling_rate = MIN_SAMPLING_RATE_RATIO * |
792 | MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); | 520 | jiffies_to_usecs(10); |
793 | } | 521 | } |
794 | 522 | ||
795 | return cpufreq_register_governor(&cpufreq_gov_ondemand); | 523 | return cpufreq_register_governor(&cpufreq_gov_ondemand); |
@@ -800,7 +528,6 @@ static void __exit cpufreq_gov_dbs_exit(void) | |||
800 | cpufreq_unregister_governor(&cpufreq_gov_ondemand); | 528 | cpufreq_unregister_governor(&cpufreq_gov_ondemand); |
801 | } | 529 | } |
802 | 530 | ||
803 | |||
804 | MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>"); | 531 | MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>"); |
805 | MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>"); | 532 | MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>"); |
806 | MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for " | 533 | MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for " |
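After this rewrite the ondemand file no longer handles GOV_START/GOV_STOP/GOV_LIMITS itself; it fills a struct dbs_data with callbacks and forwards every event to the common cpufreq_governor_dbs() dispatcher. A runnable userspace model of that ops-table pattern (gov_data, common_governor_event and the printed strings are invented for illustration):

    #include <stdio.h>

    enum gov_event { GOV_START, GOV_STOP, GOV_LIMITS };

    struct gov_data {
    	const char *name;
    	void (*check_cpu)(void);   /* per-governor sampling decision */
    	void (*timer)(void);       /* per-governor periodic work     */
    };

    static void od_check_cpu(void) { puts("ondemand: evaluate load"); }
    static void od_timer(void)     { puts("ondemand: arm next sample"); }

    static struct gov_data od_data = {
    	.name = "ondemand", .check_cpu = od_check_cpu, .timer = od_timer,
    };

    /* One shared dispatcher; every governor forwards its events here. */
    static int common_governor_event(struct gov_data *gd, enum gov_event ev)
    {
    	switch (ev) {
    	case GOV_START:  printf("%s: start\n", gd->name); gd->timer(); break;
    	case GOV_STOP:   printf("%s: stop\n", gd->name);               break;
    	case GOV_LIMITS: gd->check_cpu();                              break;
    	}
    	return 0;
    }

    int main(void)
    {
    	common_governor_event(&od_data, GOV_START);
    	common_governor_event(&od_data, GOV_LIMITS);
    	common_governor_event(&od_data, GOV_STOP);
    	return 0;
    }
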
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c index f13a8a9af6a1..ceee06849b91 100644 --- a/drivers/cpufreq/cpufreq_performance.c +++ b/drivers/cpufreq/cpufreq_performance.c | |||
@@ -10,6 +10,8 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
14 | |||
13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
14 | #include <linux/module.h> | 16 | #include <linux/module.h> |
15 | #include <linux/cpufreq.h> | 17 | #include <linux/cpufreq.h> |
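This and the similar hunks below only add a pr_fmt() definition, which makes every pr_*() call in the file carry a module-name prefix. A userspace approximation of the effect (KBUILD_MODNAME is normally supplied by the kernel build system and is hard-coded here for the demonstration):

    #include <stdio.h>

    #define KBUILD_MODNAME "cpufreq_performance"
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
    #define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

    int main(void)
    {
    	pr_err("setting to %u kHz\n", 1200000u);
    	/* prints: cpufreq_performance: setting to 1200000 kHz */
    	return 0;
    }
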
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c index 4c2eb512f2bc..2d948a171155 100644 --- a/drivers/cpufreq/cpufreq_powersave.c +++ b/drivers/cpufreq/cpufreq_powersave.c | |||
@@ -10,6 +10,8 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
14 | |||
13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
14 | #include <linux/module.h> | 16 | #include <linux/module.h> |
15 | #include <linux/cpufreq.h> | 17 | #include <linux/cpufreq.h> |
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 399831690fed..e40e50809644 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c | |||
@@ -37,7 +37,7 @@ struct cpufreq_stats { | |||
37 | unsigned int max_state; | 37 | unsigned int max_state; |
38 | unsigned int state_num; | 38 | unsigned int state_num; |
39 | unsigned int last_index; | 39 | unsigned int last_index; |
40 | cputime64_t *time_in_state; | 40 | u64 *time_in_state; |
41 | unsigned int *freq_table; | 41 | unsigned int *freq_table; |
42 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | 42 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS |
43 | unsigned int *trans_table; | 43 | unsigned int *trans_table; |
@@ -223,7 +223,7 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy, | |||
223 | count++; | 223 | count++; |
224 | } | 224 | } |
225 | 225 | ||
226 | alloc_size = count * sizeof(int) + count * sizeof(cputime64_t); | 226 | alloc_size = count * sizeof(int) + count * sizeof(u64); |
227 | 227 | ||
228 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | 228 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS |
229 | alloc_size += count * count * sizeof(int); | 229 | alloc_size += count * count * sizeof(int); |
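The cpufreq_stats change switches time_in_state from cputime64_t to plain u64; the surrounding code still carves both arrays out of one allocation sized count * sizeof(int) + count * sizeof(u64). A runnable userspace sketch of that single-block layout (sizes and values invented; keeping the 64-bit array first keeps it naturally aligned):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
    	unsigned int count = 4;
    	size_t alloc_size = count * sizeof(unsigned int) + count * sizeof(uint64_t);

    	void *block = calloc(1, alloc_size);
    	if (!block)
    		return 1;

    	uint64_t *time_in_state = block;                      /* first part  */
    	unsigned int *freq_table =
    		(unsigned int *)(time_in_state + count);      /* second part */

    	freq_table[0] = 800000;
    	time_in_state[0] = 123456;
    	printf("%u kHz: %llu\n", freq_table[0],
    	       (unsigned long long)time_in_state[0]);

    	free(block);                                          /* frees both arrays */
    	return 0;
    }
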
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index bedac1aa9be3..c8c3d293cc57 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c | |||
@@ -11,6 +11,8 @@ | |||
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
15 | |||
14 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
15 | #include <linux/module.h> | 17 | #include <linux/module.h> |
16 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index af2d81e10f71..7012ea8bf1e7 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c | |||
@@ -31,13 +31,13 @@ static unsigned int locking_frequency; | |||
31 | static bool frequency_locked; | 31 | static bool frequency_locked; |
32 | static DEFINE_MUTEX(cpufreq_lock); | 32 | static DEFINE_MUTEX(cpufreq_lock); |
33 | 33 | ||
34 | int exynos_verify_speed(struct cpufreq_policy *policy) | 34 | static int exynos_verify_speed(struct cpufreq_policy *policy) |
35 | { | 35 | { |
36 | return cpufreq_frequency_table_verify(policy, | 36 | return cpufreq_frequency_table_verify(policy, |
37 | exynos_info->freq_table); | 37 | exynos_info->freq_table); |
38 | } | 38 | } |
39 | 39 | ||
40 | unsigned int exynos_getspeed(unsigned int cpu) | 40 | static unsigned int exynos_getspeed(unsigned int cpu) |
41 | { | 41 | { |
42 | return clk_get_rate(exynos_info->cpu_clk) / 1000; | 42 | return clk_get_rate(exynos_info->cpu_clk) / 1000; |
43 | } | 43 | } |
@@ -100,7 +100,8 @@ static int exynos_target(struct cpufreq_policy *policy, | |||
100 | } | 100 | } |
101 | arm_volt = volt_table[index]; | 101 | arm_volt = volt_table[index]; |
102 | 102 | ||
103 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 103 | for_each_cpu(freqs.cpu, policy->cpus) |
104 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
104 | 105 | ||
105 | /* When the new frequency is higher than current frequency */ | 106 | /* When the new frequency is higher than current frequency */ |
106 | if ((freqs.new > freqs.old) && !safe_arm_volt) { | 107 | if ((freqs.new > freqs.old) && !safe_arm_volt) { |
@@ -115,7 +116,8 @@ static int exynos_target(struct cpufreq_policy *policy, | |||
115 | if (freqs.new != freqs.old) | 116 | if (freqs.new != freqs.old) |
116 | exynos_info->set_freq(old_index, index); | 117 | exynos_info->set_freq(old_index, index); |
117 | 118 | ||
118 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 119 | for_each_cpu(freqs.cpu, policy->cpus) |
120 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
119 | 121 | ||
120 | /* When the new frequency is lower than current frequency */ | 122 | /* When the new frequency is lower than current frequency */ |
121 | if ((freqs.new < freqs.old) || | 123 | if ((freqs.new < freqs.old) || |
@@ -235,6 +237,7 @@ static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
235 | cpumask_copy(policy->related_cpus, cpu_possible_mask); | 237 | cpumask_copy(policy->related_cpus, cpu_possible_mask); |
236 | cpumask_copy(policy->cpus, cpu_online_mask); | 238 | cpumask_copy(policy->cpus, cpu_online_mask); |
237 | } else { | 239 | } else { |
240 | policy->shared_type = CPUFREQ_SHARED_TYPE_ANY; | ||
238 | cpumask_setall(policy->cpus); | 241 | cpumask_setall(policy->cpus); |
239 | } | 242 | } |
240 | 243 | ||
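The exynos hunks switch from one transition notification per change to one per CPU in policy->cpus, since all CPUs share the same clock. A userspace model of that loop (the 4-CPU mask and the frequencies are invented):

    #include <stdio.h>

    struct freqs { unsigned int cpu, old, new_; };

    static void notify(const struct freqs *f, const char *phase)
    {
    	printf("cpu%u: %s %u -> %u kHz\n", f->cpu, phase, f->old, f->new_);
    }

    int main(void)
    {
    	unsigned int policy_cpus = 0xf;          /* CPUs 0-3 share the clock */
    	struct freqs f = { .old = 800000, .new_ = 1200000 };

    	for (f.cpu = 0; f.cpu < 32; f.cpu++)     /* "for_each_cpu" over the mask */
    		if (policy_cpus & (1u << f.cpu))
    			notify(&f, "PRECHANGE");

    	/* ... the actual frequency switch would happen here ... */

    	for (f.cpu = 0; f.cpu < 32; f.cpu++)
    		if (policy_cpus & (1u << f.cpu))
    			notify(&f, "POSTCHANGE");

    	return 0;
    }
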
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index 90431cb92804..49cda256efb2 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c | |||
@@ -9,6 +9,8 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
12 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
13 | #include <linux/module.h> | 15 | #include <linux/module.h> |
14 | #include <linux/init.h> | 16 | #include <linux/init.h> |
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 53ddbc760af7..f1fa500ac105 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c | |||
@@ -930,7 +930,7 @@ static int __cpuinit longhaul_cpu_init(struct cpufreq_policy *policy) | |||
930 | return 0; | 930 | return 0; |
931 | } | 931 | } |
932 | 932 | ||
933 | static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy) | 933 | static int longhaul_cpu_exit(struct cpufreq_policy *policy) |
934 | { | 934 | { |
935 | cpufreq_frequency_table_put_attr(policy->cpu); | 935 | cpufreq_frequency_table_put_attr(policy->cpu); |
936 | return 0; | 936 | return 0; |
@@ -946,7 +946,7 @@ static struct cpufreq_driver longhaul_driver = { | |||
946 | .target = longhaul_target, | 946 | .target = longhaul_target, |
947 | .get = longhaul_get, | 947 | .get = longhaul_get, |
948 | .init = longhaul_cpu_init, | 948 | .init = longhaul_cpu_init, |
949 | .exit = __devexit_p(longhaul_cpu_exit), | 949 | .exit = longhaul_cpu_exit, |
950 | .name = "longhaul", | 950 | .name = "longhaul", |
951 | .owner = THIS_MODULE, | 951 | .owner = THIS_MODULE, |
952 | .attr = longhaul_attr, | 952 | .attr = longhaul_attr, |
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index e3ebb4fa2c3e..056faf6af1a9 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c | |||
@@ -1186,7 +1186,7 @@ err_out: | |||
1186 | return -ENODEV; | 1186 | return -ENODEV; |
1187 | } | 1187 | } |
1188 | 1188 | ||
1189 | static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol) | 1189 | static int powernowk8_cpu_exit(struct cpufreq_policy *pol) |
1190 | { | 1190 | { |
1191 | struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); | 1191 | struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); |
1192 | 1192 | ||
@@ -1242,7 +1242,7 @@ static struct cpufreq_driver cpufreq_amd64_driver = { | |||
1242 | .target = powernowk8_target, | 1242 | .target = powernowk8_target, |
1243 | .bios_limit = acpi_processor_get_bios_limit, | 1243 | .bios_limit = acpi_processor_get_bios_limit, |
1244 | .init = powernowk8_cpu_init, | 1244 | .init = powernowk8_cpu_init, |
1245 | .exit = __devexit_p(powernowk8_cpu_exit), | 1245 | .exit = powernowk8_cpu_exit, |
1246 | .get = powernowk8_get, | 1246 | .get = powernowk8_get, |
1247 | .name = "powernow-k8", | 1247 | .name = "powernow-k8", |
1248 | .owner = THIS_MODULE, | 1248 | .owner = THIS_MODULE, |
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c new file mode 100644 index 000000000000..4575cfe41755 --- /dev/null +++ b/drivers/cpufreq/spear-cpufreq.c | |||
@@ -0,0 +1,291 @@ | |||
1 | /* | ||
2 | * drivers/cpufreq/spear-cpufreq.c | ||
3 | * | ||
4 | * CPU Frequency Scaling for SPEAr platform | ||
5 | * | ||
6 | * Copyright (C) 2012 ST Microelectronics | ||
7 | * Deepak Sikri <deepak.sikri@st.com> | ||
8 | * | ||
9 | * This file is licensed under the terms of the GNU General Public | ||
10 | * License version 2. This program is licensed "as is" without any | ||
11 | * warranty of any kind, whether express or implied. | ||
12 | */ | ||
13 | |||
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
15 | |||
16 | #include <linux/clk.h> | ||
17 | #include <linux/cpufreq.h> | ||
18 | #include <linux/err.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/of.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/types.h> | ||
24 | |||
25 | /* SPEAr CPUFreq driver data structure */ | ||
26 | static struct { | ||
27 | struct clk *clk; | ||
28 | unsigned int transition_latency; | ||
29 | struct cpufreq_frequency_table *freq_tbl; | ||
30 | u32 cnt; | ||
31 | } spear_cpufreq; | ||
32 | |||
33 | int spear_cpufreq_verify(struct cpufreq_policy *policy) | ||
34 | { | ||
35 | return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl); | ||
36 | } | ||
37 | |||
38 | static unsigned int spear_cpufreq_get(unsigned int cpu) | ||
39 | { | ||
40 | return clk_get_rate(spear_cpufreq.clk) / 1000; | ||
41 | } | ||
42 | |||
43 | static struct clk *spear1340_cpu_get_possible_parent(unsigned long newfreq) | ||
44 | { | ||
45 | struct clk *sys_pclk; | ||
46 | int pclk; | ||
47 | /* | ||
48 | * In SPEAr1340, cpu clk's parent sys clk can take input from | ||
49 | * the following sources | ||
50 | */ | ||
51 | const char *sys_clk_src[] = { | ||
52 | "sys_syn_clk", | ||
53 | "pll1_clk", | ||
54 | "pll2_clk", | ||
55 | "pll3_clk", | ||
56 | }; | ||
57 | |||
58 | /* | ||
59 | * As sys clk can have multiple sources, each with its own range | ||
60 | * limitation, we choose possible sources accordingly | ||
61 | */ | ||
62 | if (newfreq <= 300000000) | ||
63 | pclk = 0; /* src is sys_syn_clk */ | ||
64 | else if (newfreq > 300000000 && newfreq <= 500000000) | ||
65 | pclk = 3; /* src is pll3_clk */ | ||
66 | else if (newfreq == 600000000) | ||
67 | pclk = 1; /* src is pll1_clk */ | ||
68 | else | ||
69 | return ERR_PTR(-EINVAL); | ||
70 | |||
71 | /* Get parent to sys clock */ | ||
72 | sys_pclk = clk_get(NULL, sys_clk_src[pclk]); | ||
73 | if (IS_ERR(sys_pclk)) | ||
74 | pr_err("Failed to get %s clock\n", sys_clk_src[pclk]); | ||
75 | |||
76 | return sys_pclk; | ||
77 | } | ||
78 | |||
79 | /* | ||
80 | * In SPEAr1340, we cannot use newfreq directly because we need to actually | ||
81 | * access a source clock (clk) which might not be ancestor of cpu at present. | ||
82 | * Hence in SPEAr1340 we would operate on source clock directly before switching | ||
83 | * cpu clock to it. | ||
84 | */ | ||
85 | static int spear1340_set_cpu_rate(struct clk *sys_pclk, unsigned long newfreq) | ||
86 | { | ||
87 | struct clk *sys_clk; | ||
88 | int ret = 0; | ||
89 | |||
90 | sys_clk = clk_get_parent(spear_cpufreq.clk); | ||
91 | if (IS_ERR(sys_clk)) { | ||
92 | pr_err("failed to get cpu's parent (sys) clock\n"); | ||
93 | return PTR_ERR(sys_clk); | ||
94 | } | ||
95 | |||
96 | /* Set the rate of the source clock before changing the parent */ | ||
97 | ret = clk_set_rate(sys_pclk, newfreq); | ||
98 | if (ret) { | ||
99 | pr_err("Failed to set sys clk rate to %lu\n", newfreq); | ||
100 | return ret; | ||
101 | } | ||
102 | |||
103 | ret = clk_set_parent(sys_clk, sys_pclk); | ||
104 | if (ret) { | ||
105 | pr_err("Failed to set sys clk parent\n"); | ||
106 | return ret; | ||
107 | } | ||
108 | |||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | static int spear_cpufreq_target(struct cpufreq_policy *policy, | ||
113 | unsigned int target_freq, unsigned int relation) | ||
114 | { | ||
115 | struct cpufreq_freqs freqs; | ||
116 | unsigned long newfreq; | ||
117 | struct clk *srcclk; | ||
118 | int index, ret, mult = 1; | ||
119 | |||
120 | if (cpufreq_frequency_table_target(policy, spear_cpufreq.freq_tbl, | ||
121 | target_freq, relation, &index)) | ||
122 | return -EINVAL; | ||
123 | |||
124 | freqs.cpu = policy->cpu; | ||
125 | freqs.old = spear_cpufreq_get(0); | ||
126 | |||
127 | newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000; | ||
128 | if (of_machine_is_compatible("st,spear1340")) { | ||
129 | /* | ||
130 | * SPEAr1340 is special in the sense that due to the possibility | ||
131 | * of multiple clock sources for cpu clk's parent we can have | ||
132 | * different clock source for different frequency of cpu clk. | ||
133 | * Hence we need to choose one from amongst these possible clock | ||
134 | * sources. | ||
135 | */ | ||
136 | srcclk = spear1340_cpu_get_possible_parent(newfreq); | ||
137 | if (IS_ERR(srcclk)) { | ||
138 | pr_err("Failed to get src clk\n"); | ||
139 | return PTR_ERR(srcclk); | ||
140 | } | ||
141 | |||
142 | /* SPEAr1340: src clk is always 2 * intended cpu clk */ | ||
143 | mult = 2; | ||
144 | } else { | ||
145 | /* | ||
146 | * src clock to be altered is ancestor of cpu clock. Hence we | ||
147 | * can directly work on cpu clk | ||
148 | */ | ||
149 | srcclk = spear_cpufreq.clk; | ||
150 | } | ||
151 | |||
152 | newfreq = clk_round_rate(srcclk, newfreq * mult); | ||
153 | if (newfreq < 0) { | ||
154 | pr_err("clk_round_rate failed for cpu src clock\n"); | ||
155 | return newfreq; | ||
156 | } | ||
157 | |||
158 | freqs.new = newfreq / 1000; | ||
159 | freqs.new /= mult; | ||
160 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
161 | |||
162 | if (mult == 2) | ||
163 | ret = spear1340_set_cpu_rate(srcclk, newfreq); | ||
164 | else | ||
165 | ret = clk_set_rate(spear_cpufreq.clk, newfreq); | ||
166 | |||
167 | /* Get current rate after clk_set_rate, in case of failure */ | ||
168 | if (ret) { | ||
169 | pr_err("CPU Freq: cpu clk_set_rate failed: %d\n", ret); | ||
170 | freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000; | ||
171 | } | ||
172 | |||
173 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
174 | return ret; | ||
175 | } | ||
176 | |||
177 | static int spear_cpufreq_init(struct cpufreq_policy *policy) | ||
178 | { | ||
179 | int ret; | ||
180 | |||
181 | ret = cpufreq_frequency_table_cpuinfo(policy, spear_cpufreq.freq_tbl); | ||
182 | if (ret) { | ||
183 | pr_err("cpufreq_frequency_table_cpuinfo() failed"); | ||
184 | return ret; | ||
185 | } | ||
186 | |||
187 | cpufreq_frequency_table_get_attr(spear_cpufreq.freq_tbl, policy->cpu); | ||
188 | policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency; | ||
189 | policy->cur = spear_cpufreq_get(0); | ||
190 | |||
191 | cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); | ||
192 | cpumask_copy(policy->related_cpus, policy->cpus); | ||
193 | |||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | static int spear_cpufreq_exit(struct cpufreq_policy *policy) | ||
198 | { | ||
199 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
200 | return 0; | ||
201 | } | ||
202 | |||
203 | static struct freq_attr *spear_cpufreq_attr[] = { | ||
204 | &cpufreq_freq_attr_scaling_available_freqs, | ||
205 | NULL, | ||
206 | }; | ||
207 | |||
208 | static struct cpufreq_driver spear_cpufreq_driver = { | ||
209 | .name = "cpufreq-spear", | ||
210 | .flags = CPUFREQ_STICKY, | ||
211 | .verify = spear_cpufreq_verify, | ||
212 | .target = spear_cpufreq_target, | ||
213 | .get = spear_cpufreq_get, | ||
214 | .init = spear_cpufreq_init, | ||
215 | .exit = spear_cpufreq_exit, | ||
216 | .attr = spear_cpufreq_attr, | ||
217 | }; | ||
218 | |||
219 | static int spear_cpufreq_driver_init(void) | ||
220 | { | ||
221 | struct device_node *np; | ||
222 | const struct property *prop; | ||
223 | struct cpufreq_frequency_table *freq_tbl; | ||
224 | const __be32 *val; | ||
225 | int cnt, i, ret; | ||
226 | |||
227 | np = of_find_node_by_path("/cpus/cpu@0"); | ||
228 | if (!np) { | ||
229 | pr_err("No cpu node found"); | ||
230 | return -ENODEV; | ||
231 | } | ||
232 | |||
233 | if (of_property_read_u32(np, "clock-latency", | ||
234 | &spear_cpufreq.transition_latency)) | ||
235 | spear_cpufreq.transition_latency = CPUFREQ_ETERNAL; | ||
236 | |||
237 | prop = of_find_property(np, "cpufreq_tbl", NULL); | ||
238 | if (!prop || !prop->value) { | ||
239 | pr_err("Invalid cpufreq_tbl"); | ||
240 | ret = -ENODEV; | ||
241 | goto out_put_node; | ||
242 | } | ||
243 | |||
244 | cnt = prop->length / sizeof(u32); | ||
245 | val = prop->value; | ||
246 | |||
247 | freq_tbl = kmalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL); | ||
248 | if (!freq_tbl) { | ||
249 | ret = -ENOMEM; | ||
250 | goto out_put_node; | ||
251 | } | ||
252 | |||
253 | for (i = 0; i < cnt; i++) { | ||
254 | freq_tbl[i].index = i; | ||
255 | freq_tbl[i].frequency = be32_to_cpup(val++); | ||
256 | } | ||
257 | |||
258 | freq_tbl[i].index = i; | ||
259 | freq_tbl[i].frequency = CPUFREQ_TABLE_END; | ||
260 | |||
261 | spear_cpufreq.freq_tbl = freq_tbl; | ||
262 | |||
263 | of_node_put(np); | ||
264 | |||
265 | spear_cpufreq.clk = clk_get(NULL, "cpu_clk"); | ||
266 | if (IS_ERR(spear_cpufreq.clk)) { | ||
267 | pr_err("Unable to get CPU clock\n"); | ||
268 | ret = PTR_ERR(spear_cpufreq.clk); | ||
269 | goto out_put_mem; | ||
270 | } | ||
271 | |||
272 | ret = cpufreq_register_driver(&spear_cpufreq_driver); | ||
273 | if (!ret) | ||
274 | return 0; | ||
275 | |||
276 | pr_err("failed register driver: %d\n", ret); | ||
277 | clk_put(spear_cpufreq.clk); | ||
278 | |||
279 | out_put_mem: | ||
280 | kfree(freq_tbl); | ||
281 | return ret; | ||
282 | |||
283 | out_put_node: | ||
284 | of_node_put(np); | ||
285 | return ret; | ||
286 | } | ||
287 | late_initcall(spear_cpufreq_driver_init); | ||
288 | |||
289 | MODULE_AUTHOR("Deepak Sikri <deepak.sikri@st.com>"); | ||
290 | MODULE_DESCRIPTION("SPEAr CPUFreq driver"); | ||
291 | MODULE_LICENSE("GPL"); | ||
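spear1340_cpu_get_possible_parent() in the new file above encodes a fixed mapping from target frequency to parent clock source. The same mapping restated as a standalone, runnable function so the ranges are easy to check (the real driver of course goes through the clk framework):

    #include <stdio.h>

    static const char *const sys_clk_src[] = {
    	"sys_syn_clk", "pll1_clk", "pll2_clk", "pll3_clk",
    };

    static int spear1340_parent_index(unsigned long newfreq)
    {
    	if (newfreq <= 300000000)
    		return 0;                        /* sys_syn_clk      */
    	if (newfreq > 300000000 && newfreq <= 500000000)
    		return 3;                        /* pll3_clk         */
    	if (newfreq == 600000000)
    		return 1;                        /* pll1_clk         */
    	return -1;                               /* no usable parent */
    }

    int main(void)
    {
    	unsigned long f = 600000000;
    	int idx = spear1340_parent_index(f);

    	if (idx < 0)
    		printf("%lu Hz: no parent\n", f);
    	else
    		printf("%lu Hz: parent %s\n", f, sys_clk_src[idx]);
    	return 0;
    }
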
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig index a76b689e553b..234ae651b38f 100644 --- a/drivers/cpuidle/Kconfig +++ b/drivers/cpuidle/Kconfig | |||
@@ -9,6 +9,15 @@ config CPU_IDLE | |||
9 | 9 | ||
10 | If you're using an ACPI-enabled platform, you should say Y here. | 10 | If you're using an ACPI-enabled platform, you should say Y here. |
11 | 11 | ||
12 | config CPU_IDLE_MULTIPLE_DRIVERS | ||
13 | bool "Support multiple cpuidle drivers" | ||
14 | depends on CPU_IDLE | ||
15 | default n | ||
16 | help | ||
17 | Allows the cpuidle framework to use different drivers for each CPU. | ||
18 | This is useful if you have a system with different CPU latencies and | ||
19 | states. If unsure say N. | ||
20 | |||
12 | config CPU_IDLE_GOV_LADDER | 21 | config CPU_IDLE_GOV_LADDER |
13 | bool | 22 | bool |
14 | depends on CPU_IDLE | 23 | depends on CPU_IDLE |
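The new CPU_IDLE_MULTIPLE_DRIVERS option lets each CPU carry its own cpuidle driver, which the driver.c changes below implement with a per-CPU pointer table. A runnable userspace model of that per-CPU registry on an asymmetric 4-CPU system (idle_driver, register_cpu_driver and the latency numbers are invented names and values):

    #include <stdio.h>

    struct idle_driver { const char *name; int deepest_exit_us; };

    #define NR_CPUS 4
    static struct idle_driver *cpu_driver[NR_CPUS];

    static int register_cpu_driver(struct idle_driver *drv, int cpu)
    {
    	if (cpu_driver[cpu])
    		return -1;        /* the kernel version returns -EBUSY */
    	cpu_driver[cpu] = drv;
    	return 0;
    }

    int main(void)
    {
    	struct idle_driver big    = { "big-idle",    1500 };
    	struct idle_driver little = { "little-idle",  400 };
    	int cpu;

    	for (cpu = 0; cpu < NR_CPUS; cpu++)
    		register_cpu_driver(cpu < 2 ? &big : &little, cpu);

    	for (cpu = 0; cpu < NR_CPUS; cpu++)
    		printf("cpu%d -> %s (deepest exit %d us)\n",
    		       cpu, cpu_driver[cpu]->name,
    		       cpu_driver[cpu]->deepest_exit_us);
    	return 0;
    }
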
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 7f15b8514a18..8df53dd8dbe1 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
@@ -68,7 +68,7 @@ static cpuidle_enter_t cpuidle_enter_ops; | |||
68 | int cpuidle_play_dead(void) | 68 | int cpuidle_play_dead(void) |
69 | { | 69 | { |
70 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | 70 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); |
71 | struct cpuidle_driver *drv = cpuidle_get_driver(); | 71 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); |
72 | int i, dead_state = -1; | 72 | int i, dead_state = -1; |
73 | int power_usage = -1; | 73 | int power_usage = -1; |
74 | 74 | ||
@@ -109,8 +109,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, | |||
109 | /* This can be moved to within driver enter routine | 109 | /* This can be moved to within driver enter routine |
110 | * but that results in multiple copies of same code. | 110 | * but that results in multiple copies of same code. |
111 | */ | 111 | */ |
112 | dev->states_usage[entered_state].time += | 112 | dev->states_usage[entered_state].time += dev->last_residency; |
113 | (unsigned long long)dev->last_residency; | ||
114 | dev->states_usage[entered_state].usage++; | 113 | dev->states_usage[entered_state].usage++; |
115 | } else { | 114 | } else { |
116 | dev->last_residency = 0; | 115 | dev->last_residency = 0; |
@@ -128,7 +127,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, | |||
128 | int cpuidle_idle_call(void) | 127 | int cpuidle_idle_call(void) |
129 | { | 128 | { |
130 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | 129 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); |
131 | struct cpuidle_driver *drv = cpuidle_get_driver(); | 130 | struct cpuidle_driver *drv; |
132 | int next_state, entered_state; | 131 | int next_state, entered_state; |
133 | 132 | ||
134 | if (off) | 133 | if (off) |
@@ -141,9 +140,15 @@ int cpuidle_idle_call(void) | |||
141 | if (!dev || !dev->enabled) | 140 | if (!dev || !dev->enabled) |
142 | return -EBUSY; | 141 | return -EBUSY; |
143 | 142 | ||
143 | drv = cpuidle_get_cpu_driver(dev); | ||
144 | |||
144 | /* ask the governor for the next state */ | 145 | /* ask the governor for the next state */ |
145 | next_state = cpuidle_curr_governor->select(drv, dev); | 146 | next_state = cpuidle_curr_governor->select(drv, dev); |
146 | if (need_resched()) { | 147 | if (need_resched()) { |
148 | dev->last_residency = 0; | ||
149 | /* give the governor an opportunity to reflect on the outcome */ | ||
150 | if (cpuidle_curr_governor->reflect) | ||
151 | cpuidle_curr_governor->reflect(dev, next_state); | ||
147 | local_irq_enable(); | 152 | local_irq_enable(); |
148 | return 0; | 153 | return 0; |
149 | } | 154 | } |
@@ -308,15 +313,19 @@ static void poll_idle_init(struct cpuidle_driver *drv) {} | |||
308 | int cpuidle_enable_device(struct cpuidle_device *dev) | 313 | int cpuidle_enable_device(struct cpuidle_device *dev) |
309 | { | 314 | { |
310 | int ret, i; | 315 | int ret, i; |
311 | struct cpuidle_driver *drv = cpuidle_get_driver(); | 316 | struct cpuidle_driver *drv; |
312 | 317 | ||
313 | if (!dev) | 318 | if (!dev) |
314 | return -EINVAL; | 319 | return -EINVAL; |
315 | 320 | ||
316 | if (dev->enabled) | 321 | if (dev->enabled) |
317 | return 0; | 322 | return 0; |
323 | |||
324 | drv = cpuidle_get_cpu_driver(dev); | ||
325 | |||
318 | if (!drv || !cpuidle_curr_governor) | 326 | if (!drv || !cpuidle_curr_governor) |
319 | return -EIO; | 327 | return -EIO; |
328 | |||
320 | if (!dev->state_count) | 329 | if (!dev->state_count) |
321 | dev->state_count = drv->state_count; | 330 | dev->state_count = drv->state_count; |
322 | 331 | ||
@@ -331,7 +340,8 @@ int cpuidle_enable_device(struct cpuidle_device *dev) | |||
331 | 340 | ||
332 | poll_idle_init(drv); | 341 | poll_idle_init(drv); |
333 | 342 | ||
334 | if ((ret = cpuidle_add_state_sysfs(dev))) | 343 | ret = cpuidle_add_device_sysfs(dev); |
344 | if (ret) | ||
335 | return ret; | 345 | return ret; |
336 | 346 | ||
337 | if (cpuidle_curr_governor->enable && | 347 | if (cpuidle_curr_governor->enable && |
@@ -352,7 +362,7 @@ int cpuidle_enable_device(struct cpuidle_device *dev) | |||
352 | return 0; | 362 | return 0; |
353 | 363 | ||
354 | fail_sysfs: | 364 | fail_sysfs: |
355 | cpuidle_remove_state_sysfs(dev); | 365 | cpuidle_remove_device_sysfs(dev); |
356 | 366 | ||
357 | return ret; | 367 | return ret; |
358 | } | 368 | } |
@@ -368,17 +378,20 @@ EXPORT_SYMBOL_GPL(cpuidle_enable_device); | |||
368 | */ | 378 | */ |
369 | void cpuidle_disable_device(struct cpuidle_device *dev) | 379 | void cpuidle_disable_device(struct cpuidle_device *dev) |
370 | { | 380 | { |
381 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | ||
382 | |||
371 | if (!dev || !dev->enabled) | 383 | if (!dev || !dev->enabled) |
372 | return; | 384 | return; |
373 | if (!cpuidle_get_driver() || !cpuidle_curr_governor) | 385 | |
386 | if (!drv || !cpuidle_curr_governor) | ||
374 | return; | 387 | return; |
375 | 388 | ||
376 | dev->enabled = 0; | 389 | dev->enabled = 0; |
377 | 390 | ||
378 | if (cpuidle_curr_governor->disable) | 391 | if (cpuidle_curr_governor->disable) |
379 | cpuidle_curr_governor->disable(cpuidle_get_driver(), dev); | 392 | cpuidle_curr_governor->disable(drv, dev); |
380 | 393 | ||
381 | cpuidle_remove_state_sysfs(dev); | 394 | cpuidle_remove_device_sysfs(dev); |
382 | enabled_devices--; | 395 | enabled_devices--; |
383 | } | 396 | } |
384 | 397 | ||
@@ -394,17 +407,14 @@ EXPORT_SYMBOL_GPL(cpuidle_disable_device); | |||
394 | static int __cpuidle_register_device(struct cpuidle_device *dev) | 407 | static int __cpuidle_register_device(struct cpuidle_device *dev) |
395 | { | 408 | { |
396 | int ret; | 409 | int ret; |
397 | struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); | 410 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); |
398 | struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); | ||
399 | 411 | ||
400 | if (!try_module_get(cpuidle_driver->owner)) | 412 | if (!try_module_get(drv->owner)) |
401 | return -EINVAL; | 413 | return -EINVAL; |
402 | 414 | ||
403 | init_completion(&dev->kobj_unregister); | ||
404 | |||
405 | per_cpu(cpuidle_devices, dev->cpu) = dev; | 415 | per_cpu(cpuidle_devices, dev->cpu) = dev; |
406 | list_add(&dev->device_list, &cpuidle_detected_devices); | 416 | list_add(&dev->device_list, &cpuidle_detected_devices); |
407 | ret = cpuidle_add_sysfs(cpu_dev); | 417 | ret = cpuidle_add_sysfs(dev); |
408 | if (ret) | 418 | if (ret) |
409 | goto err_sysfs; | 419 | goto err_sysfs; |
410 | 420 | ||
@@ -416,12 +426,11 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) | |||
416 | return 0; | 426 | return 0; |
417 | 427 | ||
418 | err_coupled: | 428 | err_coupled: |
419 | cpuidle_remove_sysfs(cpu_dev); | 429 | cpuidle_remove_sysfs(dev); |
420 | wait_for_completion(&dev->kobj_unregister); | ||
421 | err_sysfs: | 430 | err_sysfs: |
422 | list_del(&dev->device_list); | 431 | list_del(&dev->device_list); |
423 | per_cpu(cpuidle_devices, dev->cpu) = NULL; | 432 | per_cpu(cpuidle_devices, dev->cpu) = NULL; |
424 | module_put(cpuidle_driver->owner); | 433 | module_put(drv->owner); |
425 | return ret; | 434 | return ret; |
426 | } | 435 | } |
427 | 436 | ||
@@ -460,8 +469,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device); | |||
460 | */ | 469 | */ |
461 | void cpuidle_unregister_device(struct cpuidle_device *dev) | 470 | void cpuidle_unregister_device(struct cpuidle_device *dev) |
462 | { | 471 | { |
463 | struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); | 472 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); |
464 | struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); | ||
465 | 473 | ||
466 | if (dev->registered == 0) | 474 | if (dev->registered == 0) |
467 | return; | 475 | return; |
@@ -470,16 +478,15 @@ void cpuidle_unregister_device(struct cpuidle_device *dev) | |||
470 | 478 | ||
471 | cpuidle_disable_device(dev); | 479 | cpuidle_disable_device(dev); |
472 | 480 | ||
473 | cpuidle_remove_sysfs(cpu_dev); | 481 | cpuidle_remove_sysfs(dev); |
474 | list_del(&dev->device_list); | 482 | list_del(&dev->device_list); |
475 | wait_for_completion(&dev->kobj_unregister); | ||
476 | per_cpu(cpuidle_devices, dev->cpu) = NULL; | 483 | per_cpu(cpuidle_devices, dev->cpu) = NULL; |
477 | 484 | ||
478 | cpuidle_coupled_unregister_device(dev); | 485 | cpuidle_coupled_unregister_device(dev); |
479 | 486 | ||
480 | cpuidle_resume_and_unlock(); | 487 | cpuidle_resume_and_unlock(); |
481 | 488 | ||
482 | module_put(cpuidle_driver->owner); | 489 | module_put(drv->owner); |
483 | } | 490 | } |
484 | 491 | ||
485 | EXPORT_SYMBOL_GPL(cpuidle_unregister_device); | 492 | EXPORT_SYMBOL_GPL(cpuidle_unregister_device); |
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h index 76e7f696ad8c..ee97e9672ecf 100644 --- a/drivers/cpuidle/cpuidle.h +++ b/drivers/cpuidle/cpuidle.h | |||
@@ -5,8 +5,6 @@ | |||
5 | #ifndef __DRIVER_CPUIDLE_H | 5 | #ifndef __DRIVER_CPUIDLE_H |
6 | #define __DRIVER_CPUIDLE_H | 6 | #define __DRIVER_CPUIDLE_H |
7 | 7 | ||
8 | #include <linux/device.h> | ||
9 | |||
10 | /* For internal use only */ | 8 | /* For internal use only */ |
11 | extern struct cpuidle_governor *cpuidle_curr_governor; | 9 | extern struct cpuidle_governor *cpuidle_curr_governor; |
12 | extern struct list_head cpuidle_governors; | 10 | extern struct list_head cpuidle_governors; |
@@ -25,12 +23,15 @@ extern void cpuidle_uninstall_idle_handler(void); | |||
25 | extern int cpuidle_switch_governor(struct cpuidle_governor *gov); | 23 | extern int cpuidle_switch_governor(struct cpuidle_governor *gov); |
26 | 24 | ||
27 | /* sysfs */ | 25 | /* sysfs */ |
26 | |||
27 | struct device; | ||
28 | |||
28 | extern int cpuidle_add_interface(struct device *dev); | 29 | extern int cpuidle_add_interface(struct device *dev); |
29 | extern void cpuidle_remove_interface(struct device *dev); | 30 | extern void cpuidle_remove_interface(struct device *dev); |
30 | extern int cpuidle_add_state_sysfs(struct cpuidle_device *device); | 31 | extern int cpuidle_add_device_sysfs(struct cpuidle_device *device); |
31 | extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device); | 32 | extern void cpuidle_remove_device_sysfs(struct cpuidle_device *device); |
32 | extern int cpuidle_add_sysfs(struct device *dev); | 33 | extern int cpuidle_add_sysfs(struct cpuidle_device *dev); |
33 | extern void cpuidle_remove_sysfs(struct device *dev); | 34 | extern void cpuidle_remove_sysfs(struct cpuidle_device *dev); |
34 | 35 | ||
35 | #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED | 36 | #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED |
36 | bool cpuidle_state_is_coupled(struct cpuidle_device *dev, | 37 | bool cpuidle_state_is_coupled(struct cpuidle_device *dev, |
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index 87db3877fead..3af841fb397a 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c | |||
@@ -14,9 +14,10 @@ | |||
14 | 14 | ||
15 | #include "cpuidle.h" | 15 | #include "cpuidle.h" |
16 | 16 | ||
17 | static struct cpuidle_driver *cpuidle_curr_driver; | ||
18 | DEFINE_SPINLOCK(cpuidle_driver_lock); | 17 | DEFINE_SPINLOCK(cpuidle_driver_lock); |
19 | int cpuidle_driver_refcount; | 18 | |
19 | static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu); | ||
20 | static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu); | ||
20 | 21 | ||
21 | static void set_power_states(struct cpuidle_driver *drv) | 22 | static void set_power_states(struct cpuidle_driver *drv) |
22 | { | 23 | { |
@@ -40,11 +41,15 @@ static void set_power_states(struct cpuidle_driver *drv) | |||
40 | drv->states[i].power_usage = -1 - i; | 41 | drv->states[i].power_usage = -1 - i; |
41 | } | 42 | } |
42 | 43 | ||
43 | /** | 44 | static void __cpuidle_driver_init(struct cpuidle_driver *drv) |
44 | * cpuidle_register_driver - registers a driver | 45 | { |
45 | * @drv: the driver | 46 | drv->refcnt = 0; |
46 | */ | 47 | |
47 | int cpuidle_register_driver(struct cpuidle_driver *drv) | 48 | if (!drv->power_specified) |
49 | set_power_states(drv); | ||
50 | } | ||
51 | |||
52 | static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu) | ||
48 | { | 53 | { |
49 | if (!drv || !drv->state_count) | 54 | if (!drv || !drv->state_count) |
50 | return -EINVAL; | 55 | return -EINVAL; |
@@ -52,31 +57,145 @@ int cpuidle_register_driver(struct cpuidle_driver *drv) | |||
52 | if (cpuidle_disabled()) | 57 | if (cpuidle_disabled()) |
53 | return -ENODEV; | 58 | return -ENODEV; |
54 | 59 | ||
55 | spin_lock(&cpuidle_driver_lock); | 60 | if (__cpuidle_get_cpu_driver(cpu)) |
56 | if (cpuidle_curr_driver) { | ||
57 | spin_unlock(&cpuidle_driver_lock); | ||
58 | return -EBUSY; | 61 | return -EBUSY; |
62 | |||
63 | __cpuidle_driver_init(drv); | ||
64 | |||
65 | __cpuidle_set_cpu_driver(drv, cpu); | ||
66 | |||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | static void __cpuidle_unregister_driver(struct cpuidle_driver *drv, int cpu) | ||
71 | { | ||
72 | if (drv != __cpuidle_get_cpu_driver(cpu)) | ||
73 | return; | ||
74 | |||
75 | if (!WARN_ON(drv->refcnt > 0)) | ||
76 | __cpuidle_set_cpu_driver(NULL, cpu); | ||
77 | } | ||
78 | |||
79 | #ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS | ||
80 | |||
81 | static DEFINE_PER_CPU(struct cpuidle_driver *, cpuidle_drivers); | ||
82 | |||
83 | static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu) | ||
84 | { | ||
85 | per_cpu(cpuidle_drivers, cpu) = drv; | ||
86 | } | ||
87 | |||
88 | static struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu) | ||
89 | { | ||
90 | return per_cpu(cpuidle_drivers, cpu); | ||
91 | } | ||
92 | |||
93 | static void __cpuidle_unregister_all_cpu_driver(struct cpuidle_driver *drv) | ||
94 | { | ||
95 | int cpu; | ||
96 | for_each_present_cpu(cpu) | ||
97 | __cpuidle_unregister_driver(drv, cpu); | ||
98 | } | ||
99 | |||
100 | static int __cpuidle_register_all_cpu_driver(struct cpuidle_driver *drv) | ||
101 | { | ||
102 | int ret = 0; | ||
103 | int i, cpu; | ||
104 | |||
105 | for_each_present_cpu(cpu) { | ||
106 | ret = __cpuidle_register_driver(drv, cpu); | ||
107 | if (ret) | ||
108 | break; | ||
59 | } | 109 | } |
60 | 110 | ||
61 | if (!drv->power_specified) | 111 | if (ret) |
62 | set_power_states(drv); | 112 | for_each_present_cpu(i) { |
113 | if (i == cpu) | ||
114 | break; | ||
115 | __cpuidle_unregister_driver(drv, i); | ||
116 | } | ||
63 | 117 | ||
64 | cpuidle_curr_driver = drv; | ||
65 | 118 | ||
119 | return ret; | ||
120 | } | ||
121 | |||
122 | int cpuidle_register_cpu_driver(struct cpuidle_driver *drv, int cpu) | ||
123 | { | ||
124 | int ret; | ||
125 | |||
126 | spin_lock(&cpuidle_driver_lock); | ||
127 | ret = __cpuidle_register_driver(drv, cpu); | ||
66 | spin_unlock(&cpuidle_driver_lock); | 128 | spin_unlock(&cpuidle_driver_lock); |
67 | 129 | ||
68 | return 0; | 130 | return ret; |
131 | } | ||
132 | |||
133 | void cpuidle_unregister_cpu_driver(struct cpuidle_driver *drv, int cpu) | ||
134 | { | ||
135 | spin_lock(&cpuidle_driver_lock); | ||
136 | __cpuidle_unregister_driver(drv, cpu); | ||
137 | spin_unlock(&cpuidle_driver_lock); | ||
138 | } | ||
139 | |||
140 | /** | ||
141 | * cpuidle_register_driver - registers a driver | ||
142 | * @drv: the driver | ||
143 | */ | ||
144 | int cpuidle_register_driver(struct cpuidle_driver *drv) | ||
145 | { | ||
146 | int ret; | ||
147 | |||
148 | spin_lock(&cpuidle_driver_lock); | ||
149 | ret = __cpuidle_register_all_cpu_driver(drv); | ||
150 | spin_unlock(&cpuidle_driver_lock); | ||
151 | |||
152 | return ret; | ||
69 | } | 153 | } |
70 | EXPORT_SYMBOL_GPL(cpuidle_register_driver); | 154 | EXPORT_SYMBOL_GPL(cpuidle_register_driver); |
71 | 155 | ||
72 | /** | 156 | /** |
73 | * cpuidle_get_driver - return the current driver | 157 | * cpuidle_unregister_driver - unregisters a driver |
158 | * @drv: the driver | ||
74 | */ | 159 | */ |
75 | struct cpuidle_driver *cpuidle_get_driver(void) | 160 | void cpuidle_unregister_driver(struct cpuidle_driver *drv) |
161 | { | ||
162 | spin_lock(&cpuidle_driver_lock); | ||
163 | __cpuidle_unregister_all_cpu_driver(drv); | ||
164 | spin_unlock(&cpuidle_driver_lock); | ||
165 | } | ||
166 | EXPORT_SYMBOL_GPL(cpuidle_unregister_driver); | ||
167 | |||
168 | #else | ||
169 | |||
170 | static struct cpuidle_driver *cpuidle_curr_driver; | ||
171 | |||
172 | static inline void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu) | ||
173 | { | ||
174 | cpuidle_curr_driver = drv; | ||
175 | } | ||
176 | |||
177 | static inline struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu) | ||
76 | { | 178 | { |
77 | return cpuidle_curr_driver; | 179 | return cpuidle_curr_driver; |
78 | } | 180 | } |
79 | EXPORT_SYMBOL_GPL(cpuidle_get_driver); | 181 | |
182 | /** | ||
183 | * cpuidle_register_driver - registers a driver | ||
184 | * @drv: the driver | ||
185 | */ | ||
186 | int cpuidle_register_driver(struct cpuidle_driver *drv) | ||
187 | { | ||
188 | int ret, cpu; | ||
189 | |||
190 | cpu = get_cpu(); | ||
191 | spin_lock(&cpuidle_driver_lock); | ||
192 | ret = __cpuidle_register_driver(drv, cpu); | ||
193 | spin_unlock(&cpuidle_driver_lock); | ||
194 | put_cpu(); | ||
195 | |||
196 | return ret; | ||
197 | } | ||
198 | EXPORT_SYMBOL_GPL(cpuidle_register_driver); | ||
80 | 199 | ||
81 | /** | 200 | /** |
82 | * cpuidle_unregister_driver - unregisters a driver | 201 | * cpuidle_unregister_driver - unregisters a driver |
@@ -84,20 +203,50 @@ EXPORT_SYMBOL_GPL(cpuidle_get_driver); | |||
84 | */ | 203 | */ |
85 | void cpuidle_unregister_driver(struct cpuidle_driver *drv) | 204 | void cpuidle_unregister_driver(struct cpuidle_driver *drv) |
86 | { | 205 | { |
87 | if (drv != cpuidle_curr_driver) { | 206 | int cpu; |
88 | WARN(1, "invalid cpuidle_unregister_driver(%s)\n", | ||
89 | drv->name); | ||
90 | return; | ||
91 | } | ||
92 | 207 | ||
208 | cpu = get_cpu(); | ||
93 | spin_lock(&cpuidle_driver_lock); | 209 | spin_lock(&cpuidle_driver_lock); |
210 | __cpuidle_unregister_driver(drv, cpu); | ||
211 | spin_unlock(&cpuidle_driver_lock); | ||
212 | put_cpu(); | ||
213 | } | ||
214 | EXPORT_SYMBOL_GPL(cpuidle_unregister_driver); | ||
215 | #endif | ||
216 | |||
217 | /** | ||
218 | * cpuidle_get_driver - return the current driver | ||
219 | */ | ||
220 | struct cpuidle_driver *cpuidle_get_driver(void) | ||
221 | { | ||
222 | struct cpuidle_driver *drv; | ||
223 | int cpu; | ||
94 | 224 | ||
95 | if (!WARN_ON(cpuidle_driver_refcount > 0)) | 225 | cpu = get_cpu(); |
96 | cpuidle_curr_driver = NULL; | 226 | drv = __cpuidle_get_cpu_driver(cpu); |
227 | put_cpu(); | ||
97 | 228 | ||
229 | return drv; | ||
230 | } | ||
231 | EXPORT_SYMBOL_GPL(cpuidle_get_driver); | ||
232 | |||
233 | /** | ||
234 | * cpuidle_get_cpu_driver - return the driver tied to a cpu | ||
235 | */ | ||
236 | struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev) | ||
237 | { | ||
238 | struct cpuidle_driver *drv; | ||
239 | |||
240 | if (!dev) | ||
241 | return NULL; | ||
242 | |||
243 | spin_lock(&cpuidle_driver_lock); | ||
244 | drv = __cpuidle_get_cpu_driver(dev->cpu); | ||
98 | spin_unlock(&cpuidle_driver_lock); | 245 | spin_unlock(&cpuidle_driver_lock); |
246 | |||
247 | return drv; | ||
99 | } | 248 | } |
100 | EXPORT_SYMBOL_GPL(cpuidle_unregister_driver); | 249 | EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver); |
101 | 250 | ||
102 | struct cpuidle_driver *cpuidle_driver_ref(void) | 251 | struct cpuidle_driver *cpuidle_driver_ref(void) |
103 | { | 252 | { |
@@ -105,8 +254,8 @@ struct cpuidle_driver *cpuidle_driver_ref(void) | |||
105 | 254 | ||
106 | spin_lock(&cpuidle_driver_lock); | 255 | spin_lock(&cpuidle_driver_lock); |
107 | 256 | ||
108 | drv = cpuidle_curr_driver; | 257 | drv = cpuidle_get_driver(); |
109 | cpuidle_driver_refcount++; | 258 | drv->refcnt++; |
110 | 259 | ||
111 | spin_unlock(&cpuidle_driver_lock); | 260 | spin_unlock(&cpuidle_driver_lock); |
112 | return drv; | 261 | return drv; |
@@ -114,10 +263,12 @@ struct cpuidle_driver *cpuidle_driver_ref(void) | |||
114 | 263 | ||
115 | void cpuidle_driver_unref(void) | 264 | void cpuidle_driver_unref(void) |
116 | { | 265 | { |
266 | struct cpuidle_driver *drv = cpuidle_get_driver(); | ||
267 | |||
117 | spin_lock(&cpuidle_driver_lock); | 268 | spin_lock(&cpuidle_driver_lock); |
118 | 269 | ||
119 | if (!WARN_ON(cpuidle_driver_refcount <= 0)) | 270 | if (drv && !WARN_ON(drv->refcnt <= 0)) |
120 | cpuidle_driver_refcount--; | 271 | drv->refcnt--; |
121 | 272 | ||
122 | spin_unlock(&cpuidle_driver_lock); | 273 | spin_unlock(&cpuidle_driver_lock); |
123 | } | 274 | } |
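With CONFIG_CPU_IDLE_MULTIPLE_DRIVERS selected, each CPU can now carry its own driver through cpuidle_register_cpu_driver()/cpuidle_unregister_cpu_driver(). The following is only an illustrative sketch of how a platform with two CPU clusters might use the new per-CPU entry points; the driver objects (my_big_driver, my_little_driver) and the CPU split are hypothetical, and error unwinding is omitted for brevity.

    #include <linux/init.h>
    #include <linux/cpu.h>
    #include <linux/cpuidle.h>

    /* Hypothetical, fully initialized drivers defined elsewhere. */
    extern struct cpuidle_driver my_big_driver;
    extern struct cpuidle_driver my_little_driver;

    static int __init my_platform_cpuidle_init(void)
    {
            int cpu, ret;

            for_each_present_cpu(cpu) {
                    /* CPUs 0-1 are "big", the rest "little" (assumption). */
                    struct cpuidle_driver *drv =
                            (cpu < 2) ? &my_big_driver : &my_little_driver;

                    ret = cpuidle_register_cpu_driver(drv, cpu);
                    if (ret) {
                            pr_err("cpuidle: CPU%d driver registration failed (%d)\n",
                                   cpu, ret);
                            return ret;
                    }
            }
            return 0;
    }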
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 5b1f2c372c1f..bd40b943b6db 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
@@ -28,6 +28,13 @@ | |||
28 | #define MAX_INTERESTING 50000 | 28 | #define MAX_INTERESTING 50000 |
29 | #define STDDEV_THRESH 400 | 29 | #define STDDEV_THRESH 400 |
30 | 30 | ||
31 | /* 60 * 60 > STDDEV_THRESH * INTERVALS = 400 * 8 */ | ||
32 | #define MAX_DEVIATION 60 | ||
33 | |||
34 | static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer); | ||
35 | static DEFINE_PER_CPU(int, hrtimer_status); | ||
36 | /* menu hrtimer mode */ | ||
37 | enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL}; | ||
31 | 38 | ||
32 | /* | 39 | /* |
33 | * Concepts and ideas behind the menu governor | 40 | * Concepts and ideas behind the menu governor |
@@ -109,6 +116,13 @@ | |||
109 | * | 116 | * |
110 | */ | 117 | */ |
111 | 118 | ||
119 | /* | ||
120 | * The C-state residency is so long that it is worthwhile to exit | ||
121 | * the shallow C-state and re-enter a deeper C-state. | ||
122 | */ | ||
123 | static unsigned int perfect_cstate_ms __read_mostly = 30; | ||
124 | module_param(perfect_cstate_ms, uint, 0000); | ||
125 | |||
112 | struct menu_device { | 126 | struct menu_device { |
113 | int last_state_idx; | 127 | int last_state_idx; |
114 | int needs_update; | 128 | int needs_update; |
@@ -191,40 +205,102 @@ static u64 div_round64(u64 dividend, u32 divisor) | |||
191 | return div_u64(dividend + (divisor / 2), divisor); | 205 | return div_u64(dividend + (divisor / 2), divisor); |
192 | } | 206 | } |
193 | 207 | ||
208 | /* Cancel the hrtimer if it has not triggered yet */ | ||
209 | void menu_hrtimer_cancel(void) | ||
210 | { | ||
211 | int cpu = smp_processor_id(); | ||
212 | struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu); | ||
213 | |||
214 | /* The timer has not expired yet */ | ||
215 | if (per_cpu(hrtimer_status, cpu)) { | ||
216 | hrtimer_cancel(hrtmr); | ||
217 | per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP; | ||
218 | } | ||
219 | } | ||
220 | EXPORT_SYMBOL_GPL(menu_hrtimer_cancel); | ||
221 | |||
222 | /* Callback invoked when the hrtimer expires */ | ||
223 | static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer) | ||
224 | { | ||
225 | int cpu = smp_processor_id(); | ||
226 | struct menu_device *data = &per_cpu(menu_devices, cpu); | ||
227 | |||
228 | /* In the general case, the expected residency is much larger than | ||
229 | * the deepest C-state's target residency, but the prediction logic | ||
230 | * still predicted a small residency, so the prediction history is | ||
231 | * clearly wrong if this timer fires. | ||
232 | * So reset the correction factor. | ||
233 | */ | ||
234 | if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL) | ||
235 | data->correction_factor[data->bucket] = RESOLUTION * DECAY; | ||
236 | |||
237 | per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP; | ||
238 | |||
239 | return HRTIMER_NORESTART; | ||
240 | } | ||
241 | |||
194 | /* | 242 | /* |
195 | * Try detecting repeating patterns by keeping track of the last 8 | 243 | * Try detecting repeating patterns by keeping track of the last 8 |
196 | * intervals, and checking if the standard deviation of that set | 244 | * intervals, and checking if the standard deviation of that set |
197 | * of points is below a threshold. If it is... then use the | 245 | * of points is below a threshold. If it is... then use the |
198 | * average of these 8 points as the estimated value. | 246 | * average of these 8 points as the estimated value. |
199 | */ | 247 | */ |
200 | static void detect_repeating_patterns(struct menu_device *data) | 248 | static u32 get_typical_interval(struct menu_device *data) |
201 | { | 249 | { |
202 | int i; | 250 | int i = 0, divisor = 0; |
203 | uint64_t avg = 0; | 251 | uint64_t max = 0, avg = 0, stddev = 0; |
204 | uint64_t stddev = 0; /* contains the square of the std deviation */ | 252 | int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */ |
205 | 253 | unsigned int ret = 0; | |
206 | /* first calculate average and standard deviation of the past */ | ||
207 | for (i = 0; i < INTERVALS; i++) | ||
208 | avg += data->intervals[i]; | ||
209 | avg = avg / INTERVALS; | ||
210 | 254 | ||
211 | /* if the avg is beyond the known next tick, it's worthless */ | 255 | again: |
212 | if (avg > data->expected_us) | ||
213 | return; | ||
214 | 256 | ||
215 | for (i = 0; i < INTERVALS; i++) | 257 | /* first calculate average and standard deviation of the past */ |
216 | stddev += (data->intervals[i] - avg) * | 258 | max = avg = divisor = stddev = 0; |
217 | (data->intervals[i] - avg); | 259 | for (i = 0; i < INTERVALS; i++) { |
218 | 260 | int64_t value = data->intervals[i]; | |
219 | stddev = stddev / INTERVALS; | 261 | if (value <= thresh) { |
262 | avg += value; | ||
263 | divisor++; | ||
264 | if (value > max) | ||
265 | max = value; | ||
266 | } | ||
267 | } | ||
268 | do_div(avg, divisor); | ||
220 | 269 | ||
270 | for (i = 0; i < INTERVALS; i++) { | ||
271 | int64_t value = data->intervals[i]; | ||
272 | if (value <= thresh) { | ||
273 | int64_t diff = value - avg; | ||
274 | stddev += diff * diff; | ||
275 | } | ||
276 | } | ||
277 | do_div(stddev, divisor); | ||
278 | stddev = int_sqrt(stddev); | ||
221 | /* | 279 | /* |
222 | * now.. if stddev is small.. then assume we have a | 280 | * If we have outliers to the upside in our distribution, discard |
223 | * repeating pattern and predict we keep doing this. | 281 | * those by setting the threshold to exclude these outliers, then |
282 | * calculate the average and standard deviation again. Once we get | ||
283 | * down to the bottom 3/4 of our samples, stop excluding samples. | ||
284 | * | ||
285 | * This can deal with workloads that have long pauses interspersed | ||
286 | * with sporadic activity with a bunch of short pauses. | ||
287 | * | ||
288 | * The typical interval is obtained when standard deviation is small | ||
289 | * or standard deviation is small compared to the average interval. | ||
224 | */ | 290 | */ |
225 | 291 | if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3)) | |
226 | if (avg && stddev < STDDEV_THRESH) | 292 | || stddev <= 20) { |
227 | data->predicted_us = avg; | 293 | data->predicted_us = avg; |
294 | ret = 1; | ||
295 | return ret; | ||
296 | |||
297 | } else if ((divisor * 4) > INTERVALS * 3) { | ||
298 | /* Exclude the max interval */ | ||
299 | thresh = max - 1; | ||
300 | goto again; | ||
301 | } | ||
302 | |||
303 | return ret; | ||
228 | } | 304 | } |
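The flow of get_typical_interval() is easier to see outside the kernel. The user-space model below mirrors the same outlier-discarding loop, but it is only a sketch: it uses floating point and libm's sqrt() where the kernel code uses do_div() and int_sqrt().

    #include <stdio.h>
    #include <stdint.h>
    #include <math.h>

    #define INTERVALS 8

    /* Model of get_typical_interval(): repeatedly drop the largest sample
     * until either the distribution looks "typical" (small stddev relative
     * to the average) or fewer than 3/4 of the samples remain.
     */
    static int typical_interval(const uint32_t intervals[INTERVALS], double *typical)
    {
            double thresh = 1e18;   /* discard samples above this value */

            for (;;) {
                    double avg = 0.0, var = 0.0, max = 0.0;
                    int divisor = 0, i;

                    for (i = 0; i < INTERVALS; i++)
                            if (intervals[i] <= thresh) {
                                    avg += intervals[i];
                                    divisor++;
                                    if (intervals[i] > max)
                                            max = intervals[i];
                            }
                    avg /= divisor;

                    for (i = 0; i < INTERVALS; i++)
                            if (intervals[i] <= thresh)
                                    var += (intervals[i] - avg) * (intervals[i] - avg);
                    var /= divisor;

                    if ((avg > 6 * sqrt(var) && divisor * 4 >= INTERVALS * 3) ||
                        sqrt(var) <= 20) {
                            *typical = avg;
                            return 1;               /* repeating pattern detected */
                    }
                    if (divisor * 4 <= INTERVALS * 3)
                            return 0;               /* too few samples left */
                    thresh = max - 1;               /* drop the current maximum */
            }
    }

    int main(void)
    {
            /* seven short sleeps and one large outlier (values in us) */
            const uint32_t s[INTERVALS] = { 480, 500, 510, 495, 505, 20000, 490, 500 };
            double t;

            if (typical_interval(s, &t))
                    printf("typical interval ~ %.0f us\n", t);
            return 0;
    }

With this sample set the first pass fails (the 20000 us outlier inflates the deviation), the outlier is discarded, and the second pass reports a typical interval of roughly 497 us.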
229 | 305 | ||
230 | /** | 306 | /** |
@@ -240,6 +316,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
240 | int i; | 316 | int i; |
241 | int multiplier; | 317 | int multiplier; |
242 | struct timespec t; | 318 | struct timespec t; |
319 | int repeat = 0, low_predicted = 0; | ||
320 | int cpu = smp_processor_id(); | ||
321 | struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu); | ||
243 | 322 | ||
244 | if (data->needs_update) { | 323 | if (data->needs_update) { |
245 | menu_update(drv, dev); | 324 | menu_update(drv, dev); |
@@ -274,7 +353,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
274 | data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket], | 353 | data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket], |
275 | RESOLUTION * DECAY); | 354 | RESOLUTION * DECAY); |
276 | 355 | ||
277 | detect_repeating_patterns(data); | 356 | repeat = get_typical_interval(data); |
278 | 357 | ||
279 | /* | 358 | /* |
280 | * We want to default to C1 (hlt), not to busy polling | 359 | * We want to default to C1 (hlt), not to busy polling |
@@ -295,8 +374,10 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
295 | 374 | ||
296 | if (s->disabled || su->disable) | 375 | if (s->disabled || su->disable) |
297 | continue; | 376 | continue; |
298 | if (s->target_residency > data->predicted_us) | 377 | if (s->target_residency > data->predicted_us) { |
378 | low_predicted = 1; | ||
299 | continue; | 379 | continue; |
380 | } | ||
300 | if (s->exit_latency > latency_req) | 381 | if (s->exit_latency > latency_req) |
301 | continue; | 382 | continue; |
302 | if (s->exit_latency * multiplier > data->predicted_us) | 383 | if (s->exit_latency * multiplier > data->predicted_us) |
@@ -309,6 +390,44 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
309 | } | 390 | } |
310 | } | 391 | } |
311 | 392 | ||
393 | /* the deepest C-state was not chosen due to low predicted residency */ | ||
394 | if (low_predicted) { | ||
395 | unsigned int timer_us = 0; | ||
396 | unsigned int perfect_us = 0; | ||
397 | |||
398 | /* | ||
399 | * Set a timer to detect whether this sleep is much | ||
400 | * longer than repeat mode predicted. If the timer | ||
401 | * triggers, the code will evaluate whether to put | ||
402 | * the CPU into a deeper C-state. | ||
403 | * The timer is cancelled on CPU wakeup. | ||
404 | */ | ||
405 | timer_us = 2 * (data->predicted_us + MAX_DEVIATION); | ||
406 | |||
407 | perfect_us = perfect_cstate_ms * 1000; | ||
408 | |||
409 | if (repeat && (4 * timer_us < data->expected_us)) { | ||
410 | RCU_NONIDLE(hrtimer_start(hrtmr, | ||
411 | ns_to_ktime(1000 * timer_us), | ||
412 | HRTIMER_MODE_REL_PINNED)); | ||
413 | /* In repeat case, menu hrtimer is started */ | ||
414 | per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT; | ||
415 | } else if (perfect_us < data->expected_us) { | ||
416 | /* | ||
417 | * The next timer event is far away. This could be | ||
418 | * because we did not make a useful prediction. | ||
419 | * In that case, it makes sense to re-enter | ||
420 | * a deeper C-state after some time. | ||
421 | */ | ||
422 | RCU_NONIDLE(hrtimer_start(hrtmr, | ||
423 | ns_to_ktime(1000 * timer_us), | ||
424 | HRTIMER_MODE_REL_PINNED)); | ||
425 | /* In general case, menu hrtimer is started */ | ||
426 | per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL; | ||
427 | } | ||
428 | |||
429 | } | ||
430 | |||
312 | return data->last_state_idx; | 431 | return data->last_state_idx; |
313 | } | 432 | } |
314 | 433 | ||
@@ -399,6 +518,9 @@ static int menu_enable_device(struct cpuidle_driver *drv, | |||
399 | struct cpuidle_device *dev) | 518 | struct cpuidle_device *dev) |
400 | { | 519 | { |
401 | struct menu_device *data = &per_cpu(menu_devices, dev->cpu); | 520 | struct menu_device *data = &per_cpu(menu_devices, dev->cpu); |
521 | struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu); | ||
522 | hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
523 | t->function = menu_hrtimer_notify; | ||
402 | 524 | ||
403 | memset(data, 0, sizeof(struct menu_device)); | 525 | memset(data, 0, sizeof(struct menu_device)); |
404 | 526 | ||
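A small worked calculation makes the hrtimer arming decision in menu_select() above concrete. The numbers are hypothetical (predicted_us = 500, expected_us = 20000); the other constants are the defaults introduced by this patch.

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical prediction for one idle entry */
            unsigned int predicted_us = 500, expected_us = 20000;
            /* defaults from the patch: MAX_DEVIATION and perfect_cstate_ms */
            unsigned int max_deviation = 60, perfect_cstate_ms = 30;
            int repeat = 1;  /* assume get_typical_interval() found a pattern */

            unsigned int timer_us   = 2 * (predicted_us + max_deviation); /* 1120 */
            unsigned int perfect_us = perfect_cstate_ms * 1000;           /* 30000 */

            if (repeat && 4 * timer_us < expected_us)
                    printf("repeat mode: arm hrtimer for %u us\n", timer_us);
            else if (perfect_us < expected_us)
                    printf("general mode: arm hrtimer for %u us\n", timer_us);
            else
                    printf("no hrtimer armed\n");
            return 0;
    }

Since 4 * 1120 us is well below the expected 20000 us sleep, the repeat-mode timer is armed; the general-mode branch only fires when no pattern was detected and the expected sleep exceeds perfect_cstate_ms.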
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 5f809e337b89..340942946106 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/cpu.h> | 13 | #include <linux/cpu.h> |
14 | #include <linux/capability.h> | 14 | #include <linux/capability.h> |
15 | #include <linux/device.h> | ||
15 | 16 | ||
16 | #include "cpuidle.h" | 17 | #include "cpuidle.h" |
17 | 18 | ||
@@ -297,6 +298,13 @@ static struct attribute *cpuidle_state_default_attrs[] = { | |||
297 | NULL | 298 | NULL |
298 | }; | 299 | }; |
299 | 300 | ||
301 | struct cpuidle_state_kobj { | ||
302 | struct cpuidle_state *state; | ||
303 | struct cpuidle_state_usage *state_usage; | ||
304 | struct completion kobj_unregister; | ||
305 | struct kobject kobj; | ||
306 | }; | ||
307 | |||
300 | #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj) | 308 | #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj) |
301 | #define kobj_to_state(k) (kobj_to_state_obj(k)->state) | 309 | #define kobj_to_state(k) (kobj_to_state_obj(k)->state) |
302 | #define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage) | 310 | #define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage) |
@@ -356,17 +364,17 @@ static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i) | |||
356 | } | 364 | } |
357 | 365 | ||
358 | /** | 366 | /** |
359 | * cpuidle_add_driver_sysfs - adds driver-specific sysfs attributes | 367 | * cpuidle_add_state_sysfs - adds cpuidle states sysfs attributes |
360 | * @device: the target device | 368 | * @device: the target device |
361 | */ | 369 | */ |
362 | int cpuidle_add_state_sysfs(struct cpuidle_device *device) | 370 | static int cpuidle_add_state_sysfs(struct cpuidle_device *device) |
363 | { | 371 | { |
364 | int i, ret = -ENOMEM; | 372 | int i, ret = -ENOMEM; |
365 | struct cpuidle_state_kobj *kobj; | 373 | struct cpuidle_state_kobj *kobj; |
366 | struct cpuidle_driver *drv = cpuidle_get_driver(); | 374 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device); |
367 | 375 | ||
368 | /* state statistics */ | 376 | /* state statistics */ |
369 | for (i = 0; i < device->state_count; i++) { | 377 | for (i = 0; i < drv->state_count; i++) { |
370 | kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); | 378 | kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); |
371 | if (!kobj) | 379 | if (!kobj) |
372 | goto error_state; | 380 | goto error_state; |
@@ -374,8 +382,8 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device) | |||
374 | kobj->state_usage = &device->states_usage[i]; | 382 | kobj->state_usage = &device->states_usage[i]; |
375 | init_completion(&kobj->kobj_unregister); | 383 | init_completion(&kobj->kobj_unregister); |
376 | 384 | ||
377 | ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj, | 385 | ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, |
378 | "state%d", i); | 386 | &device->kobj, "state%d", i); |
379 | if (ret) { | 387 | if (ret) { |
380 | kfree(kobj); | 388 | kfree(kobj); |
381 | goto error_state; | 389 | goto error_state; |
@@ -393,10 +401,10 @@ error_state: | |||
393 | } | 401 | } |
394 | 402 | ||
395 | /** | 403 | /** |
396 | * cpuidle_remove_driver_sysfs - removes driver-specific sysfs attributes | 404 | * cpuidle_remove_state_sysfs - removes the cpuidle states sysfs attributes |
397 | * @device: the target device | 405 | * @device: the target device |
398 | */ | 406 | */ |
399 | void cpuidle_remove_state_sysfs(struct cpuidle_device *device) | 407 | static void cpuidle_remove_state_sysfs(struct cpuidle_device *device) |
400 | { | 408 | { |
401 | int i; | 409 | int i; |
402 | 410 | ||
@@ -404,17 +412,179 @@ void cpuidle_remove_state_sysfs(struct cpuidle_device *device) | |||
404 | cpuidle_free_state_kobj(device, i); | 412 | cpuidle_free_state_kobj(device, i); |
405 | } | 413 | } |
406 | 414 | ||
415 | #ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS | ||
416 | #define kobj_to_driver_kobj(k) container_of(k, struct cpuidle_driver_kobj, kobj) | ||
417 | #define attr_to_driver_attr(a) container_of(a, struct cpuidle_driver_attr, attr) | ||
418 | |||
419 | #define define_one_driver_ro(_name, show) \ | ||
420 | static struct cpuidle_driver_attr attr_driver_##_name = \ | ||
421 | __ATTR(_name, 0444, show, NULL) | ||
422 | |||
423 | struct cpuidle_driver_kobj { | ||
424 | struct cpuidle_driver *drv; | ||
425 | struct completion kobj_unregister; | ||
426 | struct kobject kobj; | ||
427 | }; | ||
428 | |||
429 | struct cpuidle_driver_attr { | ||
430 | struct attribute attr; | ||
431 | ssize_t (*show)(struct cpuidle_driver *, char *); | ||
432 | ssize_t (*store)(struct cpuidle_driver *, const char *, size_t); | ||
433 | }; | ||
434 | |||
435 | static ssize_t show_driver_name(struct cpuidle_driver *drv, char *buf) | ||
436 | { | ||
437 | ssize_t ret; | ||
438 | |||
439 | spin_lock(&cpuidle_driver_lock); | ||
440 | ret = sprintf(buf, "%s\n", drv ? drv->name : "none"); | ||
441 | spin_unlock(&cpuidle_driver_lock); | ||
442 | |||
443 | return ret; | ||
444 | } | ||
445 | |||
446 | static void cpuidle_driver_sysfs_release(struct kobject *kobj) | ||
447 | { | ||
448 | struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj); | ||
449 | complete(&driver_kobj->kobj_unregister); | ||
450 | } | ||
451 | |||
452 | static ssize_t cpuidle_driver_show(struct kobject *kobj, struct attribute * attr, | ||
453 | char * buf) | ||
454 | { | ||
455 | int ret = -EIO; | ||
456 | struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj); | ||
457 | struct cpuidle_driver_attr *dattr = attr_to_driver_attr(attr); | ||
458 | |||
459 | if (dattr->show) | ||
460 | ret = dattr->show(driver_kobj->drv, buf); | ||
461 | |||
462 | return ret; | ||
463 | } | ||
464 | |||
465 | static ssize_t cpuidle_driver_store(struct kobject *kobj, struct attribute *attr, | ||
466 | const char *buf, size_t size) | ||
467 | { | ||
468 | int ret = -EIO; | ||
469 | struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj); | ||
470 | struct cpuidle_driver_attr *dattr = attr_to_driver_attr(attr); | ||
471 | |||
472 | if (dattr->store) | ||
473 | ret = dattr->store(driver_kobj->drv, buf, size); | ||
474 | |||
475 | return ret; | ||
476 | } | ||
477 | |||
478 | define_one_driver_ro(name, show_driver_name); | ||
479 | |||
480 | static const struct sysfs_ops cpuidle_driver_sysfs_ops = { | ||
481 | .show = cpuidle_driver_show, | ||
482 | .store = cpuidle_driver_store, | ||
483 | }; | ||
484 | |||
485 | static struct attribute *cpuidle_driver_default_attrs[] = { | ||
486 | &attr_driver_name.attr, | ||
487 | NULL | ||
488 | }; | ||
489 | |||
490 | static struct kobj_type ktype_driver_cpuidle = { | ||
491 | .sysfs_ops = &cpuidle_driver_sysfs_ops, | ||
492 | .default_attrs = cpuidle_driver_default_attrs, | ||
493 | .release = cpuidle_driver_sysfs_release, | ||
494 | }; | ||
495 | |||
496 | /** | ||
497 | * cpuidle_add_driver_sysfs - adds the driver name sysfs attribute | ||
498 | * @dev: the target cpuidle device | ||
499 | */ | ||
500 | static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) | ||
501 | { | ||
502 | struct cpuidle_driver_kobj *kdrv; | ||
503 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | ||
504 | int ret; | ||
505 | |||
506 | kdrv = kzalloc(sizeof(*kdrv), GFP_KERNEL); | ||
507 | if (!kdrv) | ||
508 | return -ENOMEM; | ||
509 | |||
510 | kdrv->drv = drv; | ||
511 | init_completion(&kdrv->kobj_unregister); | ||
512 | |||
513 | ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle, | ||
514 | &dev->kobj, "driver"); | ||
515 | if (ret) { | ||
516 | kfree(kdrv); | ||
517 | return ret; | ||
518 | } | ||
519 | |||
520 | kobject_uevent(&kdrv->kobj, KOBJ_ADD); | ||
521 | dev->kobj_driver = kdrv; | ||
522 | |||
523 | return ret; | ||
524 | } | ||
525 | |||
526 | /** | ||
527 | * cpuidle_remove_driver_sysfs - removes the driver name sysfs attribute | ||
528 | * @dev: the target cpuidle device | ||
529 | */ | ||
530 | static void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev) | ||
531 | { | ||
532 | struct cpuidle_driver_kobj *kdrv = dev->kobj_driver; | ||
533 | kobject_put(&kdrv->kobj); | ||
534 | wait_for_completion(&kdrv->kobj_unregister); | ||
535 | kfree(kdrv); | ||
536 | } | ||
537 | #else | ||
538 | static inline int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) | ||
539 | { | ||
540 | return 0; | ||
541 | } | ||
542 | |||
543 | static inline void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev) | ||
544 | { | ||
545 | ; | ||
546 | } | ||
547 | #endif | ||
548 | |||
549 | /** | ||
550 | * cpuidle_add_device_sysfs - adds device specific sysfs attributes | ||
551 | * @device: the target device | ||
552 | */ | ||
553 | int cpuidle_add_device_sysfs(struct cpuidle_device *device) | ||
554 | { | ||
555 | int ret; | ||
556 | |||
557 | ret = cpuidle_add_state_sysfs(device); | ||
558 | if (ret) | ||
559 | return ret; | ||
560 | |||
561 | ret = cpuidle_add_driver_sysfs(device); | ||
562 | if (ret) | ||
563 | cpuidle_remove_state_sysfs(device); | ||
564 | return ret; | ||
565 | } | ||
566 | |||
567 | /** | ||
568 | * cpuidle_remove_device_sysfs - removes device specific sysfs attributes | ||
569 | * @device: the target device | ||
570 | */ | ||
571 | void cpuidle_remove_device_sysfs(struct cpuidle_device *device) | ||
572 | { | ||
573 | cpuidle_remove_driver_sysfs(device); | ||
574 | cpuidle_remove_state_sysfs(device); | ||
575 | } | ||
576 | |||
407 | /** | 577 | /** |
408 | * cpuidle_add_sysfs - creates a sysfs instance for the target device | 578 | * cpuidle_add_sysfs - creates a sysfs instance for the target device |
409 | * @dev: the target device | 579 | * @dev: the target device |
410 | */ | 580 | */ |
411 | int cpuidle_add_sysfs(struct device *cpu_dev) | 581 | int cpuidle_add_sysfs(struct cpuidle_device *dev) |
412 | { | 582 | { |
413 | int cpu = cpu_dev->id; | 583 | struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); |
414 | struct cpuidle_device *dev; | ||
415 | int error; | 584 | int error; |
416 | 585 | ||
417 | dev = per_cpu(cpuidle_devices, cpu); | 586 | init_completion(&dev->kobj_unregister); |
587 | |||
418 | error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &cpu_dev->kobj, | 588 | error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &cpu_dev->kobj, |
419 | "cpuidle"); | 589 | "cpuidle"); |
420 | if (!error) | 590 | if (!error) |
@@ -426,11 +596,8 @@ int cpuidle_add_sysfs(struct device *cpu_dev) | |||
426 | * cpuidle_remove_sysfs - deletes a sysfs instance on the target device | 596 | * cpuidle_remove_sysfs - deletes a sysfs instance on the target device |
427 | * @dev: the target device | 597 | * @dev: the target device |
428 | */ | 598 | */ |
429 | void cpuidle_remove_sysfs(struct device *cpu_dev) | 599 | void cpuidle_remove_sysfs(struct cpuidle_device *dev) |
430 | { | 600 | { |
431 | int cpu = cpu_dev->id; | ||
432 | struct cpuidle_device *dev; | ||
433 | |||
434 | dev = per_cpu(cpuidle_devices, cpu); | ||
435 | kobject_put(&dev->kobj); | 601 | kobject_put(&dev->kobj); |
602 | wait_for_completion(&dev->kobj_unregister); | ||
436 | } | 603 | } |
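The attribute plumbing added above (cpuidle_driver_attr, define_one_driver_ro and ktype_driver_cpuidle) makes further per-driver attributes cheap to add. Purely as an illustration, and not part of this patch, a read-only attribute exposing the driver's state count could be wired in as follows; the existing cpuidle_driver_default_attrs array would simply gain one entry.

    /* Hypothetical extra attribute, shown only to illustrate the plumbing. */
    static ssize_t show_driver_state_count(struct cpuidle_driver *drv, char *buf)
    {
            return sprintf(buf, "%d\n", drv ? drv->state_count : 0);
    }

    define_one_driver_ro(state_count, show_driver_state_count);

    static struct attribute *cpuidle_driver_default_attrs[] = {
            &attr_driver_name.attr,
            &attr_driver_state_count.attr,  /* new entry */
            NULL
    };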
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index f6b0a6e2ea50..0f079be13305 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig | |||
@@ -30,7 +30,7 @@ if PM_DEVFREQ | |||
30 | comment "DEVFREQ Governors" | 30 | comment "DEVFREQ Governors" |
31 | 31 | ||
32 | config DEVFREQ_GOV_SIMPLE_ONDEMAND | 32 | config DEVFREQ_GOV_SIMPLE_ONDEMAND |
33 | bool "Simple Ondemand" | 33 | tristate "Simple Ondemand" |
34 | help | 34 | help |
35 | Chooses frequency based on the recent load on the device. Works | 35 | Chooses frequency based on the recent load on the device. Works |
36 | similar as ONDEMAND governor of CPUFREQ does. A device with | 36 | similar as ONDEMAND governor of CPUFREQ does. A device with |
@@ -39,7 +39,7 @@ config DEVFREQ_GOV_SIMPLE_ONDEMAND | |||
39 | values to the governor with data field at devfreq_add_device(). | 39 | values to the governor with data field at devfreq_add_device(). |
40 | 40 | ||
41 | config DEVFREQ_GOV_PERFORMANCE | 41 | config DEVFREQ_GOV_PERFORMANCE |
42 | bool "Performance" | 42 | tristate "Performance" |
43 | help | 43 | help |
44 | Sets the frequency at the maximum available frequency. | 44 | Sets the frequency at the maximum available frequency. |
45 | This governor always returns UINT_MAX as frequency so that | 45 | This governor always returns UINT_MAX as frequency so that |
@@ -47,7 +47,7 @@ config DEVFREQ_GOV_PERFORMANCE | |||
47 | at any time. | 47 | at any time. |
48 | 48 | ||
49 | config DEVFREQ_GOV_POWERSAVE | 49 | config DEVFREQ_GOV_POWERSAVE |
50 | bool "Powersave" | 50 | tristate "Powersave" |
51 | help | 51 | help |
52 | Sets the frequency at the minimum available frequency. | 52 | Sets the frequency at the minimum available frequency. |
53 | This governor always returns 0 as frequency so that | 53 | This governor always returns 0 as frequency so that |
@@ -55,7 +55,7 @@ config DEVFREQ_GOV_POWERSAVE | |||
55 | at any time. | 55 | at any time. |
56 | 56 | ||
57 | config DEVFREQ_GOV_USERSPACE | 57 | config DEVFREQ_GOV_USERSPACE |
58 | bool "Userspace" | 58 | tristate "Userspace" |
59 | help | 59 | help |
60 | Sets the frequency at the user specified one. | 60 | Sets the frequency at the user specified one. |
61 | This governor returns the user configured frequency if there | 61 | This governor returns the user configured frequency if there |
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index b146d76f04cf..53766f39aadd 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c | |||
@@ -27,21 +27,17 @@ | |||
27 | #include <linux/hrtimer.h> | 27 | #include <linux/hrtimer.h> |
28 | #include "governor.h" | 28 | #include "governor.h" |
29 | 29 | ||
30 | struct class *devfreq_class; | 30 | static struct class *devfreq_class; |
31 | 31 | ||
32 | /* | 32 | /* |
33 | * devfreq_work periodically monitors every registered device. | 33 | * devfreq core provides delayed work based load monitoring helper |
34 | * The minimum polling interval is one jiffy. The polling interval is | 34 | * functions. Governors can use these or can implement their own |
35 | * determined by the minimum polling period among all polling devfreq | 35 | * monitoring mechanism. |
36 | * devices. The resolution of polling interval is one jiffy. | ||
37 | */ | 36 | */ |
38 | static bool polling; | ||
39 | static struct workqueue_struct *devfreq_wq; | 37 | static struct workqueue_struct *devfreq_wq; |
40 | static struct delayed_work devfreq_work; | ||
41 | |||
42 | /* wait removing if this is to be removed */ | ||
43 | static struct devfreq *wait_remove_device; | ||
44 | 38 | ||
39 | /* The list of all device-devfreq governors */ | ||
40 | static LIST_HEAD(devfreq_governor_list); | ||
45 | /* The list of all device-devfreq */ | 41 | /* The list of all device-devfreq */ |
46 | static LIST_HEAD(devfreq_list); | 42 | static LIST_HEAD(devfreq_list); |
47 | static DEFINE_MUTEX(devfreq_list_lock); | 43 | static DEFINE_MUTEX(devfreq_list_lock); |
@@ -73,6 +69,79 @@ static struct devfreq *find_device_devfreq(struct device *dev) | |||
73 | } | 69 | } |
74 | 70 | ||
75 | /** | 71 | /** |
72 | * devfreq_get_freq_level() - Lookup freq_table for the frequency | ||
73 | * @devfreq: the devfreq instance | ||
74 | * @freq: the target frequency | ||
75 | */ | ||
76 | static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq) | ||
77 | { | ||
78 | int lev; | ||
79 | |||
80 | for (lev = 0; lev < devfreq->profile->max_state; lev++) | ||
81 | if (freq == devfreq->profile->freq_table[lev]) | ||
82 | return lev; | ||
83 | |||
84 | return -EINVAL; | ||
85 | } | ||
86 | |||
87 | /** | ||
88 | * devfreq_update_status() - Update statistics of devfreq behavior | ||
89 | * @devfreq: the devfreq instance | ||
90 | * @freq: the update target frequency | ||
91 | */ | ||
92 | static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) | ||
93 | { | ||
94 | int lev, prev_lev; | ||
95 | unsigned long cur_time; | ||
96 | |||
97 | lev = devfreq_get_freq_level(devfreq, freq); | ||
98 | if (lev < 0) | ||
99 | return lev; | ||
100 | |||
101 | cur_time = jiffies; | ||
102 | devfreq->time_in_state[lev] += | ||
103 | cur_time - devfreq->last_stat_updated; | ||
104 | if (freq != devfreq->previous_freq) { | ||
105 | prev_lev = devfreq_get_freq_level(devfreq, | ||
106 | devfreq->previous_freq); | ||
107 | devfreq->trans_table[(prev_lev * | ||
108 | devfreq->profile->max_state) + lev]++; | ||
109 | devfreq->total_trans++; | ||
110 | } | ||
111 | devfreq->last_stat_updated = cur_time; | ||
112 | |||
113 | return 0; | ||
114 | } | ||
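devfreq_update_status() stores its counters in trans_table, a max_state x max_state matrix flattened into a one-dimensional array and indexed as trans_table[prev_lev * max_state + lev]. A hypothetical debug helper, not part of the patch, shows the convention; the real interface to this data is the trans_stat sysfs attribute.

    #include <linux/devfreq.h>

    /* Illustrative only: walk the flattened transition matrix row by row. */
    static void dump_trans_table(struct devfreq *devfreq)
    {
            unsigned int max_state = devfreq->profile->max_state;
            unsigned int from, to;

            for (from = 0; from < max_state; from++)
                    for (to = 0; to < max_state; to++)
                            dev_dbg(&devfreq->dev,
                                    "level %u -> level %u: %u transitions\n",
                                    from, to,
                                    devfreq->trans_table[from * max_state + to]);
    }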
115 | |||
116 | /** | ||
117 | * find_devfreq_governor() - find devfreq governor from name | ||
118 | * @name: name of the governor | ||
119 | * | ||
120 | * Search the list of devfreq governors and return the matched | ||
121 | * governor's pointer. devfreq_list_lock should be held by the caller. | ||
122 | */ | ||
123 | static struct devfreq_governor *find_devfreq_governor(const char *name) | ||
124 | { | ||
125 | struct devfreq_governor *tmp_governor; | ||
126 | |||
127 | if (unlikely(IS_ERR_OR_NULL(name))) { | ||
128 | pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); | ||
129 | return ERR_PTR(-EINVAL); | ||
130 | } | ||
131 | WARN(!mutex_is_locked(&devfreq_list_lock), | ||
132 | "devfreq_list_lock must be locked."); | ||
133 | |||
134 | list_for_each_entry(tmp_governor, &devfreq_governor_list, node) { | ||
135 | if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN)) | ||
136 | return tmp_governor; | ||
137 | } | ||
138 | |||
139 | return ERR_PTR(-ENODEV); | ||
140 | } | ||
141 | |||
142 | /* Load monitoring helper functions for governors to use */ | ||
143 | |||
144 | /** | ||
76 | * update_devfreq() - Reevaluate the device and configure frequency. | 145 | * update_devfreq() - Reevaluate the device and configure frequency. |
77 | * @devfreq: the devfreq instance. | 146 | * @devfreq: the devfreq instance. |
78 | * | 147 | * |
@@ -90,6 +159,9 @@ int update_devfreq(struct devfreq *devfreq) | |||
90 | return -EINVAL; | 159 | return -EINVAL; |
91 | } | 160 | } |
92 | 161 | ||
162 | if (!devfreq->governor) | ||
163 | return -EINVAL; | ||
164 | |||
93 | /* Reevaluate the proper frequency */ | 165 | /* Reevaluate the proper frequency */ |
94 | err = devfreq->governor->get_target_freq(devfreq, &freq); | 166 | err = devfreq->governor->get_target_freq(devfreq, &freq); |
95 | if (err) | 167 | if (err) |
@@ -116,16 +188,173 @@ int update_devfreq(struct devfreq *devfreq) | |||
116 | if (err) | 188 | if (err) |
117 | return err; | 189 | return err; |
118 | 190 | ||
191 | if (devfreq->profile->freq_table) | ||
192 | if (devfreq_update_status(devfreq, freq)) | ||
193 | dev_err(&devfreq->dev, | ||
194 | "Couldn't update frequency transition information.\n"); | ||
195 | |||
119 | devfreq->previous_freq = freq; | 196 | devfreq->previous_freq = freq; |
120 | return err; | 197 | return err; |
121 | } | 198 | } |
199 | EXPORT_SYMBOL(update_devfreq); | ||
200 | |||
201 | /** | ||
202 | * devfreq_monitor() - Periodically poll devfreq objects. | ||
203 | * @work: the work struct used to run devfreq_monitor periodically. | ||
204 | * | ||
205 | */ | ||
206 | static void devfreq_monitor(struct work_struct *work) | ||
207 | { | ||
208 | int err; | ||
209 | struct devfreq *devfreq = container_of(work, | ||
210 | struct devfreq, work.work); | ||
211 | |||
212 | mutex_lock(&devfreq->lock); | ||
213 | err = update_devfreq(devfreq); | ||
214 | if (err) | ||
215 | dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err); | ||
216 | |||
217 | queue_delayed_work(devfreq_wq, &devfreq->work, | ||
218 | msecs_to_jiffies(devfreq->profile->polling_ms)); | ||
219 | mutex_unlock(&devfreq->lock); | ||
220 | } | ||
221 | |||
222 | /** | ||
223 | * devfreq_monitor_start() - Start load monitoring of devfreq instance | ||
224 | * @devfreq: the devfreq instance. | ||
225 | * | ||
226 | * Helper function for starting devfreq device load monitoring. By | ||
227 | * default, delayed-work based monitoring is supported. This function | ||
228 | * is to be called from the governor in response to the DEVFREQ_GOV_START | ||
229 | * event when the device is added to the devfreq framework. | ||
230 | */ | ||
231 | void devfreq_monitor_start(struct devfreq *devfreq) | ||
232 | { | ||
233 | INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor); | ||
234 | if (devfreq->profile->polling_ms) | ||
235 | queue_delayed_work(devfreq_wq, &devfreq->work, | ||
236 | msecs_to_jiffies(devfreq->profile->polling_ms)); | ||
237 | } | ||
238 | EXPORT_SYMBOL(devfreq_monitor_start); | ||
239 | |||
240 | /** | ||
241 | * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance | ||
242 | * @devfreq: the devfreq instance. | ||
243 | * | ||
244 | * Helper function to stop devfreq device load monitoring. This function | ||
245 | * is to be called from the governor in response to the DEVFREQ_GOV_STOP | ||
246 | * event when the device is removed from the devfreq framework. | ||
247 | */ | ||
248 | void devfreq_monitor_stop(struct devfreq *devfreq) | ||
249 | { | ||
250 | cancel_delayed_work_sync(&devfreq->work); | ||
251 | } | ||
252 | EXPORT_SYMBOL(devfreq_monitor_stop); | ||
253 | |||
254 | /** | ||
255 | * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance | ||
256 | * @devfreq: the devfreq instance. | ||
257 | * | ||
258 | * Helper function to suspend devfreq device load monitoring. This | ||
259 | * function is to be called from the governor in response to the | ||
260 | * DEVFREQ_GOV_SUSPEND event or when the polling interval is set to zero. | ||
261 | * | ||
262 | * Note: Though this function is the same as devfreq_monitor_stop(), | ||
263 | * it is intentionally kept separate to provide hooks for collecting | ||
264 | * transition statistics. | ||
265 | */ | ||
266 | void devfreq_monitor_suspend(struct devfreq *devfreq) | ||
267 | { | ||
268 | mutex_lock(&devfreq->lock); | ||
269 | if (devfreq->stop_polling) { | ||
270 | mutex_unlock(&devfreq->lock); | ||
271 | return; | ||
272 | } | ||
273 | |||
274 | devfreq->stop_polling = true; | ||
275 | mutex_unlock(&devfreq->lock); | ||
276 | cancel_delayed_work_sync(&devfreq->work); | ||
277 | } | ||
278 | EXPORT_SYMBOL(devfreq_monitor_suspend); | ||
279 | |||
280 | /** | ||
281 | * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance | ||
282 | * @devfreq: the devfreq instance. | ||
283 | * | ||
284 | * Helper function to resume devfreq device load monitoring. This | ||
285 | * function is to be called from the governor in response to the | ||
286 | * DEVFREQ_GOV_RESUME event or when the polling interval is set to non-zero. | ||
287 | */ | ||
288 | void devfreq_monitor_resume(struct devfreq *devfreq) | ||
289 | { | ||
290 | mutex_lock(&devfreq->lock); | ||
291 | if (!devfreq->stop_polling) | ||
292 | goto out; | ||
293 | |||
294 | if (!delayed_work_pending(&devfreq->work) && | ||
295 | devfreq->profile->polling_ms) | ||
296 | queue_delayed_work(devfreq_wq, &devfreq->work, | ||
297 | msecs_to_jiffies(devfreq->profile->polling_ms)); | ||
298 | devfreq->stop_polling = false; | ||
299 | |||
300 | out: | ||
301 | mutex_unlock(&devfreq->lock); | ||
302 | } | ||
303 | EXPORT_SYMBOL(devfreq_monitor_resume); | ||
304 | |||
305 | /** | ||
306 | * devfreq_interval_update() - Update device devfreq monitoring interval | ||
307 | * @devfreq: the devfreq instance. | ||
308 | * @delay: new polling interval to be set. | ||
309 | * | ||
310 | * Helper function to set a new load monitoring polling interval. This function | ||
311 | * is to be called from the governor in response to the DEVFREQ_GOV_INTERVAL event. | ||
312 | */ | ||
313 | void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay) | ||
314 | { | ||
315 | unsigned int cur_delay = devfreq->profile->polling_ms; | ||
316 | unsigned int new_delay = *delay; | ||
317 | |||
318 | mutex_lock(&devfreq->lock); | ||
319 | devfreq->profile->polling_ms = new_delay; | ||
320 | |||
321 | if (devfreq->stop_polling) | ||
322 | goto out; | ||
323 | |||
324 | /* if new delay is zero, stop polling */ | ||
325 | if (!new_delay) { | ||
326 | mutex_unlock(&devfreq->lock); | ||
327 | cancel_delayed_work_sync(&devfreq->work); | ||
328 | return; | ||
329 | } | ||
330 | |||
331 | /* if current delay is zero, start polling with new delay */ | ||
332 | if (!cur_delay) { | ||
333 | queue_delayed_work(devfreq_wq, &devfreq->work, | ||
334 | msecs_to_jiffies(devfreq->profile->polling_ms)); | ||
335 | goto out; | ||
336 | } | ||
337 | |||
338 | /* if current delay is greater than new delay, restart polling */ | ||
339 | if (cur_delay > new_delay) { | ||
340 | mutex_unlock(&devfreq->lock); | ||
341 | cancel_delayed_work_sync(&devfreq->work); | ||
342 | mutex_lock(&devfreq->lock); | ||
343 | if (!devfreq->stop_polling) | ||
344 | queue_delayed_work(devfreq_wq, &devfreq->work, | ||
345 | msecs_to_jiffies(devfreq->profile->polling_ms)); | ||
346 | } | ||
347 | out: | ||
348 | mutex_unlock(&devfreq->lock); | ||
349 | } | ||
350 | EXPORT_SYMBOL(devfreq_interval_update); | ||
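Taken together, these helpers let a governor's event_handler stay very small. A sketch of how a governor might dispatch the DEVFREQ_GOV_* events to them; the governor itself is hypothetical, and the events and helper declarations are assumed to come from the local governor.h.

    #include "governor.h"   /* DEVFREQ_GOV_* events and monitor helpers */

    static int my_governor_event_handler(struct devfreq *devfreq,
                                         unsigned int event, void *data)
    {
            switch (event) {
            case DEVFREQ_GOV_START:
                    devfreq_monitor_start(devfreq);
                    break;
            case DEVFREQ_GOV_STOP:
                    devfreq_monitor_stop(devfreq);
                    break;
            case DEVFREQ_GOV_INTERVAL:
                    devfreq_interval_update(devfreq, (unsigned int *)data);
                    break;
            case DEVFREQ_GOV_SUSPEND:
                    devfreq_monitor_suspend(devfreq);
                    break;
            case DEVFREQ_GOV_RESUME:
                    devfreq_monitor_resume(devfreq);
                    break;
            default:
                    break;
            }
            return 0;
    }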
122 | 351 | ||
123 | /** | 352 | /** |
124 | * devfreq_notifier_call() - Notify that the device frequency requirements | 353 | * devfreq_notifier_call() - Notify that the device frequency requirements |
125 | * has been changed out of devfreq framework. | 354 | * has been changed out of devfreq framework. |
126 | * @nb the notifier_block (supposed to be devfreq->nb) | 355 | * @nb: the notifier_block (supposed to be devfreq->nb) |
127 | * @type not used | 356 | * @type: not used |
128 | * @devp not used | 357 | * @devp: not used |
129 | * | 358 | * |
130 | * Called by a notifier that uses devfreq->nb. | 359 | * Called by a notifier that uses devfreq->nb. |
131 | */ | 360 | */ |
@@ -143,59 +372,34 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type, | |||
143 | } | 372 | } |
144 | 373 | ||
145 | /** | 374 | /** |
146 | * _remove_devfreq() - Remove devfreq from the device. | 375 | * _remove_devfreq() - Remove devfreq from the list and release its resources. |
147 | * @devfreq: the devfreq struct | 376 | * @devfreq: the devfreq struct |
148 | * @skip: skip calling device_unregister(). | 377 | * @skip: skip calling device_unregister(). |
149 | * | ||
150 | * Note that the caller should lock devfreq->lock before calling | ||
151 | * this. _remove_devfreq() will unlock it and free devfreq | ||
152 | * internally. devfreq_list_lock should be locked by the caller | ||
153 | * as well (not relased at return) | ||
154 | * | ||
155 | * Lock usage: | ||
156 | * devfreq->lock: locked before call. | ||
157 | * unlocked at return (and freed) | ||
158 | * devfreq_list_lock: locked before call. | ||
159 | * kept locked at return. | ||
160 | * if devfreq is centrally polled. | ||
161 | * | ||
162 | * Freed memory: | ||
163 | * devfreq | ||
164 | */ | 378 | */ |
165 | static void _remove_devfreq(struct devfreq *devfreq, bool skip) | 379 | static void _remove_devfreq(struct devfreq *devfreq, bool skip) |
166 | { | 380 | { |
167 | if (!mutex_is_locked(&devfreq->lock)) { | 381 | mutex_lock(&devfreq_list_lock); |
168 | WARN(true, "devfreq->lock must be locked by the caller.\n"); | 382 | if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) { |
169 | return; | 383 | mutex_unlock(&devfreq_list_lock); |
170 | } | 384 | dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n"); |
171 | if (!devfreq->governor->no_central_polling && | ||
172 | !mutex_is_locked(&devfreq_list_lock)) { | ||
173 | WARN(true, "devfreq_list_lock must be locked by the caller.\n"); | ||
174 | return; | 385 | return; |
175 | } | 386 | } |
387 | list_del(&devfreq->node); | ||
388 | mutex_unlock(&devfreq_list_lock); | ||
176 | 389 | ||
177 | if (devfreq->being_removed) | 390 | if (devfreq->governor) |
178 | return; | 391 | devfreq->governor->event_handler(devfreq, |
179 | 392 | DEVFREQ_GOV_STOP, NULL); | |
180 | devfreq->being_removed = true; | ||
181 | 393 | ||
182 | if (devfreq->profile->exit) | 394 | if (devfreq->profile->exit) |
183 | devfreq->profile->exit(devfreq->dev.parent); | 395 | devfreq->profile->exit(devfreq->dev.parent); |
184 | 396 | ||
185 | if (devfreq->governor->exit) | ||
186 | devfreq->governor->exit(devfreq); | ||
187 | |||
188 | if (!skip && get_device(&devfreq->dev)) { | 397 | if (!skip && get_device(&devfreq->dev)) { |
189 | device_unregister(&devfreq->dev); | 398 | device_unregister(&devfreq->dev); |
190 | put_device(&devfreq->dev); | 399 | put_device(&devfreq->dev); |
191 | } | 400 | } |
192 | 401 | ||
193 | if (!devfreq->governor->no_central_polling) | ||
194 | list_del(&devfreq->node); | ||
195 | |||
196 | mutex_unlock(&devfreq->lock); | ||
197 | mutex_destroy(&devfreq->lock); | 402 | mutex_destroy(&devfreq->lock); |
198 | |||
199 | kfree(devfreq); | 403 | kfree(devfreq); |
200 | } | 404 | } |
201 | 405 | ||
@@ -210,163 +414,39 @@ static void _remove_devfreq(struct devfreq *devfreq, bool skip) | |||
210 | static void devfreq_dev_release(struct device *dev) | 414 | static void devfreq_dev_release(struct device *dev) |
211 | { | 415 | { |
212 | struct devfreq *devfreq = to_devfreq(dev); | 416 | struct devfreq *devfreq = to_devfreq(dev); |
213 | bool central_polling = !devfreq->governor->no_central_polling; | ||
214 | |||
215 | /* | ||
216 | * If devfreq_dev_release() was called by device_unregister() of | ||
217 | * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and | ||
218 | * being_removed is already set. This also partially checks the case | ||
219 | * where devfreq_dev_release() is called from a thread other than | ||
220 | * the one called _remove_devfreq(); however, this case is | ||
221 | * dealt completely with another following being_removed check. | ||
222 | * | ||
223 | * Because being_removed is never being | ||
224 | * unset, we do not need to worry about race conditions on | ||
225 | * being_removed. | ||
226 | */ | ||
227 | if (devfreq->being_removed) | ||
228 | return; | ||
229 | |||
230 | if (central_polling) | ||
231 | mutex_lock(&devfreq_list_lock); | ||
232 | |||
233 | mutex_lock(&devfreq->lock); | ||
234 | 417 | ||
235 | /* | ||
236 | * Check being_removed flag again for the case where | ||
237 | * devfreq_dev_release() was called in a thread other than the one | ||
238 | * possibly called _remove_devfreq(). | ||
239 | */ | ||
240 | if (devfreq->being_removed) { | ||
241 | mutex_unlock(&devfreq->lock); | ||
242 | goto out; | ||
243 | } | ||
244 | |||
245 | /* devfreq->lock is unlocked and removed in _removed_devfreq() */ | ||
246 | _remove_devfreq(devfreq, true); | 418 | _remove_devfreq(devfreq, true); |
247 | |||
248 | out: | ||
249 | if (central_polling) | ||
250 | mutex_unlock(&devfreq_list_lock); | ||
251 | } | ||
252 | |||
253 | /** | ||
254 | * devfreq_monitor() - Periodically poll devfreq objects. | ||
255 | * @work: the work struct used to run devfreq_monitor periodically. | ||
256 | * | ||
257 | */ | ||
258 | static void devfreq_monitor(struct work_struct *work) | ||
259 | { | ||
260 | static unsigned long last_polled_at; | ||
261 | struct devfreq *devfreq, *tmp; | ||
262 | int error; | ||
263 | unsigned long jiffies_passed; | ||
264 | unsigned long next_jiffies = ULONG_MAX, now = jiffies; | ||
265 | struct device *dev; | ||
266 | |||
267 | /* Initially last_polled_at = 0, polling every device at bootup */ | ||
268 | jiffies_passed = now - last_polled_at; | ||
269 | last_polled_at = now; | ||
270 | if (jiffies_passed == 0) | ||
271 | jiffies_passed = 1; | ||
272 | |||
273 | mutex_lock(&devfreq_list_lock); | ||
274 | list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) { | ||
275 | mutex_lock(&devfreq->lock); | ||
276 | dev = devfreq->dev.parent; | ||
277 | |||
278 | /* Do not remove tmp for a while */ | ||
279 | wait_remove_device = tmp; | ||
280 | |||
281 | if (devfreq->governor->no_central_polling || | ||
282 | devfreq->next_polling == 0) { | ||
283 | mutex_unlock(&devfreq->lock); | ||
284 | continue; | ||
285 | } | ||
286 | mutex_unlock(&devfreq_list_lock); | ||
287 | |||
288 | /* | ||
289 | * Reduce more next_polling if devfreq_wq took an extra | ||
290 | * delay. (i.e., CPU has been idled.) | ||
291 | */ | ||
292 | if (devfreq->next_polling <= jiffies_passed) { | ||
293 | error = update_devfreq(devfreq); | ||
294 | |||
295 | /* Remove a devfreq with an error. */ | ||
296 | if (error && error != -EAGAIN) { | ||
297 | |||
298 | dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n", | ||
299 | error, devfreq->governor->name); | ||
300 | |||
301 | /* | ||
302 | * Unlock devfreq before locking the list | ||
303 | * in order to avoid deadlock with | ||
304 | * find_device_devfreq or others | ||
305 | */ | ||
306 | mutex_unlock(&devfreq->lock); | ||
307 | mutex_lock(&devfreq_list_lock); | ||
308 | /* Check if devfreq is already removed */ | ||
309 | if (IS_ERR(find_device_devfreq(dev))) | ||
310 | continue; | ||
311 | mutex_lock(&devfreq->lock); | ||
312 | /* This unlocks devfreq->lock and free it */ | ||
313 | _remove_devfreq(devfreq, false); | ||
314 | continue; | ||
315 | } | ||
316 | devfreq->next_polling = devfreq->polling_jiffies; | ||
317 | } else { | ||
318 | devfreq->next_polling -= jiffies_passed; | ||
319 | } | ||
320 | |||
321 | if (devfreq->next_polling) | ||
322 | next_jiffies = (next_jiffies > devfreq->next_polling) ? | ||
323 | devfreq->next_polling : next_jiffies; | ||
324 | |||
325 | mutex_unlock(&devfreq->lock); | ||
326 | mutex_lock(&devfreq_list_lock); | ||
327 | } | ||
328 | wait_remove_device = NULL; | ||
329 | mutex_unlock(&devfreq_list_lock); | ||
330 | |||
331 | if (next_jiffies > 0 && next_jiffies < ULONG_MAX) { | ||
332 | polling = true; | ||
333 | queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies); | ||
334 | } else { | ||
335 | polling = false; | ||
336 | } | ||
337 | } | 419 | } |
338 | 420 | ||
339 | /** | 421 | /** |
340 | * devfreq_add_device() - Add devfreq feature to the device | 422 | * devfreq_add_device() - Add devfreq feature to the device |
341 | * @dev: the device to add devfreq feature. | 423 | * @dev: the device to add devfreq feature. |
342 | * @profile: device-specific profile to run devfreq. | 424 | * @profile: device-specific profile to run devfreq. |
343 | * @governor: the policy to choose frequency. | 425 | * @governor_name: name of the policy to choose frequency. |
344 | * @data: private data for the governor. The devfreq framework does not | 426 | * @data: private data for the governor. The devfreq framework does not |
345 | * touch this value. | 427 | * touch this value. |
346 | */ | 428 | */ |
347 | struct devfreq *devfreq_add_device(struct device *dev, | 429 | struct devfreq *devfreq_add_device(struct device *dev, |
348 | struct devfreq_dev_profile *profile, | 430 | struct devfreq_dev_profile *profile, |
349 | const struct devfreq_governor *governor, | 431 | const char *governor_name, |
350 | void *data) | 432 | void *data) |
351 | { | 433 | { |
352 | struct devfreq *devfreq; | 434 | struct devfreq *devfreq; |
435 | struct devfreq_governor *governor; | ||
353 | int err = 0; | 436 | int err = 0; |
354 | 437 | ||
355 | if (!dev || !profile || !governor) { | 438 | if (!dev || !profile || !governor_name) { |
356 | dev_err(dev, "%s: Invalid parameters.\n", __func__); | 439 | dev_err(dev, "%s: Invalid parameters.\n", __func__); |
357 | return ERR_PTR(-EINVAL); | 440 | return ERR_PTR(-EINVAL); |
358 | } | 441 | } |
359 | 442 | ||
360 | 443 | mutex_lock(&devfreq_list_lock); | |
361 | if (!governor->no_central_polling) { | 444 | devfreq = find_device_devfreq(dev); |
362 | mutex_lock(&devfreq_list_lock); | 445 | mutex_unlock(&devfreq_list_lock); |
363 | devfreq = find_device_devfreq(dev); | 446 | if (!IS_ERR(devfreq)) { |
364 | mutex_unlock(&devfreq_list_lock); | 447 | dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__); |
365 | if (!IS_ERR(devfreq)) { | 448 | err = -EINVAL; |
366 | dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__); | 449 | goto err_out; |
367 | err = -EINVAL; | ||
368 | goto err_out; | ||
369 | } | ||
370 | } | 450 | } |
371 | 451 | ||
372 | devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL); | 452 | devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL); |
@@ -383,92 +463,316 @@ struct devfreq *devfreq_add_device(struct device *dev, | |||
383 | devfreq->dev.class = devfreq_class; | 463 | devfreq->dev.class = devfreq_class; |
384 | devfreq->dev.release = devfreq_dev_release; | 464 | devfreq->dev.release = devfreq_dev_release; |
385 | devfreq->profile = profile; | 465 | devfreq->profile = profile; |
386 | devfreq->governor = governor; | 466 | strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN); |
387 | devfreq->previous_freq = profile->initial_freq; | 467 | devfreq->previous_freq = profile->initial_freq; |
388 | devfreq->data = data; | 468 | devfreq->data = data; |
389 | devfreq->next_polling = devfreq->polling_jiffies | ||
390 | = msecs_to_jiffies(devfreq->profile->polling_ms); | ||
391 | devfreq->nb.notifier_call = devfreq_notifier_call; | 469 | devfreq->nb.notifier_call = devfreq_notifier_call; |
392 | 470 | ||
471 | devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) * | ||
472 | devfreq->profile->max_state * | ||
473 | devfreq->profile->max_state, | ||
474 | GFP_KERNEL); | ||
475 | devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) * | ||
476 | devfreq->profile->max_state, | ||
477 | GFP_KERNEL); | ||
478 | devfreq->last_stat_updated = jiffies; | ||
479 | |||
393 | dev_set_name(&devfreq->dev, dev_name(dev)); | 480 | dev_set_name(&devfreq->dev, dev_name(dev)); |
394 | err = device_register(&devfreq->dev); | 481 | err = device_register(&devfreq->dev); |
395 | if (err) { | 482 | if (err) { |
396 | put_device(&devfreq->dev); | 483 | put_device(&devfreq->dev); |
484 | mutex_unlock(&devfreq->lock); | ||
397 | goto err_dev; | 485 | goto err_dev; |
398 | } | 486 | } |
399 | 487 | ||
400 | if (governor->init) | ||
401 | err = governor->init(devfreq); | ||
402 | if (err) | ||
403 | goto err_init; | ||
404 | |||
405 | mutex_unlock(&devfreq->lock); | 488 | mutex_unlock(&devfreq->lock); |
406 | 489 | ||
407 | if (governor->no_central_polling) | ||
408 | goto out; | ||
409 | |||
410 | mutex_lock(&devfreq_list_lock); | 490 | mutex_lock(&devfreq_list_lock); |
411 | |||
412 | list_add(&devfreq->node, &devfreq_list); | 491 | list_add(&devfreq->node, &devfreq_list); |
413 | 492 | ||
414 | if (devfreq_wq && devfreq->next_polling && !polling) { | 493 | governor = find_devfreq_governor(devfreq->governor_name); |
415 | polling = true; | 494 | if (!IS_ERR(governor)) |
416 | queue_delayed_work(devfreq_wq, &devfreq_work, | 495 | devfreq->governor = governor; |
417 | devfreq->next_polling); | 496 | if (devfreq->governor) |
418 | } | 497 | err = devfreq->governor->event_handler(devfreq, |
498 | DEVFREQ_GOV_START, NULL); | ||
419 | mutex_unlock(&devfreq_list_lock); | 499 | mutex_unlock(&devfreq_list_lock); |
420 | out: | 500 | if (err) { |
501 | dev_err(dev, "%s: Unable to start governor for the device\n", | ||
502 | __func__); | ||
503 | goto err_init; | ||
504 | } | ||
505 | |||
421 | return devfreq; | 506 | return devfreq; |
422 | 507 | ||
423 | err_init: | 508 | err_init: |
509 | list_del(&devfreq->node); | ||
424 | device_unregister(&devfreq->dev); | 510 | device_unregister(&devfreq->dev); |
425 | err_dev: | 511 | err_dev: |
426 | mutex_unlock(&devfreq->lock); | ||
427 | kfree(devfreq); | 512 | kfree(devfreq); |
428 | err_out: | 513 | err_out: |
429 | return ERR_PTR(err); | 514 | return ERR_PTR(err); |
430 | } | 515 | } |
516 | EXPORT_SYMBOL(devfreq_add_device); | ||
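On the driver side, the governor is now requested by name rather than by passing a struct devfreq_governor pointer. A rough sketch of a probe path follows; the profile values are made up, my_target() is assumed to be the driver's frequency-setting callback defined elsewhere, and the freq_table element type is an assumption about what this kernel version expects.

    static unsigned int my_freq_table[] = { 100000000, 200000000, 400000000 };

    static struct devfreq_dev_profile my_profile = {
            .initial_freq = 400000000,
            .polling_ms   = 100,
            .target       = my_target,            /* driver callback, elsewhere */
            .freq_table   = my_freq_table,        /* enables trans_stat */
            .max_state    = ARRAY_SIZE(my_freq_table),
    };

    /* In the driver's probe(): */
    devfreq = devfreq_add_device(&pdev->dev, &my_profile, "simple_ondemand", NULL);
    if (IS_ERR(devfreq))
            return PTR_ERR(devfreq);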
431 | 517 | ||
432 | /** | 518 | /** |
433 | * devfreq_remove_device() - Remove devfreq feature from a device. | 519 | * devfreq_remove_device() - Remove devfreq feature from a device. |
434 | * @devfreq the devfreq instance to be removed | 520 | * @devfreq: the devfreq instance to be removed |
435 | */ | 521 | */ |
436 | int devfreq_remove_device(struct devfreq *devfreq) | 522 | int devfreq_remove_device(struct devfreq *devfreq) |
437 | { | 523 | { |
438 | bool central_polling; | 524 | if (!devfreq) |
525 | return -EINVAL; | ||
526 | |||
527 | _remove_devfreq(devfreq, false); | ||
439 | 528 | ||
529 | return 0; | ||
530 | } | ||
531 | EXPORT_SYMBOL(devfreq_remove_device); | ||
532 | |||
533 | /** | ||
534 | * devfreq_suspend_device() - Suspend devfreq of a device. | ||
535 | * @devfreq: the devfreq instance to be suspended | ||
536 | */ | ||
537 | int devfreq_suspend_device(struct devfreq *devfreq) | ||
538 | { | ||
440 | if (!devfreq) | 539 | if (!devfreq) |
441 | return -EINVAL; | 540 | return -EINVAL; |
442 | 541 | ||
443 | central_polling = !devfreq->governor->no_central_polling; | 542 | if (!devfreq->governor) |
543 | return 0; | ||
544 | |||
545 | return devfreq->governor->event_handler(devfreq, | ||
546 | DEVFREQ_GOV_SUSPEND, NULL); | ||
547 | } | ||
548 | EXPORT_SYMBOL(devfreq_suspend_device); | ||
549 | |||
550 | /** | ||
551 | * devfreq_resume_device() - Resume devfreq of a device. | ||
552 | * @devfreq: the devfreq instance to be resumed | ||
553 | */ | ||
554 | int devfreq_resume_device(struct devfreq *devfreq) | ||
555 | { | ||
556 | if (!devfreq) | ||
557 | return -EINVAL; | ||
558 | |||
559 | if (!devfreq->governor) | ||
560 | return 0; | ||
561 | |||
562 | return devfreq->governor->event_handler(devfreq, | ||
563 | DEVFREQ_GOV_RESUME, NULL); | ||
564 | } | ||
565 | EXPORT_SYMBOL(devfreq_resume_device); | ||
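A device driver would typically call these from its own system suspend/resume path. A minimal sketch, assuming the driver keeps its devfreq pointer in a private my_drvdata structure:

    static int my_device_suspend(struct device *dev)
    {
            struct my_drvdata *priv = dev_get_drvdata(dev);  /* hypothetical */

            /* Park devfreq load monitoring while the device is inactive. */
            return devfreq_suspend_device(priv->devfreq);
    }

    static int my_device_resume(struct device *dev)
    {
            struct my_drvdata *priv = dev_get_drvdata(dev);  /* hypothetical */

            /* Restart load monitoring once the device runs again. */
            return devfreq_resume_device(priv->devfreq);
    }

    static SIMPLE_DEV_PM_OPS(my_device_pm_ops, my_device_suspend, my_device_resume);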
566 | |||
567 | /** | ||
568 | * devfreq_add_governor() - Add devfreq governor | ||
569 | * @governor: the devfreq governor to be added | ||
570 | */ | ||
571 | int devfreq_add_governor(struct devfreq_governor *governor) | ||
572 | { | ||
573 | struct devfreq_governor *g; | ||
574 | struct devfreq *devfreq; | ||
575 | int err = 0; | ||
576 | |||
577 | if (!governor) { | ||
578 | pr_err("%s: Invalid parameters.\n", __func__); | ||
579 | return -EINVAL; | ||
580 | } | ||
581 | |||
582 | mutex_lock(&devfreq_list_lock); | ||
583 | g = find_devfreq_governor(governor->name); | ||
584 | if (!IS_ERR(g)) { | ||
585 | pr_err("%s: governor %s already registered\n", __func__, | ||
586 | g->name); | ||
587 | err = -EINVAL; | ||
588 | goto err_out; | ||
589 | } | ||
444 | 590 | ||
445 | if (central_polling) { | 591 | list_add(&governor->node, &devfreq_governor_list); |
446 | mutex_lock(&devfreq_list_lock); | 592 | |
447 | while (wait_remove_device == devfreq) { | 593 | list_for_each_entry(devfreq, &devfreq_list, node) { |
448 | mutex_unlock(&devfreq_list_lock); | 594 | int ret = 0; |
449 | schedule(); | 595 | struct device *dev = devfreq->dev.parent; |
450 | mutex_lock(&devfreq_list_lock); | 596 | |
597 | if (!strncmp(devfreq->governor_name, governor->name, | ||
598 | DEVFREQ_NAME_LEN)) { | ||
599 | /* The following should never occur */ | ||
600 | if (devfreq->governor) { | ||
601 | dev_warn(dev, | ||
602 | "%s: Governor %s already present\n", | ||
603 | __func__, devfreq->governor->name); | ||
604 | ret = devfreq->governor->event_handler(devfreq, | ||
605 | DEVFREQ_GOV_STOP, NULL); | ||
606 | if (ret) { | ||
607 | dev_warn(dev, | ||
608 | "%s: Governor %s stop = %d\n", | ||
609 | __func__, | ||
610 | devfreq->governor->name, ret); | ||
611 | } | ||
612 | /* Fall through */ | ||
613 | } | ||
614 | devfreq->governor = governor; | ||
615 | ret = devfreq->governor->event_handler(devfreq, | ||
616 | DEVFREQ_GOV_START, NULL); | ||
617 | if (ret) { | ||
618 | dev_warn(dev, "%s: Governor %s start=%d\n", | ||
619 | __func__, devfreq->governor->name, | ||
620 | ret); | ||
621 | } | ||
451 | } | 622 | } |
452 | } | 623 | } |
453 | 624 | ||
454 | mutex_lock(&devfreq->lock); | 625 | err_out: |
455 | _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */ | 626 | mutex_unlock(&devfreq_list_lock); |
456 | 627 | ||
457 | if (central_polling) | 628 | return err; |
458 | mutex_unlock(&devfreq_list_lock); | 629 | } |
630 | EXPORT_SYMBOL(devfreq_add_governor); | ||
459 | 631 | ||
460 | return 0; | 632 | /** |
633 | * devfreq_remove_device() - Remove devfreq feature from a device. | ||
634 | * @governor: the devfreq governor to be removed | ||
635 | */ | ||
636 | int devfreq_remove_governor(struct devfreq_governor *governor) | ||
637 | { | ||
638 | struct devfreq_governor *g; | ||
639 | struct devfreq *devfreq; | ||
640 | int err = 0; | ||
641 | |||
642 | if (!governor) { | ||
643 | pr_err("%s: Invalid parameters.\n", __func__); | ||
644 | return -EINVAL; | ||
645 | } | ||
646 | |||
647 | mutex_lock(&devfreq_list_lock); | ||
648 | g = find_devfreq_governor(governor->name); | ||
649 | if (IS_ERR(g)) { | ||
650 | pr_err("%s: governor %s not registered\n", __func__, | ||
651 | governor->name); | ||
652 | err = PTR_ERR(g); | ||
653 | goto err_out; | ||
654 | } | ||
655 | list_for_each_entry(devfreq, &devfreq_list, node) { | ||
656 | int ret; | ||
657 | struct device *dev = devfreq->dev.parent; | ||
658 | |||
659 | if (!strncmp(devfreq->governor_name, governor->name, | ||
660 | DEVFREQ_NAME_LEN)) { | ||
661 | /* we should have a devfreq governor! */ | ||
662 | if (!devfreq->governor) { | ||
663 | dev_warn(dev, "%s: Governor %s NOT present\n", | ||
664 | __func__, governor->name); | ||
665 | continue; | ||
666 | /* Fall through */ | ||
667 | } | ||
668 | ret = devfreq->governor->event_handler(devfreq, | ||
669 | DEVFREQ_GOV_STOP, NULL); | ||
670 | if (ret) { | ||
671 | dev_warn(dev, "%s: Governor %s stop=%d\n", | ||
672 | __func__, devfreq->governor->name, | ||
673 | ret); | ||
674 | } | ||
675 | devfreq->governor = NULL; | ||
676 | } | ||
677 | } | ||
678 | |||
679 | list_del(&governor->node); | ||
680 | err_out: | ||
681 | mutex_unlock(&devfreq_list_lock); | ||
682 | |||
683 | return err; | ||
461 | } | 684 | } |
685 | EXPORT_SYMBOL(devfreq_remove_governor); | ||
462 | 686 | ||
463 | static ssize_t show_governor(struct device *dev, | 687 | static ssize_t show_governor(struct device *dev, |
464 | struct device_attribute *attr, char *buf) | 688 | struct device_attribute *attr, char *buf) |
465 | { | 689 | { |
690 | if (!to_devfreq(dev)->governor) | ||
691 | return -EINVAL; | ||
692 | |||
466 | return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name); | 693 | return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name); |
467 | } | 694 | } |
468 | 695 | ||
696 | static ssize_t store_governor(struct device *dev, struct device_attribute *attr, | ||
697 | const char *buf, size_t count) | ||
698 | { | ||
699 | struct devfreq *df = to_devfreq(dev); | ||
700 | int ret; | ||
701 | char str_governor[DEVFREQ_NAME_LEN + 1]; | ||
702 | struct devfreq_governor *governor; | ||
703 | |||
704 | ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor); | ||
705 | if (ret != 1) | ||
706 | return -EINVAL; | ||
707 | |||
708 | mutex_lock(&devfreq_list_lock); | ||
709 | governor = find_devfreq_governor(str_governor); | ||
710 | if (IS_ERR(governor)) { | ||
711 | ret = PTR_ERR(governor); | ||
712 | goto out; | ||
713 | } | ||
714 | if (df->governor == governor) | ||
715 | goto out; | ||
716 | |||
717 | if (df->governor) { | ||
718 | ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL); | ||
719 | if (ret) { | ||
720 | dev_warn(dev, "%s: Governor %s not stopped(%d)\n", | ||
721 | __func__, df->governor->name, ret); | ||
722 | goto out; | ||
723 | } | ||
724 | } | ||
725 | df->governor = governor; | ||
726 | strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN); | ||
727 | ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL); | ||
728 | if (ret) | ||
729 | dev_warn(dev, "%s: Governor %s not started(%d)\n", | ||
730 | __func__, df->governor->name, ret); | ||
731 | out: | ||
732 | mutex_unlock(&devfreq_list_lock); | ||
733 | |||
734 | if (!ret) | ||
735 | ret = count; | ||
736 | return ret; | ||
737 | } | ||
738 | static ssize_t show_available_governors(struct device *d, | ||
739 | struct device_attribute *attr, | ||
740 | char *buf) | ||
741 | { | ||
742 | struct devfreq_governor *tmp_governor; | ||
743 | ssize_t count = 0; | ||
744 | |||
745 | mutex_lock(&devfreq_list_lock); | ||
746 | list_for_each_entry(tmp_governor, &devfreq_governor_list, node) | ||
747 | count += scnprintf(&buf[count], (PAGE_SIZE - count - 2), | ||
748 | "%s ", tmp_governor->name); | ||
749 | mutex_unlock(&devfreq_list_lock); | ||
750 | |||
751 | /* Truncate the trailing space */ | ||
752 | if (count) | ||
753 | count--; | ||
754 | |||
755 | count += sprintf(&buf[count], "\n"); | ||
756 | |||
757 | return count; | ||
758 | } | ||
759 | |||
469 | static ssize_t show_freq(struct device *dev, | 760 | static ssize_t show_freq(struct device *dev, |
470 | struct device_attribute *attr, char *buf) | 761 | struct device_attribute *attr, char *buf) |
471 | { | 762 | { |
763 | unsigned long freq; | ||
764 | struct devfreq *devfreq = to_devfreq(dev); | ||
765 | |||
766 | if (devfreq->profile->get_cur_freq && | ||
767 | !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq)) | ||
768 | return sprintf(buf, "%lu\n", freq); | ||
769 | |||
770 | return sprintf(buf, "%lu\n", devfreq->previous_freq); | ||
771 | } | ||
772 | |||
773 | static ssize_t show_target_freq(struct device *dev, | ||
774 | struct device_attribute *attr, char *buf) | ||
775 | { | ||
472 | return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq); | 776 | return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq); |
473 | } | 777 | } |
474 | 778 | ||
@@ -486,39 +790,19 @@ static ssize_t store_polling_interval(struct device *dev, | |||
486 | unsigned int value; | 790 | unsigned int value; |
487 | int ret; | 791 | int ret; |
488 | 792 | ||
793 | if (!df->governor) | ||
794 | return -EINVAL; | ||
795 | |||
489 | ret = sscanf(buf, "%u", &value); | 796 | ret = sscanf(buf, "%u", &value); |
490 | if (ret != 1) | 797 | if (ret != 1) |
491 | goto out; | 798 | return -EINVAL; |
492 | |||
493 | mutex_lock(&df->lock); | ||
494 | df->profile->polling_ms = value; | ||
495 | df->next_polling = df->polling_jiffies | ||
496 | = msecs_to_jiffies(value); | ||
497 | mutex_unlock(&df->lock); | ||
498 | 799 | ||
800 | df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value); | ||
499 | ret = count; | 801 | ret = count; |
500 | 802 | ||
501 | if (df->governor->no_central_polling) | ||
502 | goto out; | ||
503 | |||
504 | mutex_lock(&devfreq_list_lock); | ||
505 | if (df->next_polling > 0 && !polling) { | ||
506 | polling = true; | ||
507 | queue_delayed_work(devfreq_wq, &devfreq_work, | ||
508 | df->next_polling); | ||
509 | } | ||
510 | mutex_unlock(&devfreq_list_lock); | ||
511 | out: | ||
512 | return ret; | 803 | return ret; |
513 | } | 804 | } |
514 | 805 | ||
515 | static ssize_t show_central_polling(struct device *dev, | ||
516 | struct device_attribute *attr, char *buf) | ||
517 | { | ||
518 | return sprintf(buf, "%d\n", | ||
519 | !to_devfreq(dev)->governor->no_central_polling); | ||
520 | } | ||
521 | |||
522 | static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr, | 806 | static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr, |
523 | const char *buf, size_t count) | 807 | const char *buf, size_t count) |
524 | { | 808 | { |
@@ -529,7 +813,7 @@ static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr, | |||
529 | 813 | ||
530 | ret = sscanf(buf, "%lu", &value); | 814 | ret = sscanf(buf, "%lu", &value); |
531 | if (ret != 1) | 815 | if (ret != 1) |
532 | goto out; | 816 | return -EINVAL; |
533 | 817 | ||
534 | mutex_lock(&df->lock); | 818 | mutex_lock(&df->lock); |
535 | max = df->max_freq; | 819 | max = df->max_freq; |
@@ -543,7 +827,6 @@ static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr, | |||
543 | ret = count; | 827 | ret = count; |
544 | unlock: | 828 | unlock: |
545 | mutex_unlock(&df->lock); | 829 | mutex_unlock(&df->lock); |
546 | out: | ||
547 | return ret; | 830 | return ret; |
548 | } | 831 | } |
549 | 832 | ||
@@ -563,7 +846,7 @@ static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr, | |||
563 | 846 | ||
564 | ret = sscanf(buf, "%lu", &value); | 847 | ret = sscanf(buf, "%lu", &value); |
565 | if (ret != 1) | 848 | if (ret != 1) |
566 | goto out; | 849 | return -EINVAL; |
567 | 850 | ||
568 | mutex_lock(&df->lock); | 851 | mutex_lock(&df->lock); |
569 | min = df->min_freq; | 852 | min = df->min_freq; |
@@ -577,7 +860,6 @@ static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr, | |||
577 | ret = count; | 860 | ret = count; |
578 | unlock: | 861 | unlock: |
579 | mutex_unlock(&df->lock); | 862 | mutex_unlock(&df->lock); |
580 | out: | ||
581 | return ret; | 863 | return ret; |
582 | } | 864 | } |
583 | 865 | ||
@@ -587,34 +869,92 @@ static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr, | |||
587 | return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq); | 869 | return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq); |
588 | } | 870 | } |
589 | 871 | ||
872 | static ssize_t show_available_freqs(struct device *d, | ||
873 | struct device_attribute *attr, | ||
874 | char *buf) | ||
875 | { | ||
876 | struct devfreq *df = to_devfreq(d); | ||
877 | struct device *dev = df->dev.parent; | ||
878 | struct opp *opp; | ||
879 | ssize_t count = 0; | ||
880 | unsigned long freq = 0; | ||
881 | |||
882 | rcu_read_lock(); | ||
883 | do { | ||
884 | opp = opp_find_freq_ceil(dev, &freq); | ||
885 | if (IS_ERR(opp)) | ||
886 | break; | ||
887 | |||
888 | count += scnprintf(&buf[count], (PAGE_SIZE - count - 2), | ||
889 | "%lu ", freq); | ||
890 | freq++; | ||
891 | } while (1); | ||
892 | rcu_read_unlock(); | ||
893 | |||
894 | /* Truncate the trailing space */ | ||
895 | if (count) | ||
896 | count--; | ||
897 | |||
898 | count += sprintf(&buf[count], "\n"); | ||
899 | |||
900 | return count; | ||
901 | } | ||
902 | |||
903 | static ssize_t show_trans_table(struct device *dev, struct device_attribute *attr, | ||
904 | char *buf) | ||
905 | { | ||
906 | struct devfreq *devfreq = to_devfreq(dev); | ||
907 | ssize_t len; | ||
908 | int i, j, err; | ||
909 | unsigned int max_state = devfreq->profile->max_state; | ||
910 | |||
911 | err = devfreq_update_status(devfreq, devfreq->previous_freq); | ||
912 | if (err) | ||
913 | return 0; | ||
914 | |||
915 | len = sprintf(buf, " From : To\n"); | ||
916 | len += sprintf(buf + len, " :"); | ||
917 | for (i = 0; i < max_state; i++) | ||
918 | len += sprintf(buf + len, "%8u", | ||
919 | devfreq->profile->freq_table[i]); | ||
920 | |||
921 | len += sprintf(buf + len, " time(ms)\n"); | ||
922 | |||
923 | for (i = 0; i < max_state; i++) { | ||
924 | if (devfreq->profile->freq_table[i] | ||
925 | == devfreq->previous_freq) { | ||
926 | len += sprintf(buf + len, "*"); | ||
927 | } else { | ||
928 | len += sprintf(buf + len, " "); | ||
929 | } | ||
930 | len += sprintf(buf + len, "%8u:", | ||
931 | devfreq->profile->freq_table[i]); | ||
932 | for (j = 0; j < max_state; j++) | ||
933 | len += sprintf(buf + len, "%8u", | ||
934 | devfreq->trans_table[(i * max_state) + j]); | ||
935 | len += sprintf(buf + len, "%10u\n", | ||
936 | jiffies_to_msecs(devfreq->time_in_state[i])); | ||
937 | } | ||
938 | |||
939 | len += sprintf(buf + len, "Total transition : %u\n", | ||
940 | devfreq->total_trans); | ||
941 | return len; | ||
942 | } | ||
943 | |||
590 | static struct device_attribute devfreq_attrs[] = { | 944 | static struct device_attribute devfreq_attrs[] = { |
591 | __ATTR(governor, S_IRUGO, show_governor, NULL), | 945 | __ATTR(governor, S_IRUGO | S_IWUSR, show_governor, store_governor), |
946 | __ATTR(available_governors, S_IRUGO, show_available_governors, NULL), | ||
592 | __ATTR(cur_freq, S_IRUGO, show_freq, NULL), | 947 | __ATTR(cur_freq, S_IRUGO, show_freq, NULL), |
593 | __ATTR(central_polling, S_IRUGO, show_central_polling, NULL), | 948 | __ATTR(available_frequencies, S_IRUGO, show_available_freqs, NULL), |
949 | __ATTR(target_freq, S_IRUGO, show_target_freq, NULL), | ||
594 | __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval, | 950 | __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval, |
595 | store_polling_interval), | 951 | store_polling_interval), |
596 | __ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq), | 952 | __ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq), |
597 | __ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq), | 953 | __ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq), |
954 | __ATTR(trans_stat, S_IRUGO, show_trans_table, NULL), | ||
598 | { }, | 955 | { }, |
599 | }; | 956 | }; |
600 | 957 | ||
601 | /** | ||
602 | * devfreq_start_polling() - Initialize data structure for devfreq framework and | ||
603 | * start polling registered devfreq devices. | ||
604 | */ | ||
605 | static int __init devfreq_start_polling(void) | ||
606 | { | ||
607 | mutex_lock(&devfreq_list_lock); | ||
608 | polling = false; | ||
609 | devfreq_wq = create_freezable_workqueue("devfreq_wq"); | ||
610 | INIT_DEFERRABLE_WORK(&devfreq_work, devfreq_monitor); | ||
611 | mutex_unlock(&devfreq_list_lock); | ||
612 | |||
613 | devfreq_monitor(&devfreq_work.work); | ||
614 | return 0; | ||
615 | } | ||
616 | late_initcall(devfreq_start_polling); | ||
617 | |||
618 | static int __init devfreq_init(void) | 958 | static int __init devfreq_init(void) |
619 | { | 959 | { |
620 | devfreq_class = class_create(THIS_MODULE, "devfreq"); | 960 | devfreq_class = class_create(THIS_MODULE, "devfreq"); |
@@ -622,7 +962,15 @@ static int __init devfreq_init(void) | |||
622 | pr_err("%s: couldn't create class\n", __FILE__); | 962 | pr_err("%s: couldn't create class\n", __FILE__); |
623 | return PTR_ERR(devfreq_class); | 963 | return PTR_ERR(devfreq_class); |
624 | } | 964 | } |
965 | |||
966 | devfreq_wq = create_freezable_workqueue("devfreq_wq"); | ||
967 | if (IS_ERR(devfreq_wq)) { | ||
968 | class_destroy(devfreq_class); | ||
969 | pr_err("%s: couldn't create workqueue\n", __FILE__); | ||
970 | return PTR_ERR(devfreq_wq); | ||
971 | } | ||
625 | devfreq_class->dev_attrs = devfreq_attrs; | 972 | devfreq_class->dev_attrs = devfreq_attrs; |
973 | |||
626 | return 0; | 974 | return 0; |
627 | } | 975 | } |
628 | subsys_initcall(devfreq_init); | 976 | subsys_initcall(devfreq_init); |
@@ -630,6 +978,7 @@ subsys_initcall(devfreq_init); | |||
630 | static void __exit devfreq_exit(void) | 978 | static void __exit devfreq_exit(void) |
631 | { | 979 | { |
632 | class_destroy(devfreq_class); | 980 | class_destroy(devfreq_class); |
981 | destroy_workqueue(devfreq_wq); | ||
633 | } | 982 | } |
634 | module_exit(devfreq_exit); | 983 | module_exit(devfreq_exit); |
635 | 984 | ||
@@ -641,9 +990,9 @@ module_exit(devfreq_exit); | |||
641 | /** | 990 | /** |
642 | * devfreq_recommended_opp() - Helper function to get proper OPP for the | 991 | * devfreq_recommended_opp() - Helper function to get proper OPP for the |
643 | * freq value given to target callback. | 992 | * freq value given to target callback. |
644 | * @dev The devfreq user device. (parent of devfreq) | 993 | * @dev: The devfreq user device. (parent of devfreq) |
645 | * @freq The frequency given to target function | 994 | * @freq: The frequency given to target function |
646 | * @flags Flags handed from devfreq framework. | 995 | * @flags: Flags handed from devfreq framework. |
647 | * | 996 | * |
648 | */ | 997 | */ |
649 | struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, | 998 | struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, |
@@ -656,14 +1005,14 @@ struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, | |||
656 | opp = opp_find_freq_floor(dev, freq); | 1005 | opp = opp_find_freq_floor(dev, freq); |
657 | 1006 | ||
658 | /* If not available, use the closest opp */ | 1007 | /* If not available, use the closest opp */ |
659 | if (opp == ERR_PTR(-ENODEV)) | 1008 | if (opp == ERR_PTR(-ERANGE)) |
660 | opp = opp_find_freq_ceil(dev, freq); | 1009 | opp = opp_find_freq_ceil(dev, freq); |
661 | } else { | 1010 | } else { |
662 | /* The freq is a lower bound. opp should be higher */ | 1011 | /* The freq is a lower bound. opp should be higher */ |
663 | opp = opp_find_freq_ceil(dev, freq); | 1012 | opp = opp_find_freq_ceil(dev, freq); |
664 | 1013 | ||
665 | /* If not available, use the closest opp */ | 1014 | /* If not available, use the closest opp */ |
666 | if (opp == ERR_PTR(-ENODEV)) | 1015 | if (opp == ERR_PTR(-ERANGE)) |
667 | opp = opp_find_freq_floor(dev, freq); | 1016 | opp = opp_find_freq_floor(dev, freq); |
668 | } | 1017 | } |
669 | 1018 | ||
@@ -674,35 +1023,49 @@ struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, | |||
674 | * devfreq_register_opp_notifier() - Helper function to get devfreq notified | 1023 | * devfreq_register_opp_notifier() - Helper function to get devfreq notified |
675 | * for any changes in the OPP | 1024 | for any changes in the OPP |
676 | * availability | 1025 | availability |
677 | * @dev The devfreq user device. (parent of devfreq) | 1026 | * @dev: The devfreq user device. (parent of devfreq) |
678 | * @devfreq The devfreq object. | 1027 | * @devfreq: The devfreq object. |
679 | */ | 1028 | */ |
680 | int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq) | 1029 | int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq) |
681 | { | 1030 | { |
682 | struct srcu_notifier_head *nh = opp_get_notifier(dev); | 1031 | struct srcu_notifier_head *nh; |
1032 | int ret = 0; | ||
683 | 1033 | ||
1034 | rcu_read_lock(); | ||
1035 | nh = opp_get_notifier(dev); | ||
684 | if (IS_ERR(nh)) | 1036 | if (IS_ERR(nh)) |
685 | return PTR_ERR(nh); | 1037 | ret = PTR_ERR(nh); |
686 | return srcu_notifier_chain_register(nh, &devfreq->nb); | 1038 | rcu_read_unlock(); |
1039 | if (!ret) | ||
1040 | ret = srcu_notifier_chain_register(nh, &devfreq->nb); | ||
1041 | |||
1042 | return ret; | ||
687 | } | 1043 | } |
688 | 1044 | ||
689 | /** | 1045 | /** |
690 | * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq | 1046 | * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq |
691 | * notified for any changes in the OPP | 1047 | * notified for any changes in the OPP |
692 | * availability. | 1048 | availability. |
693 | * @dev The devfreq user device. (parent of devfreq) | 1049 | * @dev: The devfreq user device. (parent of devfreq) |
694 | * @devfreq The devfreq object. | 1050 | * @devfreq: The devfreq object. |
695 | * | 1051 | * |
696 | * This must be called from the exit() callback of devfreq_dev_profile if | 1052 | * This must be called from the exit() callback of devfreq_dev_profile if |
697 | * devfreq_recommended_opp is used. | 1053 | * devfreq_recommended_opp is used. |
698 | */ | 1054 | */ |
699 | int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq) | 1055 | int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq) |
700 | { | 1056 | { |
701 | struct srcu_notifier_head *nh = opp_get_notifier(dev); | 1057 | struct srcu_notifier_head *nh; |
1058 | int ret = 0; | ||
702 | 1059 | ||
1060 | rcu_read_lock(); | ||
1061 | nh = opp_get_notifier(dev); | ||
703 | if (IS_ERR(nh)) | 1062 | if (IS_ERR(nh)) |
704 | return PTR_ERR(nh); | 1063 | ret = PTR_ERR(nh); |
705 | return srcu_notifier_chain_unregister(nh, &devfreq->nb); | 1064 | rcu_read_unlock(); |
1065 | if (!ret) | ||
1066 | ret = srcu_notifier_chain_unregister(nh, &devfreq->nb); | ||
1067 | |||
1068 | return ret; | ||
706 | } | 1069 | } |
707 | 1070 | ||
708 | MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); | 1071 | MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); |
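A minimal driver-side sketch of the reworked devfreq API above (not from the patch; the my_busfreq names, values, and callbacks are hypothetical): governors are now requested by name, and the new devfreq_suspend_device()/devfreq_resume_device() helpers map onto the DEVFREQ_GOV_SUSPEND/DEVFREQ_GOV_RESUME events.

#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int my_busfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
	/* pick a supported OPP for *freq and program the hardware here */
	return 0;
}

static struct devfreq_dev_profile my_busfreq_profile = {
	.initial_freq	= 400000,	/* made-up value */
	.polling_ms	= 50,
	.target		= my_busfreq_target,
};

static int my_busfreq_probe(struct platform_device *pdev)
{
	struct devfreq *df;

	/* governor selected by name instead of by exported symbol */
	df = devfreq_add_device(&pdev->dev, &my_busfreq_profile,
				"simple_ondemand", NULL);
	if (IS_ERR(df))
		return PTR_ERR(df);

	platform_set_drvdata(pdev, df);
	return 0;
}

static int my_busfreq_suspend(struct device *dev)
{
	/* stops load monitoring via DEVFREQ_GOV_SUSPEND */
	return devfreq_suspend_device(dev_get_drvdata(dev));
}

static int my_busfreq_resume(struct device *dev)
{
	/* restarts load monitoring via DEVFREQ_GOV_RESUME */
	return devfreq_resume_device(dev_get_drvdata(dev));
}

static SIMPLE_DEV_PM_OPS(my_busfreq_pm, my_busfreq_suspend, my_busfreq_resume);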
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c index 88ddc77a9bb1..741837208716 100644 --- a/drivers/devfreq/exynos4_bus.c +++ b/drivers/devfreq/exynos4_bus.c | |||
@@ -987,7 +987,7 @@ static __devinit int exynos4_busfreq_probe(struct platform_device *pdev) | |||
987 | struct device *dev = &pdev->dev; | 987 | struct device *dev = &pdev->dev; |
988 | int err = 0; | 988 | int err = 0; |
989 | 989 | ||
990 | data = kzalloc(sizeof(struct busfreq_data), GFP_KERNEL); | 990 | data = devm_kzalloc(&pdev->dev, sizeof(struct busfreq_data), GFP_KERNEL); |
991 | if (data == NULL) { | 991 | if (data == NULL) { |
992 | dev_err(dev, "Cannot allocate memory.\n"); | 992 | dev_err(dev, "Cannot allocate memory.\n"); |
993 | return -ENOMEM; | 993 | return -ENOMEM; |
@@ -1012,31 +1012,26 @@ static __devinit int exynos4_busfreq_probe(struct platform_device *pdev) | |||
1012 | err = -EINVAL; | 1012 | err = -EINVAL; |
1013 | } | 1013 | } |
1014 | if (err) | 1014 | if (err) |
1015 | goto err_regulator; | 1015 | return err; |
1016 | 1016 | ||
1017 | data->vdd_int = regulator_get(dev, "vdd_int"); | 1017 | data->vdd_int = devm_regulator_get(dev, "vdd_int"); |
1018 | if (IS_ERR(data->vdd_int)) { | 1018 | if (IS_ERR(data->vdd_int)) { |
1019 | dev_err(dev, "Cannot get the regulator \"vdd_int\"\n"); | 1019 | dev_err(dev, "Cannot get the regulator \"vdd_int\"\n"); |
1020 | err = PTR_ERR(data->vdd_int); | 1020 | return PTR_ERR(data->vdd_int); |
1021 | goto err_regulator; | ||
1022 | } | 1021 | } |
1023 | if (data->type == TYPE_BUSF_EXYNOS4x12) { | 1022 | if (data->type == TYPE_BUSF_EXYNOS4x12) { |
1024 | data->vdd_mif = regulator_get(dev, "vdd_mif"); | 1023 | data->vdd_mif = devm_regulator_get(dev, "vdd_mif"); |
1025 | if (IS_ERR(data->vdd_mif)) { | 1024 | if (IS_ERR(data->vdd_mif)) { |
1026 | dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n"); | 1025 | dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n"); |
1027 | err = PTR_ERR(data->vdd_mif); | 1026 | return PTR_ERR(data->vdd_mif); |
1028 | regulator_put(data->vdd_int); | ||
1029 | goto err_regulator; | ||
1030 | |||
1031 | } | 1027 | } |
1032 | } | 1028 | } |
1033 | 1029 | ||
1034 | opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq); | 1030 | opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq); |
1035 | if (IS_ERR(opp)) { | 1031 | if (IS_ERR(opp)) { |
1036 | dev_err(dev, "Invalid initial frequency %lu kHz.\n", | 1032 | dev_err(dev, "Invalid initial frequency %lu kHz.\n", |
1037 | exynos4_devfreq_profile.initial_freq); | 1033 | exynos4_devfreq_profile.initial_freq); |
1038 | err = PTR_ERR(opp); | 1034 | return PTR_ERR(opp); |
1039 | goto err_opp_add; | ||
1040 | } | 1035 | } |
1041 | data->curr_opp = opp; | 1036 | data->curr_opp = opp; |
1042 | 1037 | ||
@@ -1045,30 +1040,20 @@ static __devinit int exynos4_busfreq_probe(struct platform_device *pdev) | |||
1045 | busfreq_mon_reset(data); | 1040 | busfreq_mon_reset(data); |
1046 | 1041 | ||
1047 | data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile, | 1042 | data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile, |
1048 | &devfreq_simple_ondemand, NULL); | 1043 | "simple_ondemand", NULL); |
1049 | if (IS_ERR(data->devfreq)) { | 1044 | if (IS_ERR(data->devfreq)) |
1050 | err = PTR_ERR(data->devfreq); | 1045 | return PTR_ERR(data->devfreq); |
1051 | goto err_opp_add; | ||
1052 | } | ||
1053 | 1046 | ||
1054 | devfreq_register_opp_notifier(dev, data->devfreq); | 1047 | devfreq_register_opp_notifier(dev, data->devfreq); |
1055 | 1048 | ||
1056 | err = register_pm_notifier(&data->pm_notifier); | 1049 | err = register_pm_notifier(&data->pm_notifier); |
1057 | if (err) { | 1050 | if (err) { |
1058 | dev_err(dev, "Failed to setup pm notifier\n"); | 1051 | dev_err(dev, "Failed to setup pm notifier\n"); |
1059 | goto err_devfreq_add; | 1052 | devfreq_remove_device(data->devfreq); |
1053 | return err; | ||
1060 | } | 1054 | } |
1061 | 1055 | ||
1062 | return 0; | 1056 | return 0; |
1063 | err_devfreq_add: | ||
1064 | devfreq_remove_device(data->devfreq); | ||
1065 | err_opp_add: | ||
1066 | if (data->vdd_mif) | ||
1067 | regulator_put(data->vdd_mif); | ||
1068 | regulator_put(data->vdd_int); | ||
1069 | err_regulator: | ||
1070 | kfree(data); | ||
1071 | return err; | ||
1072 | } | 1057 | } |
1073 | 1058 | ||
1074 | static __devexit int exynos4_busfreq_remove(struct platform_device *pdev) | 1059 | static __devexit int exynos4_busfreq_remove(struct platform_device *pdev) |
@@ -1077,10 +1062,6 @@ static __devexit int exynos4_busfreq_remove(struct platform_device *pdev) | |||
1077 | 1062 | ||
1078 | unregister_pm_notifier(&data->pm_notifier); | 1063 | unregister_pm_notifier(&data->pm_notifier); |
1079 | devfreq_remove_device(data->devfreq); | 1064 | devfreq_remove_device(data->devfreq); |
1080 | regulator_put(data->vdd_int); | ||
1081 | if (data->vdd_mif) | ||
1082 | regulator_put(data->vdd_mif); | ||
1083 | kfree(data); | ||
1084 | 1065 | ||
1085 | return 0; | 1066 | return 0; |
1086 | } | 1067 | } |
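The exynos4_bus probe/remove simplification above comes from switching to managed (devm_*) resources, which are released automatically when probe fails or the device is unbound; that is why the err_* unwind labels and the explicit regulator_put()/kfree() calls disappear. A generic sketch of the pattern, with hypothetical my_* names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>

struct my_data {
	struct regulator *vdd;
};

static int my_probe(struct platform_device *pdev)
{
	struct my_data *data;

	/* freed automatically on probe failure or unbind: no kfree() needed */
	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* released automatically as well: no regulator_put() in remove() */
	data->vdd = devm_regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(data->vdd))
		return PTR_ERR(data->vdd);

	platform_set_drvdata(pdev, data);
	return 0;
}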
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h index ea7f13c58ded..fad7d6321978 100644 --- a/drivers/devfreq/governor.h +++ b/drivers/devfreq/governor.h | |||
@@ -18,7 +18,24 @@ | |||
18 | 18 | ||
19 | #define to_devfreq(DEV) container_of((DEV), struct devfreq, dev) | 19 | #define to_devfreq(DEV) container_of((DEV), struct devfreq, dev) |
20 | 20 | ||
21 | /* Devfreq events */ | ||
22 | #define DEVFREQ_GOV_START 0x1 | ||
23 | #define DEVFREQ_GOV_STOP 0x2 | ||
24 | #define DEVFREQ_GOV_INTERVAL 0x3 | ||
25 | #define DEVFREQ_GOV_SUSPEND 0x4 | ||
26 | #define DEVFREQ_GOV_RESUME 0x5 | ||
27 | |||
21 | /* Caution: devfreq->lock must be locked before calling update_devfreq */ | 28 | /* Caution: devfreq->lock must be locked before calling update_devfreq */ |
22 | extern int update_devfreq(struct devfreq *devfreq); | 29 | extern int update_devfreq(struct devfreq *devfreq); |
23 | 30 | ||
31 | extern void devfreq_monitor_start(struct devfreq *devfreq); | ||
32 | extern void devfreq_monitor_stop(struct devfreq *devfreq); | ||
33 | extern void devfreq_monitor_suspend(struct devfreq *devfreq); | ||
34 | extern void devfreq_monitor_resume(struct devfreq *devfreq); | ||
35 | extern void devfreq_interval_update(struct devfreq *devfreq, | ||
36 | unsigned int *delay); | ||
37 | |||
38 | extern int devfreq_add_governor(struct devfreq_governor *governor); | ||
39 | extern int devfreq_remove_governor(struct devfreq_governor *governor); | ||
40 | |||
24 | #endif /* _GOVERNOR_H */ | 41 | #endif /* _GOVERNOR_H */ |
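governor.h above defines the new governor contract: a governor provides get_target_freq() plus an event_handler() for the DEVFREQ_GOV_* events, and registers itself with devfreq_add_governor(). A minimal sketch of a hypothetical governor built on that contract (the in-tree governors in the hunks below follow the same shape; the "noop_example" name is made up, and the file would live in drivers/devfreq next to governor.h):

#include <linux/devfreq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include "governor.h"

/* Always ask for the highest allowed frequency. */
static int noop_get_target_freq(struct devfreq *df, unsigned long *freq)
{
	*freq = df->max_freq ? df->max_freq : UINT_MAX;
	return 0;
}

static int noop_event_handler(struct devfreq *devfreq,
			      unsigned int event, void *data)
{
	switch (event) {
	case DEVFREQ_GOV_START:
		devfreq_monitor_start(devfreq);	/* begin periodic polling */
		break;
	case DEVFREQ_GOV_STOP:
		devfreq_monitor_stop(devfreq);
		break;
	case DEVFREQ_GOV_INTERVAL:
		devfreq_interval_update(devfreq, (unsigned int *)data);
		break;
	case DEVFREQ_GOV_SUSPEND:
		devfreq_monitor_suspend(devfreq);
		break;
	case DEVFREQ_GOV_RESUME:
		devfreq_monitor_resume(devfreq);
		break;
	default:
		break;
	}
	return 0;
}

static struct devfreq_governor noop_governor = {
	.name		 = "noop_example",
	.get_target_freq = noop_get_target_freq,
	.event_handler	 = noop_event_handler,
};

static int __init noop_governor_init(void)
{
	return devfreq_add_governor(&noop_governor);
}
subsys_initcall(noop_governor_init);

static void __exit noop_governor_exit(void)
{
	if (devfreq_remove_governor(&noop_governor))
		pr_err("%s: failed to remove governor\n", __func__);
}
module_exit(noop_governor_exit);
MODULE_LICENSE("GPL");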
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c index af75ddd4f158..c72f942f30a8 100644 --- a/drivers/devfreq/governor_performance.c +++ b/drivers/devfreq/governor_performance.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/devfreq.h> | 12 | #include <linux/devfreq.h> |
13 | #include <linux/module.h> | ||
13 | #include "governor.h" | 14 | #include "governor.h" |
14 | 15 | ||
15 | static int devfreq_performance_func(struct devfreq *df, | 16 | static int devfreq_performance_func(struct devfreq *df, |
@@ -26,14 +27,41 @@ static int devfreq_performance_func(struct devfreq *df, | |||
26 | return 0; | 27 | return 0; |
27 | } | 28 | } |
28 | 29 | ||
29 | static int performance_init(struct devfreq *devfreq) | 30 | static int devfreq_performance_handler(struct devfreq *devfreq, |
31 | unsigned int event, void *data) | ||
30 | { | 32 | { |
31 | return update_devfreq(devfreq); | 33 | int ret = 0; |
34 | |||
35 | if (event == DEVFREQ_GOV_START) { | ||
36 | mutex_lock(&devfreq->lock); | ||
37 | ret = update_devfreq(devfreq); | ||
38 | mutex_unlock(&devfreq->lock); | ||
39 | } | ||
40 | |||
41 | return ret; | ||
32 | } | 42 | } |
33 | 43 | ||
34 | const struct devfreq_governor devfreq_performance = { | 44 | static struct devfreq_governor devfreq_performance = { |
35 | .name = "performance", | 45 | .name = "performance", |
36 | .init = performance_init, | ||
37 | .get_target_freq = devfreq_performance_func, | 46 | .get_target_freq = devfreq_performance_func, |
38 | .no_central_polling = true, | 47 | .event_handler = devfreq_performance_handler, |
39 | }; | 48 | }; |
49 | |||
50 | static int __init devfreq_performance_init(void) | ||
51 | { | ||
52 | return devfreq_add_governor(&devfreq_performance); | ||
53 | } | ||
54 | subsys_initcall(devfreq_performance_init); | ||
55 | |||
56 | static void __exit devfreq_performance_exit(void) | ||
57 | { | ||
58 | int ret; | ||
59 | |||
60 | ret = devfreq_remove_governor(&devfreq_performance); | ||
61 | if (ret) | ||
62 | pr_err("%s: failed to remove governor %d\n", __func__, ret); | ||
63 | |||
64 | return; | ||
65 | } | ||
66 | module_exit(devfreq_performance_exit); | ||
67 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c index fec0cdbd2477..0c6bed567e6d 100644 --- a/drivers/devfreq/governor_powersave.c +++ b/drivers/devfreq/governor_powersave.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/devfreq.h> | 12 | #include <linux/devfreq.h> |
13 | #include <linux/module.h> | ||
13 | #include "governor.h" | 14 | #include "governor.h" |
14 | 15 | ||
15 | static int devfreq_powersave_func(struct devfreq *df, | 16 | static int devfreq_powersave_func(struct devfreq *df, |
@@ -23,14 +24,41 @@ static int devfreq_powersave_func(struct devfreq *df, | |||
23 | return 0; | 24 | return 0; |
24 | } | 25 | } |
25 | 26 | ||
26 | static int powersave_init(struct devfreq *devfreq) | 27 | static int devfreq_powersave_handler(struct devfreq *devfreq, |
28 | unsigned int event, void *data) | ||
27 | { | 29 | { |
28 | return update_devfreq(devfreq); | 30 | int ret = 0; |
31 | |||
32 | if (event == DEVFREQ_GOV_START) { | ||
33 | mutex_lock(&devfreq->lock); | ||
34 | ret = update_devfreq(devfreq); | ||
35 | mutex_unlock(&devfreq->lock); | ||
36 | } | ||
37 | |||
38 | return ret; | ||
29 | } | 39 | } |
30 | 40 | ||
31 | const struct devfreq_governor devfreq_powersave = { | 41 | static struct devfreq_governor devfreq_powersave = { |
32 | .name = "powersave", | 42 | .name = "powersave", |
33 | .init = powersave_init, | ||
34 | .get_target_freq = devfreq_powersave_func, | 43 | .get_target_freq = devfreq_powersave_func, |
35 | .no_central_polling = true, | 44 | .event_handler = devfreq_powersave_handler, |
36 | }; | 45 | }; |
46 | |||
47 | static int __init devfreq_powersave_init(void) | ||
48 | { | ||
49 | return devfreq_add_governor(&devfreq_powersave); | ||
50 | } | ||
51 | subsys_initcall(devfreq_powersave_init); | ||
52 | |||
53 | static void __exit devfreq_powersave_exit(void) | ||
54 | { | ||
55 | int ret; | ||
56 | |||
57 | ret = devfreq_remove_governor(&devfreq_powersave); | ||
58 | if (ret) | ||
59 | pr_err("%s: failed to remove governor %d\n", __func__, ret); | ||
60 | |||
61 | return; | ||
62 | } | ||
63 | module_exit(devfreq_powersave_exit); | ||
64 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c index a2e3eae79011..0720ba84ca92 100644 --- a/drivers/devfreq/governor_simpleondemand.c +++ b/drivers/devfreq/governor_simpleondemand.c | |||
@@ -10,8 +10,10 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/module.h> | ||
13 | #include <linux/devfreq.h> | 14 | #include <linux/devfreq.h> |
14 | #include <linux/math64.h> | 15 | #include <linux/math64.h> |
16 | #include "governor.h" | ||
15 | 17 | ||
16 | /* Default constants for DevFreq-Simple-Ondemand (DFSO) */ | 18 | /* Default constants for DevFreq-Simple-Ondemand (DFSO) */ |
17 | #define DFSO_UPTHRESHOLD (90) | 19 | #define DFSO_UPTHRESHOLD (90) |
@@ -88,7 +90,58 @@ static int devfreq_simple_ondemand_func(struct devfreq *df, | |||
88 | return 0; | 90 | return 0; |
89 | } | 91 | } |
90 | 92 | ||
91 | const struct devfreq_governor devfreq_simple_ondemand = { | 93 | static int devfreq_simple_ondemand_handler(struct devfreq *devfreq, |
94 | unsigned int event, void *data) | ||
95 | { | ||
96 | switch (event) { | ||
97 | case DEVFREQ_GOV_START: | ||
98 | devfreq_monitor_start(devfreq); | ||
99 | break; | ||
100 | |||
101 | case DEVFREQ_GOV_STOP: | ||
102 | devfreq_monitor_stop(devfreq); | ||
103 | break; | ||
104 | |||
105 | case DEVFREQ_GOV_INTERVAL: | ||
106 | devfreq_interval_update(devfreq, (unsigned int *)data); | ||
107 | break; | ||
108 | |||
109 | case DEVFREQ_GOV_SUSPEND: | ||
110 | devfreq_monitor_suspend(devfreq); | ||
111 | break; | ||
112 | |||
113 | case DEVFREQ_GOV_RESUME: | ||
114 | devfreq_monitor_resume(devfreq); | ||
115 | break; | ||
116 | |||
117 | default: | ||
118 | break; | ||
119 | } | ||
120 | |||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | static struct devfreq_governor devfreq_simple_ondemand = { | ||
92 | .name = "simple_ondemand", | 125 | .name = "simple_ondemand", |
93 | .get_target_freq = devfreq_simple_ondemand_func, | 126 | .get_target_freq = devfreq_simple_ondemand_func, |
127 | .event_handler = devfreq_simple_ondemand_handler, | ||
94 | }; | 128 | }; |
129 | |||
130 | static int __init devfreq_simple_ondemand_init(void) | ||
131 | { | ||
132 | return devfreq_add_governor(&devfreq_simple_ondemand); | ||
133 | } | ||
134 | subsys_initcall(devfreq_simple_ondemand_init); | ||
135 | |||
136 | static void __exit devfreq_simple_ondemand_exit(void) | ||
137 | { | ||
138 | int ret; | ||
139 | |||
140 | ret = devfreq_remove_governor(&devfreq_simple_ondemand); | ||
141 | if (ret) | ||
142 | pr_err("%s: failed to remove governor %d\n", __func__, ret); | ||
143 | |||
144 | return; | ||
145 | } | ||
146 | module_exit(devfreq_simple_ondemand_exit); | ||
147 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c index 0681246fc89d..35de6e83c1fe 100644 --- a/drivers/devfreq/governor_userspace.c +++ b/drivers/devfreq/governor_userspace.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/devfreq.h> | 14 | #include <linux/devfreq.h> |
15 | #include <linux/pm.h> | 15 | #include <linux/pm.h> |
16 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
17 | #include <linux/module.h> | ||
17 | #include "governor.h" | 18 | #include "governor.h" |
18 | 19 | ||
19 | struct userspace_data { | 20 | struct userspace_data { |
@@ -116,10 +117,46 @@ static void userspace_exit(struct devfreq *devfreq) | |||
116 | devfreq->data = NULL; | 117 | devfreq->data = NULL; |
117 | } | 118 | } |
118 | 119 | ||
119 | const struct devfreq_governor devfreq_userspace = { | 120 | static int devfreq_userspace_handler(struct devfreq *devfreq, |
121 | unsigned int event, void *data) | ||
122 | { | ||
123 | int ret = 0; | ||
124 | |||
125 | switch (event) { | ||
126 | case DEVFREQ_GOV_START: | ||
127 | ret = userspace_init(devfreq); | ||
128 | break; | ||
129 | case DEVFREQ_GOV_STOP: | ||
130 | userspace_exit(devfreq); | ||
131 | break; | ||
132 | default: | ||
133 | break; | ||
134 | } | ||
135 | |||
136 | return ret; | ||
137 | } | ||
138 | |||
139 | static struct devfreq_governor devfreq_userspace = { | ||
120 | .name = "userspace", | 140 | .name = "userspace", |
121 | .get_target_freq = devfreq_userspace_func, | 141 | .get_target_freq = devfreq_userspace_func, |
122 | .init = userspace_init, | 142 | .event_handler = devfreq_userspace_handler, |
123 | .exit = userspace_exit, | ||
124 | .no_central_polling = true, | ||
125 | }; | 143 | }; |
144 | |||
145 | static int __init devfreq_userspace_init(void) | ||
146 | { | ||
147 | return devfreq_add_governor(&devfreq_userspace); | ||
148 | } | ||
149 | subsys_initcall(devfreq_userspace_init); | ||
150 | |||
151 | static void __exit devfreq_userspace_exit(void) | ||
152 | { | ||
153 | int ret; | ||
154 | |||
155 | ret = devfreq_remove_governor(&devfreq_userspace); | ||
156 | if (ret) | ||
157 | pr_err("%s: failed to remove governor %d\n", __func__, ret); | ||
158 | |||
159 | return; | ||
160 | } | ||
161 | module_exit(devfreq_userspace_exit); | ||
162 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 8d4804732bac..8c4139647efc 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h | |||
@@ -33,7 +33,7 @@ | |||
33 | * detection. The mods to Rev F required more family | 33 | * detection. The mods to Rev F required more family |
34 | * information detection. | 34 | * information detection. |
35 | * | 35 | * |
36 | * Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>: | 36 | * Changes/Fixes by Borislav Petkov <bp@alien8.de>: |
37 | * - misc fixes and code cleanups | 37 | * - misc fixes and code cleanups |
38 | * | 38 | * |
39 | * This module is based on the following documents | 39 | * This module is based on the following documents |
diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c index 6c86f6e54558..351945fa2ecd 100644 --- a/drivers/edac/edac_stub.c +++ b/drivers/edac/edac_stub.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * 2007 (c) MontaVista Software, Inc. | 6 | * 2007 (c) MontaVista Software, Inc. |
7 | * 2010 (c) Advanced Micro Devices Inc. | 7 | * 2010 (c) Advanced Micro Devices Inc. |
8 | * Borislav Petkov <borislav.petkov@amd.com> | 8 | * Borislav Petkov <bp@alien8.de> |
9 | * | 9 | * |
10 | * This file is licensed under the terms of the GNU General Public | 10 | * This file is licensed under the terms of the GNU General Public |
11 | * License version 2. This program is licensed "as is" without any | 11 | * License version 2. This program is licensed "as is" without any |
diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c index 66b5151c1080..2ae78f20cc28 100644 --- a/drivers/edac/mce_amd_inj.c +++ b/drivers/edac/mce_amd_inj.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * This file may be distributed under the terms of the GNU General Public | 6 | * This file may be distributed under the terms of the GNU General Public |
7 | * License version 2. | 7 | * License version 2. |
8 | * | 8 | * |
9 | * Copyright (c) 2010: Borislav Petkov <borislav.petkov@amd.com> | 9 | * Copyright (c) 2010: Borislav Petkov <bp@alien8.de> |
10 | * Advanced Micro Devices Inc. | 10 | * Advanced Micro Devices Inc. |
11 | */ | 11 | */ |
12 | 12 | ||
@@ -168,6 +168,6 @@ module_init(edac_init_mce_inject); | |||
168 | module_exit(edac_exit_mce_inject); | 168 | module_exit(edac_exit_mce_inject); |
169 | 169 | ||
170 | MODULE_LICENSE("GPL"); | 170 | MODULE_LICENSE("GPL"); |
171 | MODULE_AUTHOR("Borislav Petkov <borislav.petkov@amd.com>"); | 171 | MODULE_AUTHOR("Borislav Petkov <bp@alien8.de>"); |
172 | MODULE_AUTHOR("AMD Inc."); | 172 | MODULE_AUTHOR("AMD Inc."); |
173 | MODULE_DESCRIPTION("MCE injection facility for testing MCE decoding"); | 173 | MODULE_DESCRIPTION("MCE injection facility for testing MCE decoding"); |
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 1162d6b3bf85..bb1b392f5cda 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c | |||
@@ -1546,6 +1546,8 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev) | |||
1546 | struct sbp2_logical_unit *lu = sdev->hostdata; | 1546 | struct sbp2_logical_unit *lu = sdev->hostdata; |
1547 | 1547 | ||
1548 | sdev->use_10_for_rw = 1; | 1548 | sdev->use_10_for_rw = 1; |
1549 | sdev->no_report_opcodes = 1; | ||
1550 | sdev->no_write_same = 1; | ||
1549 | 1551 | ||
1550 | if (sbp2_param_exclusive_login) | 1552 | if (sbp2_param_exclusive_login) |
1551 | sdev->manage_start_stop = 1; | 1553 | sdev->manage_start_stop = 1; |
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index f11d8e3b4041..f16557690cfd 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
@@ -49,6 +49,10 @@ config OF_GPIO | |||
49 | def_bool y | 49 | def_bool y |
50 | depends on OF | 50 | depends on OF |
51 | 51 | ||
52 | config GPIO_ACPI | ||
53 | def_bool y | ||
54 | depends on ACPI | ||
55 | |||
52 | config DEBUG_GPIO | 56 | config DEBUG_GPIO |
53 | bool "Debug GPIO calls" | 57 | bool "Debug GPIO calls" |
54 | depends on DEBUG_KERNEL | 58 | depends on DEBUG_KERNEL |
@@ -466,7 +470,7 @@ config GPIO_ADP5588_IRQ | |||
466 | 470 | ||
467 | config GPIO_ADNP | 471 | config GPIO_ADNP |
468 | tristate "Avionic Design N-bit GPIO expander" | 472 | tristate "Avionic Design N-bit GPIO expander" |
469 | depends on I2C && OF | 473 | depends on I2C && OF_GPIO |
470 | help | 474 | help |
471 | This option enables support for N GPIOs found on Avionic Design | 475 | This option enables support for N GPIOs found on Avionic Design |
472 | I2C GPIO expanders. The register space will be extended by powers | 476 | I2C GPIO expanders. The register space will be extended by powers |
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 9aeed6707326..420dbaca05f1 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile | |||
@@ -4,6 +4,7 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG | |||
4 | 4 | ||
5 | obj-$(CONFIG_GPIOLIB) += gpiolib.o devres.o | 5 | obj-$(CONFIG_GPIOLIB) += gpiolib.o devres.o |
6 | obj-$(CONFIG_OF_GPIO) += gpiolib-of.o | 6 | obj-$(CONFIG_OF_GPIO) += gpiolib-of.o |
7 | obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o | ||
7 | 8 | ||
8 | # Device drivers. Generally keep list sorted alphabetically | 9 | # Device drivers. Generally keep list sorted alphabetically |
9 | obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o | 10 | obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o |
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c index 0f425189de11..ce1c84760076 100644 --- a/drivers/gpio/gpio-mcp23s08.c +++ b/drivers/gpio/gpio-mcp23s08.c | |||
@@ -77,7 +77,7 @@ struct mcp23s08_driver_data { | |||
77 | 77 | ||
78 | /*----------------------------------------------------------------------*/ | 78 | /*----------------------------------------------------------------------*/ |
79 | 79 | ||
80 | #ifdef CONFIG_I2C | 80 | #if IS_ENABLED(CONFIG_I2C) |
81 | 81 | ||
82 | static int mcp23008_read(struct mcp23s08 *mcp, unsigned reg) | 82 | static int mcp23008_read(struct mcp23s08 *mcp, unsigned reg) |
83 | { | 83 | { |
@@ -399,7 +399,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, | |||
399 | break; | 399 | break; |
400 | #endif /* CONFIG_SPI_MASTER */ | 400 | #endif /* CONFIG_SPI_MASTER */ |
401 | 401 | ||
402 | #ifdef CONFIG_I2C | 402 | #if IS_ENABLED(CONFIG_I2C) |
403 | case MCP_TYPE_008: | 403 | case MCP_TYPE_008: |
404 | mcp->ops = &mcp23008_ops; | 404 | mcp->ops = &mcp23008_ops; |
405 | mcp->chip.ngpio = 8; | 405 | mcp->chip.ngpio = 8; |
@@ -473,7 +473,7 @@ fail: | |||
473 | 473 | ||
474 | /*----------------------------------------------------------------------*/ | 474 | /*----------------------------------------------------------------------*/ |
475 | 475 | ||
476 | #ifdef CONFIG_I2C | 476 | #if IS_ENABLED(CONFIG_I2C) |
477 | 477 | ||
478 | static int __devinit mcp230xx_probe(struct i2c_client *client, | 478 | static int __devinit mcp230xx_probe(struct i2c_client *client, |
479 | const struct i2c_device_id *id) | 479 | const struct i2c_device_id *id) |
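The #ifdef CONFIG_I2C to IS_ENABLED(CONFIG_I2C) conversion above matters because I2C is a tristate option: a plain #ifdef only matches CONFIG_I2C=y, whereas IS_ENABLED() is also true when I2C is built as a module (CONFIG_I2C=m, i.e. CONFIG_I2C_MODULE is defined). Illustration only:

#include <linux/kconfig.h>	/* IS_ENABLED() */

#ifdef CONFIG_I2C
/* compiled only when I2C support is built in (CONFIG_I2C=y) */
#endif

#if IS_ENABLED(CONFIG_I2C)
/* compiled when I2C is built in or modular (CONFIG_I2C=y or =m) */
#endif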
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index cf7afb9eb61a..be65c0451ad5 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c | |||
@@ -92,6 +92,11 @@ static inline void __iomem *mvebu_gpioreg_out(struct mvebu_gpio_chip *mvchip) | |||
92 | return mvchip->membase + GPIO_OUT_OFF; | 92 | return mvchip->membase + GPIO_OUT_OFF; |
93 | } | 93 | } |
94 | 94 | ||
95 | static inline void __iomem *mvebu_gpioreg_blink(struct mvebu_gpio_chip *mvchip) | ||
96 | { | ||
97 | return mvchip->membase + GPIO_BLINK_EN_OFF; | ||
98 | } | ||
99 | |||
95 | static inline void __iomem *mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip) | 100 | static inline void __iomem *mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip) |
96 | { | 101 | { |
97 | return mvchip->membase + GPIO_IO_CONF_OFF; | 102 | return mvchip->membase + GPIO_IO_CONF_OFF; |
@@ -206,6 +211,23 @@ static int mvebu_gpio_get(struct gpio_chip *chip, unsigned pin) | |||
206 | return (u >> pin) & 1; | 211 | return (u >> pin) & 1; |
207 | } | 212 | } |
208 | 213 | ||
214 | static void mvebu_gpio_blink(struct gpio_chip *chip, unsigned pin, int value) | ||
215 | { | ||
216 | struct mvebu_gpio_chip *mvchip = | ||
217 | container_of(chip, struct mvebu_gpio_chip, chip); | ||
218 | unsigned long flags; | ||
219 | u32 u; | ||
220 | |||
221 | spin_lock_irqsave(&mvchip->lock, flags); | ||
222 | u = readl_relaxed(mvebu_gpioreg_blink(mvchip)); | ||
223 | if (value) | ||
224 | u |= 1 << pin; | ||
225 | else | ||
226 | u &= ~(1 << pin); | ||
227 | writel_relaxed(u, mvebu_gpioreg_blink(mvchip)); | ||
228 | spin_unlock_irqrestore(&mvchip->lock, flags); | ||
229 | } | ||
230 | |||
209 | static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned pin) | 231 | static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned pin) |
210 | { | 232 | { |
211 | struct mvebu_gpio_chip *mvchip = | 233 | struct mvebu_gpio_chip *mvchip = |
@@ -244,6 +266,7 @@ static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned pin, | |||
244 | if (ret) | 266 | if (ret) |
245 | return ret; | 267 | return ret; |
246 | 268 | ||
269 | mvebu_gpio_blink(chip, pin, 0); | ||
247 | mvebu_gpio_set(chip, pin, value); | 270 | mvebu_gpio_set(chip, pin, value); |
248 | 271 | ||
249 | spin_lock_irqsave(&mvchip->lock, flags); | 272 | spin_lock_irqsave(&mvchip->lock, flags); |
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c new file mode 100644 index 000000000000..cbad6e908d30 --- /dev/null +++ b/drivers/gpio/gpiolib-acpi.c | |||
@@ -0,0 +1,54 @@ | |||
1 | /* | ||
2 | * ACPI helpers for GPIO API | ||
3 | * | ||
4 | * Copyright (C) 2012, Intel Corporation | ||
5 | * Authors: Mathias Nyman <mathias.nyman@linux.intel.com> | ||
6 | * Mika Westerberg <mika.westerberg@linux.intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/errno.h> | ||
14 | #include <linux/gpio.h> | ||
15 | #include <linux/export.h> | ||
16 | #include <linux/acpi_gpio.h> | ||
17 | #include <linux/acpi.h> | ||
18 | |||
19 | static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) | ||
20 | { | ||
21 | if (!gc->dev) | ||
22 | return false; | ||
23 | |||
24 | return ACPI_HANDLE(gc->dev) == data; | ||
25 | } | ||
26 | |||
27 | /** | ||
28 | * acpi_get_gpio() - Translate ACPI GPIO pin to GPIO number usable with GPIO API | ||
29 | * @path: ACPI GPIO controller full path name, (e.g. "\\_SB.GPO1") | ||
30 | * @pin: ACPI GPIO pin number (0-based, controller-relative) | ||
31 | * | ||
32 | * Returns GPIO number to use with Linux generic GPIO API, or negative errno on failure | ||
33 | */ | ||
34 | |||
35 | int acpi_get_gpio(char *path, int pin) | ||
36 | { | ||
37 | struct gpio_chip *chip; | ||
38 | acpi_handle handle; | ||
39 | acpi_status status; | ||
40 | |||
41 | status = acpi_get_handle(NULL, path, &handle); | ||
42 | if (ACPI_FAILURE(status)) | ||
43 | return -ENODEV; | ||
44 | |||
45 | chip = gpiochip_find(handle, acpi_gpiochip_find); | ||
46 | if (!chip) | ||
47 | return -ENODEV; | ||
48 | |||
49 | if (!gpio_is_valid(chip->base + pin)) | ||
50 | return -EINVAL; | ||
51 | |||
52 | return chip->base + pin; | ||
53 | } | ||
54 | EXPORT_SYMBOL_GPL(acpi_get_gpio); | ||
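A usage sketch for the new acpi_get_gpio() helper above (not from the patch); the controller path, pin number, and my_* names are made up:

#include <linux/acpi_gpio.h>
#include <linux/gpio.h>

static int my_claim_acpi_gpio(void)
{
	/* "\\_SB.GPO1" is the ACPI path of the GPIO controller, pin 4 on it */
	int gpio = acpi_get_gpio("\\_SB.GPO1", 4);

	if (gpio < 0)
		return gpio;	/* -ENODEV or -EINVAL from the lookup */

	/* from here on the pin is a normal Linux GPIO number */
	return gpio_request_one(gpio, GPIOF_OUT_INIT_LOW, "my-signal");
}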
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index b726b478a4f5..6345878ae1e7 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -143,7 +143,7 @@ static void intel_crt_dpms(struct drm_connector *connector, int mode) | |||
143 | int old_dpms; | 143 | int old_dpms; |
144 | 144 | ||
145 | /* PCH platforms and VLV only support on/off. */ | 145 | /* PCH platforms and VLV only support on/off. */ |
146 | if (INTEL_INFO(dev)->gen < 5 && mode != DRM_MODE_DPMS_ON) | 146 | if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON) |
147 | mode = DRM_MODE_DPMS_OFF; | 147 | mode = DRM_MODE_DPMS_OFF; |
148 | 148 | ||
149 | if (mode == connector->dpms) | 149 | if (mode == connector->dpms) |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 461a637f1ef7..4154bcd7a070 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -3841,6 +3841,17 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, | |||
3841 | } | 3841 | } |
3842 | } | 3842 | } |
3843 | 3843 | ||
3844 | if (intel_encoder->type == INTEL_OUTPUT_EDP) { | ||
3845 | /* Use VBT settings if we have an eDP panel */ | ||
3846 | unsigned int edp_bpc = dev_priv->edp.bpp / 3; | ||
3847 | |||
3848 | if (edp_bpc < display_bpc) { | ||
3849 | DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); | ||
3850 | display_bpc = edp_bpc; | ||
3851 | } | ||
3852 | continue; | ||
3853 | } | ||
3854 | |||
3844 | /* | 3855 | /* |
3845 | * HDMI is either 12 or 8, so if the display lets 10bpc sneak | 3856 | * HDMI is either 12 or 8, so if the display lets 10bpc sneak |
3846 | * through, clamp it down. (Note: >12bpc will be caught below.) | 3857 | * through, clamp it down. (Note: >12bpc will be caught below.) |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 79d308da29ff..c600fb06e25e 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -2382,6 +2382,18 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags) | |||
2382 | return true; | 2382 | return true; |
2383 | } | 2383 | } |
2384 | 2384 | ||
2385 | static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo) | ||
2386 | { | ||
2387 | struct drm_device *dev = intel_sdvo->base.base.dev; | ||
2388 | struct drm_connector *connector, *tmp; | ||
2389 | |||
2390 | list_for_each_entry_safe(connector, tmp, | ||
2391 | &dev->mode_config.connector_list, head) { | ||
2392 | if (intel_attached_encoder(connector) == &intel_sdvo->base) | ||
2393 | intel_sdvo_destroy(connector); | ||
2394 | } | ||
2395 | } | ||
2396 | |||
2385 | static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, | 2397 | static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, |
2386 | struct intel_sdvo_connector *intel_sdvo_connector, | 2398 | struct intel_sdvo_connector *intel_sdvo_connector, |
2387 | int type) | 2399 | int type) |
@@ -2705,7 +2717,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) | |||
2705 | intel_sdvo->caps.output_flags) != true) { | 2717 | intel_sdvo->caps.output_flags) != true) { |
2706 | DRM_DEBUG_KMS("SDVO output failed to setup on %s\n", | 2718 | DRM_DEBUG_KMS("SDVO output failed to setup on %s\n", |
2707 | SDVO_NAME(intel_sdvo)); | 2719 | SDVO_NAME(intel_sdvo)); |
2708 | goto err; | 2720 | /* Output_setup can leave behind connectors! */ |
2721 | goto err_output; | ||
2709 | } | 2722 | } |
2710 | 2723 | ||
2711 | /* Only enable the hotplug irq if we need it, to work around noisy | 2724 | /* Only enable the hotplug irq if we need it, to work around noisy |
@@ -2718,12 +2731,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) | |||
2718 | 2731 | ||
2719 | /* Set the input timing to the screen. Assume always input 0. */ | 2732 | /* Set the input timing to the screen. Assume always input 0. */ |
2720 | if (!intel_sdvo_set_target_input(intel_sdvo)) | 2733 | if (!intel_sdvo_set_target_input(intel_sdvo)) |
2721 | goto err; | 2734 | goto err_output; |
2722 | 2735 | ||
2723 | if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo, | 2736 | if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo, |
2724 | &intel_sdvo->pixel_clock_min, | 2737 | &intel_sdvo->pixel_clock_min, |
2725 | &intel_sdvo->pixel_clock_max)) | 2738 | &intel_sdvo->pixel_clock_max)) |
2726 | goto err; | 2739 | goto err_output; |
2727 | 2740 | ||
2728 | DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " | 2741 | DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " |
2729 | "clock range %dMHz - %dMHz, " | 2742 | "clock range %dMHz - %dMHz, " |
@@ -2743,6 +2756,9 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) | |||
2743 | (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); | 2756 | (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); |
2744 | return true; | 2757 | return true; |
2745 | 2758 | ||
2759 | err_output: | ||
2760 | intel_sdvo_output_cleanup(intel_sdvo); | ||
2761 | |||
2746 | err: | 2762 | err: |
2747 | drm_encoder_cleanup(&intel_encoder->base); | 2763 | drm_encoder_cleanup(&intel_encoder->base); |
2748 | i2c_del_adapter(&intel_sdvo->ddc); | 2764 | i2c_del_adapter(&intel_sdvo->ddc); |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c index 05a909a17cee..15b182c84ce8 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | |||
@@ -49,13 +49,7 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc) | |||
49 | if (chan->vblank.crtc != crtc) | 49 | if (chan->vblank.crtc != crtc) |
50 | continue; | 50 | continue; |
51 | 51 | ||
52 | if (nv_device(priv)->chipset == 0x50) { | 52 | if (nv_device(priv)->chipset >= 0xc0) { |
53 | nv_wr32(priv, 0x001704, chan->vblank.channel); | ||
54 | nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma); | ||
55 | bar->flush(bar); | ||
56 | nv_wr32(priv, 0x001570, chan->vblank.offset); | ||
57 | nv_wr32(priv, 0x001574, chan->vblank.value); | ||
58 | } else { | ||
59 | nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel); | 53 | nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel); |
60 | bar->flush(bar); | 54 | bar->flush(bar); |
61 | nv_wr32(priv, 0x06000c, | 55 | nv_wr32(priv, 0x06000c, |
@@ -63,6 +57,17 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc) | |||
63 | nv_wr32(priv, 0x060010, | 57 | nv_wr32(priv, 0x060010, |
64 | lower_32_bits(chan->vblank.offset)); | 58 | lower_32_bits(chan->vblank.offset)); |
65 | nv_wr32(priv, 0x060014, chan->vblank.value); | 59 | nv_wr32(priv, 0x060014, chan->vblank.value); |
60 | } else { | ||
61 | nv_wr32(priv, 0x001704, chan->vblank.channel); | ||
62 | nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma); | ||
63 | bar->flush(bar); | ||
64 | if (nv_device(priv)->chipset == 0x50) { | ||
65 | nv_wr32(priv, 0x001570, chan->vblank.offset); | ||
66 | nv_wr32(priv, 0x001574, chan->vblank.value); | ||
67 | } else { | ||
68 | nv_wr32(priv, 0x060010, chan->vblank.offset); | ||
69 | nv_wr32(priv, 0x060014, chan->vblank.value); | ||
70 | } | ||
66 | } | 71 | } |
67 | 72 | ||
68 | list_del(&chan->vblank.head); | 73 | list_del(&chan->vblank.head); |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c index e45035efb8ca..7bbb1e1b7a8d 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c | |||
@@ -669,21 +669,27 @@ nv40_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem) | |||
669 | }); | 669 | }); |
670 | } | 670 | } |
671 | 671 | ||
672 | void | 672 | int |
673 | nv40_grctx_init(struct nouveau_device *device, u32 *size) | 673 | nv40_grctx_init(struct nouveau_device *device, u32 *size) |
674 | { | 674 | { |
675 | u32 ctxprog[256], i; | 675 | u32 *ctxprog = kmalloc(256 * 4, GFP_KERNEL), i; |
676 | struct nouveau_grctx ctx = { | 676 | struct nouveau_grctx ctx = { |
677 | .device = device, | 677 | .device = device, |
678 | .mode = NOUVEAU_GRCTX_PROG, | 678 | .mode = NOUVEAU_GRCTX_PROG, |
679 | .data = ctxprog, | 679 | .data = ctxprog, |
680 | .ctxprog_max = ARRAY_SIZE(ctxprog) | 680 | .ctxprog_max = 256, |
681 | }; | 681 | }; |
682 | 682 | ||
683 | if (!ctxprog) | ||
684 | return -ENOMEM; | ||
685 | |||
683 | nv40_grctx_generate(&ctx); | 686 | nv40_grctx_generate(&ctx); |
684 | 687 | ||
685 | nv_wr32(device, 0x400324, 0); | 688 | nv_wr32(device, 0x400324, 0); |
686 | for (i = 0; i < ctx.ctxprog_len; i++) | 689 | for (i = 0; i < ctx.ctxprog_len; i++) |
687 | nv_wr32(device, 0x400328, ctxprog[i]); | 690 | nv_wr32(device, 0x400328, ctxprog[i]); |
688 | *size = ctx.ctxvals_pos * 4; | 691 | *size = ctx.ctxvals_pos * 4; |
692 | |||
693 | kfree(ctxprog); | ||
694 | return 0; | ||
689 | } | 695 | } |
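The ctxnv40 change swaps a 1 KiB on-stack ctxprog[256] array for a kmalloc'd buffer and turns the function into one that can fail with -ENOMEM. A rough userspace analogue of the shape, with malloc/free standing in for kmalloc/kfree and the generate step reduced to a placeholder loop:

#include <errno.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define CTXPROG_MAX 256   /* mirrors the 256-word program buffer */

/* Returns 0 on success or a negative errno-style value, matching the new
 * int return type the patch gives nv40_grctx_init(). */
static int grctx_init_sketch(uint32_t *size_out)
{
        uint32_t *ctxprog = malloc(CTXPROG_MAX * sizeof(*ctxprog));
        if (!ctxprog)
                return -ENOMEM;

        /* Placeholder for nv40_grctx_generate(): fill the program buffer. */
        for (unsigned i = 0; i < CTXPROG_MAX; i++)
                ctxprog[i] = i;

        *size_out = CTXPROG_MAX * sizeof(*ctxprog);

        free(ctxprog);    /* the buffer is only needed during init */
        return 0;
}

int main(void)
{
        uint32_t size;
        int ret = grctx_init_sketch(&size);

        if (ret)
                fprintf(stderr, "init failed: %d\n", ret);
        else
                printf("context size: %u bytes\n", (unsigned)size);
        return ret ? EXIT_FAILURE : EXIT_SUCCESS;
}

The callers must then check the return value, which is why the nv40.c and nv40.h hunks below switch the prototype from void to int and propagate the error.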
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c index 425001204a89..cc6574eeb80e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c | |||
@@ -346,7 +346,9 @@ nv40_graph_init(struct nouveau_object *object) | |||
346 | return ret; | 346 | return ret; |
347 | 347 | ||
348 | /* generate and upload context program */ | 348 | /* generate and upload context program */ |
349 | nv40_grctx_init(nv_device(priv), &priv->size); | 349 | ret = nv40_grctx_init(nv_device(priv), &priv->size); |
350 | if (ret) | ||
351 | return ret; | ||
350 | 352 | ||
351 | /* No context present currently */ | 353 | /* No context present currently */ |
352 | nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); | 354 | nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h index d2ac975afc2e..7da35a4e7970 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h | |||
@@ -15,7 +15,7 @@ nv44_graph_class(void *priv) | |||
15 | return !(0x0baf & (1 << (device->chipset & 0x0f))); | 15 | return !(0x0baf & (1 << (device->chipset & 0x0f))); |
16 | } | 16 | } |
17 | 17 | ||
18 | void nv40_grctx_init(struct nouveau_device *, u32 *size); | 18 | int nv40_grctx_init(struct nouveau_device *, u32 *size); |
19 | void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *); | 19 | void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *); |
20 | 20 | ||
21 | #endif | 21 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h index 818feabbf4a0..486f1a9217fd 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/object.h +++ b/drivers/gpu/drm/nouveau/core/include/core/object.h | |||
@@ -175,14 +175,18 @@ nv_mo32(void *obj, u32 addr, u32 mask, u32 data) | |||
175 | return temp; | 175 | return temp; |
176 | } | 176 | } |
177 | 177 | ||
178 | static inline bool | 178 | static inline int |
179 | nv_strncmp(void *obj, u32 addr, u32 len, const char *str) | 179 | nv_memcmp(void *obj, u32 addr, const char *str, u32 len) |
180 | { | 180 | { |
181 | unsigned char c1, c2; | ||
182 | |||
181 | while (len--) { | 183 | while (len--) { |
182 | if (nv_ro08(obj, addr++) != *(str++)) | 184 | c1 = nv_ro08(obj, addr++); |
183 | return false; | 185 | c2 = *(str++); |
186 | if (c1 != c2) | ||
187 | return c1 - c2; | ||
184 | } | 188 | } |
185 | return true; | 189 | return 0; |
186 | } | 190 | } |
187 | 191 | ||
188 | #endif | 192 | #endif |
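nv_strncmp() returned a bool that callers had to negate; its replacement nv_memcmp() follows the memcmp()/strncmp() convention of returning zero on a match and the signed difference of the first mismatching bytes otherwise, which is what the dcb.c caller below tests with !nv_memcmp(...). A small standalone model of that convention, with get_byte() standing in for nv_ro08():

#include <stdio.h>

static const unsigned char table[] = "DEV_REC...";

/* Stand-in for nv_ro08(): fetch one byte from a fake "BIOS image". */
static unsigned char get_byte(unsigned addr)
{
        return table[addr];
}

/* Same contract as memcmp(): 0 on equality, otherwise c1 - c2 of the
 * first differing position, so callers can test "== 0" or order keys. */
static int image_memcmp(unsigned addr, const char *str, unsigned len)
{
        while (len--) {
                unsigned char c1 = get_byte(addr++);
                unsigned char c2 = (unsigned char)*str++;
                if (c1 != c2)
                        return c1 - c2;
        }
        return 0;
}

int main(void)
{
        printf("match:    %d\n", image_memcmp(0, "DEV_REC", 7));  /* 0    */
        printf("mismatch: %d\n", image_memcmp(0, "DEV_RED", 7));  /* != 0 */
        return 0;
}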
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h index 39e73b91d360..41b7a6a76f19 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h | |||
@@ -54,6 +54,7 @@ int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, | |||
54 | int clk, struct nouveau_pll_vals *); | 54 | int clk, struct nouveau_pll_vals *); |
55 | int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1, | 55 | int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1, |
56 | struct nouveau_pll_vals *); | 56 | struct nouveau_pll_vals *); |
57 | | 57 | int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
58 | int clk, struct nouveau_pll_vals *); | ||
58 | 59 | ||
59 | #endif | 60 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c index 7d750382a833..c51197157749 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c | |||
@@ -64,7 +64,7 @@ dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) | |||
64 | } | 64 | } |
65 | } else | 65 | } else |
66 | if (*ver >= 0x15) { | 66 | if (*ver >= 0x15) { |
67 | if (!nv_strncmp(bios, dcb - 7, 7, "DEV_REC")) { | 67 | if (!nv_memcmp(bios, dcb - 7, "DEV_REC", 7)) { |
68 | u16 i2c = nv_ro16(bios, dcb + 2); | 68 | u16 i2c = nv_ro16(bios, dcb + 2); |
69 | *hdr = 4; | 69 | *hdr = 4; |
70 | *cnt = (i2c - dcb) / 10; | 70 | *cnt = (i2c - dcb) / 10; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c index cc8d7d162d7c..9068c98b96f6 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c | |||
@@ -66,6 +66,24 @@ nva3_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq) | |||
66 | return ret; | 66 | return ret; |
67 | } | 67 | } |
68 | 68 | ||
69 | int | ||
70 | nva3_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info, | ||
71 | int clk, struct nouveau_pll_vals *pv) | ||
72 | { | ||
73 | int ret, N, M, P; | ||
74 | |||
75 | ret = nva3_pll_calc(clock, info, clk, &N, NULL, &M, &P); | ||
76 | |||
77 | if (ret > 0) { | ||
78 | pv->refclk = info->refclk; | ||
79 | pv->N1 = N; | ||
80 | pv->M1 = M; | ||
81 | pv->log2P = P; | ||
82 | } | ||
83 | return ret; | ||
84 | } | ||
85 | |||
86 | |||
69 | static int | 87 | static int |
70 | nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 88 | nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, |
71 | struct nouveau_oclass *oclass, void *data, u32 size, | 89 | struct nouveau_oclass *oclass, void *data, u32 size, |
@@ -80,6 +98,7 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
80 | return ret; | 98 | return ret; |
81 | 99 | ||
82 | priv->base.pll_set = nva3_clock_pll_set; | 100 | priv->base.pll_set = nva3_clock_pll_set; |
101 | priv->base.pll_calc = nva3_clock_pll_calc; | ||
83 | return 0; | 102 | return 0; |
84 | } | 103 | } |
85 | 104 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c index 5ccce0b17bf3..f6962c9b6c36 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c | |||
@@ -79,6 +79,7 @@ nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
79 | return ret; | 79 | return ret; |
80 | 80 | ||
81 | priv->base.pll_set = nvc0_clock_pll_set; | 81 | priv->base.pll_set = nvc0_clock_pll_set; |
82 | priv->base.pll_calc = nva3_clock_pll_calc; | ||
82 | return 0; | 83 | return 0; |
83 | } | 84 | } |
84 | 85 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index cc79c796afee..cbf1fc60a386 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c | |||
@@ -241,6 +241,10 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) | |||
241 | 241 | ||
242 | if (unlikely(!abi16)) | 242 | if (unlikely(!abi16)) |
243 | return -ENOMEM; | 243 | return -ENOMEM; |
244 | |||
245 | if (!drm->channel) | ||
246 | return nouveau_abi16_put(abi16, -ENODEV); | ||
247 | |||
244 | client = nv_client(abi16->client); | 248 | client = nv_client(abi16->client); |
245 | 249 | ||
246 | if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) | 250 | if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 0910125cbbc3..8503b2ea570a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
@@ -129,7 +129,8 @@ nouveau_accel_init(struct nouveau_drm *drm) | |||
129 | 129 | ||
130 | /* initialise synchronisation routines */ | 130 | /* initialise synchronisation routines */ |
131 | if (device->card_type < NV_10) ret = nv04_fence_create(drm); | 131 | if (device->card_type < NV_10) ret = nv04_fence_create(drm); |
132 | else if (device->chipset < 0x84) ret = nv10_fence_create(drm); | 132 | else if (device->card_type < NV_50) ret = nv10_fence_create(drm); |
133 | else if (device->chipset < 0x84) ret = nv50_fence_create(drm); | ||
133 | else if (device->card_type < NV_C0) ret = nv84_fence_create(drm); | 134 | else if (device->card_type < NV_C0) ret = nv84_fence_create(drm); |
134 | else ret = nvc0_fence_create(drm); | 135 | else ret = nvc0_fence_create(drm); |
135 | if (ret) { | 136 | if (ret) { |
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index ba498f8e47a2..010bae19554a 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c | |||
@@ -1625,7 +1625,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) | |||
1625 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); | 1625 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); |
1626 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); | 1626 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); |
1627 | /* some early dce3.2 boards have a bug in their transmitter control table */ | 1627 | /* some early dce3.2 boards have a bug in their transmitter control table */ |
1628 | if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730)) | 1628 | if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730)) |
1629 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); | 1629 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); |
1630 | } | 1630 | } |
1631 | if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { | 1631 | if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { |
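The radeon one-liner fixes a classic boolean slip: (family != RV710) || (family != RV730) is true for every chip, since no value can equal both, so the call it guards was never skipped on the boards it was meant to protect; the corrected test uses &&. A tiny demonstration:

#include <stdio.h>

enum chip { CHIP_RV710 = 1, CHIP_RV730, CHIP_OTHER };

int main(void)
{
        for (int family = CHIP_RV710; family <= CHIP_OTHER; family++) {
                int buggy = (family != CHIP_RV710) || (family != CHIP_RV730);
                int fixed = (family != CHIP_RV710) && (family != CHIP_RV730);
                printf("family %d: '||' -> %d, '&&' -> %d\n",
                       family, buggy, fixed);
        }
        /* '||' evaluates to 1 for every family, so the guarded call is
         * never skipped; '&&' is 1 only for chips that are neither RV710
         * nor RV730, which is the intended test. */
        return 0;
}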
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index af31f829f4a8..219942c660d7 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -1330,6 +1330,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav | |||
1330 | break; | 1330 | break; |
1331 | udelay(1); | 1331 | udelay(1); |
1332 | } | 1332 | } |
1333 | } else { | ||
1334 | save->crtc_enabled[i] = false; | ||
1333 | } | 1335 | } |
1334 | } | 1336 | } |
1335 | 1337 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index 10ea17a6b2a6..42433344cb1b 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c | |||
@@ -69,9 +69,12 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = { | |||
69 | /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/ | 69 | /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/ |
70 | { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59, | 70 | { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59, |
71 | PCI_VENDOR_ID_DELL, 0x00e3, 2}, | 71 | PCI_VENDOR_ID_DELL, 0x00e3, 2}, |
72 | /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */ | 72 | /* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */ |
73 | { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66, | 73 | { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66, |
74 | PCI_VENDOR_ID_DELL, 0x0149, 1}, | 74 | PCI_VENDOR_ID_DELL, 0x0149, 1}, |
75 | /* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */ | ||
76 | { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66, | ||
77 | PCI_VENDOR_ID_IBM, 0x0531, 1}, | ||
75 | /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */ | 78 | /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */ |
76 | { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50, | 79 | { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50, |
77 | 0x1025, 0x0061, 1}, | 80 | 0x1025, 0x0061, 1}, |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 860dc4813e99..bd2a3b40cd12 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c | |||
@@ -749,7 +749,10 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, | |||
749 | /* clear the pages coming from the pool if requested */ | 749 | /* clear the pages coming from the pool if requested */ |
750 | if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) { | 750 | if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) { |
751 | list_for_each_entry(p, &plist, lru) { | 751 | list_for_each_entry(p, &plist, lru) { |
752 | clear_page(page_address(p)); | 752 | if (PageHighMem(p)) |
753 | clear_highpage(p); | ||
754 | else | ||
755 | clear_page(page_address(p)); | ||
753 | } | 756 | } |
754 | } | 757 | } |
755 | 758 | ||
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index bf8260133ea9..7d759a430294 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -308,9 +308,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm) | |||
308 | if (unlikely(to_page == NULL)) | 308 | if (unlikely(to_page == NULL)) |
309 | goto out_err; | 309 | goto out_err; |
310 | 310 | ||
311 | preempt_disable(); | ||
312 | copy_highpage(to_page, from_page); | 311 | copy_highpage(to_page, from_page); |
313 | preempt_enable(); | ||
314 | page_cache_release(from_page); | 312 | page_cache_release(from_page); |
315 | } | 313 | } |
316 | 314 | ||
@@ -358,9 +356,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) | |||
358 | ret = PTR_ERR(to_page); | 356 | ret = PTR_ERR(to_page); |
359 | goto out_err; | 357 | goto out_err; |
360 | } | 358 | } |
361 | preempt_disable(); | ||
362 | copy_highpage(to_page, from_page); | 359 | copy_highpage(to_page, from_page); |
363 | preempt_enable(); | ||
364 | set_page_dirty(to_page); | 360 | set_page_dirty(to_page); |
365 | mark_page_accessed(to_page); | 361 | mark_page_accessed(to_page); |
366 | page_cache_release(to_page); | 362 | page_cache_release(to_page); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index b07ca2e4d04b..7290811f89be 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
@@ -110,6 +110,8 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | |||
110 | memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); | 110 | memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); |
111 | 111 | ||
112 | ret = copy_to_user(buffer, bounce, size); | 112 | ret = copy_to_user(buffer, bounce, size); |
113 | if (ret) | ||
114 | ret = -EFAULT; | ||
113 | vfree(bounce); | 115 | vfree(bounce); |
114 | 116 | ||
115 | if (unlikely(ret != 0)) | 117 | if (unlikely(ret != 0)) |
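copy_to_user() returns the number of bytes it could not copy rather than an errno, so returning its result directly from the ioctl would hand userspace a positive byte count instead of an error. The hunk converts any nonzero remainder to -EFAULT before the existing `if (unlikely(ret != 0))` check. The same convention modelled in plain C, with a hypothetical copy_out() standing in for copy_to_user():

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for copy_to_user(): returns bytes NOT copied. */
static unsigned long copy_out(void *dst, const void *src, unsigned long n,
                              unsigned long simulate_fault_after)
{
        unsigned long ok = n < simulate_fault_after ? n : simulate_fault_after;

        memcpy(dst, src, ok);
        return n - ok;                    /* 0 means everything was copied */
}

static int get_caps(void *user_buf, const void *bounce, unsigned long size)
{
        int ret = 0;
        unsigned long not_copied = copy_out(user_buf, bounce, size, 16);

        if (not_copied)
                ret = -EFAULT;            /* translate to a proper errno */
        return ret;
}

int main(void)
{
        char src[32] = "3d capability block";
        char dst[32];

        printf("ret = %d\n", get_caps(dst, src, sizeof(src)));  /* -EFAULT */
        return 0;
}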
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c index f676c01bb471..6fcd466d0825 100644 --- a/drivers/hid/hid-microsoft.c +++ b/drivers/hid/hid-microsoft.c | |||
@@ -46,9 +46,9 @@ static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc, | |||
46 | rdesc[559] = 0x45; | 46 | rdesc[559] = 0x45; |
47 | } | 47 | } |
48 | /* the same as above (s/usage/physical/) */ | 48 | /* the same as above (s/usage/physical/) */ |
49 | if ((quirks & MS_RDESC_3K) && *rsize == 106 && | 49 | if ((quirks & MS_RDESC_3K) && *rsize == 106 && rdesc[94] == 0x19 && |
50 | !memcmp((char []){ 0x19, 0x00, 0x29, 0xff }, | 50 | rdesc[95] == 0x00 && rdesc[96] == 0x29 && |
51 | &rdesc[94], 4)) { | 51 | rdesc[97] == 0xff) { |
52 | rdesc[94] = 0x35; | 52 | rdesc[94] = 0x35; |
53 | rdesc[96] = 0x45; | 53 | rdesc[96] = 0x45; |
54 | } | 54 | } |
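The hid-microsoft hunk replaces a memcmp() against a compound-literal array with four explicit byte comparisons; both forms test the same descriptor bytes at offsets 94..97. The equivalence, reduced to a standalone check (rdesc here is a local stand-in for the real report descriptor):

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Fake report-descriptor fragment; indices 94..97 hold the bytes
         * the quirk looks for (0x19 0x00 0x29 0xff). */
        unsigned char rdesc[106] = {0};
        rdesc[94] = 0x19; rdesc[95] = 0x00; rdesc[96] = 0x29; rdesc[97] = 0xff;

        /* Old style: compare against a compound-literal array. */
        int old_match = !memcmp((char []){ 0x19, 0x00, 0x29, 0xff },
                                &rdesc[94], 4);

        /* New style: spell out the individual byte tests. */
        int new_match = rdesc[94] == 0x19 && rdesc[95] == 0x00 &&
                        rdesc[96] == 0x29 && rdesc[97] == 0xff;

        printf("old=%d new=%d\n", old_match, new_match);   /* both 1 */
        return 0;
}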
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index aa59a254be2c..c02bf208084f 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #define AT91_TWI_STOP 0x0002 /* Send a Stop Condition */ | 39 | #define AT91_TWI_STOP 0x0002 /* Send a Stop Condition */ |
40 | #define AT91_TWI_MSEN 0x0004 /* Master Transfer Enable */ | 40 | #define AT91_TWI_MSEN 0x0004 /* Master Transfer Enable */ |
41 | #define AT91_TWI_SVDIS 0x0020 /* Slave Transfer Disable */ | 41 | #define AT91_TWI_SVDIS 0x0020 /* Slave Transfer Disable */ |
42 | #define AT91_TWI_QUICK 0x0040 /* SMBus quick command */ | ||
42 | #define AT91_TWI_SWRST 0x0080 /* Software Reset */ | 43 | #define AT91_TWI_SWRST 0x0080 /* Software Reset */ |
43 | 44 | ||
44 | #define AT91_TWI_MMR 0x0004 /* Master Mode Register */ | 45 | #define AT91_TWI_MMR 0x0004 /* Master Mode Register */ |
@@ -212,7 +213,11 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) | |||
212 | 213 | ||
213 | INIT_COMPLETION(dev->cmd_complete); | 214 | INIT_COMPLETION(dev->cmd_complete); |
214 | dev->transfer_status = 0; | 215 | dev->transfer_status = 0; |
215 | if (dev->msg->flags & I2C_M_RD) { | 216 | |
217 | if (!dev->buf_len) { | ||
218 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK); | ||
219 | at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP); | ||
220 | } else if (dev->msg->flags & I2C_M_RD) { | ||
216 | unsigned start_flags = AT91_TWI_START; | 221 | unsigned start_flags = AT91_TWI_START; |
217 | 222 | ||
218 | if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) { | 223 | if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) { |
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index 286ca1917820..0670da79ee5e 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c | |||
@@ -287,12 +287,14 @@ read_init_dma_fail: | |||
287 | select_init_dma_fail: | 287 | select_init_dma_fail: |
288 | dma_unmap_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE); | 288 | dma_unmap_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE); |
289 | select_init_pio_fail: | 289 | select_init_pio_fail: |
290 | dmaengine_terminate_all(i2c->dmach); | ||
290 | return -EINVAL; | 291 | return -EINVAL; |
291 | 292 | ||
292 | /* Write failpath. */ | 293 | /* Write failpath. */ |
293 | write_init_dma_fail: | 294 | write_init_dma_fail: |
294 | dma_unmap_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE); | 295 | dma_unmap_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE); |
295 | write_init_pio_fail: | 296 | write_init_pio_fail: |
297 | dmaengine_terminate_all(i2c->dmach); | ||
296 | return -EINVAL; | 298 | return -EINVAL; |
297 | } | 299 | } |
298 | 300 | ||
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index db31eaed6ea5..3525c9e62cb0 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
@@ -43,7 +43,6 @@ | |||
43 | #include <linux/slab.h> | 43 | #include <linux/slab.h> |
44 | #include <linux/i2c-omap.h> | 44 | #include <linux/i2c-omap.h> |
45 | #include <linux/pm_runtime.h> | 45 | #include <linux/pm_runtime.h> |
46 | #include <linux/pm_qos.h> | ||
47 | 46 | ||
48 | /* I2C controller revisions */ | 47 | /* I2C controller revisions */ |
49 | #define OMAP_I2C_OMAP1_REV_2 0x20 | 48 | #define OMAP_I2C_OMAP1_REV_2 0x20 |
@@ -187,8 +186,9 @@ struct omap_i2c_dev { | |||
187 | int reg_shift; /* bit shift for I2C register addresses */ | 186 | int reg_shift; /* bit shift for I2C register addresses */ |
188 | struct completion cmd_complete; | 187 | struct completion cmd_complete; |
189 | struct resource *ioarea; | 188 | struct resource *ioarea; |
190 | u32 latency; /* maximum MPU wkup latency */ | 189 | u32 latency; /* maximum mpu wkup latency */ |
191 | struct pm_qos_request pm_qos_request; | 190 | void (*set_mpu_wkup_lat)(struct device *dev, |
191 | long latency); | ||
192 | u32 speed; /* Speed of bus in kHz */ | 192 | u32 speed; /* Speed of bus in kHz */ |
193 | u32 dtrev; /* extra revision from DT */ | 193 | u32 dtrev; /* extra revision from DT */ |
194 | u32 flags; | 194 | u32 flags; |
@@ -494,7 +494,9 @@ static void omap_i2c_resize_fifo(struct omap_i2c_dev *dev, u8 size, bool is_rx) | |||
494 | dev->b_hw = 1; /* Enable hardware fixes */ | 494 | dev->b_hw = 1; /* Enable hardware fixes */ |
495 | 495 | ||
496 | /* calculate wakeup latency constraint for MPU */ | 496 | /* calculate wakeup latency constraint for MPU */ |
497 | dev->latency = (1000000 * dev->threshold) / (1000 * dev->speed / 8); | 497 | if (dev->set_mpu_wkup_lat != NULL) |
498 | dev->latency = (1000000 * dev->threshold) / | ||
499 | (1000 * dev->speed / 8); | ||
498 | } | 500 | } |
499 | 501 | ||
500 | /* | 502 | /* |
@@ -522,6 +524,9 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap, | |||
522 | dev->buf = msg->buf; | 524 | dev->buf = msg->buf; |
523 | dev->buf_len = msg->len; | 525 | dev->buf_len = msg->len; |
524 | 526 | ||
527 | /* make sure writes to dev->buf_len are ordered */ | ||
528 | barrier(); | ||
529 | |||
525 | omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len); | 530 | omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len); |
526 | 531 | ||
527 | /* Clear the FIFO Buffers */ | 532 | /* Clear the FIFO Buffers */ |
@@ -579,7 +584,6 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap, | |||
579 | */ | 584 | */ |
580 | timeout = wait_for_completion_timeout(&dev->cmd_complete, | 585 | timeout = wait_for_completion_timeout(&dev->cmd_complete, |
581 | OMAP_I2C_TIMEOUT); | 586 | OMAP_I2C_TIMEOUT); |
582 | dev->buf_len = 0; | ||
583 | if (timeout == 0) { | 587 | if (timeout == 0) { |
584 | dev_err(dev->dev, "controller timed out\n"); | 588 | dev_err(dev->dev, "controller timed out\n"); |
585 | omap_i2c_init(dev); | 589 | omap_i2c_init(dev); |
@@ -629,16 +633,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) | |||
629 | if (r < 0) | 633 | if (r < 0) |
630 | goto out; | 634 | goto out; |
631 | 635 | ||
632 | /* | 636 | if (dev->set_mpu_wkup_lat != NULL) |
633 | * When waiting for completion of a i2c transfer, we need to | 637 | dev->set_mpu_wkup_lat(dev->dev, dev->latency); |
634 | * set a wake up latency constraint for the MPU. This is to | ||
635 | * ensure quick enough wakeup from idle, when transfer | ||
636 | * completes. | ||
637 | */ | ||
638 | if (dev->latency) | ||
639 | pm_qos_add_request(&dev->pm_qos_request, | ||
640 | PM_QOS_CPU_DMA_LATENCY, | ||
641 | dev->latency); | ||
642 | 638 | ||
643 | for (i = 0; i < num; i++) { | 639 | for (i = 0; i < num; i++) { |
644 | r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1))); | 640 | r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1))); |
@@ -646,8 +642,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) | |||
646 | break; | 642 | break; |
647 | } | 643 | } |
648 | 644 | ||
649 | if (dev->latency) | 645 | if (dev->set_mpu_wkup_lat != NULL) |
650 | pm_qos_remove_request(&dev->pm_qos_request); | 646 | dev->set_mpu_wkup_lat(dev->dev, -1); |
651 | 647 | ||
652 | if (r == 0) | 648 | if (r == 0) |
653 | r = num; | 649 | r = num; |
@@ -1104,6 +1100,7 @@ omap_i2c_probe(struct platform_device *pdev) | |||
1104 | } else if (pdata != NULL) { | 1100 | } else if (pdata != NULL) { |
1105 | dev->speed = pdata->clkrate; | 1101 | dev->speed = pdata->clkrate; |
1106 | dev->flags = pdata->flags; | 1102 | dev->flags = pdata->flags; |
1103 | dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat; | ||
1107 | dev->dtrev = pdata->rev; | 1104 | dev->dtrev = pdata->rev; |
1108 | } | 1105 | } |
1109 | 1106 | ||
@@ -1159,8 +1156,9 @@ omap_i2c_probe(struct platform_device *pdev) | |||
1159 | dev->b_hw = 1; /* Enable hardware fixes */ | 1156 | dev->b_hw = 1; /* Enable hardware fixes */ |
1160 | 1157 | ||
1161 | /* calculate wakeup latency constraint for MPU */ | 1158 | /* calculate wakeup latency constraint for MPU */ |
1162 | dev->latency = (1000000 * dev->fifo_size) / | 1159 | if (dev->set_mpu_wkup_lat != NULL) |
1163 | (1000 * dev->speed / 8); | 1160 | dev->latency = (1000000 * dev->fifo_size) / |
1161 | (1000 * dev->speed / 8); | ||
1164 | } | 1162 | } |
1165 | 1163 | ||
1166 | /* reset ASAP, clearing any IRQs */ | 1164 | /* reset ASAP, clearing any IRQs */ |
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 3e0335f1fc60..9d902725bac9 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c | |||
@@ -806,6 +806,7 @@ static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c) | |||
806 | dev_err(i2c->dev, "invalid gpio[%d]: %d\n", idx, gpio); | 806 | dev_err(i2c->dev, "invalid gpio[%d]: %d\n", idx, gpio); |
807 | goto free_gpio; | 807 | goto free_gpio; |
808 | } | 808 | } |
809 | i2c->gpios[idx] = gpio; | ||
809 | 810 | ||
810 | ret = gpio_request(gpio, "i2c-bus"); | 811 | ret = gpio_request(gpio, "i2c-bus"); |
811 | if (ret) { | 812 | if (ret) { |
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index a7edf987a339..e388590b44ab 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/irqflags.h> | 39 | #include <linux/irqflags.h> |
40 | #include <linux/rwsem.h> | 40 | #include <linux/rwsem.h> |
41 | #include <linux/pm_runtime.h> | 41 | #include <linux/pm_runtime.h> |
42 | #include <linux/acpi.h> | ||
42 | #include <asm/uaccess.h> | 43 | #include <asm/uaccess.h> |
43 | 44 | ||
44 | #include "i2c-core.h" | 45 | #include "i2c-core.h" |
@@ -78,6 +79,10 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv) | |||
78 | if (of_driver_match_device(dev, drv)) | 79 | if (of_driver_match_device(dev, drv)) |
79 | return 1; | 80 | return 1; |
80 | 81 | ||
82 | /* Then ACPI style match */ | ||
83 | if (acpi_driver_match_device(dev, drv)) | ||
84 | return 1; | ||
85 | |||
81 | driver = to_i2c_driver(drv); | 86 | driver = to_i2c_driver(drv); |
82 | /* match on an id table if there is one */ | 87 | /* match on an id table if there is one */ |
83 | if (driver->id_table) | 88 | if (driver->id_table) |
@@ -539,6 +544,7 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info) | |||
539 | client->dev.bus = &i2c_bus_type; | 544 | client->dev.bus = &i2c_bus_type; |
540 | client->dev.type = &i2c_client_type; | 545 | client->dev.type = &i2c_client_type; |
541 | client->dev.of_node = info->of_node; | 546 | client->dev.of_node = info->of_node; |
547 | ACPI_HANDLE_SET(&client->dev, info->acpi_node.handle); | ||
542 | 548 | ||
543 | /* For 10-bit clients, add an arbitrary offset to avoid collisions */ | 549 | /* For 10-bit clients, add an arbitrary offset to avoid collisions */ |
544 | dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), | 550 | dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), |
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c index 5f097f309b9f..7fa5b24b16db 100644 --- a/drivers/i2c/muxes/i2c-mux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c | |||
@@ -169,7 +169,7 @@ static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev) | |||
169 | mux->busses = devm_kzalloc(&pdev->dev, | 169 | mux->busses = devm_kzalloc(&pdev->dev, |
170 | sizeof(mux->busses) * mux->pdata->bus_count, | 170 | sizeof(mux->busses) * mux->pdata->bus_count, |
171 | GFP_KERNEL); | 171 | GFP_KERNEL); |
172 | if (!mux->states) { | 172 | if (!mux->busses) { |
173 | dev_err(&pdev->dev, "Cannot allocate busses\n"); | 173 | dev_err(&pdev->dev, "Cannot allocate busses\n"); |
174 | ret = -ENOMEM; | 174 | ret = -ENOMEM; |
175 | goto err; | 175 | goto err; |
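The i2c-mux-pinctrl fix is a copy-and-paste slip: after allocating mux->busses the code tested mux->states, which was allocated a few lines earlier, so a failed busses allocation went unnoticed. The habit the fix restores is checking each allocation by name immediately after making it; a reduced illustration with calloc():

#include <stdio.h>
#include <stdlib.h>

struct mux {
        void **states;
        void **busses;
};

static int mux_probe(struct mux *mux, size_t count)
{
        mux->states = calloc(count, sizeof(*mux->states));
        if (!mux->states)              /* check the allocation just made */
                return -1;

        mux->busses = calloc(count, sizeof(*mux->busses));
        if (!mux->busses) {            /* NOT mux->states: that was the bug */
                free(mux->states);
                return -1;
        }
        return 0;
}

int main(void)
{
        struct mux mux;

        if (mux_probe(&mux, 4) == 0) {
                puts("probe ok");
                free(mux.busses);
                free(mux.states);
        }
        return 0;
}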
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index b0f6b4c8ee14..c49c04d9c2b0 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
@@ -56,7 +56,6 @@ | |||
56 | #include <linux/kernel.h> | 56 | #include <linux/kernel.h> |
57 | #include <linux/cpuidle.h> | 57 | #include <linux/cpuidle.h> |
58 | #include <linux/clockchips.h> | 58 | #include <linux/clockchips.h> |
59 | #include <linux/hrtimer.h> /* ktime_get_real() */ | ||
60 | #include <trace/events/power.h> | 59 | #include <trace/events/power.h> |
61 | #include <linux/sched.h> | 60 | #include <linux/sched.h> |
62 | #include <linux/notifier.h> | 61 | #include <linux/notifier.h> |
@@ -72,6 +71,7 @@ | |||
72 | static struct cpuidle_driver intel_idle_driver = { | 71 | static struct cpuidle_driver intel_idle_driver = { |
73 | .name = "intel_idle", | 72 | .name = "intel_idle", |
74 | .owner = THIS_MODULE, | 73 | .owner = THIS_MODULE, |
74 | .en_core_tk_irqen = 1, | ||
75 | }; | 75 | }; |
76 | /* intel_idle.max_cstate=0 disables driver */ | 76 | /* intel_idle.max_cstate=0 disables driver */ |
77 | static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1; | 77 | static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1; |
@@ -281,8 +281,6 @@ static int intel_idle(struct cpuidle_device *dev, | |||
281 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | 281 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
282 | unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage); | 282 | unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage); |
283 | unsigned int cstate; | 283 | unsigned int cstate; |
284 | ktime_t kt_before, kt_after; | ||
285 | s64 usec_delta; | ||
286 | int cpu = smp_processor_id(); | 284 | int cpu = smp_processor_id(); |
287 | 285 | ||
288 | cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; | 286 | cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; |
@@ -297,8 +295,6 @@ static int intel_idle(struct cpuidle_device *dev, | |||
297 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) | 295 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) |
298 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); | 296 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); |
299 | 297 | ||
300 | kt_before = ktime_get_real(); | ||
301 | |||
302 | stop_critical_timings(); | 298 | stop_critical_timings(); |
303 | if (!need_resched()) { | 299 | if (!need_resched()) { |
304 | 300 | ||
@@ -310,17 +306,9 @@ static int intel_idle(struct cpuidle_device *dev, | |||
310 | 306 | ||
311 | start_critical_timings(); | 307 | start_critical_timings(); |
312 | 308 | ||
313 | kt_after = ktime_get_real(); | ||
314 | usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before)); | ||
315 | |||
316 | local_irq_enable(); | ||
317 | |||
318 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) | 309 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) |
319 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); | 310 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); |
320 | 311 | ||
321 | /* Update cpuidle counters */ | ||
322 | dev->last_residency = (int)usec_delta; | ||
323 | |||
324 | return index; | 312 | return index; |
325 | } | 313 | } |
326 | 314 | ||
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c index c0ec7d42c3be..1abbc170d8b7 100644 --- a/drivers/input/input-mt.c +++ b/drivers/input/input-mt.c | |||
@@ -26,10 +26,14 @@ static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src) | |||
26 | * input_mt_init_slots() - initialize MT input slots | 26 | * input_mt_init_slots() - initialize MT input slots |
27 | * @dev: input device supporting MT events and finger tracking | 27 | * @dev: input device supporting MT events and finger tracking |
28 | * @num_slots: number of slots used by the device | 28 | * @num_slots: number of slots used by the device |
29 | * @flags: mt tasks to handle in core | ||
29 | * | 30 | * |
30 | * This function allocates all necessary memory for MT slot handling | 31 | * This function allocates all necessary memory for MT slot handling |
31 | * in the input device, prepares the ABS_MT_SLOT and | 32 | * in the input device, prepares the ABS_MT_SLOT and |
32 | * ABS_MT_TRACKING_ID events for use and sets up appropriate buffers. | 33 | * ABS_MT_TRACKING_ID events for use and sets up appropriate buffers. |
34 | * Depending on the flags set, it also performs pointer emulation and | ||
35 | * frame synchronization. | ||
36 | * | ||
33 | * May be called repeatedly. Returns -EINVAL if attempting to | 37 | * May be called repeatedly. Returns -EINVAL if attempting to |
34 | * reinitialize with a different number of slots. | 38 | * reinitialize with a different number of slots. |
35 | */ | 39 | */ |
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index 8f02e3d0e712..4c842c320c2e 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c | |||
@@ -12,8 +12,8 @@ | |||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
13 | 13 | ||
14 | #define MOUSEDEV_MINOR_BASE 32 | 14 | #define MOUSEDEV_MINOR_BASE 32 |
15 | #define MOUSEDEV_MINORS 32 | 15 | #define MOUSEDEV_MINORS 31 |
16 | #define MOUSEDEV_MIX 31 | 16 | #define MOUSEDEV_MIX 63 |
17 | 17 | ||
18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index f02028ec3db6..78e5d9ab0ba7 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c | |||
@@ -955,7 +955,8 @@ static int ads7846_resume(struct device *dev) | |||
955 | 955 | ||
956 | static SIMPLE_DEV_PM_OPS(ads7846_pm, ads7846_suspend, ads7846_resume); | 956 | static SIMPLE_DEV_PM_OPS(ads7846_pm, ads7846_suspend, ads7846_resume); |
957 | 957 | ||
958 | static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads7846 *ts) | 958 | static int __devinit ads7846_setup_pendown(struct spi_device *spi, |
959 | struct ads7846 *ts) | ||
959 | { | 960 | { |
960 | struct ads7846_platform_data *pdata = spi->dev.platform_data; | 961 | struct ads7846_platform_data *pdata = spi->dev.platform_data; |
961 | int err; | 962 | int err; |
@@ -981,6 +982,9 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784 | |||
981 | 982 | ||
982 | ts->gpio_pendown = pdata->gpio_pendown; | 983 | ts->gpio_pendown = pdata->gpio_pendown; |
983 | 984 | ||
985 | if (pdata->gpio_pendown_debounce) | ||
986 | gpio_set_debounce(pdata->gpio_pendown, | ||
987 | pdata->gpio_pendown_debounce); | ||
984 | } else { | 988 | } else { |
985 | dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n"); | 989 | dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n"); |
986 | return -EINVAL; | 990 | return -EINVAL; |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index d4a4cd445cab..0badfa48b32b 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -4108,7 +4108,7 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to) | |||
4108 | static int intel_iommu_add_device(struct device *dev) | 4108 | static int intel_iommu_add_device(struct device *dev) |
4109 | { | 4109 | { |
4110 | struct pci_dev *pdev = to_pci_dev(dev); | 4110 | struct pci_dev *pdev = to_pci_dev(dev); |
4111 | struct pci_dev *bridge, *dma_pdev; | 4111 | struct pci_dev *bridge, *dma_pdev = NULL; |
4112 | struct iommu_group *group; | 4112 | struct iommu_group *group; |
4113 | int ret; | 4113 | int ret; |
4114 | 4114 | ||
@@ -4122,7 +4122,7 @@ static int intel_iommu_add_device(struct device *dev) | |||
4122 | dma_pdev = pci_get_domain_bus_and_slot( | 4122 | dma_pdev = pci_get_domain_bus_and_slot( |
4123 | pci_domain_nr(pdev->bus), | 4123 | pci_domain_nr(pdev->bus), |
4124 | bridge->subordinate->number, 0); | 4124 | bridge->subordinate->number, 0); |
4125 | else | 4125 | if (!dma_pdev) |
4126 | dma_pdev = pci_dev_get(bridge); | 4126 | dma_pdev = pci_dev_get(bridge); |
4127 | } else | 4127 | } else |
4128 | dma_pdev = pci_dev_get(pdev); | 4128 | dma_pdev = pci_dev_get(pdev); |
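The intel-iommu change initializes dma_pdev to NULL and turns the `else` into `if (!dma_pdev)`, so the bridge reference is taken only when the bus/slot lookup actually came back empty, and no path can use the pointer uninitialized. A generic sketch of that lookup-then-fallback shape (lookup() and pick_device() are made-up names, not the iommu API):

#include <stdio.h>
#include <string.h>

static const char *lookup(const char *key)
{
        /* Pretend only "root" resolves; anything else is a miss. */
        return strcmp(key, "root") == 0 ? "resolved-root" : NULL;
}

static const char *pick_device(const char *key, const char *bridge)
{
        const char *dev = NULL;           /* start from a known state */

        if (key)
                dev = lookup(key);        /* primary lookup may fail */
        if (!dev)
                dev = bridge;             /* fall back only on a miss */
        return dev;
}

int main(void)
{
        printf("%s\n", pick_device("root", "bridge0"));   /* resolved-root */
        printf("%s\n", pick_device("leaf", "bridge0"));   /* bridge0 */
        printf("%s\n", pick_device(NULL,  "bridge0"));    /* bridge0 */
        return 0;
}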
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index a649f146d17b..c0f7a4266263 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c | |||
@@ -1054,6 +1054,7 @@ static int smmu_debugfs_stats_show(struct seq_file *s, void *v) | |||
1054 | stats[i], val, offs); | 1054 | stats[i], val, offs); |
1055 | } | 1055 | } |
1056 | seq_printf(s, "\n"); | 1056 | seq_printf(s, "\n"); |
1057 | dput(dent); | ||
1057 | 1058 | ||
1058 | return 0; | 1059 | return 0; |
1059 | } | 1060 | } |
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c index dc670ccc6978..16c78f1c5ef2 100644 --- a/drivers/irqchip/irq-bcm2835.c +++ b/drivers/irqchip/irq-bcm2835.c | |||
@@ -168,7 +168,8 @@ static int __init armctrl_of_init(struct device_node *node, | |||
168 | } | 168 | } |
169 | 169 | ||
170 | static struct of_device_id irq_of_match[] __initconst = { | 170 | static struct of_device_id irq_of_match[] __initconst = { |
171 | { .compatible = "brcm,bcm2835-armctrl-ic", .data = armctrl_of_init } | 171 | { .compatible = "brcm,bcm2835-armctrl-ic", .data = armctrl_of_init }, |
172 | { } | ||
172 | }; | 173 | }; |
173 | 174 | ||
174 | void __init bcm2835_init_irq(void) | 175 | void __init bcm2835_init_irq(void) |
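of_device_id match tables are walked until an all-zero entry, so a table missing the trailing `{ }` lets the matcher read past the end of the array; the bcm2835 fix adds that sentinel. The same convention in a self-contained form (struct match_id and match() are simplified stand-ins for the OF infrastructure):

#include <stdio.h>
#include <string.h>

struct match_id {
        const char *compatible;
        void *data;
};

/* Walks entries until the all-zero sentinel, like the OF match code. */
static const struct match_id *match(const struct match_id *tbl,
                                    const char *compat)
{
        for (; tbl->compatible; tbl++)
                if (strcmp(tbl->compatible, compat) == 0)
                        return tbl;
        return NULL;
}

static const struct match_id irq_of_match[] = {
        { .compatible = "brcm,bcm2835-armctrl-ic", .data = NULL },
        { NULL }   /* sentinel; the kernel spells this as { } */
};

int main(void)
{
        printf("hit:  %s\n", match(irq_of_match, "brcm,bcm2835-armctrl-ic")
                             ? "yes" : "no");
        printf("miss: %s\n", match(irq_of_match, "acme,unknown")
                             ? "yes" : "no");
        return 0;
}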
diff --git a/drivers/leds/ledtrig-cpu.c b/drivers/leds/ledtrig-cpu.c index b312056da14d..4239b3955ff0 100644 --- a/drivers/leds/ledtrig-cpu.c +++ b/drivers/leds/ledtrig-cpu.c | |||
@@ -33,8 +33,6 @@ | |||
33 | struct led_trigger_cpu { | 33 | struct led_trigger_cpu { |
34 | char name[MAX_NAME_LEN]; | 34 | char name[MAX_NAME_LEN]; |
35 | struct led_trigger *_trig; | 35 | struct led_trigger *_trig; |
36 | struct mutex lock; | ||
37 | int lock_is_inited; | ||
38 | }; | 36 | }; |
39 | 37 | ||
40 | static DEFINE_PER_CPU(struct led_trigger_cpu, cpu_trig); | 38 | static DEFINE_PER_CPU(struct led_trigger_cpu, cpu_trig); |
@@ -50,12 +48,6 @@ void ledtrig_cpu(enum cpu_led_event ledevt) | |||
50 | { | 48 | { |
51 | struct led_trigger_cpu *trig = &__get_cpu_var(cpu_trig); | 49 | struct led_trigger_cpu *trig = &__get_cpu_var(cpu_trig); |
52 | 50 | ||
53 | /* mutex lock should be initialized before calling mutex_call() */ | ||
54 | if (!trig->lock_is_inited) | ||
55 | return; | ||
56 | |||
57 | mutex_lock(&trig->lock); | ||
58 | |||
59 | /* Locate the correct CPU LED */ | 51 | /* Locate the correct CPU LED */ |
60 | switch (ledevt) { | 52 | switch (ledevt) { |
61 | case CPU_LED_IDLE_END: | 53 | case CPU_LED_IDLE_END: |
@@ -75,8 +67,6 @@ void ledtrig_cpu(enum cpu_led_event ledevt) | |||
75 | /* Will leave the LED as it is */ | 67 | /* Will leave the LED as it is */ |
76 | break; | 68 | break; |
77 | } | 69 | } |
78 | |||
79 | mutex_unlock(&trig->lock); | ||
80 | } | 70 | } |
81 | EXPORT_SYMBOL(ledtrig_cpu); | 71 | EXPORT_SYMBOL(ledtrig_cpu); |
82 | 72 | ||
@@ -117,14 +107,9 @@ static int __init ledtrig_cpu_init(void) | |||
117 | for_each_possible_cpu(cpu) { | 107 | for_each_possible_cpu(cpu) { |
118 | struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu); | 108 | struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu); |
119 | 109 | ||
120 | mutex_init(&trig->lock); | ||
121 | |||
122 | snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu); | 110 | snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu); |
123 | 111 | ||
124 | mutex_lock(&trig->lock); | ||
125 | led_trigger_register_simple(trig->name, &trig->_trig); | 112 | led_trigger_register_simple(trig->name, &trig->_trig); |
126 | trig->lock_is_inited = 1; | ||
127 | mutex_unlock(&trig->lock); | ||
128 | } | 113 | } |
129 | 114 | ||
130 | register_syscore_ops(&ledtrig_cpu_syscore_ops); | 115 | register_syscore_ops(&ledtrig_cpu_syscore_ops); |
@@ -142,15 +127,9 @@ static void __exit ledtrig_cpu_exit(void) | |||
142 | for_each_possible_cpu(cpu) { | 127 | for_each_possible_cpu(cpu) { |
143 | struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu); | 128 | struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu); |
144 | 129 | ||
145 | mutex_lock(&trig->lock); | ||
146 | |||
147 | led_trigger_unregister_simple(trig->_trig); | 130 | led_trigger_unregister_simple(trig->_trig); |
148 | trig->_trig = NULL; | 131 | trig->_trig = NULL; |
149 | memset(trig->name, 0, MAX_NAME_LEN); | 132 | memset(trig->name, 0, MAX_NAME_LEN); |
150 | trig->lock_is_inited = 0; | ||
151 | |||
152 | mutex_unlock(&trig->lock); | ||
153 | mutex_destroy(&trig->lock); | ||
154 | } | 133 | } |
155 | 134 | ||
156 | unregister_syscore_ops(&ledtrig_cpu_syscore_ops); | 135 | unregister_syscore_ops(&ledtrig_cpu_syscore_ops); |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 02db9183ca01..77e6eff41cae 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -740,8 +740,14 @@ static void rq_completed(struct mapped_device *md, int rw, int run_queue) | |||
740 | if (!md_in_flight(md)) | 740 | if (!md_in_flight(md)) |
741 | wake_up(&md->wait); | 741 | wake_up(&md->wait); |
742 | 742 | ||
743 | /* | ||
744 | * Run this off this callpath, as drivers could invoke end_io while | ||
745 | * inside their request_fn (and holding the queue lock). Calling | ||
746 | * back into ->request_fn() could deadlock attempting to grab the | ||
747 | * queue lock again. | ||
748 | */ | ||
743 | if (run_queue) | 749 | if (run_queue) |
744 | blk_run_queue(md->queue); | 750 | blk_run_queue_async(md->queue); |
745 | 751 | ||
746 | /* | 752 | /* |
747 | * dm_put() must be at the end of this function. See the comment above | 753 | * dm_put() must be at the end of this function. See the comment above |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 9ab768acfb62..61200717687b 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -1817,10 +1817,10 @@ retry: | |||
1817 | memset(bbp, 0xff, PAGE_SIZE); | 1817 | memset(bbp, 0xff, PAGE_SIZE); |
1818 | 1818 | ||
1819 | for (i = 0 ; i < bb->count ; i++) { | 1819 | for (i = 0 ; i < bb->count ; i++) { |
1820 | u64 internal_bb = *p++; | 1820 | u64 internal_bb = p[i]; |
1821 | u64 store_bb = ((BB_OFFSET(internal_bb) << 10) | 1821 | u64 store_bb = ((BB_OFFSET(internal_bb) << 10) |
1822 | | BB_LEN(internal_bb)); | 1822 | | BB_LEN(internal_bb)); |
1823 | *bbp++ = cpu_to_le64(store_bb); | 1823 | bbp[i] = cpu_to_le64(store_bb); |
1824 | } | 1824 | } |
1825 | bb->changed = 0; | 1825 | bb->changed = 0; |
1826 | if (read_seqretry(&bb->lock, seq)) | 1826 | if (read_seqretry(&bb->lock, seq)) |
@@ -5294,7 +5294,7 @@ void md_stop_writes(struct mddev *mddev) | |||
5294 | } | 5294 | } |
5295 | EXPORT_SYMBOL_GPL(md_stop_writes); | 5295 | EXPORT_SYMBOL_GPL(md_stop_writes); |
5296 | 5296 | ||
5297 | void md_stop(struct mddev *mddev) | 5297 | static void __md_stop(struct mddev *mddev) |
5298 | { | 5298 | { |
5299 | mddev->ready = 0; | 5299 | mddev->ready = 0; |
5300 | mddev->pers->stop(mddev); | 5300 | mddev->pers->stop(mddev); |
@@ -5304,6 +5304,18 @@ void md_stop(struct mddev *mddev) | |||
5304 | mddev->pers = NULL; | 5304 | mddev->pers = NULL; |
5305 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | 5305 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
5306 | } | 5306 | } |
5307 | |||
5308 | void md_stop(struct mddev *mddev) | ||
5309 | { | ||
5310 | /* stop the array and free an attached data structures. | ||
5311 | * This is called from dm-raid | ||
5312 | */ | ||
5313 | __md_stop(mddev); | ||
5314 | bitmap_destroy(mddev); | ||
5315 | if (mddev->bio_set) | ||
5316 | bioset_free(mddev->bio_set); | ||
5317 | } | ||
5318 | |||
5307 | EXPORT_SYMBOL_GPL(md_stop); | 5319 | EXPORT_SYMBOL_GPL(md_stop); |
5308 | 5320 | ||
5309 | static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) | 5321 | static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) |
@@ -5364,7 +5376,7 @@ static int do_md_stop(struct mddev * mddev, int mode, | |||
5364 | set_disk_ro(disk, 0); | 5376 | set_disk_ro(disk, 0); |
5365 | 5377 | ||
5366 | __md_stop_writes(mddev); | 5378 | __md_stop_writes(mddev); |
5367 | md_stop(mddev); | 5379 | __md_stop(mddev); |
5368 | mddev->queue->merge_bvec_fn = NULL; | 5380 | mddev->queue->merge_bvec_fn = NULL; |
5369 | mddev->queue->backing_dev_info.congested_fn = NULL; | 5381 | mddev->queue->backing_dev_info.congested_fn = NULL; |
5370 | 5382 | ||
@@ -7936,9 +7948,9 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, | |||
7936 | sector_t *first_bad, int *bad_sectors) | 7948 | sector_t *first_bad, int *bad_sectors) |
7937 | { | 7949 | { |
7938 | int hi; | 7950 | int hi; |
7939 | int lo = 0; | 7951 | int lo; |
7940 | u64 *p = bb->page; | 7952 | u64 *p = bb->page; |
7941 | int rv = 0; | 7953 | int rv; |
7942 | sector_t target = s + sectors; | 7954 | sector_t target = s + sectors; |
7943 | unsigned seq; | 7955 | unsigned seq; |
7944 | 7956 | ||
@@ -7953,7 +7965,8 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, | |||
7953 | 7965 | ||
7954 | retry: | 7966 | retry: |
7955 | seq = read_seqbegin(&bb->lock); | 7967 | seq = read_seqbegin(&bb->lock); |
7956 | | 7968 | lo = 0;
7969 | rv = 0; | ||
7957 | hi = bb->count; | 7970 | hi = bb->count; |
7958 | 7971 | ||
7959 | /* Binary search between lo and hi for 'target' | 7972 | /* Binary search between lo and hi for 'target' |
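In md_is_badblock() the search bounds and return value are now reset after the retry: label; with read_seqbegin()/read_seqretry() the body may run more than once, and anything computed before the label would carry stale values into the second pass. A userspace sketch of that retry discipline, with seq_begin()/seq_retry() as trivial stand-ins for the seqlock API:

#include <stdio.h>

static int fake_seq;                      /* pretend a writer bumps this */

static int seq_begin(void)  { return fake_seq; }
static int seq_retry(int s) { return s != fake_seq; }

/* Count entries <= limit in tbl[]; state is reset on every attempt so a
 * retried pass starts clean, mirroring the md_is_badblock() fix. */
static int count_below(const int *tbl, int n, int limit)
{
        int count, seq;

retry:
        seq = seq_begin();
        count = 0;                        /* reset per attempt, not once */
        for (int i = 0; i < n; i++)
                if (tbl[i] <= limit)
                        count++;
        if (seq_retry(seq)) {
                /* a concurrent writer changed the table: redo the walk */
                goto retry;
        }
        return count;
}

int main(void)
{
        int tbl[] = { 1, 5, 9, 12 };

        printf("%d\n", count_below(tbl, 4, 9));   /* 3 */
        return 0;
}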
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index d1295aff4173..0d5d0ff2c0f7 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -499,7 +499,7 @@ static void raid10_end_write_request(struct bio *bio, int error) | |||
499 | */ | 499 | */ |
500 | one_write_done(r10_bio); | 500 | one_write_done(r10_bio); |
501 | if (dec_rdev) | 501 | if (dec_rdev) |
502 | rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); | 502 | rdev_dec_pending(rdev, conf->mddev); |
503 | } | 503 | } |
504 | 504 | ||
505 | /* | 505 | /* |
@@ -1334,18 +1334,21 @@ retry_write: | |||
1334 | blocked_rdev = rrdev; | 1334 | blocked_rdev = rrdev; |
1335 | break; | 1335 | break; |
1336 | } | 1336 | } |
1337 | if (rdev && (test_bit(Faulty, &rdev->flags) | ||
1338 | || test_bit(Unmerged, &rdev->flags))) | ||
1339 | rdev = NULL; | ||
1337 | if (rrdev && (test_bit(Faulty, &rrdev->flags) | 1340 | if (rrdev && (test_bit(Faulty, &rrdev->flags) |
1338 | || test_bit(Unmerged, &rrdev->flags))) | 1341 | || test_bit(Unmerged, &rrdev->flags))) |
1339 | rrdev = NULL; | 1342 | rrdev = NULL; |
1340 | 1343 | ||
1341 | r10_bio->devs[i].bio = NULL; | 1344 | r10_bio->devs[i].bio = NULL; |
1342 | r10_bio->devs[i].repl_bio = NULL; | 1345 | r10_bio->devs[i].repl_bio = NULL; |
1343 | if (!rdev || test_bit(Faulty, &rdev->flags) || | 1346 | |
1344 | test_bit(Unmerged, &rdev->flags)) { | 1347 | if (!rdev && !rrdev) { |
1345 | set_bit(R10BIO_Degraded, &r10_bio->state); | 1348 | set_bit(R10BIO_Degraded, &r10_bio->state); |
1346 | continue; | 1349 | continue; |
1347 | } | 1350 | } |
1348 | if (test_bit(WriteErrorSeen, &rdev->flags)) { | 1351 | if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) { |
1349 | sector_t first_bad; | 1352 | sector_t first_bad; |
1350 | sector_t dev_sector = r10_bio->devs[i].addr; | 1353 | sector_t dev_sector = r10_bio->devs[i].addr; |
1351 | int bad_sectors; | 1354 | int bad_sectors; |
@@ -1387,8 +1390,10 @@ retry_write: | |||
1387 | max_sectors = good_sectors; | 1390 | max_sectors = good_sectors; |
1388 | } | 1391 | } |
1389 | } | 1392 | } |
1390 | r10_bio->devs[i].bio = bio; | 1393 | if (rdev) { |
1391 | atomic_inc(&rdev->nr_pending); | 1394 | r10_bio->devs[i].bio = bio; |
1395 | atomic_inc(&rdev->nr_pending); | ||
1396 | } | ||
1392 | if (rrdev) { | 1397 | if (rrdev) { |
1393 | r10_bio->devs[i].repl_bio = bio; | 1398 | r10_bio->devs[i].repl_bio = bio; |
1394 | atomic_inc(&rrdev->nr_pending); | 1399 | atomic_inc(&rrdev->nr_pending); |
@@ -1444,69 +1449,71 @@ retry_write: | |||
1444 | for (i = 0; i < conf->copies; i++) { | 1449 | for (i = 0; i < conf->copies; i++) { |
1445 | struct bio *mbio; | 1450 | struct bio *mbio; |
1446 | int d = r10_bio->devs[i].devnum; | 1451 | int d = r10_bio->devs[i].devnum; |
1447 | if (!r10_bio->devs[i].bio) | 1452 | if (r10_bio->devs[i].bio) { |
1448 | continue; | 1453 | struct md_rdev *rdev = conf->mirrors[d].rdev; |
1454 | mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); | ||
1455 | md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, | ||
1456 | max_sectors); | ||
1457 | r10_bio->devs[i].bio = mbio; | ||
1458 | |||
1459 | mbio->bi_sector = (r10_bio->devs[i].addr+ | ||
1460 | choose_data_offset(r10_bio, | ||
1461 | rdev)); | ||
1462 | mbio->bi_bdev = rdev->bdev; | ||
1463 | mbio->bi_end_io = raid10_end_write_request; | ||
1464 | mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; | ||
1465 | mbio->bi_private = r10_bio; | ||
1449 | 1466 | ||
1450 | mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); | 1467 | atomic_inc(&r10_bio->remaining); |
1451 | md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, | ||
1452 | max_sectors); | ||
1453 | r10_bio->devs[i].bio = mbio; | ||
1454 | 1468 | ||
1455 | mbio->bi_sector = (r10_bio->devs[i].addr+ | 1469 | cb = blk_check_plugged(raid10_unplug, mddev, |
1456 | choose_data_offset(r10_bio, | 1470 | sizeof(*plug)); |
1457 | conf->mirrors[d].rdev)); | 1471 | if (cb) |
1458 | mbio->bi_bdev = conf->mirrors[d].rdev->bdev; | 1472 | plug = container_of(cb, struct raid10_plug_cb, |
1459 | mbio->bi_end_io = raid10_end_write_request; | 1473 | cb); |
1460 | mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; | 1474 | else |
1461 | mbio->bi_private = r10_bio; | 1475 | plug = NULL; |
1476 | spin_lock_irqsave(&conf->device_lock, flags); | ||
1477 | if (plug) { | ||
1478 | bio_list_add(&plug->pending, mbio); | ||
1479 | plug->pending_cnt++; | ||
1480 | } else { | ||
1481 | bio_list_add(&conf->pending_bio_list, mbio); | ||
1482 | conf->pending_count++; | ||
1483 | } | ||
1484 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1485 | if (!plug) | ||
1486 | md_wakeup_thread(mddev->thread); | ||
1487 | } | ||
1462 | 1488 | ||
1463 | atomic_inc(&r10_bio->remaining); | 1489 | if (r10_bio->devs[i].repl_bio) { |
1490 | struct md_rdev *rdev = conf->mirrors[d].replacement; | ||
1491 | if (rdev == NULL) { | ||
1492 | /* Replacement just got moved to main 'rdev' */ | ||
1493 | smp_mb(); | ||
1494 | rdev = conf->mirrors[d].rdev; | ||
1495 | } | ||
1496 | mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); | ||
1497 | md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, | ||
1498 | max_sectors); | ||
1499 | r10_bio->devs[i].repl_bio = mbio; | ||
1500 | |||
1501 | mbio->bi_sector = (r10_bio->devs[i].addr + | ||
1502 | choose_data_offset( | ||
1503 | r10_bio, rdev)); | ||
1504 | mbio->bi_bdev = rdev->bdev; | ||
1505 | mbio->bi_end_io = raid10_end_write_request; | ||
1506 | mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; | ||
1507 | mbio->bi_private = r10_bio; | ||
1464 | 1508 | ||
1465 | cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug)); | 1509 | atomic_inc(&r10_bio->remaining); |
1466 | if (cb) | 1510 | spin_lock_irqsave(&conf->device_lock, flags); |
1467 | plug = container_of(cb, struct raid10_plug_cb, cb); | ||
1468 | else | ||
1469 | plug = NULL; | ||
1470 | spin_lock_irqsave(&conf->device_lock, flags); | ||
1471 | if (plug) { | ||
1472 | bio_list_add(&plug->pending, mbio); | ||
1473 | plug->pending_cnt++; | ||
1474 | } else { | ||
1475 | bio_list_add(&conf->pending_bio_list, mbio); | 1511 | bio_list_add(&conf->pending_bio_list, mbio); |
1476 | conf->pending_count++; | 1512 | conf->pending_count++; |
1513 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1514 | if (!mddev_check_plugged(mddev)) | ||
1515 | md_wakeup_thread(mddev->thread); | ||
1477 | } | 1516 | } |
1478 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1479 | if (!plug) | ||
1480 | md_wakeup_thread(mddev->thread); | ||
1481 | |||
1482 | if (!r10_bio->devs[i].repl_bio) | ||
1483 | continue; | ||
1484 | |||
1485 | mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); | ||
1486 | md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, | ||
1487 | max_sectors); | ||
1488 | r10_bio->devs[i].repl_bio = mbio; | ||
1489 | |||
1490 | /* We are actively writing to the original device | ||
1491 | * so it cannot disappear, so the replacement cannot | ||
1492 | * become NULL here | ||
1493 | */ | ||
1494 | mbio->bi_sector = (r10_bio->devs[i].addr + | ||
1495 | choose_data_offset( | ||
1496 | r10_bio, | ||
1497 | conf->mirrors[d].replacement)); | ||
1498 | mbio->bi_bdev = conf->mirrors[d].replacement->bdev; | ||
1499 | mbio->bi_end_io = raid10_end_write_request; | ||
1500 | mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; | ||
1501 | mbio->bi_private = r10_bio; | ||
1502 | |||
1503 | atomic_inc(&r10_bio->remaining); | ||
1504 | spin_lock_irqsave(&conf->device_lock, flags); | ||
1505 | bio_list_add(&conf->pending_bio_list, mbio); | ||
1506 | conf->pending_count++; | ||
1507 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1508 | if (!mddev_check_plugged(mddev)) | ||
1509 | md_wakeup_thread(mddev->thread); | ||
1510 | } | 1517 | } |
1511 | 1518 | ||
1512 | /* Don't remove the bias on 'remaining' (one_write_done) until | 1519 | /* Don't remove the bias on 'remaining' (one_write_done) until |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index c5439dce0295..a4502686e7a8 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -2774,10 +2774,12 @@ static void handle_stripe_clean_event(struct r5conf *conf, | |||
2774 | dev = &sh->dev[i]; | 2774 | dev = &sh->dev[i]; |
2775 | if (!test_bit(R5_LOCKED, &dev->flags) && | 2775 | if (!test_bit(R5_LOCKED, &dev->flags) && |
2776 | (test_bit(R5_UPTODATE, &dev->flags) || | 2776 | (test_bit(R5_UPTODATE, &dev->flags) || |
2777 | test_and_clear_bit(R5_Discard, &dev->flags))) { | 2777 | test_bit(R5_Discard, &dev->flags))) { |
2778 | /* We can return any write requests */ | 2778 | /* We can return any write requests */ |
2779 | struct bio *wbi, *wbi2; | 2779 | struct bio *wbi, *wbi2; |
2780 | pr_debug("Return write for disc %d\n", i); | 2780 | pr_debug("Return write for disc %d\n", i); |
2781 | if (test_and_clear_bit(R5_Discard, &dev->flags)) | ||
2782 | clear_bit(R5_UPTODATE, &dev->flags); | ||
2781 | wbi = dev->written; | 2783 | wbi = dev->written; |
2782 | dev->written = NULL; | 2784 | dev->written = NULL; |
2783 | while (wbi && wbi->bi_sector < | 2785 | while (wbi && wbi->bi_sector < |
@@ -2795,7 +2797,8 @@ static void handle_stripe_clean_event(struct r5conf *conf, | |||
2795 | !test_bit(STRIPE_DEGRADED, &sh->state), | 2797 | !test_bit(STRIPE_DEGRADED, &sh->state), |
2796 | 0); | 2798 | 0); |
2797 | } | 2799 | } |
2798 | } | 2800 | } else if (test_bit(R5_Discard, &sh->dev[i].flags)) |
2801 | clear_bit(R5_Discard, &sh->dev[i].flags); | ||
2799 | 2802 | ||
2800 | if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) | 2803 | if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) |
2801 | if (atomic_dec_and_test(&conf->pending_full_writes)) | 2804 | if (atomic_dec_and_test(&conf->pending_full_writes)) |
@@ -3490,40 +3493,6 @@ static void handle_stripe(struct stripe_head *sh) | |||
3490 | handle_failed_sync(conf, sh, &s); | 3493 | handle_failed_sync(conf, sh, &s); |
3491 | } | 3494 | } |
3492 | 3495 | ||
3493 | /* | ||
3494 | * might be able to return some write requests if the parity blocks | ||
3495 | * are safe, or on a failed drive | ||
3496 | */ | ||
3497 | pdev = &sh->dev[sh->pd_idx]; | ||
3498 | s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) | ||
3499 | || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); | ||
3500 | qdev = &sh->dev[sh->qd_idx]; | ||
3501 | s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) | ||
3502 | || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) | ||
3503 | || conf->level < 6; | ||
3504 | |||
3505 | if (s.written && | ||
3506 | (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) | ||
3507 | && !test_bit(R5_LOCKED, &pdev->flags) | ||
3508 | && (test_bit(R5_UPTODATE, &pdev->flags) || | ||
3509 | test_bit(R5_Discard, &pdev->flags))))) && | ||
3510 | (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) | ||
3511 | && !test_bit(R5_LOCKED, &qdev->flags) | ||
3512 | && (test_bit(R5_UPTODATE, &qdev->flags) || | ||
3513 | test_bit(R5_Discard, &qdev->flags)))))) | ||
3514 | handle_stripe_clean_event(conf, sh, disks, &s.return_bi); | ||
3515 | |||
3516 | /* Now we might consider reading some blocks, either to check/generate | ||
3517 | * parity, or to satisfy requests | ||
3518 | * or to load a block that is being partially written. | ||
3519 | */ | ||
3520 | if (s.to_read || s.non_overwrite | ||
3521 | || (conf->level == 6 && s.to_write && s.failed) | ||
3522 | || (s.syncing && (s.uptodate + s.compute < disks)) | ||
3523 | || s.replacing | ||
3524 | || s.expanding) | ||
3525 | handle_stripe_fill(sh, &s, disks); | ||
3526 | |||
3527 | /* Now we check to see if any write operations have recently | 3496 | /* Now we check to see if any write operations have recently |
3528 | * completed | 3497 | * completed |
3529 | */ | 3498 | */ |
@@ -3561,6 +3530,40 @@ static void handle_stripe(struct stripe_head *sh) | |||
3561 | s.dec_preread_active = 1; | 3530 | s.dec_preread_active = 1; |
3562 | } | 3531 | } |
3563 | 3532 | ||
3533 | /* | ||
3534 | * might be able to return some write requests if the parity blocks | ||
3535 | * are safe, or on a failed drive | ||
3536 | */ | ||
3537 | pdev = &sh->dev[sh->pd_idx]; | ||
3538 | s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) | ||
3539 | || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); | ||
3540 | qdev = &sh->dev[sh->qd_idx]; | ||
3541 | s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) | ||
3542 | || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) | ||
3543 | || conf->level < 6; | ||
3544 | |||
3545 | if (s.written && | ||
3546 | (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) | ||
3547 | && !test_bit(R5_LOCKED, &pdev->flags) | ||
3548 | && (test_bit(R5_UPTODATE, &pdev->flags) || | ||
3549 | test_bit(R5_Discard, &pdev->flags))))) && | ||
3550 | (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) | ||
3551 | && !test_bit(R5_LOCKED, &qdev->flags) | ||
3552 | && (test_bit(R5_UPTODATE, &qdev->flags) || | ||
3553 | test_bit(R5_Discard, &qdev->flags)))))) | ||
3554 | handle_stripe_clean_event(conf, sh, disks, &s.return_bi); | ||
3555 | |||
3556 | /* Now we might consider reading some blocks, either to check/generate | ||
3557 | * parity, or to satisfy requests | ||
3558 | * or to load a block that is being partially written. | ||
3559 | */ | ||
3560 | if (s.to_read || s.non_overwrite | ||
3561 | || (conf->level == 6 && s.to_write && s.failed) | ||
3562 | || (s.syncing && (s.uptodate + s.compute < disks)) | ||
3563 | || s.replacing | ||
3564 | || s.expanding) | ||
3565 | handle_stripe_fill(sh, &s, disks); | ||
3566 | |||
3564 | /* Now to consider new write requests and what else, if anything | 3567 | /* Now to consider new write requests and what else, if anything |
3565 | * should be read. We do not handle new writes when: | 3568 | * should be read. We do not handle new writes when: |
3566 | * 1/ A 'write' operation (copy+xor) is already in flight. | 3569 | * 1/ A 'write' operation (copy+xor) is already in flight. |
@@ -5529,6 +5532,10 @@ static int run(struct mddev *mddev) | |||
5529 | * discard data disk but write parity disk | 5532 | * discard data disk but write parity disk |
5530 | */ | 5533 | */ |
5531 | stripe = stripe * PAGE_SIZE; | 5534 | stripe = stripe * PAGE_SIZE; |
5535 | /* Round up to power of 2, as discard handling | ||
5536 | * currently assumes that */ | ||
5537 | while ((stripe-1) & stripe) | ||
5538 | stripe = (stripe | (stripe-1)) + 1; | ||
5532 | mddev->queue->limits.discard_alignment = stripe; | 5539 | mddev->queue->limits.discard_alignment = stripe; |
5533 | mddev->queue->limits.discard_granularity = stripe; | 5540 | mddev->queue->limits.discard_granularity = stripe; |
5534 | /* | 5541 | /* |
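Note on the discard-granularity hunk above: the added loop rounds the stripe size up to the next power of two, because the discard handling assumes a power-of-two granularity. As a standalone illustration (plain userspace C, not part of the patch; the 3-disk / 512 KiB figures are made-up example values), the bit trick works like this: (v | (v - 1)) + 1 fills the trailing zeros, clears the lowest set bit and carries upward, so each pass rounds v up while dropping one low-order bit, and the loop stops once a single bit — the next power of two at or above the starting value — remains.

#include <stdio.h>

static unsigned long roundup_pow_of_two_loop(unsigned long v)
{
        /* same loop as the raid5 hunk: exits when (v - 1) & v == 0,
         * i.e. when v has a single set bit */
        while ((v - 1) & v)
                v = (v | (v - 1)) + 1;
        return v;
}

int main(void)
{
        unsigned long stripe = 3 * 512 * 1024;  /* e.g. 3 data disks, 512 KiB chunk */

        printf("%lu -> %lu\n", stripe, roundup_pow_of_two_loop(stripe));
        return 0;       /* prints: 1572864 -> 2097152 (2 MiB) */
}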
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 9bf10e7bbfaf..56eac101c013 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig | |||
@@ -81,6 +81,18 @@ config MMC_RICOH_MMC | |||
81 | 81 | ||
82 | If unsure, say Y. | 82 | If unsure, say Y. |
83 | 83 | ||
84 | config MMC_SDHCI_ACPI | ||
85 | tristate "SDHCI support for ACPI enumerated SDHCI controllers" | ||
86 | depends on MMC_SDHCI && ACPI | ||
87 | help | ||
88 | This selects support for ACPI enumerated SDHCI controllers, | ||
89 | identified by ACPI Compatibility ID PNP0D40 or specific | ||
90 | ACPI Hardware IDs. | ||
91 | |||
92 | If you have a controller with this interface, say Y or M here. | ||
93 | |||
94 | If unsure, say N. | ||
95 | |||
84 | config MMC_SDHCI_PLTFM | 96 | config MMC_SDHCI_PLTFM |
85 | tristate "SDHCI platform and OF driver helper" | 97 | tristate "SDHCI platform and OF driver helper" |
86 | depends on MMC_SDHCI | 98 | depends on MMC_SDHCI |
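For reference, the new option is enabled like any other SDHCI host option; a minimal config fragment might look like the following (assuming ACPI support is already enabled in the kernel configuration):

CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_ACPI=m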
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 17ad0a7ba40b..0e4960a107de 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile | |||
@@ -9,6 +9,7 @@ obj-$(CONFIG_MMC_MXS) += mxs-mmc.o | |||
9 | obj-$(CONFIG_MMC_SDHCI) += sdhci.o | 9 | obj-$(CONFIG_MMC_SDHCI) += sdhci.o |
10 | obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o | 10 | obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o |
11 | obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o | 11 | obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o |
12 | obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o | ||
12 | obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o | 13 | obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o |
13 | obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o | 14 | obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o |
14 | obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o | 15 | obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o |
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c new file mode 100644 index 000000000000..6ac361744d36 --- /dev/null +++ b/drivers/mmc/host/sdhci-acpi.c | |||
@@ -0,0 +1,304 @@ | |||
1 | /* | ||
2 | * Secure Digital Host Controller Interface ACPI driver. | ||
3 | * | ||
4 | * Copyright (c) 2012, Intel Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along with | ||
16 | * this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #include <linux/init.h> | ||
22 | #include <linux/export.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/device.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/ioport.h> | ||
27 | #include <linux/io.h> | ||
28 | #include <linux/dma-mapping.h> | ||
29 | #include <linux/compiler.h> | ||
30 | #include <linux/stddef.h> | ||
31 | #include <linux/bitops.h> | ||
32 | #include <linux/types.h> | ||
33 | #include <linux/err.h> | ||
34 | #include <linux/interrupt.h> | ||
35 | #include <linux/acpi.h> | ||
36 | #include <linux/pm.h> | ||
37 | #include <linux/pm_runtime.h> | ||
38 | |||
39 | #include <linux/mmc/host.h> | ||
40 | #include <linux/mmc/pm.h> | ||
41 | #include <linux/mmc/sdhci.h> | ||
42 | |||
43 | #include "sdhci.h" | ||
44 | |||
45 | enum { | ||
46 | SDHCI_ACPI_SD_CD = BIT(0), | ||
47 | SDHCI_ACPI_RUNTIME_PM = BIT(1), | ||
48 | }; | ||
49 | |||
50 | struct sdhci_acpi_chip { | ||
51 | const struct sdhci_ops *ops; | ||
52 | unsigned int quirks; | ||
53 | unsigned int quirks2; | ||
54 | unsigned long caps; | ||
55 | unsigned int caps2; | ||
56 | mmc_pm_flag_t pm_caps; | ||
57 | }; | ||
58 | |||
59 | struct sdhci_acpi_slot { | ||
60 | const struct sdhci_acpi_chip *chip; | ||
61 | unsigned int quirks; | ||
62 | unsigned int quirks2; | ||
63 | unsigned long caps; | ||
64 | unsigned int caps2; | ||
65 | mmc_pm_flag_t pm_caps; | ||
66 | unsigned int flags; | ||
67 | }; | ||
68 | |||
69 | struct sdhci_acpi_host { | ||
70 | struct sdhci_host *host; | ||
71 | const struct sdhci_acpi_slot *slot; | ||
72 | struct platform_device *pdev; | ||
73 | bool use_runtime_pm; | ||
74 | }; | ||
75 | |||
76 | static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag) | ||
77 | { | ||
78 | return c->slot && (c->slot->flags & flag); | ||
79 | } | ||
80 | |||
81 | static int sdhci_acpi_enable_dma(struct sdhci_host *host) | ||
82 | { | ||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | static const struct sdhci_ops sdhci_acpi_ops_dflt = { | ||
87 | .enable_dma = sdhci_acpi_enable_dma, | ||
88 | }; | ||
89 | |||
90 | static const struct acpi_device_id sdhci_acpi_ids[] = { | ||
91 | { "PNP0D40" }, | ||
92 | { }, | ||
93 | }; | ||
94 | MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids); | ||
95 | |||
96 | static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid) | ||
97 | { | ||
98 | const struct acpi_device_id *id; | ||
99 | |||
100 | for (id = sdhci_acpi_ids; id->id[0]; id++) | ||
101 | if (!strcmp(id->id, hid)) | ||
102 | return (const struct sdhci_acpi_slot *)id->driver_data; | ||
103 | return NULL; | ||
104 | } | ||
105 | |||
106 | static int __devinit sdhci_acpi_probe(struct platform_device *pdev) | ||
107 | { | ||
108 | struct device *dev = &pdev->dev; | ||
109 | acpi_handle handle = ACPI_HANDLE(dev); | ||
110 | struct acpi_device *device; | ||
111 | struct sdhci_acpi_host *c; | ||
112 | struct sdhci_host *host; | ||
113 | struct resource *iomem; | ||
114 | resource_size_t len; | ||
115 | const char *hid; | ||
116 | int err; | ||
117 | |||
118 | if (acpi_bus_get_device(handle, &device)) | ||
119 | return -ENODEV; | ||
120 | |||
121 | if (acpi_bus_get_status(device) || !device->status.present) | ||
122 | return -ENODEV; | ||
123 | |||
124 | hid = acpi_device_hid(device); | ||
125 | |||
126 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
127 | if (!iomem) | ||
128 | return -ENOMEM; | ||
129 | |||
130 | len = resource_size(iomem); | ||
131 | if (len < 0x100) | ||
132 | dev_err(dev, "Invalid iomem size!\n"); | ||
133 | |||
134 | if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev))) | ||
135 | return -ENOMEM; | ||
136 | |||
137 | host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host)); | ||
138 | if (IS_ERR(host)) | ||
139 | return PTR_ERR(host); | ||
140 | |||
141 | c = sdhci_priv(host); | ||
142 | c->host = host; | ||
143 | c->slot = sdhci_acpi_get_slot(hid); | ||
144 | c->pdev = pdev; | ||
145 | c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM); | ||
146 | |||
147 | platform_set_drvdata(pdev, c); | ||
148 | |||
149 | host->hw_name = "ACPI"; | ||
150 | host->ops = &sdhci_acpi_ops_dflt; | ||
151 | host->irq = platform_get_irq(pdev, 0); | ||
152 | |||
153 | host->ioaddr = devm_ioremap_nocache(dev, iomem->start, | ||
154 | resource_size(iomem)); | ||
155 | if (host->ioaddr == NULL) { | ||
156 | err = -ENOMEM; | ||
157 | goto err_free; | ||
158 | } | ||
159 | |||
160 | if (!dev->dma_mask) { | ||
161 | u64 dma_mask; | ||
162 | |||
163 | if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) { | ||
164 | /* 64-bit DMA is not supported at present */ | ||
165 | dma_mask = DMA_BIT_MASK(32); | ||
166 | } else { | ||
167 | dma_mask = DMA_BIT_MASK(32); | ||
168 | } | ||
169 | |||
170 | dev->dma_mask = &dev->coherent_dma_mask; | ||
171 | dev->coherent_dma_mask = dma_mask; | ||
172 | } | ||
173 | |||
174 | if (c->slot) { | ||
175 | if (c->slot->chip) { | ||
176 | host->ops = c->slot->chip->ops; | ||
177 | host->quirks |= c->slot->chip->quirks; | ||
178 | host->quirks2 |= c->slot->chip->quirks2; | ||
179 | host->mmc->caps |= c->slot->chip->caps; | ||
180 | host->mmc->caps2 |= c->slot->chip->caps2; | ||
181 | host->mmc->pm_caps |= c->slot->chip->pm_caps; | ||
182 | } | ||
183 | host->quirks |= c->slot->quirks; | ||
184 | host->quirks2 |= c->slot->quirks2; | ||
185 | host->mmc->caps |= c->slot->caps; | ||
186 | host->mmc->caps2 |= c->slot->caps2; | ||
187 | host->mmc->pm_caps |= c->slot->pm_caps; | ||
188 | } | ||
189 | |||
190 | err = sdhci_add_host(host); | ||
191 | if (err) | ||
192 | goto err_free; | ||
193 | |||
194 | if (c->use_runtime_pm) { | ||
195 | pm_suspend_ignore_children(dev, 1); | ||
196 | pm_runtime_set_autosuspend_delay(dev, 50); | ||
197 | pm_runtime_use_autosuspend(dev); | ||
198 | pm_runtime_enable(dev); | ||
199 | } | ||
200 | |||
201 | return 0; | ||
202 | |||
203 | err_free: | ||
204 | platform_set_drvdata(pdev, NULL); | ||
205 | sdhci_free_host(c->host); | ||
206 | return err; | ||
207 | } | ||
208 | |||
209 | static int __devexit sdhci_acpi_remove(struct platform_device *pdev) | ||
210 | { | ||
211 | struct sdhci_acpi_host *c = platform_get_drvdata(pdev); | ||
212 | struct device *dev = &pdev->dev; | ||
213 | int dead; | ||
214 | |||
215 | if (c->use_runtime_pm) { | ||
216 | pm_runtime_get_sync(dev); | ||
217 | pm_runtime_disable(dev); | ||
218 | pm_runtime_put_noidle(dev); | ||
219 | } | ||
220 | |||
221 | dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0); | ||
222 | sdhci_remove_host(c->host, dead); | ||
223 | platform_set_drvdata(pdev, NULL); | ||
224 | sdhci_free_host(c->host); | ||
225 | |||
226 | return 0; | ||
227 | } | ||
228 | |||
229 | #ifdef CONFIG_PM_SLEEP | ||
230 | |||
231 | static int sdhci_acpi_suspend(struct device *dev) | ||
232 | { | ||
233 | struct sdhci_acpi_host *c = dev_get_drvdata(dev); | ||
234 | |||
235 | return sdhci_suspend_host(c->host); | ||
236 | } | ||
237 | |||
238 | static int sdhci_acpi_resume(struct device *dev) | ||
239 | { | ||
240 | struct sdhci_acpi_host *c = dev_get_drvdata(dev); | ||
241 | |||
242 | return sdhci_resume_host(c->host); | ||
243 | } | ||
244 | |||
245 | #else | ||
246 | |||
247 | #define sdhci_acpi_suspend NULL | ||
248 | #define sdhci_acpi_resume NULL | ||
249 | |||
250 | #endif | ||
251 | |||
252 | #ifdef CONFIG_PM_RUNTIME | ||
253 | |||
254 | static int sdhci_acpi_runtime_suspend(struct device *dev) | ||
255 | { | ||
256 | struct sdhci_acpi_host *c = dev_get_drvdata(dev); | ||
257 | |||
258 | return sdhci_runtime_suspend_host(c->host); | ||
259 | } | ||
260 | |||
261 | static int sdhci_acpi_runtime_resume(struct device *dev) | ||
262 | { | ||
263 | struct sdhci_acpi_host *c = dev_get_drvdata(dev); | ||
264 | |||
265 | return sdhci_runtime_resume_host(c->host); | ||
266 | } | ||
267 | |||
268 | static int sdhci_acpi_runtime_idle(struct device *dev) | ||
269 | { | ||
270 | return 0; | ||
271 | } | ||
272 | |||
273 | #else | ||
274 | |||
275 | #define sdhci_acpi_runtime_suspend NULL | ||
276 | #define sdhci_acpi_runtime_resume NULL | ||
277 | #define sdhci_acpi_runtime_idle NULL | ||
278 | |||
279 | #endif | ||
280 | |||
281 | static const struct dev_pm_ops sdhci_acpi_pm_ops = { | ||
282 | .suspend = sdhci_acpi_suspend, | ||
283 | .resume = sdhci_acpi_resume, | ||
284 | .runtime_suspend = sdhci_acpi_runtime_suspend, | ||
285 | .runtime_resume = sdhci_acpi_runtime_resume, | ||
286 | .runtime_idle = sdhci_acpi_runtime_idle, | ||
287 | }; | ||
288 | |||
289 | static struct platform_driver sdhci_acpi_driver = { | ||
290 | .driver = { | ||
291 | .name = "sdhci-acpi", | ||
292 | .owner = THIS_MODULE, | ||
293 | .acpi_match_table = sdhci_acpi_ids, | ||
294 | .pm = &sdhci_acpi_pm_ops, | ||
295 | }, | ||
296 | .probe = sdhci_acpi_probe, | ||
297 | .remove = __devexit_p(sdhci_acpi_remove), | ||
298 | }; | ||
299 | |||
300 | module_platform_driver(sdhci_acpi_driver); | ||
301 | |||
302 | MODULE_DESCRIPTION("Secure Digital Host Controller Interface ACPI driver"); | ||
303 | MODULE_AUTHOR("Adrian Hunter"); | ||
304 | MODULE_LICENSE("GPL v2"); | ||
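The sdhci_acpi_get_slot() helper in the new driver above is a plain scan of the acpi_device_id table, keyed on the ACPI hardware ID string, with per-device slot data carried in each entry's driver_data. A standalone sketch of the same pattern (userspace C with stand-in types; the table contents are hypothetical, only the "PNP0D40" ID comes from the driver):

#include <stdio.h>
#include <string.h>

struct slot_info {                      /* stands in for struct sdhci_acpi_slot */
        const char *name;
};

struct id_entry {                       /* stands in for struct acpi_device_id */
        const char *hid;
        const struct slot_info *data;
};

static const struct slot_info generic_slot = { "generic sd/mmc slot" };

static const struct id_entry ids[] = {
        { "PNP0D40", &generic_slot },   /* ACPI Compatibility ID used above */
        { NULL, NULL },
};

static const struct slot_info *get_slot(const char *hid)
{
        const struct id_entry *id;

        for (id = ids; id->hid; id++)   /* stop at the empty terminator */
                if (!strcmp(id->hid, hid))
                        return id->data;
        return NULL;
}

int main(void)
{
        const struct slot_info *s = get_slot("PNP0D40");

        printf("%s\n", s ? s->name : "no match");
        return 0;
}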
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c index 8f52fc858e48..5a5cd2ace4a6 100644 --- a/drivers/mtd/devices/slram.c +++ b/drivers/mtd/devices/slram.c | |||
@@ -240,7 +240,7 @@ static int parse_cmdline(char *devname, char *szstart, char *szlength) | |||
240 | 240 | ||
241 | if (*(szlength) != '+') { | 241 | if (*(szlength) != '+') { |
242 | devlength = simple_strtoul(szlength, &buffer, 0); | 242 | devlength = simple_strtoul(szlength, &buffer, 0); |
243 | devlength = handle_unit(devlength, buffer) - devstart; | 243 | devlength = handle_unit(devlength, buffer); |
244 | if (devlength < devstart) | 244 | if (devlength < devstart) |
245 | goto err_out; | 245 | goto err_out; |
246 | 246 | ||
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index ec6841d8e956..1a03b7f673ce 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -2983,13 +2983,15 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip, | |||
2983 | /* | 2983 | /* |
2984 | * Field definitions are in the following datasheets: | 2984 | * Field definitions are in the following datasheets: |
2985 | * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32) | 2985 | * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32) |
2986 | * New style (6 byte ID): Samsung K9GAG08U0F (p.44) | 2986 | * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44) |
2987 | * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22) | 2987 | * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22) |
2988 | * | 2988 | * |
2989 | * Check for ID length, cell type, and Hynix/Samsung ID to decide what | 2989 | * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung |
2990 | * to do. | 2990 | * ID to decide what to do. |
2991 | */ | 2991 | */ |
2992 | if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG) { | 2992 | if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG && |
2993 | (chip->cellinfo & NAND_CI_CELLTYPE_MSK) && | ||
2994 | id_data[5] != 0x00) { | ||
2993 | /* Calc pagesize */ | 2995 | /* Calc pagesize */ |
2994 | mtd->writesize = 2048 << (extid & 0x03); | 2996 | mtd->writesize = 2048 << (extid & 0x03); |
2995 | extid >>= 2; | 2997 | extid >>= 2; |
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index 4fbfe96e37a1..f48ac5d80bbf 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c | |||
@@ -727,7 +727,9 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr) | |||
727 | 727 | ||
728 | if (!flctl->qos_request) { | 728 | if (!flctl->qos_request) { |
729 | ret = dev_pm_qos_add_request(&flctl->pdev->dev, | 729 | ret = dev_pm_qos_add_request(&flctl->pdev->dev, |
730 | &flctl->pm_qos, 100); | 730 | &flctl->pm_qos, |
731 | DEV_PM_QOS_LATENCY, | ||
732 | 100); | ||
731 | if (ret < 0) | 733 | if (ret < 0) |
732 | dev_err(&flctl->pdev->dev, | 734 | dev_err(&flctl->pdev->dev, |
733 | "PM QoS request failed: %d\n", ret); | 735 | "PM QoS request failed: %d\n", ret); |
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c index 64be8f0848b0..d9127e2ed808 100644 --- a/drivers/mtd/ofpart.c +++ b/drivers/mtd/ofpart.c | |||
@@ -121,7 +121,7 @@ static int parse_ofoldpart_partitions(struct mtd_info *master, | |||
121 | nr_parts = plen / sizeof(part[0]); | 121 | nr_parts = plen / sizeof(part[0]); |
122 | 122 | ||
123 | *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL); | 123 | *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL); |
124 | if (!pparts) | 124 | if (!*pparts) |
125 | return -ENOMEM; | 125 | return -ENOMEM; |
126 | 126 | ||
127 | names = of_get_property(dp, "partition-names", &plen); | 127 | names = of_get_property(dp, "partition-names", &plen); |
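The one-character ofpart.c fix above deserves a note: the function allocates through a struct mtd_partition ** argument, so the out-of-memory check has to dereference it; testing the argument pointer itself (which is never NULL here) made the -ENOMEM path unreachable. A minimal userspace illustration of the corrected pattern (hypothetical helper, not the kernel function):

#include <stdio.h>
#include <stdlib.h>

static int alloc_parts(int **pparts, size_t nr_parts)
{
        *pparts = calloc(nr_parts, sizeof(**pparts));
        if (!*pparts)           /* test the allocation, not the argument */
                return -1;      /* stands in for -ENOMEM */
        return 0;
}

int main(void)
{
        int *parts;

        if (alloc_parts(&parts, 8))
                return 1;
        parts[0] = 42;
        printf("%d\n", parts[0]);
        free(parts);
        return 0;
}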
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index 7153e0d27101..b3f41f200622 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c | |||
@@ -3694,7 +3694,7 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int | |||
3694 | * flexonenand_set_boundary - Writes the SLC boundary | 3694 | * flexonenand_set_boundary - Writes the SLC boundary |
3695 | * @param mtd - mtd info structure | 3695 | * @param mtd - mtd info structure |
3696 | */ | 3696 | */ |
3697 | int flexonenand_set_boundary(struct mtd_info *mtd, int die, | 3697 | static int flexonenand_set_boundary(struct mtd_info *mtd, int die, |
3698 | int boundary, int lock) | 3698 | int boundary, int lock) |
3699 | { | 3699 | { |
3700 | struct onenand_chip *this = mtd->priv; | 3700 | struct onenand_chip *this = mtd->priv; |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b2530b002125..5f5b69f37d2e 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -1379,6 +1379,8 @@ static void bond_compute_features(struct bonding *bond) | |||
1379 | struct net_device *bond_dev = bond->dev; | 1379 | struct net_device *bond_dev = bond->dev; |
1380 | netdev_features_t vlan_features = BOND_VLAN_FEATURES; | 1380 | netdev_features_t vlan_features = BOND_VLAN_FEATURES; |
1381 | unsigned short max_hard_header_len = ETH_HLEN; | 1381 | unsigned short max_hard_header_len = ETH_HLEN; |
1382 | unsigned int gso_max_size = GSO_MAX_SIZE; | ||
1383 | u16 gso_max_segs = GSO_MAX_SEGS; | ||
1382 | int i; | 1384 | int i; |
1383 | unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE; | 1385 | unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE; |
1384 | 1386 | ||
@@ -1394,11 +1396,16 @@ static void bond_compute_features(struct bonding *bond) | |||
1394 | dst_release_flag &= slave->dev->priv_flags; | 1396 | dst_release_flag &= slave->dev->priv_flags; |
1395 | if (slave->dev->hard_header_len > max_hard_header_len) | 1397 | if (slave->dev->hard_header_len > max_hard_header_len) |
1396 | max_hard_header_len = slave->dev->hard_header_len; | 1398 | max_hard_header_len = slave->dev->hard_header_len; |
1399 | |||
1400 | gso_max_size = min(gso_max_size, slave->dev->gso_max_size); | ||
1401 | gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs); | ||
1397 | } | 1402 | } |
1398 | 1403 | ||
1399 | done: | 1404 | done: |
1400 | bond_dev->vlan_features = vlan_features; | 1405 | bond_dev->vlan_features = vlan_features; |
1401 | bond_dev->hard_header_len = max_hard_header_len; | 1406 | bond_dev->hard_header_len = max_hard_header_len; |
1407 | bond_dev->gso_max_segs = gso_max_segs; | ||
1408 | netif_set_gso_max_size(bond_dev, gso_max_size); | ||
1402 | 1409 | ||
1403 | flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE; | 1410 | flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE; |
1404 | bond_dev->priv_flags = flags | dst_release_flag; | 1411 | bond_dev->priv_flags = flags | dst_release_flag; |
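The bonding hunk above folds every slave's GSO limits into a running minimum, starting from the stack-wide defaults, so the bond never advertises a larger GSO size or segment count than its most constrained slave. A standalone sketch of that reduction (userspace C; the default values and per-slave limits are illustrative, not taken from the patch):

#include <stdio.h>

struct slave { unsigned int gso_max_size; unsigned int gso_max_segs; };

int main(void)
{
        /* illustrative per-slave limits */
        struct slave slaves[] = { { 65536, 65535 }, { 16384, 64 }, { 32768, 255 } };
        unsigned int gso_max_size = 65536;      /* stand-in for GSO_MAX_SIZE */
        unsigned int gso_max_segs = 65535;      /* stand-in for GSO_MAX_SEGS */
        unsigned int i;

        for (i = 0; i < sizeof(slaves) / sizeof(slaves[0]); i++) {
                if (slaves[i].gso_max_size < gso_max_size)
                        gso_max_size = slaves[i].gso_max_size;
                if (slaves[i].gso_max_segs < gso_max_segs)
                        gso_max_segs = slaves[i].gso_max_segs;
        }
        printf("bond: gso_max_size=%u gso_max_segs=%u\n", gso_max_size, gso_max_segs);
        return 0;       /* prints gso_max_size=16384 gso_max_segs=64 */
}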
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index d04911d33b64..47618e505355 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c | |||
@@ -813,6 +813,7 @@ static int __init ne_drv_probe(struct platform_device *pdev) | |||
813 | dev->irq = irq[this_dev]; | 813 | dev->irq = irq[this_dev]; |
814 | dev->mem_end = bad[this_dev]; | 814 | dev->mem_end = bad[this_dev]; |
815 | } | 815 | } |
816 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
816 | err = do_ne_probe(dev); | 817 | err = do_ne_probe(dev); |
817 | if (err) { | 818 | if (err) { |
818 | free_netdev(dev); | 819 | free_netdev(dev); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index bd1fd3d87c24..01611b33a93d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -9545,10 +9545,13 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp) | |||
9545 | */ | 9545 | */ |
9546 | static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp) | 9546 | static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp) |
9547 | { | 9547 | { |
9548 | u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); | 9548 | if (!CHIP_IS_E1x(bp)) { |
9549 | if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { | 9549 | u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); |
9550 | BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing"); | 9550 | if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { |
9551 | REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp)); | 9551 | BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing"); |
9552 | REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, | ||
9553 | 1 << BP_FUNC(bp)); | ||
9554 | } | ||
9552 | } | 9555 | } |
9553 | } | 9556 | } |
9554 | 9557 | ||
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 92317e9c0f73..60ac46f4ac08 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c | |||
@@ -1860,10 +1860,14 @@ jme_open(struct net_device *netdev) | |||
1860 | jme_clear_pm(jme); | 1860 | jme_clear_pm(jme); |
1861 | JME_NAPI_ENABLE(jme); | 1861 | JME_NAPI_ENABLE(jme); |
1862 | 1862 | ||
1863 | tasklet_enable(&jme->linkch_task); | 1863 | tasklet_init(&jme->linkch_task, jme_link_change_tasklet, |
1864 | tasklet_enable(&jme->txclean_task); | 1864 | (unsigned long) jme); |
1865 | tasklet_hi_enable(&jme->rxclean_task); | 1865 | tasklet_init(&jme->txclean_task, jme_tx_clean_tasklet, |
1866 | tasklet_hi_enable(&jme->rxempty_task); | 1866 | (unsigned long) jme); |
1867 | tasklet_init(&jme->rxclean_task, jme_rx_clean_tasklet, | ||
1868 | (unsigned long) jme); | ||
1869 | tasklet_init(&jme->rxempty_task, jme_rx_empty_tasklet, | ||
1870 | (unsigned long) jme); | ||
1867 | 1871 | ||
1868 | rc = jme_request_irq(jme); | 1872 | rc = jme_request_irq(jme); |
1869 | if (rc) | 1873 | if (rc) |
@@ -3079,22 +3083,6 @@ jme_init_one(struct pci_dev *pdev, | |||
3079 | tasklet_init(&jme->pcc_task, | 3083 | tasklet_init(&jme->pcc_task, |
3080 | jme_pcc_tasklet, | 3084 | jme_pcc_tasklet, |
3081 | (unsigned long) jme); | 3085 | (unsigned long) jme); |
3082 | tasklet_init(&jme->linkch_task, | ||
3083 | jme_link_change_tasklet, | ||
3084 | (unsigned long) jme); | ||
3085 | tasklet_init(&jme->txclean_task, | ||
3086 | jme_tx_clean_tasklet, | ||
3087 | (unsigned long) jme); | ||
3088 | tasklet_init(&jme->rxclean_task, | ||
3089 | jme_rx_clean_tasklet, | ||
3090 | (unsigned long) jme); | ||
3091 | tasklet_init(&jme->rxempty_task, | ||
3092 | jme_rx_empty_tasklet, | ||
3093 | (unsigned long) jme); | ||
3094 | tasklet_disable_nosync(&jme->linkch_task); | ||
3095 | tasklet_disable_nosync(&jme->txclean_task); | ||
3096 | tasklet_disable_nosync(&jme->rxclean_task); | ||
3097 | tasklet_disable_nosync(&jme->rxempty_task); | ||
3098 | jme->dpi.cur = PCC_P1; | 3086 | jme->dpi.cur = PCC_P1; |
3099 | 3087 | ||
3100 | jme->reg_ghc = 0; | 3088 | jme->reg_ghc = 0; |
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index e558edd1cb6c..69e01977a1dd 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c | |||
@@ -5459,8 +5459,10 @@ static int prepare_hardware(struct net_device *dev) | |||
5459 | rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev); | 5459 | rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev); |
5460 | if (rc) | 5460 | if (rc) |
5461 | return rc; | 5461 | return rc; |
5462 | tasklet_enable(&hw_priv->rx_tasklet); | 5462 | tasklet_init(&hw_priv->rx_tasklet, rx_proc_task, |
5463 | tasklet_enable(&hw_priv->tx_tasklet); | 5463 | (unsigned long) hw_priv); |
5464 | tasklet_init(&hw_priv->tx_tasklet, tx_proc_task, | ||
5465 | (unsigned long) hw_priv); | ||
5464 | 5466 | ||
5465 | hw->promiscuous = 0; | 5467 | hw->promiscuous = 0; |
5466 | hw->all_multi = 0; | 5468 | hw->all_multi = 0; |
@@ -7033,16 +7035,6 @@ static int __devinit pcidev_init(struct pci_dev *pdev, | |||
7033 | spin_lock_init(&hw_priv->hwlock); | 7035 | spin_lock_init(&hw_priv->hwlock); |
7034 | mutex_init(&hw_priv->lock); | 7036 | mutex_init(&hw_priv->lock); |
7035 | 7037 | ||
7036 | /* tasklet is enabled. */ | ||
7037 | tasklet_init(&hw_priv->rx_tasklet, rx_proc_task, | ||
7038 | (unsigned long) hw_priv); | ||
7039 | tasklet_init(&hw_priv->tx_tasklet, tx_proc_task, | ||
7040 | (unsigned long) hw_priv); | ||
7041 | |||
7042 | /* tasklet_enable will decrement the atomic counter. */ | ||
7043 | tasklet_disable(&hw_priv->rx_tasklet); | ||
7044 | tasklet_disable(&hw_priv->tx_tasklet); | ||
7045 | |||
7046 | for (i = 0; i < TOTAL_PORT_NUM; i++) | 7038 | for (i = 0; i < TOTAL_PORT_NUM; i++) |
7047 | init_waitqueue_head(&hw_priv->counter[i].counter); | 7039 | init_waitqueue_head(&hw_priv->counter[i].counter); |
7048 | 7040 | ||
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 1c818254b7be..b01f83a044c4 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c | |||
@@ -979,17 +979,6 @@ static void cp_init_hw (struct cp_private *cp) | |||
979 | cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); | 979 | cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); |
980 | cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); | 980 | cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); |
981 | 981 | ||
982 | cpw32_f(HiTxRingAddr, 0); | ||
983 | cpw32_f(HiTxRingAddr + 4, 0); | ||
984 | |||
985 | ring_dma = cp->ring_dma; | ||
986 | cpw32_f(RxRingAddr, ring_dma & 0xffffffff); | ||
987 | cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); | ||
988 | |||
989 | ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; | ||
990 | cpw32_f(TxRingAddr, ring_dma & 0xffffffff); | ||
991 | cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); | ||
992 | |||
993 | cp_start_hw(cp); | 982 | cp_start_hw(cp); |
994 | cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */ | 983 | cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */ |
995 | 984 | ||
@@ -1003,6 +992,17 @@ static void cp_init_hw (struct cp_private *cp) | |||
1003 | 992 | ||
1004 | cpw8(Config5, cpr8(Config5) & PMEStatus); | 993 | cpw8(Config5, cpr8(Config5) & PMEStatus); |
1005 | 994 | ||
995 | cpw32_f(HiTxRingAddr, 0); | ||
996 | cpw32_f(HiTxRingAddr + 4, 0); | ||
997 | |||
998 | ring_dma = cp->ring_dma; | ||
999 | cpw32_f(RxRingAddr, ring_dma & 0xffffffff); | ||
1000 | cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); | ||
1001 | |||
1002 | ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; | ||
1003 | cpw32_f(TxRingAddr, ring_dma & 0xffffffff); | ||
1004 | cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); | ||
1005 | |||
1006 | cpw16(MultiIntr, 0); | 1006 | cpw16(MultiIntr, 0); |
1007 | 1007 | ||
1008 | cpw8_f(Cfg9346, Cfg9346_Lock); | 1008 | cpw8_f(Cfg9346, Cfg9346_Lock); |
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index fb9f6b38511f..edf5edb13140 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c | |||
@@ -2479,7 +2479,7 @@ static int sis900_resume(struct pci_dev *pci_dev) | |||
2479 | netif_start_queue(net_dev); | 2479 | netif_start_queue(net_dev); |
2480 | 2480 | ||
2481 | /* Workaround for EDB */ | 2481 | /* Workaround for EDB */ |
2482 | sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); | 2482 | sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); |
2483 | 2483 | ||
2484 | /* Enable all known interrupts by setting the interrupt mask. */ | 2484 | /* Enable all known interrupts by setting the interrupt mask. */ |
2485 | sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); | 2485 | sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); |
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 62d1baf111ea..c53c0f4e2ce3 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c | |||
@@ -2110,7 +2110,7 @@ static void __devinit smsc911x_read_mac_address(struct net_device *dev) | |||
2110 | static int __devinit smsc911x_init(struct net_device *dev) | 2110 | static int __devinit smsc911x_init(struct net_device *dev) |
2111 | { | 2111 | { |
2112 | struct smsc911x_data *pdata = netdev_priv(dev); | 2112 | struct smsc911x_data *pdata = netdev_priv(dev); |
2113 | unsigned int byte_test; | 2113 | unsigned int byte_test, mask; |
2114 | unsigned int to = 100; | 2114 | unsigned int to = 100; |
2115 | 2115 | ||
2116 | SMSC_TRACE(pdata, probe, "Driver Parameters:"); | 2116 | SMSC_TRACE(pdata, probe, "Driver Parameters:"); |
@@ -2130,9 +2130,22 @@ static int __devinit smsc911x_init(struct net_device *dev) | |||
2130 | /* | 2130 | /* |
2131 | * poll the READY bit in PMT_CTRL. Any other access to the device is | 2131 | * poll the READY bit in PMT_CTRL. Any other access to the device is |
2132 | * forbidden while this bit isn't set. Try for 100ms | 2132 | * forbidden while this bit isn't set. Try for 100ms |
2133 | * | ||
2134 | * Note that this test is done before the WORD_SWAP register is | ||
2135 | * programmed. So in some configurations the READY bit is at 16 before | ||
2136 | * WORD_SWAP is written to. This issue is worked around by waiting | ||
2137 | * until either bit 0 or bit 16 gets set in PMT_CTRL. | ||
2138 | * | ||
2139 | * SMSC has confirmed that checking bit 16 (marked as reserved in | ||
2140 | * the datasheet) is fine since these bits "will either never be set | ||
2141 | * or can only go high after READY does (so also indicate the device | ||
2142 | * is ready)". | ||
2133 | */ | 2143 | */ |
2134 | while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to) | 2144 | |
2145 | mask = PMT_CTRL_READY_ | swahw32(PMT_CTRL_READY_); | ||
2146 | while (!(smsc911x_reg_read(pdata, PMT_CTRL) & mask) && --to) | ||
2135 | udelay(1000); | 2147 | udelay(1000); |
2148 | |||
2136 | if (to == 0) { | 2149 | if (to == 0) { |
2137 | pr_err("Device not READY in 100ms aborting\n"); | 2150 | pr_err("Device not READY in 100ms aborting\n"); |
2138 | return -ENODEV; | 2151 | return -ENODEV; |
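About the smsc911x workaround above: because the readiness poll runs before WORD_SWAP is programmed, the READY bit may show up in the upper halfword, so the driver builds a mask covering both positions; swahw32() swaps the two 16-bit halves of a 32-bit word. A standalone illustration (userspace C; swap_halfwords() is a local stand-in for the kernel's swahw32(), and READY is assumed to sit at bit 0, as the comment implies):

#include <stdio.h>
#include <stdint.h>

#define PMT_CTRL_READY_ 0x00000001u     /* assumed: READY normally at bit 0 */

static uint32_t swap_halfwords(uint32_t x)
{
        return (x << 16) | (x >> 16);   /* 16-bit halfword swap */
}

int main(void)
{
        uint32_t mask = PMT_CTRL_READY_ | swap_halfwords(PMT_CTRL_READY_);

        printf("mask = 0x%08x\n", mask);        /* 0x00010001: bit 0 or bit 16 */
        return 0;
}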
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 4e9810013850..66e025ad5df1 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c | |||
@@ -917,7 +917,7 @@ static int tile_net_setup_interrupts(struct net_device *dev) | |||
917 | ingress_irq = rc; | 917 | ingress_irq = rc; |
918 | tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU); | 918 | tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU); |
919 | rc = request_irq(ingress_irq, tile_net_handle_ingress_irq, | 919 | rc = request_irq(ingress_irq, tile_net_handle_ingress_irq, |
920 | 0, NULL, NULL); | 920 | 0, "tile_net", NULL); |
921 | if (rc != 0) { | 921 | if (rc != 0) { |
922 | netdev_err(dev, "request_irq failed: %d\n", rc); | 922 | netdev_err(dev, "request_irq failed: %d\n", rc); |
923 | destroy_irq(ingress_irq); | 923 | destroy_irq(ingress_irq); |
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 1d04754a6637..a788501e978e 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c | |||
@@ -894,6 +894,8 @@ out: | |||
894 | return IRQ_HANDLED; | 894 | return IRQ_HANDLED; |
895 | } | 895 | } |
896 | 896 | ||
897 | static void axienet_dma_err_handler(unsigned long data); | ||
898 | |||
897 | /** | 899 | /** |
898 | * axienet_open - Driver open routine. | 900 | * axienet_open - Driver open routine. |
899 | * @ndev: Pointer to net_device structure | 901 | * @ndev: Pointer to net_device structure |
@@ -942,6 +944,10 @@ static int axienet_open(struct net_device *ndev) | |||
942 | phy_start(lp->phy_dev); | 944 | phy_start(lp->phy_dev); |
943 | } | 945 | } |
944 | 946 | ||
947 | /* Enable tasklets for Axi DMA error handling */ | ||
948 | tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler, | ||
949 | (unsigned long) lp); | ||
950 | |||
945 | /* Enable interrupts for Axi DMA Tx */ | 951 | /* Enable interrupts for Axi DMA Tx */ |
946 | ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev); | 952 | ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev); |
947 | if (ret) | 953 | if (ret) |
@@ -950,8 +956,7 @@ static int axienet_open(struct net_device *ndev) | |||
950 | ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev); | 956 | ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev); |
951 | if (ret) | 957 | if (ret) |
952 | goto err_rx_irq; | 958 | goto err_rx_irq; |
953 | /* Enable tasklets for Axi DMA error handling */ | 959 | |
954 | tasklet_enable(&lp->dma_err_tasklet); | ||
955 | return 0; | 960 | return 0; |
956 | 961 | ||
957 | err_rx_irq: | 962 | err_rx_irq: |
@@ -960,6 +965,7 @@ err_tx_irq: | |||
960 | if (lp->phy_dev) | 965 | if (lp->phy_dev) |
961 | phy_disconnect(lp->phy_dev); | 966 | phy_disconnect(lp->phy_dev); |
962 | lp->phy_dev = NULL; | 967 | lp->phy_dev = NULL; |
968 | tasklet_kill(&lp->dma_err_tasklet); | ||
963 | dev_err(lp->dev, "request_irq() failed\n"); | 969 | dev_err(lp->dev, "request_irq() failed\n"); |
964 | return ret; | 970 | return ret; |
965 | } | 971 | } |
@@ -1613,10 +1619,6 @@ static int __devinit axienet_of_probe(struct platform_device *op) | |||
1613 | goto err_iounmap_2; | 1619 | goto err_iounmap_2; |
1614 | } | 1620 | } |
1615 | 1621 | ||
1616 | tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler, | ||
1617 | (unsigned long) lp); | ||
1618 | tasklet_disable(&lp->dma_err_tasklet); | ||
1619 | |||
1620 | return 0; | 1622 | return 0; |
1621 | 1623 | ||
1622 | err_iounmap_2: | 1624 | err_iounmap_2: |
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 98934bdf6acf..477d6729b17f 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c | |||
@@ -1102,10 +1102,12 @@ static int init_queues(struct port *port) | |||
1102 | { | 1102 | { |
1103 | int i; | 1103 | int i; |
1104 | 1104 | ||
1105 | if (!ports_open) | 1105 | if (!ports_open) { |
1106 | if (!(dma_pool = dma_pool_create(DRV_NAME, NULL, | 1106 | dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev, |
1107 | POOL_ALLOC_SIZE, 32, 0))) | 1107 | POOL_ALLOC_SIZE, 32, 0); |
1108 | if (!dma_pool) | ||
1108 | return -ENOMEM; | 1109 | return -ENOMEM; |
1110 | } | ||
1109 | 1111 | ||
1110 | if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, | 1112 | if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, |
1111 | &port->desc_tab_phys))) | 1113 | &port->desc_tab_phys))) |
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c index 5039f08f5a5b..43e9ab4f4d7e 100644 --- a/drivers/net/irda/sir_dev.c +++ b/drivers/net/irda/sir_dev.c | |||
@@ -222,7 +222,7 @@ static void sirdev_config_fsm(struct work_struct *work) | |||
222 | break; | 222 | break; |
223 | 223 | ||
224 | case SIRDEV_STATE_DONGLE_SPEED: | 224 | case SIRDEV_STATE_DONGLE_SPEED: |
225 | if (dev->dongle_drv->reset) { | 225 | if (dev->dongle_drv->set_speed) { |
226 | ret = dev->dongle_drv->set_speed(dev, fsm->param); | 226 | ret = dev->dongle_drv->set_speed(dev, fsm->param); |
227 | if (ret < 0) { | 227 | if (ret < 0) { |
228 | fsm->result = ret; | 228 | fsm->result = ret; |
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c index 6428fcbbdd4b..daec9b05d168 100644 --- a/drivers/net/phy/mdio-bitbang.c +++ b/drivers/net/phy/mdio-bitbang.c | |||
@@ -234,7 +234,6 @@ void free_mdio_bitbang(struct mii_bus *bus) | |||
234 | struct mdiobb_ctrl *ctrl = bus->priv; | 234 | struct mdiobb_ctrl *ctrl = bus->priv; |
235 | 235 | ||
236 | module_put(ctrl->ops->owner); | 236 | module_put(ctrl->ops->owner); |
237 | mdiobus_unregister(bus); | ||
238 | mdiobus_free(bus); | 237 | mdiobus_free(bus); |
239 | } | 238 | } |
240 | EXPORT_SYMBOL(free_mdio_bitbang); | 239 | EXPORT_SYMBOL(free_mdio_bitbang); |
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 899274f2f9b1..2ed1140df3e9 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c | |||
@@ -185,17 +185,20 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev) | |||
185 | { | 185 | { |
186 | struct mdio_gpio_platform_data *pdata; | 186 | struct mdio_gpio_platform_data *pdata; |
187 | struct mii_bus *new_bus; | 187 | struct mii_bus *new_bus; |
188 | int ret; | 188 | int ret, bus_id; |
189 | 189 | ||
190 | if (pdev->dev.of_node) | 190 | if (pdev->dev.of_node) { |
191 | pdata = mdio_gpio_of_get_data(pdev); | 191 | pdata = mdio_gpio_of_get_data(pdev); |
192 | else | 192 | bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); |
193 | } else { | ||
193 | pdata = pdev->dev.platform_data; | 194 | pdata = pdev->dev.platform_data; |
195 | bus_id = pdev->id; | ||
196 | } | ||
194 | 197 | ||
195 | if (!pdata) | 198 | if (!pdata) |
196 | return -ENODEV; | 199 | return -ENODEV; |
197 | 200 | ||
198 | new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id); | 201 | new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, bus_id); |
199 | if (!new_bus) | 202 | if (!new_bus) |
200 | return -ENODEV; | 203 | return -ENODEV; |
201 | 204 | ||
diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c index 9db0171e9366..c5db428e73fa 100644 --- a/drivers/net/team/team_mode_broadcast.c +++ b/drivers/net/team/team_mode_broadcast.c | |||
@@ -29,8 +29,8 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb) | |||
29 | if (last) { | 29 | if (last) { |
30 | skb2 = skb_clone(skb, GFP_ATOMIC); | 30 | skb2 = skb_clone(skb, GFP_ATOMIC); |
31 | if (skb2) { | 31 | if (skb2) { |
32 | ret = team_dev_queue_xmit(team, last, | 32 | ret = !team_dev_queue_xmit(team, last, |
33 | skb2); | 33 | skb2); |
34 | if (!sum_ret) | 34 | if (!sum_ret) |
35 | sum_ret = ret; | 35 | sum_ret = ret; |
36 | } | 36 | } |
@@ -39,7 +39,7 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb) | |||
39 | } | 39 | } |
40 | } | 40 | } |
41 | if (last) { | 41 | if (last) { |
42 | ret = team_dev_queue_xmit(team, last, skb); | 42 | ret = !team_dev_queue_xmit(team, last, skb); |
43 | if (!sum_ret) | 43 | if (!sum_ret) |
44 | sum_ret = ret; | 44 | sum_ret = ret; |
45 | } | 45 | } |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 4cd582a4f625..74fab1a40156 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
@@ -540,10 +540,12 @@ advance: | |||
540 | (ctx->ether_desc == NULL) || (ctx->control != intf)) | 540 | (ctx->ether_desc == NULL) || (ctx->control != intf)) |
541 | goto error; | 541 | goto error; |
542 | 542 | ||
543 | /* claim interfaces, if any */ | 543 | /* claim data interface, if different from control */ |
544 | temp = usb_driver_claim_interface(driver, ctx->data, dev); | 544 | if (ctx->data != ctx->control) { |
545 | if (temp) | 545 | temp = usb_driver_claim_interface(driver, ctx->data, dev); |
546 | goto error; | 546 | if (temp) |
547 | goto error; | ||
548 | } | ||
547 | 549 | ||
548 | iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber; | 550 | iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber; |
549 | 551 | ||
@@ -623,6 +625,10 @@ static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf) | |||
623 | 625 | ||
624 | tasklet_kill(&ctx->bh); | 626 | tasklet_kill(&ctx->bh); |
625 | 627 | ||
628 | /* handle devices with combined control and data interface */ | ||
629 | if (ctx->control == ctx->data) | ||
630 | ctx->data = NULL; | ||
631 | |||
626 | /* disconnect master --> disconnect slave */ | 632 | /* disconnect master --> disconnect slave */ |
627 | if (intf == ctx->control && ctx->data) { | 633 | if (intf == ctx->control && ctx->data) { |
628 | usb_set_intfdata(ctx->data, NULL); | 634 | usb_set_intfdata(ctx->data, NULL); |
@@ -1245,6 +1251,14 @@ static const struct usb_device_id cdc_devs[] = { | |||
1245 | .driver_info = (unsigned long) &wwan_info, | 1251 | .driver_info = (unsigned long) &wwan_info, |
1246 | }, | 1252 | }, |
1247 | 1253 | ||
1254 | /* Huawei NCM devices disguised as vendor specific */ | ||
1255 | { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16), | ||
1256 | .driver_info = (unsigned long)&wwan_info, | ||
1257 | }, | ||
1258 | { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46), | ||
1259 | .driver_info = (unsigned long)&wwan_info, | ||
1260 | }, | ||
1261 | |||
1248 | /* Generic CDC-NCM devices */ | 1262 | /* Generic CDC-NCM devices */ |
1249 | { USB_INTERFACE_INFO(USB_CLASS_COMM, | 1263 | { USB_INTERFACE_INFO(USB_CLASS_COMM, |
1250 | USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), | 1264 | USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), |
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 3286166415b4..362cb8cfeb92 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c | |||
@@ -184,7 +184,7 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx) | |||
184 | /* set the address, index & direction (read from PHY) */ | 184 | /* set the address, index & direction (read from PHY) */ |
185 | phy_id &= dev->mii.phy_id_mask; | 185 | phy_id &= dev->mii.phy_id_mask; |
186 | idx &= dev->mii.reg_num_mask; | 186 | idx &= dev->mii.reg_num_mask; |
187 | addr = (phy_id << 11) | (idx << 6) | MII_READ_; | 187 | addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_; |
188 | ret = smsc95xx_write_reg(dev, MII_ADDR, addr); | 188 | ret = smsc95xx_write_reg(dev, MII_ADDR, addr); |
189 | check_warn_goto_done(ret, "Error writing MII_ADDR"); | 189 | check_warn_goto_done(ret, "Error writing MII_ADDR"); |
190 | 190 | ||
@@ -221,7 +221,7 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx, | |||
221 | /* set the address, index & direction (write to PHY) */ | 221 | /* set the address, index & direction (write to PHY) */ |
222 | phy_id &= dev->mii.phy_id_mask; | 222 | phy_id &= dev->mii.phy_id_mask; |
223 | idx &= dev->mii.reg_num_mask; | 223 | idx &= dev->mii.reg_num_mask; |
224 | addr = (phy_id << 11) | (idx << 6) | MII_WRITE_; | 224 | addr = (phy_id << 11) | (idx << 6) | MII_WRITE_ | MII_BUSY_; |
225 | ret = smsc95xx_write_reg(dev, MII_ADDR, addr); | 225 | ret = smsc95xx_write_reg(dev, MII_ADDR, addr); |
226 | check_warn_goto_done(ret, "Error writing MII_ADDR"); | 226 | check_warn_goto_done(ret, "Error writing MII_ADDR"); |
227 | 227 | ||
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 7b4adde93c01..8b5c61917076 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * VXLAN: Virtual eXtensiable Local Area Network | 2 | * VXLAN: Virtual eXtensible Local Area Network |
3 | * | 3 | * |
4 | * Copyright (c) 2012 Vyatta Inc. | 4 | * Copyright (c) 2012 Vyatta Inc. |
5 | * | 5 | * |
@@ -50,8 +50,8 @@ | |||
50 | 50 | ||
51 | #define VXLAN_N_VID (1u << 24) | 51 | #define VXLAN_N_VID (1u << 24) |
52 | #define VXLAN_VID_MASK (VXLAN_N_VID - 1) | 52 | #define VXLAN_VID_MASK (VXLAN_N_VID - 1) |
53 | /* VLAN + IP header + UDP + VXLAN */ | 53 | /* IP header + UDP + VXLAN + Ethernet header */ |
54 | #define VXLAN_HEADROOM (4 + 20 + 8 + 8) | 54 | #define VXLAN_HEADROOM (20 + 8 + 8 + 14) |
55 | 55 | ||
56 | #define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */ | 56 | #define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */ |
57 | 57 | ||
@@ -1102,6 +1102,10 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, | |||
1102 | 1102 | ||
1103 | if (!tb[IFLA_MTU]) | 1103 | if (!tb[IFLA_MTU]) |
1104 | dev->mtu = lowerdev->mtu - VXLAN_HEADROOM; | 1104 | dev->mtu = lowerdev->mtu - VXLAN_HEADROOM; |
1105 | |||
1106 | /* update header length based on lower device */ | ||
1107 | dev->hard_header_len = lowerdev->hard_header_len + | ||
1108 | VXLAN_HEADROOM; | ||
1105 | } | 1109 | } |
1106 | 1110 | ||
1107 | if (data[IFLA_VXLAN_TOS]) | 1111 | if (data[IFLA_VXLAN_TOS]) |
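The corrected VXLAN_HEADROOM above accounts for the full encapsulation overhead per the new comment: 20 bytes of IPv4 header, 8 of UDP, 8 of VXLAN and 14 of Ethernet header, i.e. 50 bytes, which is also what the driver subtracts from the lower device's MTU when the user does not supply one. A quick check of the arithmetic (plain C; 1500 is just the usual Ethernet MTU used as an example):

#include <stdio.h>

#define IP4_HDR_LEN     20
#define UDP_HDR_LEN     8
#define VXLAN_HDR_LEN   8
#define ETH_HDR_LEN     14
#define VXLAN_HEADROOM  (IP4_HDR_LEN + UDP_HDR_LEN + VXLAN_HDR_LEN + ETH_HDR_LEN)

int main(void)
{
        int lowerdev_mtu = 1500;

        printf("headroom=%d derived mtu=%d\n",
               VXLAN_HEADROOM, lowerdev_mtu - VXLAN_HEADROOM);
        return 0;       /* prints: headroom=50 derived mtu=1450 */
}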
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index 3f575afd8cfc..e9a3da588e95 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c | |||
@@ -969,10 +969,12 @@ static int init_hdlc_queues(struct port *port) | |||
969 | { | 969 | { |
970 | int i; | 970 | int i; |
971 | 971 | ||
972 | if (!ports_open) | 972 | if (!ports_open) { |
973 | if (!(dma_pool = dma_pool_create(DRV_NAME, NULL, | 973 | dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev, |
974 | POOL_ALLOC_SIZE, 32, 0))) | 974 | POOL_ALLOC_SIZE, 32, 0); |
975 | if (!dma_pool) | ||
975 | return -ENOMEM; | 976 | return -ENOMEM; |
977 | } | ||
976 | 978 | ||
977 | if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, | 979 | if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, |
978 | &port->desc_tab_phys))) | 980 | &port->desc_tab_phys))) |
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 8e1559aba495..1829b445d0b0 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
@@ -1456,7 +1456,7 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type) | |||
1456 | switch (type) { | 1456 | switch (type) { |
1457 | case ATH9K_RESET_POWER_ON: | 1457 | case ATH9K_RESET_POWER_ON: |
1458 | ret = ath9k_hw_set_reset_power_on(ah); | 1458 | ret = ath9k_hw_set_reset_power_on(ah); |
1459 | if (!ret) | 1459 | if (ret) |
1460 | ah->reset_power_on = true; | 1460 | ah->reset_power_on = true; |
1461 | break; | 1461 | break; |
1462 | case ATH9K_RESET_WARM: | 1462 | case ATH9K_RESET_WARM: |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index a6f1e8166008..481345c23ded 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | |||
@@ -4401,7 +4401,7 @@ static s32 brcmf_mode_to_nl80211_iftype(s32 mode) | |||
4401 | 4401 | ||
4402 | static void brcmf_wiphy_pno_params(struct wiphy *wiphy) | 4402 | static void brcmf_wiphy_pno_params(struct wiphy *wiphy) |
4403 | { | 4403 | { |
4404 | #ifndef CONFIG_BRCMFISCAN | 4404 | #ifndef CONFIG_BRCMISCAN |
4405 | /* scheduled scan settings */ | 4405 | /* scheduled scan settings */ |
4406 | wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT; | 4406 | wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT; |
4407 | wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT; | 4407 | wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT; |
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index ff8162d4c454..2d9eee93c743 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c | |||
@@ -521,7 +521,7 @@ static void iwlagn_mac_tx(struct ieee80211_hw *hw, | |||
521 | ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); | 521 | ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); |
522 | 522 | ||
523 | if (iwlagn_tx_skb(priv, control->sta, skb)) | 523 | if (iwlagn_tx_skb(priv, control->sta, skb)) |
524 | dev_kfree_skb_any(skb); | 524 | ieee80211_free_txskb(hw, skb); |
525 | } | 525 | } |
526 | 526 | ||
527 | static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, | 527 | static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, |
@@ -1354,6 +1354,20 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw, | |||
1354 | vif_priv->ctx = ctx; | 1354 | vif_priv->ctx = ctx; |
1355 | ctx->vif = vif; | 1355 | ctx->vif = vif; |
1356 | 1356 | ||
1357 | /* | ||
1358 | * In SNIFFER device type, the firmware reports the FCS to | ||
1359 | * the host, rather than snipping it off. Unfortunately, | ||
1360 | * mac80211 doesn't (yet) provide a per-packet flag for | ||
1361 | * this, so that we have to set the hardware flag based | ||
1362 | * on the interfaces added. As the monitor interface can | ||
1363 | * only be present by itself, and will be removed before | ||
1364 | * other interfaces are added, this is safe. | ||
1365 | */ | ||
1366 | if (vif->type == NL80211_IFTYPE_MONITOR) | ||
1367 | priv->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS; | ||
1368 | else | ||
1369 | priv->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS; | ||
1370 | |||
1357 | err = iwl_setup_interface(priv, ctx); | 1371 | err = iwl_setup_interface(priv, ctx); |
1358 | if (!err || reset) | 1372 | if (!err || reset) |
1359 | goto out; | 1373 | goto out; |
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c index 7ff3f1430678..408132cf83c1 100644 --- a/drivers/net/wireless/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/iwlwifi/dvm/main.c | |||
@@ -2114,7 +2114,7 @@ static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) | |||
2114 | 2114 | ||
2115 | info = IEEE80211_SKB_CB(skb); | 2115 | info = IEEE80211_SKB_CB(skb); |
2116 | iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]); | 2116 | iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]); |
2117 | dev_kfree_skb_any(skb); | 2117 | ieee80211_free_txskb(priv->hw, skb); |
2118 | } | 2118 | } |
2119 | 2119 | ||
2120 | static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) | 2120 | static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) |
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index 17c8e5d82681..bb69f8f90b3b 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c | |||
@@ -321,6 +321,14 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority) | |||
321 | dma_map_page(trans->dev, page, 0, | 321 | dma_map_page(trans->dev, page, 0, |
322 | PAGE_SIZE << trans_pcie->rx_page_order, | 322 | PAGE_SIZE << trans_pcie->rx_page_order, |
323 | DMA_FROM_DEVICE); | 323 | DMA_FROM_DEVICE); |
324 | if (dma_mapping_error(trans->dev, rxb->page_dma)) { | ||
325 | rxb->page = NULL; | ||
326 | spin_lock_irqsave(&rxq->lock, flags); | ||
327 | list_add(&rxb->list, &rxq->rx_used); | ||
328 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
329 | __free_pages(page, trans_pcie->rx_page_order); | ||
330 | return; | ||
331 | } | ||
324 | /* dma address must be no more than 36 bits */ | 332 | /* dma address must be no more than 36 bits */ |
325 | BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); | 333 | BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); |
326 | /* and also 256 byte aligned! */ | 334 | /* and also 256 byte aligned! */ |
@@ -488,8 +496,19 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans, | |||
488 | dma_map_page(trans->dev, rxb->page, 0, | 496 | dma_map_page(trans->dev, rxb->page, 0, |
489 | PAGE_SIZE << trans_pcie->rx_page_order, | 497 | PAGE_SIZE << trans_pcie->rx_page_order, |
490 | DMA_FROM_DEVICE); | 498 | DMA_FROM_DEVICE); |
491 | list_add_tail(&rxb->list, &rxq->rx_free); | 499 | if (dma_mapping_error(trans->dev, rxb->page_dma)) { |
492 | rxq->free_count++; | 500 | /* |
501 | * free the page(s) as well to not break | ||
502 | * the invariant that the items on the used | ||
503 | * list have no page(s) | ||
504 | */ | ||
505 | __free_pages(rxb->page, trans_pcie->rx_page_order); | ||
506 | rxb->page = NULL; | ||
507 | list_add_tail(&rxb->list, &rxq->rx_used); | ||
508 | } else { | ||
509 | list_add_tail(&rxb->list, &rxq->rx_free); | ||
510 | rxq->free_count++; | ||
511 | } | ||
493 | } else | 512 | } else |
494 | list_add_tail(&rxb->list, &rxq->rx_used); | 513 | list_add_tail(&rxb->list, &rxq->rx_used); |
495 | spin_unlock_irqrestore(&rxq->lock, flags); | 514 | spin_unlock_irqrestore(&rxq->lock, flags); |
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 105e3af3c621..79a4ddc002d3 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c | |||
@@ -480,20 +480,12 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, | |||
480 | void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) | 480 | void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) |
481 | { | 481 | { |
482 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 482 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
483 | u16 rd_ptr, wr_ptr; | ||
484 | int n_bd = trans_pcie->txq[txq_id].q.n_bd; | ||
485 | 483 | ||
486 | if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { | 484 | if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { |
487 | WARN_ONCE(1, "queue %d not used", txq_id); | 485 | WARN_ONCE(1, "queue %d not used", txq_id); |
488 | return; | 486 | return; |
489 | } | 487 | } |
490 | 488 | ||
491 | rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1); | ||
492 | wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)); | ||
493 | |||
494 | WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]", | ||
495 | txq_id, rd_ptr, wr_ptr); | ||
496 | |||
497 | iwl_txq_set_inactive(trans, txq_id); | 489 | iwl_txq_set_inactive(trans, txq_id); |
498 | IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); | 490 | IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); |
499 | } | 491 | } |
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index 8d465107f52b..ae9010ed58de 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c | |||
@@ -890,9 +890,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context) | |||
890 | return; | 890 | return; |
891 | } | 891 | } |
892 | cmd_node = adapter->curr_cmd; | 892 | cmd_node = adapter->curr_cmd; |
893 | if (cmd_node->wait_q_enabled) | ||
894 | adapter->cmd_wait_q.status = -ETIMEDOUT; | ||
895 | |||
896 | if (cmd_node) { | 893 | if (cmd_node) { |
897 | adapter->dbg.timeout_cmd_id = | 894 | adapter->dbg.timeout_cmd_id = |
898 | adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index]; | 895 | adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index]; |
@@ -938,6 +935,14 @@ mwifiex_cmd_timeout_func(unsigned long function_context) | |||
938 | 935 | ||
939 | dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n", | 936 | dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n", |
940 | adapter->ps_mode, adapter->ps_state); | 937 | adapter->ps_mode, adapter->ps_state); |
938 | |||
939 | if (cmd_node->wait_q_enabled) { | ||
940 | adapter->cmd_wait_q.status = -ETIMEDOUT; | ||
941 | wake_up_interruptible(&adapter->cmd_wait_q.wait); | ||
942 | mwifiex_cancel_pending_ioctl(adapter); | ||
943 | /* reset cmd_sent flag to unblock new commands */ | ||
944 | adapter->cmd_sent = false; | ||
945 | } | ||
941 | } | 946 | } |
942 | if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) | 947 | if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) |
943 | mwifiex_init_fw_complete(adapter); | 948 | mwifiex_init_fw_complete(adapter); |
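Note: the cmdevt.c change above moves the wake-up of the command waiter into the timeout path itself. A hedged sketch of that wake-on-timeout idiom, with illustrative names rather than the driver's structures:

#include <linux/errno.h>
#include <linux/wait.h>

struct my_cmd_wait {
	wait_queue_head_t wait;
	int status;
};

/* Mark the pending command as timed out and unblock its waiter. */
static void my_cmd_report_timeout(struct my_cmd_wait *wq)
{
	wq->status = -ETIMEDOUT;
	wake_up_interruptible(&wq->wait);
}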
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index fc8a9bfa1248..82cf0fa2d9f6 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c | |||
@@ -161,7 +161,6 @@ static int mwifiex_sdio_suspend(struct device *dev) | |||
161 | struct sdio_mmc_card *card; | 161 | struct sdio_mmc_card *card; |
162 | struct mwifiex_adapter *adapter; | 162 | struct mwifiex_adapter *adapter; |
163 | mmc_pm_flag_t pm_flag = 0; | 163 | mmc_pm_flag_t pm_flag = 0; |
164 | int hs_actived = 0; | ||
165 | int i; | 164 | int i; |
166 | int ret = 0; | 165 | int ret = 0; |
167 | 166 | ||
@@ -188,12 +187,14 @@ static int mwifiex_sdio_suspend(struct device *dev) | |||
188 | adapter = card->adapter; | 187 | adapter = card->adapter; |
189 | 188 | ||
190 | /* Enable the Host Sleep */ | 189 | /* Enable the Host Sleep */ |
191 | hs_actived = mwifiex_enable_hs(adapter); | 190 | if (!mwifiex_enable_hs(adapter)) { |
192 | if (hs_actived) { | 191 | dev_err(adapter->dev, "cmd: failed to suspend\n"); |
193 | pr_debug("cmd: suspend with MMC_PM_KEEP_POWER\n"); | 192 | return -EFAULT; |
194 | ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); | ||
195 | } | 193 | } |
196 | 194 | ||
195 | dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n"); | ||
196 | ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); | ||
197 | |||
197 | /* Indicate device suspended */ | 198 | /* Indicate device suspended */ |
198 | adapter->is_suspended = true; | 199 | adapter->is_suspended = true; |
199 | 200 | ||
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 9970c2b1b199..b7e6607e6b6d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | |||
@@ -297,6 +297,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { | |||
297 | /*=== Customer ID ===*/ | 297 | /*=== Customer ID ===*/ |
298 | /****** 8188CU ********/ | 298 | /****** 8188CU ********/ |
299 | {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/ | 299 | {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/ |
300 | {RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/ | ||
300 | {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/ | 301 | {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/ |
301 | {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ | 302 | {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ |
302 | {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ | 303 | {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index caa011008cd0..fc24eb9b3948 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -452,29 +452,85 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, | |||
452 | /* Grant backend access to each skb fragment page. */ | 452 | /* Grant backend access to each skb fragment page. */ |
453 | for (i = 0; i < frags; i++) { | 453 | for (i = 0; i < frags; i++) { |
454 | skb_frag_t *frag = skb_shinfo(skb)->frags + i; | 454 | skb_frag_t *frag = skb_shinfo(skb)->frags + i; |
455 | struct page *page = skb_frag_page(frag); | ||
455 | 456 | ||
456 | tx->flags |= XEN_NETTXF_more_data; | 457 | len = skb_frag_size(frag); |
458 | offset = frag->page_offset; | ||
457 | 459 | ||
458 | id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); | 460 | /* Data must not cross a page boundary. */ |
459 | np->tx_skbs[id].skb = skb_get(skb); | 461 | BUG_ON(len + offset > PAGE_SIZE<<compound_order(page)); |
460 | tx = RING_GET_REQUEST(&np->tx, prod++); | ||
461 | tx->id = id; | ||
462 | ref = gnttab_claim_grant_reference(&np->gref_tx_head); | ||
463 | BUG_ON((signed short)ref < 0); | ||
464 | 462 | ||
465 | mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag))); | 463 | /* Skip unused frames from start of page */ |
466 | gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, | 464 | page += offset >> PAGE_SHIFT; |
467 | mfn, GNTMAP_readonly); | 465 | offset &= ~PAGE_MASK; |
468 | 466 | ||
469 | tx->gref = np->grant_tx_ref[id] = ref; | 467 | while (len > 0) { |
470 | tx->offset = frag->page_offset; | 468 | unsigned long bytes; |
471 | tx->size = skb_frag_size(frag); | 469 | |
472 | tx->flags = 0; | 470 | BUG_ON(offset >= PAGE_SIZE); |
471 | |||
472 | bytes = PAGE_SIZE - offset; | ||
473 | if (bytes > len) | ||
474 | bytes = len; | ||
475 | |||
476 | tx->flags |= XEN_NETTXF_more_data; | ||
477 | |||
478 | id = get_id_from_freelist(&np->tx_skb_freelist, | ||
479 | np->tx_skbs); | ||
480 | np->tx_skbs[id].skb = skb_get(skb); | ||
481 | tx = RING_GET_REQUEST(&np->tx, prod++); | ||
482 | tx->id = id; | ||
483 | ref = gnttab_claim_grant_reference(&np->gref_tx_head); | ||
484 | BUG_ON((signed short)ref < 0); | ||
485 | |||
486 | mfn = pfn_to_mfn(page_to_pfn(page)); | ||
487 | gnttab_grant_foreign_access_ref(ref, | ||
488 | np->xbdev->otherend_id, | ||
489 | mfn, GNTMAP_readonly); | ||
490 | |||
491 | tx->gref = np->grant_tx_ref[id] = ref; | ||
492 | tx->offset = offset; | ||
493 | tx->size = bytes; | ||
494 | tx->flags = 0; | ||
495 | |||
496 | offset += bytes; | ||
497 | len -= bytes; | ||
498 | |||
499 | /* Next frame */ | ||
500 | if (offset == PAGE_SIZE && len) { | ||
501 | BUG_ON(!PageCompound(page)); | ||
502 | page++; | ||
503 | offset = 0; | ||
504 | } | ||
505 | } | ||
473 | } | 506 | } |
474 | 507 | ||
475 | np->tx.req_prod_pvt = prod; | 508 | np->tx.req_prod_pvt = prod; |
476 | } | 509 | } |
477 | 510 | ||
511 | /* | ||
512 | * Count how many ring slots are required to send the frags of this | ||
513 | * skb. Each frag might be a compound page. | ||
514 | */ | ||
515 | static int xennet_count_skb_frag_slots(struct sk_buff *skb) | ||
516 | { | ||
517 | int i, frags = skb_shinfo(skb)->nr_frags; | ||
518 | int pages = 0; | ||
519 | |||
520 | for (i = 0; i < frags; i++) { | ||
521 | skb_frag_t *frag = skb_shinfo(skb)->frags + i; | ||
522 | unsigned long size = skb_frag_size(frag); | ||
523 | unsigned long offset = frag->page_offset; | ||
524 | |||
525 | /* Skip unused frames from start of page */ | ||
526 | offset &= ~PAGE_MASK; | ||
527 | |||
528 | pages += PFN_UP(offset + size); | ||
529 | } | ||
530 | |||
531 | return pages; | ||
532 | } | ||
533 | |||
478 | static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) | 534 | static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) |
479 | { | 535 | { |
480 | unsigned short id; | 536 | unsigned short id; |
@@ -487,23 +543,23 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
487 | grant_ref_t ref; | 543 | grant_ref_t ref; |
488 | unsigned long mfn; | 544 | unsigned long mfn; |
489 | int notify; | 545 | int notify; |
490 | int frags = skb_shinfo(skb)->nr_frags; | 546 | int slots; |
491 | unsigned int offset = offset_in_page(data); | 547 | unsigned int offset = offset_in_page(data); |
492 | unsigned int len = skb_headlen(skb); | 548 | unsigned int len = skb_headlen(skb); |
493 | unsigned long flags; | 549 | unsigned long flags; |
494 | 550 | ||
495 | frags += DIV_ROUND_UP(offset + len, PAGE_SIZE); | 551 | slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) + |
496 | if (unlikely(frags > MAX_SKB_FRAGS + 1)) { | 552 | xennet_count_skb_frag_slots(skb); |
497 | printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", | 553 | if (unlikely(slots > MAX_SKB_FRAGS + 1)) { |
498 | frags); | 554 | net_alert_ratelimited( |
499 | dump_stack(); | 555 | "xennet: skb rides the rocket: %d slots\n", slots); |
500 | goto drop; | 556 | goto drop; |
501 | } | 557 | } |
502 | 558 | ||
503 | spin_lock_irqsave(&np->tx_lock, flags); | 559 | spin_lock_irqsave(&np->tx_lock, flags); |
504 | 560 | ||
505 | if (unlikely(!netif_carrier_ok(dev) || | 561 | if (unlikely(!netif_carrier_ok(dev) || |
506 | (frags > 1 && !xennet_can_sg(dev)) || | 562 | (slots > 1 && !xennet_can_sg(dev)) || |
507 | netif_needs_gso(skb, netif_skb_features(skb)))) { | 563 | netif_needs_gso(skb, netif_skb_features(skb)))) { |
508 | spin_unlock_irqrestore(&np->tx_lock, flags); | 564 | spin_unlock_irqrestore(&np->tx_lock, flags); |
509 | goto drop; | 565 | goto drop; |
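Note: xennet_count_skb_frag_slots() above rounds each frag's (offset, length) span up to whole pages, since a frag backed by a compound page may need several ring slots. A hedged, stand-alone illustration of that arithmetic (PFN_UP(x) is just DIV_ROUND_UP(x, PAGE_SIZE); the 4096-byte page size is assumed):

/* Userspace illustration of the slot calculation, not kernel code. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) / PAGE_SIZE)

static unsigned long frag_slots(unsigned long offset, unsigned long size)
{
	offset &= ~PAGE_MASK;		/* skip whole unused pages at the start */
	return PFN_UP(offset + size);
}

int main(void)
{
	/* 8192 bytes starting 100 bytes into a page -> 3 ring slots */
	printf("%lu slots\n", frag_slots(100, 8192));
	return 0;
}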
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index 97c440a8cd61..30ae18a03a9c 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c | |||
@@ -698,13 +698,14 @@ static void pn533_wq_cmd(struct work_struct *work) | |||
698 | 698 | ||
699 | cmd = list_first_entry(&dev->cmd_queue, struct pn533_cmd, queue); | 699 | cmd = list_first_entry(&dev->cmd_queue, struct pn533_cmd, queue); |
700 | 700 | ||
701 | list_del(&cmd->queue); | ||
702 | |||
701 | mutex_unlock(&dev->cmd_lock); | 703 | mutex_unlock(&dev->cmd_lock); |
702 | 704 | ||
703 | __pn533_send_cmd_frame_async(dev, cmd->out_frame, cmd->in_frame, | 705 | __pn533_send_cmd_frame_async(dev, cmd->out_frame, cmd->in_frame, |
704 | cmd->in_frame_len, cmd->cmd_complete, | 706 | cmd->in_frame_len, cmd->cmd_complete, |
705 | cmd->arg, cmd->flags); | 707 | cmd->arg, cmd->flags); |
706 | 708 | ||
707 | list_del(&cmd->queue); | ||
708 | kfree(cmd); | 709 | kfree(cmd); |
709 | } | 710 | } |
710 | 711 | ||
@@ -1678,11 +1679,14 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev, | |||
1678 | static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, | 1679 | static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, |
1679 | u8 *params, int params_len) | 1680 | u8 *params, int params_len) |
1680 | { | 1681 | { |
1681 | struct pn533_cmd_jump_dep *cmd; | ||
1682 | struct pn533_cmd_jump_dep_response *resp; | 1682 | struct pn533_cmd_jump_dep_response *resp; |
1683 | struct nfc_target nfc_target; | 1683 | struct nfc_target nfc_target; |
1684 | u8 target_gt_len; | 1684 | u8 target_gt_len; |
1685 | int rc; | 1685 | int rc; |
1686 | struct pn533_cmd_jump_dep *cmd = (struct pn533_cmd_jump_dep *)arg; | ||
1687 | u8 active = cmd->active; | ||
1688 | |||
1689 | kfree(arg); | ||
1686 | 1690 | ||
1687 | if (params_len == -ENOENT) { | 1691 | if (params_len == -ENOENT) { |
1688 | nfc_dev_dbg(&dev->interface->dev, ""); | 1692 | nfc_dev_dbg(&dev->interface->dev, ""); |
@@ -1704,7 +1708,6 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, | |||
1704 | } | 1708 | } |
1705 | 1709 | ||
1706 | resp = (struct pn533_cmd_jump_dep_response *) params; | 1710 | resp = (struct pn533_cmd_jump_dep_response *) params; |
1707 | cmd = (struct pn533_cmd_jump_dep *) arg; | ||
1708 | rc = resp->status & PN533_CMD_RET_MASK; | 1711 | rc = resp->status & PN533_CMD_RET_MASK; |
1709 | if (rc != PN533_CMD_RET_SUCCESS) { | 1712 | if (rc != PN533_CMD_RET_SUCCESS) { |
1710 | nfc_dev_err(&dev->interface->dev, | 1713 | nfc_dev_err(&dev->interface->dev, |
@@ -1734,7 +1737,7 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, | |||
1734 | if (rc == 0) | 1737 | if (rc == 0) |
1735 | rc = nfc_dep_link_is_up(dev->nfc_dev, | 1738 | rc = nfc_dep_link_is_up(dev->nfc_dev, |
1736 | dev->nfc_dev->targets[0].idx, | 1739 | dev->nfc_dev->targets[0].idx, |
1737 | !cmd->active, NFC_RF_INITIATOR); | 1740 | !active, NFC_RF_INITIATOR); |
1738 | 1741 | ||
1739 | return 0; | 1742 | return 0; |
1740 | } | 1743 | } |
@@ -1819,12 +1822,8 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, | |||
1819 | rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame, | 1822 | rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame, |
1820 | dev->in_maxlen, pn533_in_dep_link_up_complete, | 1823 | dev->in_maxlen, pn533_in_dep_link_up_complete, |
1821 | cmd, GFP_KERNEL); | 1824 | cmd, GFP_KERNEL); |
1822 | if (rc) | 1825 | if (rc < 0) |
1823 | goto out; | 1826 | kfree(cmd); |
1824 | |||
1825 | |||
1826 | out: | ||
1827 | kfree(cmd); | ||
1828 | 1827 | ||
1829 | return rc; | 1828 | return rc; |
1830 | } | 1829 | } |
@@ -2078,8 +2077,12 @@ error: | |||
2078 | static int pn533_tm_send_complete(struct pn533 *dev, void *arg, | 2077 | static int pn533_tm_send_complete(struct pn533 *dev, void *arg, |
2079 | u8 *params, int params_len) | 2078 | u8 *params, int params_len) |
2080 | { | 2079 | { |
2080 | struct sk_buff *skb_out = arg; | ||
2081 | |||
2081 | nfc_dev_dbg(&dev->interface->dev, "%s", __func__); | 2082 | nfc_dev_dbg(&dev->interface->dev, "%s", __func__); |
2082 | 2083 | ||
2084 | dev_kfree_skb(skb_out); | ||
2085 | |||
2083 | if (params_len < 0) { | 2086 | if (params_len < 0) { |
2084 | nfc_dev_err(&dev->interface->dev, | 2087 | nfc_dev_err(&dev->interface->dev, |
2085 | "Error %d when sending data", | 2088 | "Error %d when sending data", |
@@ -2117,7 +2120,7 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb) | |||
2117 | 2120 | ||
2118 | rc = pn533_send_cmd_frame_async(dev, out_frame, dev->in_frame, | 2121 | rc = pn533_send_cmd_frame_async(dev, out_frame, dev->in_frame, |
2119 | dev->in_maxlen, pn533_tm_send_complete, | 2122 | dev->in_maxlen, pn533_tm_send_complete, |
2120 | NULL, GFP_KERNEL); | 2123 | skb, GFP_KERNEL); |
2121 | if (rc) { | 2124 | if (rc) { |
2122 | nfc_dev_err(&dev->interface->dev, | 2125 | nfc_dev_err(&dev->interface->dev, |
2123 | "Error %d when trying to send data", rc); | 2126 | "Error %d when trying to send data", rc); |
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index c5792d622dc4..1af4008182fd 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -17,10 +17,9 @@ | |||
17 | 17 | ||
18 | #include <linux/pci-acpi.h> | 18 | #include <linux/pci-acpi.h> |
19 | #include <linux/pm_runtime.h> | 19 | #include <linux/pm_runtime.h> |
20 | #include <linux/pm_qos.h> | ||
20 | #include "pci.h" | 21 | #include "pci.h" |
21 | 22 | ||
22 | static DEFINE_MUTEX(pci_acpi_pm_notify_mtx); | ||
23 | |||
24 | /** | 23 | /** |
25 | * pci_acpi_wake_bus - Wake-up notification handler for root buses. | 24 | * pci_acpi_wake_bus - Wake-up notification handler for root buses. |
26 | * @handle: ACPI handle of a device the notification is for. | 25 | * @handle: ACPI handle of a device the notification is for. |
@@ -68,67 +67,6 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) | |||
68 | } | 67 | } |
69 | 68 | ||
70 | /** | 69 | /** |
71 | * add_pm_notifier - Register PM notifier for given ACPI device. | ||
72 | * @dev: ACPI device to add the notifier for. | ||
73 | * @context: PCI device or bus to check for PME status if an event is signaled. | ||
74 | * | ||
75 | * NOTE: @dev need not be a run-wake or wake-up device to be a valid source of | ||
76 | * PM wake-up events. For example, wake-up events may be generated for bridges | ||
77 | * if one of the devices below the bridge is signaling PME, even if the bridge | ||
78 | * itself doesn't have a wake-up GPE associated with it. | ||
79 | */ | ||
80 | static acpi_status add_pm_notifier(struct acpi_device *dev, | ||
81 | acpi_notify_handler handler, | ||
82 | void *context) | ||
83 | { | ||
84 | acpi_status status = AE_ALREADY_EXISTS; | ||
85 | |||
86 | mutex_lock(&pci_acpi_pm_notify_mtx); | ||
87 | |||
88 | if (dev->wakeup.flags.notifier_present) | ||
89 | goto out; | ||
90 | |||
91 | status = acpi_install_notify_handler(dev->handle, | ||
92 | ACPI_SYSTEM_NOTIFY, | ||
93 | handler, context); | ||
94 | if (ACPI_FAILURE(status)) | ||
95 | goto out; | ||
96 | |||
97 | dev->wakeup.flags.notifier_present = true; | ||
98 | |||
99 | out: | ||
100 | mutex_unlock(&pci_acpi_pm_notify_mtx); | ||
101 | return status; | ||
102 | } | ||
103 | |||
104 | /** | ||
105 | * remove_pm_notifier - Unregister PM notifier from given ACPI device. | ||
106 | * @dev: ACPI device to remove the notifier from. | ||
107 | */ | ||
108 | static acpi_status remove_pm_notifier(struct acpi_device *dev, | ||
109 | acpi_notify_handler handler) | ||
110 | { | ||
111 | acpi_status status = AE_BAD_PARAMETER; | ||
112 | |||
113 | mutex_lock(&pci_acpi_pm_notify_mtx); | ||
114 | |||
115 | if (!dev->wakeup.flags.notifier_present) | ||
116 | goto out; | ||
117 | |||
118 | status = acpi_remove_notify_handler(dev->handle, | ||
119 | ACPI_SYSTEM_NOTIFY, | ||
120 | handler); | ||
121 | if (ACPI_FAILURE(status)) | ||
122 | goto out; | ||
123 | |||
124 | dev->wakeup.flags.notifier_present = false; | ||
125 | |||
126 | out: | ||
127 | mutex_unlock(&pci_acpi_pm_notify_mtx); | ||
128 | return status; | ||
129 | } | ||
130 | |||
131 | /** | ||
132 | * pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus. | 70 | * pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus. |
133 | * @dev: ACPI device to add the notifier for. | 71 | * @dev: ACPI device to add the notifier for. |
134 | * @pci_bus: PCI bus to walk checking for PME status if an event is signaled. | 72 | * @pci_bus: PCI bus to walk checking for PME status if an event is signaled. |
@@ -136,7 +74,7 @@ static acpi_status remove_pm_notifier(struct acpi_device *dev, | |||
136 | acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, | 74 | acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, |
137 | struct pci_bus *pci_bus) | 75 | struct pci_bus *pci_bus) |
138 | { | 76 | { |
139 | return add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus); | 77 | return acpi_add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus); |
140 | } | 78 | } |
141 | 79 | ||
142 | /** | 80 | /** |
@@ -145,7 +83,7 @@ acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, | |||
145 | */ | 83 | */ |
146 | acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev) | 84 | acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev) |
147 | { | 85 | { |
148 | return remove_pm_notifier(dev, pci_acpi_wake_bus); | 86 | return acpi_remove_pm_notifier(dev, pci_acpi_wake_bus); |
149 | } | 87 | } |
150 | 88 | ||
151 | /** | 89 | /** |
@@ -156,7 +94,7 @@ acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev) | |||
156 | acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, | 94 | acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, |
157 | struct pci_dev *pci_dev) | 95 | struct pci_dev *pci_dev) |
158 | { | 96 | { |
159 | return add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev); | 97 | return acpi_add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev); |
160 | } | 98 | } |
161 | 99 | ||
162 | /** | 100 | /** |
@@ -165,7 +103,7 @@ acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, | |||
165 | */ | 103 | */ |
166 | acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) | 104 | acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) |
167 | { | 105 | { |
168 | return remove_pm_notifier(dev, pci_acpi_wake_dev); | 106 | return acpi_remove_pm_notifier(dev, pci_acpi_wake_dev); |
169 | } | 107 | } |
170 | 108 | ||
171 | phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle) | 109 | phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle) |
@@ -257,11 +195,16 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
257 | return -ENODEV; | 195 | return -ENODEV; |
258 | 196 | ||
259 | switch (state) { | 197 | switch (state) { |
198 | case PCI_D3cold: | ||
199 | if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) == | ||
200 | PM_QOS_FLAGS_ALL) { | ||
201 | error = -EBUSY; | ||
202 | break; | ||
203 | } | ||
260 | case PCI_D0: | 204 | case PCI_D0: |
261 | case PCI_D1: | 205 | case PCI_D1: |
262 | case PCI_D2: | 206 | case PCI_D2: |
263 | case PCI_D3hot: | 207 | case PCI_D3hot: |
264 | case PCI_D3cold: | ||
265 | error = acpi_bus_set_power(handle, state_conv[state]); | 208 | error = acpi_bus_set_power(handle, state_conv[state]); |
266 | } | 209 | } |
267 | 210 | ||
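Note: the pci-acpi.c hunk above only lets PCI_D3cold proceed when the "no power off" PM QoS flag is not in effect for the device. A hedged sketch of that check in isolation (the helper name is illustrative):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm_qos.h>

/* Mirror of the check above: refuse a full power-off when the
 * PM_QOS_FLAG_NO_POWER_OFF flag is asserted for the device. */
static int my_may_power_off(struct device *dev)
{
	if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF) ==
	    PM_QOS_FLAGS_ALL)
		return -EBUSY;
	return 0;
}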
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index d96caefd914a..aeecf0f72cad 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig | |||
@@ -178,7 +178,7 @@ config PINCTRL_COH901 | |||
178 | ports of 8 GPIO pins each. | 178 | ports of 8 GPIO pins each. |
179 | 179 | ||
180 | config PINCTRL_SAMSUNG | 180 | config PINCTRL_SAMSUNG |
181 | bool "Samsung pinctrl driver" | 181 | bool |
182 | depends on OF && GPIOLIB | 182 | depends on OF && GPIOLIB |
183 | select PINMUX | 183 | select PINMUX |
184 | select PINCONF | 184 | select PINCONF |
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h index fa4e0a5db3f8..ffd53e3eb92f 100644 --- a/drivers/pnp/base.h +++ b/drivers/pnp/base.h | |||
@@ -159,6 +159,8 @@ struct pnp_resource { | |||
159 | 159 | ||
160 | void pnp_free_resource(struct pnp_resource *pnp_res); | 160 | void pnp_free_resource(struct pnp_resource *pnp_res); |
161 | 161 | ||
162 | struct pnp_resource *pnp_add_resource(struct pnp_dev *dev, | ||
163 | struct resource *res); | ||
162 | struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq, | 164 | struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq, |
163 | int flags); | 165 | int flags); |
164 | struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma, | 166 | struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma, |
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 767f526209e8..72e822e17d47 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c | |||
@@ -245,6 +245,10 @@ static int __init pnpacpi_add_device(struct acpi_device *device) | |||
245 | char *pnpid; | 245 | char *pnpid; |
246 | struct acpi_hardware_id *id; | 246 | struct acpi_hardware_id *id; |
247 | 247 | ||
248 | /* Skip devices that are already bound */ | ||
249 | if (device->physical_node_count) | ||
250 | return 0; | ||
251 | |||
248 | /* | 252 | /* |
249 | * If a PnPacpi device is not present, the device | 253 | * If a PnPacpi device is not present, the device |
250 | * driver should not be loaded. | 254 | * driver should not be loaded. |
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index 5be4a392a3ae..b8f4ea7b27fc 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c | |||
@@ -28,37 +28,6 @@ | |||
28 | #include "../base.h" | 28 | #include "../base.h" |
29 | #include "pnpacpi.h" | 29 | #include "pnpacpi.h" |
30 | 30 | ||
31 | #ifdef CONFIG_IA64 | ||
32 | #define valid_IRQ(i) (1) | ||
33 | #else | ||
34 | #define valid_IRQ(i) (((i) != 0) && ((i) != 2)) | ||
35 | #endif | ||
36 | |||
37 | /* | ||
38 | * Allocated Resources | ||
39 | */ | ||
40 | static int irq_flags(int triggering, int polarity, int shareable) | ||
41 | { | ||
42 | int flags; | ||
43 | |||
44 | if (triggering == ACPI_LEVEL_SENSITIVE) { | ||
45 | if (polarity == ACPI_ACTIVE_LOW) | ||
46 | flags = IORESOURCE_IRQ_LOWLEVEL; | ||
47 | else | ||
48 | flags = IORESOURCE_IRQ_HIGHLEVEL; | ||
49 | } else { | ||
50 | if (polarity == ACPI_ACTIVE_LOW) | ||
51 | flags = IORESOURCE_IRQ_LOWEDGE; | ||
52 | else | ||
53 | flags = IORESOURCE_IRQ_HIGHEDGE; | ||
54 | } | ||
55 | |||
56 | if (shareable == ACPI_SHARED) | ||
57 | flags |= IORESOURCE_IRQ_SHAREABLE; | ||
58 | |||
59 | return flags; | ||
60 | } | ||
61 | |||
62 | static void decode_irq_flags(struct pnp_dev *dev, int flags, int *triggering, | 31 | static void decode_irq_flags(struct pnp_dev *dev, int flags, int *triggering, |
63 | int *polarity, int *shareable) | 32 | int *polarity, int *shareable) |
64 | { | 33 | { |
@@ -94,45 +63,6 @@ static void decode_irq_flags(struct pnp_dev *dev, int flags, int *triggering, | |||
94 | *shareable = ACPI_EXCLUSIVE; | 63 | *shareable = ACPI_EXCLUSIVE; |
95 | } | 64 | } |
96 | 65 | ||
97 | static void pnpacpi_parse_allocated_irqresource(struct pnp_dev *dev, | ||
98 | u32 gsi, int triggering, | ||
99 | int polarity, int shareable) | ||
100 | { | ||
101 | int irq, flags; | ||
102 | int p, t; | ||
103 | |||
104 | if (!valid_IRQ(gsi)) { | ||
105 | pnp_add_irq_resource(dev, gsi, IORESOURCE_DISABLED); | ||
106 | return; | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * in IO-APIC mode, use overrided attribute. Two reasons: | ||
111 | * 1. BIOS bug in DSDT | ||
112 | * 2. BIOS uses IO-APIC mode Interrupt Source Override | ||
113 | */ | ||
114 | if (!acpi_get_override_irq(gsi, &t, &p)) { | ||
115 | t = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; | ||
116 | p = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; | ||
117 | |||
118 | if (triggering != t || polarity != p) { | ||
119 | dev_warn(&dev->dev, "IRQ %d override to %s, %s\n", | ||
120 | gsi, t ? "edge":"level", p ? "low":"high"); | ||
121 | triggering = t; | ||
122 | polarity = p; | ||
123 | } | ||
124 | } | ||
125 | |||
126 | flags = irq_flags(triggering, polarity, shareable); | ||
127 | irq = acpi_register_gsi(&dev->dev, gsi, triggering, polarity); | ||
128 | if (irq >= 0) | ||
129 | pcibios_penalize_isa_irq(irq, 1); | ||
130 | else | ||
131 | flags |= IORESOURCE_DISABLED; | ||
132 | |||
133 | pnp_add_irq_resource(dev, irq, flags); | ||
134 | } | ||
135 | |||
136 | static int dma_flags(struct pnp_dev *dev, int type, int bus_master, | 66 | static int dma_flags(struct pnp_dev *dev, int type, int bus_master, |
137 | int transfer) | 67 | int transfer) |
138 | { | 68 | { |
@@ -177,21 +107,16 @@ static int dma_flags(struct pnp_dev *dev, int type, int bus_master, | |||
177 | return flags; | 107 | return flags; |
178 | } | 108 | } |
179 | 109 | ||
180 | static void pnpacpi_parse_allocated_ioresource(struct pnp_dev *dev, u64 start, | 110 | /* |
181 | u64 len, int io_decode, | 111 | * Allocated Resources |
182 | int window) | 112 | */ |
183 | { | ||
184 | int flags = 0; | ||
185 | u64 end = start + len - 1; | ||
186 | 113 | ||
187 | if (io_decode == ACPI_DECODE_16) | 114 | static void pnpacpi_add_irqresource(struct pnp_dev *dev, struct resource *r) |
188 | flags |= IORESOURCE_IO_16BIT_ADDR; | 115 | { |
189 | if (len == 0 || end >= 0x10003) | 116 | if (!(r->flags & IORESOURCE_DISABLED)) |
190 | flags |= IORESOURCE_DISABLED; | 117 | pcibios_penalize_isa_irq(r->start, 1); |
191 | if (window) | ||
192 | flags |= IORESOURCE_WINDOW; | ||
193 | 118 | ||
194 | pnp_add_io_resource(dev, start, end, flags); | 119 | pnp_add_resource(dev, r); |
195 | } | 120 | } |
196 | 121 | ||
197 | /* | 122 | /* |
@@ -249,130 +174,49 @@ static void pnpacpi_parse_allocated_vendor(struct pnp_dev *dev, | |||
249 | } | 174 | } |
250 | } | 175 | } |
251 | 176 | ||
252 | static void pnpacpi_parse_allocated_memresource(struct pnp_dev *dev, | ||
253 | u64 start, u64 len, | ||
254 | int write_protect, int window) | ||
255 | { | ||
256 | int flags = 0; | ||
257 | u64 end = start + len - 1; | ||
258 | |||
259 | if (len == 0) | ||
260 | flags |= IORESOURCE_DISABLED; | ||
261 | if (write_protect == ACPI_READ_WRITE_MEMORY) | ||
262 | flags |= IORESOURCE_MEM_WRITEABLE; | ||
263 | if (window) | ||
264 | flags |= IORESOURCE_WINDOW; | ||
265 | |||
266 | pnp_add_mem_resource(dev, start, end, flags); | ||
267 | } | ||
268 | |||
269 | static void pnpacpi_parse_allocated_busresource(struct pnp_dev *dev, | ||
270 | u64 start, u64 len) | ||
271 | { | ||
272 | u64 end = start + len - 1; | ||
273 | |||
274 | pnp_add_bus_resource(dev, start, end); | ||
275 | } | ||
276 | |||
277 | static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev, | ||
278 | struct acpi_resource *res) | ||
279 | { | ||
280 | struct acpi_resource_address64 addr, *p = &addr; | ||
281 | acpi_status status; | ||
282 | int window; | ||
283 | u64 len; | ||
284 | |||
285 | status = acpi_resource_to_address64(res, p); | ||
286 | if (!ACPI_SUCCESS(status)) { | ||
287 | dev_warn(&dev->dev, "failed to convert resource type %d\n", | ||
288 | res->type); | ||
289 | return; | ||
290 | } | ||
291 | |||
292 | /* Windows apparently computes length rather than using _LEN */ | ||
293 | len = p->maximum - p->minimum + 1; | ||
294 | window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; | ||
295 | |||
296 | if (p->resource_type == ACPI_MEMORY_RANGE) | ||
297 | pnpacpi_parse_allocated_memresource(dev, p->minimum, len, | ||
298 | p->info.mem.write_protect, window); | ||
299 | else if (p->resource_type == ACPI_IO_RANGE) | ||
300 | pnpacpi_parse_allocated_ioresource(dev, p->minimum, len, | ||
301 | p->granularity == 0xfff ? ACPI_DECODE_10 : | ||
302 | ACPI_DECODE_16, window); | ||
303 | else if (p->resource_type == ACPI_BUS_NUMBER_RANGE) | ||
304 | pnpacpi_parse_allocated_busresource(dev, p->minimum, len); | ||
305 | } | ||
306 | |||
307 | static void pnpacpi_parse_allocated_ext_address_space(struct pnp_dev *dev, | ||
308 | struct acpi_resource *res) | ||
309 | { | ||
310 | struct acpi_resource_extended_address64 *p = &res->data.ext_address64; | ||
311 | int window; | ||
312 | u64 len; | ||
313 | |||
314 | /* Windows apparently computes length rather than using _LEN */ | ||
315 | len = p->maximum - p->minimum + 1; | ||
316 | window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; | ||
317 | |||
318 | if (p->resource_type == ACPI_MEMORY_RANGE) | ||
319 | pnpacpi_parse_allocated_memresource(dev, p->minimum, len, | ||
320 | p->info.mem.write_protect, window); | ||
321 | else if (p->resource_type == ACPI_IO_RANGE) | ||
322 | pnpacpi_parse_allocated_ioresource(dev, p->minimum, len, | ||
323 | p->granularity == 0xfff ? ACPI_DECODE_10 : | ||
324 | ACPI_DECODE_16, window); | ||
325 | else if (p->resource_type == ACPI_BUS_NUMBER_RANGE) | ||
326 | pnpacpi_parse_allocated_busresource(dev, p->minimum, len); | ||
327 | } | ||
328 | |||
329 | static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | 177 | static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, |
330 | void *data) | 178 | void *data) |
331 | { | 179 | { |
332 | struct pnp_dev *dev = data; | 180 | struct pnp_dev *dev = data; |
333 | struct acpi_resource_irq *irq; | ||
334 | struct acpi_resource_dma *dma; | 181 | struct acpi_resource_dma *dma; |
335 | struct acpi_resource_io *io; | ||
336 | struct acpi_resource_fixed_io *fixed_io; | ||
337 | struct acpi_resource_vendor_typed *vendor_typed; | 182 | struct acpi_resource_vendor_typed *vendor_typed; |
338 | struct acpi_resource_memory24 *memory24; | 183 | struct resource r; |
339 | struct acpi_resource_memory32 *memory32; | ||
340 | struct acpi_resource_fixed_memory32 *fixed_memory32; | ||
341 | struct acpi_resource_extended_irq *extended_irq; | ||
342 | int i, flags; | 184 | int i, flags; |
343 | 185 | ||
344 | switch (res->type) { | 186 | if (acpi_dev_resource_memory(res, &r) |
345 | case ACPI_RESOURCE_TYPE_IRQ: | 187 | || acpi_dev_resource_io(res, &r) |
346 | /* | 188 | || acpi_dev_resource_address_space(res, &r) |
347 | * Per spec, only one interrupt per descriptor is allowed in | 189 | || acpi_dev_resource_ext_address_space(res, &r)) { |
348 | * _CRS, but some firmware violates this, so parse them all. | 190 | pnp_add_resource(dev, &r); |
349 | */ | 191 | return AE_OK; |
350 | irq = &res->data.irq; | 192 | } |
351 | if (irq->interrupt_count == 0) | ||
352 | pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED); | ||
353 | else { | ||
354 | for (i = 0; i < irq->interrupt_count; i++) { | ||
355 | pnpacpi_parse_allocated_irqresource(dev, | ||
356 | irq->interrupts[i], | ||
357 | irq->triggering, | ||
358 | irq->polarity, | ||
359 | irq->sharable); | ||
360 | } | ||
361 | 193 | ||
194 | r.flags = 0; | ||
195 | if (acpi_dev_resource_interrupt(res, 0, &r)) { | ||
196 | pnpacpi_add_irqresource(dev, &r); | ||
197 | for (i = 1; acpi_dev_resource_interrupt(res, i, &r); i++) | ||
198 | pnpacpi_add_irqresource(dev, &r); | ||
199 | |||
200 | if (i > 1) { | ||
362 | /* | 201 | /* |
363 | * The IRQ encoder puts a single interrupt in each | 202 | * The IRQ encoder puts a single interrupt in each |
364 | * descriptor, so if a _CRS descriptor has more than | 203 | * descriptor, so if a _CRS descriptor has more than |
365 | * one interrupt, we won't be able to re-encode it. | 204 | * one interrupt, we won't be able to re-encode it. |
366 | */ | 205 | */ |
367 | if (pnp_can_write(dev) && irq->interrupt_count > 1) { | 206 | if (pnp_can_write(dev)) { |
368 | dev_warn(&dev->dev, "multiple interrupts in " | 207 | dev_warn(&dev->dev, "multiple interrupts in " |
369 | "_CRS descriptor; configuration can't " | 208 | "_CRS descriptor; configuration can't " |
370 | "be changed\n"); | 209 | "be changed\n"); |
371 | dev->capabilities &= ~PNP_WRITE; | 210 | dev->capabilities &= ~PNP_WRITE; |
372 | } | 211 | } |
373 | } | 212 | } |
374 | break; | 213 | return AE_OK; |
214 | } else if (r.flags & IORESOURCE_DISABLED) { | ||
215 | pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED); | ||
216 | return AE_OK; | ||
217 | } | ||
375 | 218 | ||
219 | switch (res->type) { | ||
376 | case ACPI_RESOURCE_TYPE_DMA: | 220 | case ACPI_RESOURCE_TYPE_DMA: |
377 | dma = &res->data.dma; | 221 | dma = &res->data.dma; |
378 | if (dma->channel_count > 0 && dma->channels[0] != (u8) -1) | 222 | if (dma->channel_count > 0 && dma->channels[0] != (u8) -1) |
@@ -383,26 +227,10 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | |||
383 | pnp_add_dma_resource(dev, dma->channels[0], flags); | 227 | pnp_add_dma_resource(dev, dma->channels[0], flags); |
384 | break; | 228 | break; |
385 | 229 | ||
386 | case ACPI_RESOURCE_TYPE_IO: | ||
387 | io = &res->data.io; | ||
388 | pnpacpi_parse_allocated_ioresource(dev, | ||
389 | io->minimum, | ||
390 | io->address_length, | ||
391 | io->io_decode, 0); | ||
392 | break; | ||
393 | |||
394 | case ACPI_RESOURCE_TYPE_START_DEPENDENT: | 230 | case ACPI_RESOURCE_TYPE_START_DEPENDENT: |
395 | case ACPI_RESOURCE_TYPE_END_DEPENDENT: | 231 | case ACPI_RESOURCE_TYPE_END_DEPENDENT: |
396 | break; | 232 | break; |
397 | 233 | ||
398 | case ACPI_RESOURCE_TYPE_FIXED_IO: | ||
399 | fixed_io = &res->data.fixed_io; | ||
400 | pnpacpi_parse_allocated_ioresource(dev, | ||
401 | fixed_io->address, | ||
402 | fixed_io->address_length, | ||
403 | ACPI_DECODE_10, 0); | ||
404 | break; | ||
405 | |||
406 | case ACPI_RESOURCE_TYPE_VENDOR: | 234 | case ACPI_RESOURCE_TYPE_VENDOR: |
407 | vendor_typed = &res->data.vendor_typed; | 235 | vendor_typed = &res->data.vendor_typed; |
408 | pnpacpi_parse_allocated_vendor(dev, vendor_typed); | 236 | pnpacpi_parse_allocated_vendor(dev, vendor_typed); |
@@ -411,66 +239,6 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | |||
411 | case ACPI_RESOURCE_TYPE_END_TAG: | 239 | case ACPI_RESOURCE_TYPE_END_TAG: |
412 | break; | 240 | break; |
413 | 241 | ||
414 | case ACPI_RESOURCE_TYPE_MEMORY24: | ||
415 | memory24 = &res->data.memory24; | ||
416 | pnpacpi_parse_allocated_memresource(dev, | ||
417 | memory24->minimum, | ||
418 | memory24->address_length, | ||
419 | memory24->write_protect, 0); | ||
420 | break; | ||
421 | case ACPI_RESOURCE_TYPE_MEMORY32: | ||
422 | memory32 = &res->data.memory32; | ||
423 | pnpacpi_parse_allocated_memresource(dev, | ||
424 | memory32->minimum, | ||
425 | memory32->address_length, | ||
426 | memory32->write_protect, 0); | ||
427 | break; | ||
428 | case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: | ||
429 | fixed_memory32 = &res->data.fixed_memory32; | ||
430 | pnpacpi_parse_allocated_memresource(dev, | ||
431 | fixed_memory32->address, | ||
432 | fixed_memory32->address_length, | ||
433 | fixed_memory32->write_protect, 0); | ||
434 | break; | ||
435 | case ACPI_RESOURCE_TYPE_ADDRESS16: | ||
436 | case ACPI_RESOURCE_TYPE_ADDRESS32: | ||
437 | case ACPI_RESOURCE_TYPE_ADDRESS64: | ||
438 | pnpacpi_parse_allocated_address_space(dev, res); | ||
439 | break; | ||
440 | |||
441 | case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: | ||
442 | pnpacpi_parse_allocated_ext_address_space(dev, res); | ||
443 | break; | ||
444 | |||
445 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: | ||
446 | extended_irq = &res->data.extended_irq; | ||
447 | |||
448 | if (extended_irq->interrupt_count == 0) | ||
449 | pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED); | ||
450 | else { | ||
451 | for (i = 0; i < extended_irq->interrupt_count; i++) { | ||
452 | pnpacpi_parse_allocated_irqresource(dev, | ||
453 | extended_irq->interrupts[i], | ||
454 | extended_irq->triggering, | ||
455 | extended_irq->polarity, | ||
456 | extended_irq->sharable); | ||
457 | } | ||
458 | |||
459 | /* | ||
460 | * The IRQ encoder puts a single interrupt in each | ||
461 | * descriptor, so if a _CRS descriptor has more than | ||
462 | * one interrupt, we won't be able to re-encode it. | ||
463 | */ | ||
464 | if (pnp_can_write(dev) && | ||
465 | extended_irq->interrupt_count > 1) { | ||
466 | dev_warn(&dev->dev, "multiple interrupts in " | ||
467 | "_CRS descriptor; configuration can't " | ||
468 | "be changed\n"); | ||
469 | dev->capabilities &= ~PNP_WRITE; | ||
470 | } | ||
471 | } | ||
472 | break; | ||
473 | |||
474 | case ACPI_RESOURCE_TYPE_GENERIC_REGISTER: | 242 | case ACPI_RESOURCE_TYPE_GENERIC_REGISTER: |
475 | break; | 243 | break; |
476 | 244 | ||
@@ -531,7 +299,7 @@ static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev, | |||
531 | if (p->interrupts[i]) | 299 | if (p->interrupts[i]) |
532 | __set_bit(p->interrupts[i], map.bits); | 300 | __set_bit(p->interrupts[i], map.bits); |
533 | 301 | ||
534 | flags = irq_flags(p->triggering, p->polarity, p->sharable); | 302 | flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->sharable); |
535 | pnp_register_irq_resource(dev, option_flags, &map, flags); | 303 | pnp_register_irq_resource(dev, option_flags, &map, flags); |
536 | } | 304 | } |
537 | 305 | ||
@@ -555,7 +323,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev, | |||
555 | } | 323 | } |
556 | } | 324 | } |
557 | 325 | ||
558 | flags = irq_flags(p->triggering, p->polarity, p->sharable); | 326 | flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->sharable); |
559 | pnp_register_irq_resource(dev, option_flags, &map, flags); | 327 | pnp_register_irq_resource(dev, option_flags, &map, flags); |
560 | } | 328 | } |
561 | 329 | ||
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c index b0ecacbe53b1..3e6db1c1dc29 100644 --- a/drivers/pnp/resource.c +++ b/drivers/pnp/resource.c | |||
@@ -503,6 +503,22 @@ static struct pnp_resource *pnp_new_resource(struct pnp_dev *dev) | |||
503 | return pnp_res; | 503 | return pnp_res; |
504 | } | 504 | } |
505 | 505 | ||
506 | struct pnp_resource *pnp_add_resource(struct pnp_dev *dev, | ||
507 | struct resource *res) | ||
508 | { | ||
509 | struct pnp_resource *pnp_res; | ||
510 | |||
511 | pnp_res = pnp_new_resource(dev); | ||
512 | if (!pnp_res) { | ||
513 | dev_err(&dev->dev, "can't add resource %pR\n", res); | ||
514 | return NULL; | ||
515 | } | ||
516 | |||
517 | pnp_res->res = *res; | ||
518 | dev_dbg(&dev->dev, "%pR\n", res); | ||
519 | return pnp_res; | ||
520 | } | ||
521 | |||
506 | struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq, | 522 | struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq, |
507 | int flags) | 523 | int flags) |
508 | { | 524 | { |
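Note: pnp_add_resource() above lets pnpacpi hand over a struct resource that was already filled in by the generic ACPI resource helpers, instead of going through the type-specific pnp_add_*_resource() calls. A hedged usage sketch (the address range is made up purely for illustration, and the helper is PnP-core internal):

#include <linux/ioport.h>
#include "base.h"		/* pnp_add_resource() is declared in drivers/pnp/base.h */

/* Illustrative only: attach a pre-built memory resource to a PnP device. */
static void my_add_mem_resource(struct pnp_dev *dev)
{
	struct resource r = {
		.start = 0xfed40000,
		.end   = 0xfed44fff,
		.flags = IORESOURCE_MEM,
	};

	pnp_add_resource(dev, &r);
}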
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index c17ae22567e0..0c6fcb461faf 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c | |||
@@ -401,7 +401,7 @@ EXPORT_SYMBOL_GPL(rio_release_inb_pwrite); | |||
401 | /** | 401 | /** |
402 | * rio_map_inb_region -- Map inbound memory region. | 402 | * rio_map_inb_region -- Map inbound memory region. |
403 | * @mport: Master port. | 403 | * @mport: Master port. |
404 | * @lstart: physical address of memory region to be mapped | 404 | * @local: physical address of memory region to be mapped |
405 | * @rbase: RIO base address assigned to this window | 405 | * @rbase: RIO base address assigned to this window |
406 | * @size: Size of the memory region | 406 | * @size: Size of the memory region |
407 | * @rflags: Flags for mapping. | 407 | * @rflags: Flags for mapping. |
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 5c4829cba6a6..e872c8be080e 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -1381,22 +1381,14 @@ struct regulator *regulator_get_exclusive(struct device *dev, const char *id) | |||
1381 | } | 1381 | } |
1382 | EXPORT_SYMBOL_GPL(regulator_get_exclusive); | 1382 | EXPORT_SYMBOL_GPL(regulator_get_exclusive); |
1383 | 1383 | ||
1384 | /** | 1384 | /* Locks held by regulator_put() */ |
1385 | * regulator_put - "free" the regulator source | 1385 | static void _regulator_put(struct regulator *regulator) |
1386 | * @regulator: regulator source | ||
1387 | * | ||
1388 | * Note: drivers must ensure that all regulator_enable calls made on this | ||
1389 | * regulator source are balanced by regulator_disable calls prior to calling | ||
1390 | * this function. | ||
1391 | */ | ||
1392 | void regulator_put(struct regulator *regulator) | ||
1393 | { | 1386 | { |
1394 | struct regulator_dev *rdev; | 1387 | struct regulator_dev *rdev; |
1395 | 1388 | ||
1396 | if (regulator == NULL || IS_ERR(regulator)) | 1389 | if (regulator == NULL || IS_ERR(regulator)) |
1397 | return; | 1390 | return; |
1398 | 1391 | ||
1399 | mutex_lock(®ulator_list_mutex); | ||
1400 | rdev = regulator->rdev; | 1392 | rdev = regulator->rdev; |
1401 | 1393 | ||
1402 | debugfs_remove_recursive(regulator->debugfs); | 1394 | debugfs_remove_recursive(regulator->debugfs); |
@@ -1412,6 +1404,20 @@ void regulator_put(struct regulator *regulator) | |||
1412 | rdev->exclusive = 0; | 1404 | rdev->exclusive = 0; |
1413 | 1405 | ||
1414 | module_put(rdev->owner); | 1406 | module_put(rdev->owner); |
1407 | } | ||
1408 | |||
1409 | /** | ||
1410 | * regulator_put - "free" the regulator source | ||
1411 | * @regulator: regulator source | ||
1412 | * | ||
1413 | * Note: drivers must ensure that all regulator_enable calls made on this | ||
1414 | * regulator source are balanced by regulator_disable calls prior to calling | ||
1415 | * this function. | ||
1416 | */ | ||
1417 | void regulator_put(struct regulator *regulator) | ||
1418 | { | ||
1419 | mutex_lock(®ulator_list_mutex); | ||
1420 | _regulator_put(regulator); | ||
1415 | mutex_unlock(®ulator_list_mutex); | 1421 | mutex_unlock(®ulator_list_mutex); |
1416 | } | 1422 | } |
1417 | EXPORT_SYMBOL_GPL(regulator_put); | 1423 | EXPORT_SYMBOL_GPL(regulator_put); |
@@ -1974,7 +1980,7 @@ int regulator_is_supported_voltage(struct regulator *regulator, | |||
1974 | if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) { | 1980 | if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) { |
1975 | ret = regulator_get_voltage(regulator); | 1981 | ret = regulator_get_voltage(regulator); |
1976 | if (ret >= 0) | 1982 | if (ret >= 0) |
1977 | return (min_uV >= ret && ret <= max_uV); | 1983 | return (min_uV <= ret && ret <= max_uV); |
1978 | else | 1984 | else |
1979 | return ret; | 1985 | return ret; |
1980 | } | 1986 | } |
@@ -3365,7 +3371,7 @@ regulator_register(const struct regulator_desc *regulator_desc, | |||
3365 | if (ret != 0) { | 3371 | if (ret != 0) { |
3366 | rdev_err(rdev, "Failed to request enable GPIO%d: %d\n", | 3372 | rdev_err(rdev, "Failed to request enable GPIO%d: %d\n", |
3367 | config->ena_gpio, ret); | 3373 | config->ena_gpio, ret); |
3368 | goto clean; | 3374 | goto wash; |
3369 | } | 3375 | } |
3370 | 3376 | ||
3371 | rdev->ena_gpio = config->ena_gpio; | 3377 | rdev->ena_gpio = config->ena_gpio; |
@@ -3445,10 +3451,11 @@ unset_supplies: | |||
3445 | 3451 | ||
3446 | scrub: | 3452 | scrub: |
3447 | if (rdev->supply) | 3453 | if (rdev->supply) |
3448 | regulator_put(rdev->supply); | 3454 | _regulator_put(rdev->supply); |
3449 | if (rdev->ena_gpio) | 3455 | if (rdev->ena_gpio) |
3450 | gpio_free(rdev->ena_gpio); | 3456 | gpio_free(rdev->ena_gpio); |
3451 | kfree(rdev->constraints); | 3457 | kfree(rdev->constraints); |
3458 | wash: | ||
3452 | device_unregister(&rdev->dev); | 3459 | device_unregister(&rdev->dev); |
3453 | /* device core frees rdev */ | 3460 | /* device core frees rdev */ |
3454 | rdev = ERR_PTR(ret); | 3461 | rdev = ERR_PTR(ret); |
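Note: splitting regulator_put() into a locked wrapper and an unlocked _regulator_put() is the usual way to let an error path that already holds the list mutex (the scrub: label above) drop a reference without deadlocking on itself. A generic, hedged sketch of that pattern with illustrative names:

#include <linux/mutex.h>

struct my_obj;

static DEFINE_MUTEX(my_list_mutex);

/* Caller must hold my_list_mutex. */
static void _my_put(struct my_obj *obj)
{
	if (!obj)
		return;
	/* ... release references, free bookkeeping ... */
}

/* Public entry point: takes the lock, then reuses the unlocked helper. */
void my_put(struct my_obj *obj)
{
	mutex_lock(&my_list_mutex);
	_my_put(obj);
	mutex_unlock(&my_list_mutex);
}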
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 9ffb6d5f17aa..4ed343e4eb41 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #define RAW3215_NR_CCWS 3 | 44 | #define RAW3215_NR_CCWS 3 |
45 | #define RAW3215_TIMEOUT HZ/10 /* time for delayed output */ | 45 | #define RAW3215_TIMEOUT HZ/10 /* time for delayed output */ |
46 | 46 | ||
47 | #define RAW3215_FIXED 1 /* 3215 console device is not be freed */ | ||
48 | #define RAW3215_WORKING 4 /* set if a request is being worked on */ | 47 | #define RAW3215_WORKING 4 /* set if a request is being worked on */ |
49 | #define RAW3215_THROTTLED 8 /* set if reading is disabled */ | 48 | #define RAW3215_THROTTLED 8 /* set if reading is disabled */ |
50 | #define RAW3215_STOPPED 16 /* set if writing is disabled */ | 49 | #define RAW3215_STOPPED 16 /* set if writing is disabled */ |
@@ -339,8 +338,10 @@ static void raw3215_wakeup(unsigned long data) | |||
339 | struct tty_struct *tty; | 338 | struct tty_struct *tty; |
340 | 339 | ||
341 | tty = tty_port_tty_get(&raw->port); | 340 | tty = tty_port_tty_get(&raw->port); |
342 | tty_wakeup(tty); | 341 | if (tty) { |
343 | tty_kref_put(tty); | 342 | tty_wakeup(tty); |
343 | tty_kref_put(tty); | ||
344 | } | ||
344 | } | 345 | } |
345 | 346 | ||
346 | /* | 347 | /* |
@@ -629,8 +630,7 @@ static void raw3215_shutdown(struct raw3215_info *raw) | |||
629 | DECLARE_WAITQUEUE(wait, current); | 630 | DECLARE_WAITQUEUE(wait, current); |
630 | unsigned long flags; | 631 | unsigned long flags; |
631 | 632 | ||
632 | if (!(raw->port.flags & ASYNC_INITIALIZED) || | 633 | if (!(raw->port.flags & ASYNC_INITIALIZED)) |
633 | (raw->flags & RAW3215_FIXED)) | ||
634 | return; | 634 | return; |
635 | /* Wait for outstanding requests, then free irq */ | 635 | /* Wait for outstanding requests, then free irq */ |
636 | spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); | 636 | spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); |
@@ -926,8 +926,6 @@ static int __init con3215_init(void) | |||
926 | dev_set_drvdata(&cdev->dev, raw); | 926 | dev_set_drvdata(&cdev->dev, raw); |
927 | cdev->handler = raw3215_irq; | 927 | cdev->handler = raw3215_irq; |
928 | 928 | ||
929 | raw->flags |= RAW3215_FIXED; | ||
930 | |||
931 | /* Request the console irq */ | 929 | /* Request the console irq */ |
932 | if (raw3215_startup(raw) != 0) { | 930 | if (raw3215_startup(raw) != 0) { |
933 | raw3215_free_info(raw); | 931 | raw3215_free_info(raw); |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 3e25d3150456..4d6ba00d0047 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -2942,13 +2942,33 @@ static int qeth_query_ipassists_cb(struct qeth_card *card, | |||
2942 | QETH_DBF_TEXT(SETUP, 2, "qipasscb"); | 2942 | QETH_DBF_TEXT(SETUP, 2, "qipasscb"); |
2943 | 2943 | ||
2944 | cmd = (struct qeth_ipa_cmd *) data; | 2944 | cmd = (struct qeth_ipa_cmd *) data; |
2945 | |||
2946 | switch (cmd->hdr.return_code) { | ||
2947 | case IPA_RC_NOTSUPP: | ||
2948 | case IPA_RC_L2_UNSUPPORTED_CMD: | ||
2949 | QETH_DBF_TEXT(SETUP, 2, "ipaunsup"); | ||
2950 | card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS; | ||
2951 | card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS; | ||
2952 | return 0; | ||
2953 | default: | ||
2954 | if (cmd->hdr.return_code) { | ||
2955 | QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled " | ||
2956 | "rc=%d\n", | ||
2957 | dev_name(&card->gdev->dev), | ||
2958 | cmd->hdr.return_code); | ||
2959 | return 0; | ||
2960 | } | ||
2961 | } | ||
2962 | |||
2945 | if (cmd->hdr.prot_version == QETH_PROT_IPV4) { | 2963 | if (cmd->hdr.prot_version == QETH_PROT_IPV4) { |
2946 | card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; | 2964 | card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; |
2947 | card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; | 2965 | card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; |
2948 | } else { | 2966 | } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) { |
2949 | card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; | 2967 | card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; |
2950 | card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; | 2968 | card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; |
2951 | } | 2969 | } else |
2970 | QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected" | ||
2971 | "\n", dev_name(&card->gdev->dev)); | ||
2952 | QETH_DBF_TEXT(SETUP, 2, "suppenbl"); | 2972 | QETH_DBF_TEXT(SETUP, 2, "suppenbl"); |
2953 | QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_supported); | 2973 | QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_supported); |
2954 | QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_enabled); | 2974 | QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_enabled); |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index e67e0258aec5..fddb62654b6a 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -626,10 +626,13 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) | |||
626 | QETH_DBF_TEXT(SETUP, 2, "doL2init"); | 626 | QETH_DBF_TEXT(SETUP, 2, "doL2init"); |
627 | QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card)); | 627 | QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card)); |
628 | 628 | ||
629 | rc = qeth_query_setadapterparms(card); | 629 | if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { |
630 | if (rc) { | 630 | rc = qeth_query_setadapterparms(card); |
631 | QETH_DBF_MESSAGE(2, "could not query adapter parameters on " | 631 | if (rc) { |
632 | "device %s: x%x\n", CARD_BUS_ID(card), rc); | 632 | QETH_DBF_MESSAGE(2, "could not query adapter " |
633 | "parameters on device %s: x%x\n", | ||
634 | CARD_BUS_ID(card), rc); | ||
635 | } | ||
633 | } | 636 | } |
634 | 637 | ||
635 | if (card->info.type == QETH_CARD_TYPE_IQD || | 638 | if (card->info.type == QETH_CARD_TYPE_IQD || |
@@ -676,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) | |||
676 | return -ERESTARTSYS; | 679 | return -ERESTARTSYS; |
677 | } | 680 | } |
678 | rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); | 681 | rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); |
679 | if (!rc) | 682 | if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND)) |
680 | rc = qeth_l2_send_setmac(card, addr->sa_data); | 683 | rc = qeth_l2_send_setmac(card, addr->sa_data); |
681 | return rc ? -EINVAL : 0; | 684 | return rc ? -EINVAL : 0; |
682 | } | 685 | } |
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index c1bafc3f3fb1..9594ab62702b 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c | |||
@@ -1972,7 +1972,7 @@ sci_io_request_frame_handler(struct isci_request *ireq, | |||
1972 | frame_index, | 1972 | frame_index, |
1973 | (void **)&frame_buffer); | 1973 | (void **)&frame_buffer); |
1974 | 1974 | ||
1975 | sci_controller_copy_sata_response(&ireq->stp.req, | 1975 | sci_controller_copy_sata_response(&ireq->stp.rsp, |
1976 | frame_header, | 1976 | frame_header, |
1977 | frame_buffer); | 1977 | frame_buffer); |
1978 | 1978 | ||
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 2936b447cae9..2c0d0ec8150b 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -55,6 +55,7 @@ | |||
55 | #include <linux/cpu.h> | 55 | #include <linux/cpu.h> |
56 | #include <linux/mutex.h> | 56 | #include <linux/mutex.h> |
57 | #include <linux/async.h> | 57 | #include <linux/async.h> |
58 | #include <asm/unaligned.h> | ||
58 | 59 | ||
59 | #include <scsi/scsi.h> | 60 | #include <scsi/scsi.h> |
60 | #include <scsi/scsi_cmnd.h> | 61 | #include <scsi/scsi_cmnd.h> |
@@ -1062,6 +1063,50 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, | |||
1062 | EXPORT_SYMBOL_GPL(scsi_get_vpd_page); | 1063 | EXPORT_SYMBOL_GPL(scsi_get_vpd_page); |
1063 | 1064 | ||
1064 | /** | 1065 | /** |
1066 | * scsi_report_opcode - Find out if a given command opcode is supported | ||
1067 | * @sdev: scsi device to query | ||
1068 | * @buffer: scratch buffer (must be at least 20 bytes long) | ||
1069 | * @len: length of buffer | ||
1070 | * @opcode: opcode for command to look up | ||
1071 | * | ||
1072 | * Uses the REPORT SUPPORTED OPERATION CODES to look up the given | ||
1073 | * opcode. Returns 0 if RSOC fails or if the command opcode is | ||
1074 | * unsupported. Returns 1 if the device claims to support the command. | ||
1075 | */ | ||
1076 | int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, | ||
1077 | unsigned int len, unsigned char opcode) | ||
1078 | { | ||
1079 | unsigned char cmd[16]; | ||
1080 | struct scsi_sense_hdr sshdr; | ||
1081 | int result; | ||
1082 | |||
1083 | if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3) | ||
1084 | return 0; | ||
1085 | |||
1086 | memset(cmd, 0, 16); | ||
1087 | cmd[0] = MAINTENANCE_IN; | ||
1088 | cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES; | ||
1089 | cmd[2] = 1; /* One command format */ | ||
1090 | cmd[3] = opcode; | ||
1091 | put_unaligned_be32(len, &cmd[6]); | ||
1092 | memset(buffer, 0, len); | ||
1093 | |||
1094 | result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, | ||
1095 | &sshdr, 30 * HZ, 3, NULL); | ||
1096 | |||
1097 | if (result && scsi_sense_valid(&sshdr) && | ||
1098 | sshdr.sense_key == ILLEGAL_REQUEST && | ||
1099 | (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00) | ||
1100 | return 0; | ||
1101 | |||
1102 | if ((buffer[1] & 3) == 3) /* Command supported */ | ||
1103 | return 1; | ||
1104 | |||
1105 | return 0; | ||
1106 | } | ||
1107 | EXPORT_SYMBOL(scsi_report_opcode); | ||
1108 | |||
1109 | /** | ||
1065 | * scsi_device_get - get an additional reference to a scsi_device | 1110 | * scsi_device_get - get an additional reference to a scsi_device |
1066 | * @sdev: device to get a reference to | 1111 | * @sdev: device to get a reference to |
1067 | * | 1112 | * |
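Note: scsi_report_opcode() above gives upper-level drivers a generic way to ask the target whether a command is supported before relying on it. A hedged usage sketch of a caller, roughly how a disk driver might gate WRITE SAME(16) support (buffer handling simplified, function name illustrative):

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

/* Probe WRITE SAME(16); scsi_report_opcode() returns 1 if the device claims support. */
static bool my_supports_write_same16(struct scsi_device *sdev)
{
	unsigned char buf[64];	/* scratch buffer, must be at least 20 bytes */

	return scsi_report_opcode(sdev, buf, sizeof(buf), WRITE_SAME_16) == 1;
}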
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index da36a3a81a9e..9032e910bca3 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -900,11 +900,23 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
900 | action = ACTION_FAIL; | 900 | action = ACTION_FAIL; |
901 | error = -EILSEQ; | 901 | error = -EILSEQ; |
902 | /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ | 902 | /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ |
903 | } else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) && | 903 | } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { |
904 | (cmd->cmnd[0] == UNMAP || | 904 | switch (cmd->cmnd[0]) { |
905 | cmd->cmnd[0] == WRITE_SAME_16 || | 905 | case UNMAP: |
906 | cmd->cmnd[0] == WRITE_SAME)) { | 906 | description = "Discard failure"; |
907 | description = "Discard failure"; | 907 | break; |
908 | case WRITE_SAME: | ||
909 | case WRITE_SAME_16: | ||
910 | if (cmd->cmnd[1] & 0x8) | ||
911 | description = "Discard failure"; | ||
912 | else | ||
913 | description = | ||
914 | "Write same failure"; | ||
915 | break; | ||
916 | default: | ||
917 | description = "Invalid command failure"; | ||
918 | break; | ||
919 | } | ||
908 | action = ACTION_FAIL; | 920 | action = ACTION_FAIL; |
909 | error = -EREMOTEIO; | 921 | error = -EREMOTEIO; |
910 | } else | 922 | } else |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 12f6fdfc1147..352bc77b7c88 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -99,6 +99,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); | |||
99 | #endif | 99 | #endif |
100 | 100 | ||
101 | static void sd_config_discard(struct scsi_disk *, unsigned int); | 101 | static void sd_config_discard(struct scsi_disk *, unsigned int); |
102 | static void sd_config_write_same(struct scsi_disk *); | ||
102 | static int sd_revalidate_disk(struct gendisk *); | 103 | static int sd_revalidate_disk(struct gendisk *); |
103 | static void sd_unlock_native_capacity(struct gendisk *disk); | 104 | static void sd_unlock_native_capacity(struct gendisk *disk); |
104 | static int sd_probe(struct device *); | 105 | static int sd_probe(struct device *); |
@@ -395,6 +396,45 @@ sd_store_max_medium_access_timeouts(struct device *dev, | |||
395 | return err ? err : count; | 396 | return err ? err : count; |
396 | } | 397 | } |
397 | 398 | ||
399 | static ssize_t | ||
400 | sd_show_write_same_blocks(struct device *dev, struct device_attribute *attr, | ||
401 | char *buf) | ||
402 | { | ||
403 | struct scsi_disk *sdkp = to_scsi_disk(dev); | ||
404 | |||
405 | return snprintf(buf, 20, "%u\n", sdkp->max_ws_blocks); | ||
406 | } | ||
407 | |||
408 | static ssize_t | ||
409 | sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr, | ||
410 | const char *buf, size_t count) | ||
411 | { | ||
412 | struct scsi_disk *sdkp = to_scsi_disk(dev); | ||
413 | struct scsi_device *sdp = sdkp->device; | ||
414 | unsigned long max; | ||
415 | int err; | ||
416 | |||
417 | if (!capable(CAP_SYS_ADMIN)) | ||
418 | return -EACCES; | ||
419 | |||
420 | if (sdp->type != TYPE_DISK) | ||
421 | return -EINVAL; | ||
422 | |||
423 | err = kstrtoul(buf, 10, &max); | ||
424 | |||
425 | if (err) | ||
426 | return err; | ||
427 | |||
428 | if (max == 0) | ||
429 | sdp->no_write_same = 1; | ||
430 | else if (max <= SD_MAX_WS16_BLOCKS) | ||
431 | sdkp->max_ws_blocks = max; | ||
432 | |||
433 | sd_config_write_same(sdkp); | ||
434 | |||
435 | return count; | ||
436 | } | ||
437 | |||
398 | static struct device_attribute sd_disk_attrs[] = { | 438 | static struct device_attribute sd_disk_attrs[] = { |
399 | __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type, | 439 | __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type, |
400 | sd_store_cache_type), | 440 | sd_store_cache_type), |
@@ -410,6 +450,8 @@ static struct device_attribute sd_disk_attrs[] = { | |||
410 | __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL), | 450 | __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL), |
411 | __ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode, | 451 | __ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode, |
412 | sd_store_provisioning_mode), | 452 | sd_store_provisioning_mode), |
453 | __ATTR(max_write_same_blocks, S_IRUGO|S_IWUSR, | ||
454 | sd_show_write_same_blocks, sd_store_write_same_blocks), | ||
413 | __ATTR(max_medium_access_timeouts, S_IRUGO|S_IWUSR, | 455 | __ATTR(max_medium_access_timeouts, S_IRUGO|S_IWUSR, |
414 | sd_show_max_medium_access_timeouts, | 456 | sd_show_max_medium_access_timeouts, |
415 | sd_store_max_medium_access_timeouts), | 457 | sd_store_max_medium_access_timeouts), |
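The new max_write_same_blocks attribute lets an administrator cap or disable WRITE SAME per device at runtime. A hedged sketch follows; the /sys/class/scsi_disk/0:0:0:0/ path is an assumed example device, and writing the attribute requires CAP_SYS_ADMIN, per sd_store_write_same_blocks() above.

/*
 * Sketch only: disable WRITE SAME on one disk by writing "0" to the new
 * attribute. The sysfs path is an assumption; adjust to the actual device.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *attr = "/sys/class/scsi_disk/0:0:0:0/max_write_same_blocks";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* "0" sets sdev->no_write_same, per sd_store_write_same_blocks() */
	if (write(fd, "0\n", 2) != 2)
		perror("write");
	close(fd);
	return 0;
}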
@@ -561,19 +603,23 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) | |||
561 | return; | 603 | return; |
562 | 604 | ||
563 | case SD_LBP_UNMAP: | 605 | case SD_LBP_UNMAP: |
564 | max_blocks = min_not_zero(sdkp->max_unmap_blocks, 0xffffffff); | 606 | max_blocks = min_not_zero(sdkp->max_unmap_blocks, |
607 | (u32)SD_MAX_WS16_BLOCKS); | ||
565 | break; | 608 | break; |
566 | 609 | ||
567 | case SD_LBP_WS16: | 610 | case SD_LBP_WS16: |
568 | max_blocks = min_not_zero(sdkp->max_ws_blocks, 0xffffffff); | 611 | max_blocks = min_not_zero(sdkp->max_ws_blocks, |
612 | (u32)SD_MAX_WS16_BLOCKS); | ||
569 | break; | 613 | break; |
570 | 614 | ||
571 | case SD_LBP_WS10: | 615 | case SD_LBP_WS10: |
572 | max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff); | 616 | max_blocks = min_not_zero(sdkp->max_ws_blocks, |
617 | (u32)SD_MAX_WS10_BLOCKS); | ||
573 | break; | 618 | break; |
574 | 619 | ||
575 | case SD_LBP_ZERO: | 620 | case SD_LBP_ZERO: |
576 | max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff); | 621 | max_blocks = min_not_zero(sdkp->max_ws_blocks, |
622 | (u32)SD_MAX_WS10_BLOCKS); | ||
577 | q->limits.discard_zeroes_data = 1; | 623 | q->limits.discard_zeroes_data = 1; |
578 | break; | 624 | break; |
579 | } | 625 | } |
@@ -583,29 +629,26 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) | |||
583 | } | 629 | } |
584 | 630 | ||
585 | /** | 631 | /** |
586 | * scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device | 632 | * sd_setup_discard_cmnd - unmap blocks on thinly provisioned device |
587 | * @sdp: scsi device to operate on | 633 | * @sdp: scsi device to operate on |
588 | * @rq: Request to prepare | 634 | * @rq: Request to prepare |
589 | * | 635 | * |
590 | * Will issue either UNMAP or WRITE SAME(16) depending on preference | 636 | * Will issue either UNMAP or WRITE SAME(16) depending on preference |
591 | * indicated by target device. | 637 | * indicated by target device. |
592 | **/ | 638 | **/ |
593 | static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) | 639 | static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) |
594 | { | 640 | { |
595 | struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); | 641 | struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); |
596 | struct bio *bio = rq->bio; | 642 | sector_t sector = blk_rq_pos(rq); |
597 | sector_t sector = bio->bi_sector; | 643 | unsigned int nr_sectors = blk_rq_sectors(rq); |
598 | unsigned int nr_sectors = bio_sectors(bio); | 644 | unsigned int nr_bytes = blk_rq_bytes(rq); |
599 | unsigned int len; | 645 | unsigned int len; |
600 | int ret; | 646 | int ret; |
601 | char *buf; | 647 | char *buf; |
602 | struct page *page; | 648 | struct page *page; |
603 | 649 | ||
604 | if (sdkp->device->sector_size == 4096) { | 650 | sector >>= ilog2(sdp->sector_size) - 9; |
605 | sector >>= 3; | 651 | nr_sectors >>= ilog2(sdp->sector_size) - 9; |
606 | nr_sectors >>= 3; | ||
607 | } | ||
608 | |||
609 | rq->timeout = SD_TIMEOUT; | 652 | rq->timeout = SD_TIMEOUT; |
610 | 653 | ||
611 | memset(rq->cmd, 0, rq->cmd_len); | 654 | memset(rq->cmd, 0, rq->cmd_len); |
@@ -660,6 +703,7 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) | |||
660 | blk_add_request_payload(rq, page, len); | 703 | blk_add_request_payload(rq, page, len); |
661 | ret = scsi_setup_blk_pc_cmnd(sdp, rq); | 704 | ret = scsi_setup_blk_pc_cmnd(sdp, rq); |
662 | rq->buffer = page_address(page); | 705 | rq->buffer = page_address(page); |
706 | rq->__data_len = nr_bytes; | ||
663 | 707 | ||
664 | out: | 708 | out: |
665 | if (ret != BLKPREP_OK) { | 709 | if (ret != BLKPREP_OK) { |
@@ -669,6 +713,83 @@ out: | |||
669 | return ret; | 713 | return ret; |
670 | } | 714 | } |
671 | 715 | ||
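A discard submitted from userspace reaches sd_setup_discard_cmnd() as a REQ_DISCARD request. The sketch below triggers one with the BLKDISCARD ioctl; the device node and byte range are placeholders, and the call irreversibly discards data.

/*
 * Sketch only: issue a discard with BLKDISCARD; the block layer turns it
 * into a REQ_DISCARD request that sd_setup_discard_cmnd() translates to
 * UNMAP or WRITE SAME. Device node and range are placeholders.
 */
#include <fcntl.h>
#include <linux/fs.h>		/* BLKDISCARD */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	uint64_t range[2] = { 0, 1024 * 1024 };	/* offset, length in bytes */
	int fd = open("/dev/sdX", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKDISCARD, &range))
		perror("BLKDISCARD");
	close(fd);
	return 0;
}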
716 | static void sd_config_write_same(struct scsi_disk *sdkp) | ||
717 | { | ||
718 | struct request_queue *q = sdkp->disk->queue; | ||
719 | unsigned int logical_block_size = sdkp->device->sector_size; | ||
720 | unsigned int blocks = 0; | ||
721 | |||
722 | if (sdkp->device->no_write_same) { | ||
723 | sdkp->max_ws_blocks = 0; | ||
724 | goto out; | ||
725 | } | ||
726 | |||
727 | /* Some devices cannot handle block counts above 0xffff despite | ||
728 | * supporting WRITE SAME(16). Consequently we default to 64k | ||
729 | * blocks per I/O unless the device explicitly advertises a | ||
730 | * bigger limit. | ||
731 | */ | ||
732 | if (sdkp->max_ws_blocks == 0) | ||
733 | sdkp->max_ws_blocks = SD_MAX_WS10_BLOCKS; | ||
734 | |||
735 | if (sdkp->ws16 || sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS) | ||
736 | blocks = min_not_zero(sdkp->max_ws_blocks, | ||
737 | (u32)SD_MAX_WS16_BLOCKS); | ||
738 | else | ||
739 | blocks = min_not_zero(sdkp->max_ws_blocks, | ||
740 | (u32)SD_MAX_WS10_BLOCKS); | ||
741 | |||
742 | out: | ||
743 | blk_queue_max_write_same_sectors(q, blocks * (logical_block_size >> 9)); | ||
744 | } | ||
745 | |||
746 | /** | ||
747 | * sd_setup_write_same_cmnd - write the same data to multiple blocks | ||
748 | * @sdp: scsi device to operate on | ||
749 | * @rq: Request to prepare | ||
750 | * | ||
751 | * Will issue either WRITE SAME(10) or WRITE SAME(16) depending on | ||
752 | * preference indicated by target device. | ||
753 | **/ | ||
754 | static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq) | ||
755 | { | ||
756 | struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); | ||
757 | struct bio *bio = rq->bio; | ||
758 | sector_t sector = blk_rq_pos(rq); | ||
759 | unsigned int nr_sectors = blk_rq_sectors(rq); | ||
760 | unsigned int nr_bytes = blk_rq_bytes(rq); | ||
761 | int ret; | ||
762 | |||
763 | if (sdkp->device->no_write_same) | ||
764 | return BLKPREP_KILL; | ||
765 | |||
766 | BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size); | ||
767 | |||
768 | sector >>= ilog2(sdp->sector_size) - 9; | ||
769 | nr_sectors >>= ilog2(sdp->sector_size) - 9; | ||
770 | |||
771 | rq->__data_len = sdp->sector_size; | ||
772 | rq->timeout = SD_WRITE_SAME_TIMEOUT; | ||
773 | memset(rq->cmd, 0, rq->cmd_len); | ||
774 | |||
775 | if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) { | ||
776 | rq->cmd_len = 16; | ||
777 | rq->cmd[0] = WRITE_SAME_16; | ||
778 | put_unaligned_be64(sector, &rq->cmd[2]); | ||
779 | put_unaligned_be32(nr_sectors, &rq->cmd[10]); | ||
780 | } else { | ||
781 | rq->cmd_len = 10; | ||
782 | rq->cmd[0] = WRITE_SAME; | ||
783 | put_unaligned_be32(sector, &rq->cmd[2]); | ||
784 | put_unaligned_be16(nr_sectors, &rq->cmd[7]); | ||
785 | } | ||
786 | |||
787 | ret = scsi_setup_blk_pc_cmnd(sdp, rq); | ||
788 | rq->__data_len = nr_bytes; | ||
789 | |||
790 | return ret; | ||
791 | } | ||
792 | |||
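For in-kernel callers, the block layer is expected to provide blkdev_issue_write_same() in the same series; requests built that way reach sd_setup_write_same_cmnd() above as REQ_WRITE_SAME. The sketch below is an assumption-laden illustration of such a caller, not code from the patch; the bdev, range and payload page are illustrative only.

/*
 * Sketch only: replicate one zeroed block across a range with WRITE SAME,
 * assuming the block-layer helper blkdev_issue_write_same() from the same
 * patch series.
 */
#include <linux/blkdev.h>
#include <linux/gfp.h>

static int example_write_same_zero(struct block_device *bdev,
				   sector_t start, sector_t nr_sects)
{
	struct page *page;
	int ret;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);	/* one block of zeroes */
	if (!page)
		return -ENOMEM;

	/* The resulting request carries REQ_WRITE_SAME down to sd. */
	ret = blkdev_issue_write_same(bdev, start, nr_sects, GFP_KERNEL, page);

	__free_page(page);
	return ret;
}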
672 | static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) | 793 | static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) |
673 | { | 794 | { |
674 | rq->timeout = SD_FLUSH_TIMEOUT; | 795 | rq->timeout = SD_FLUSH_TIMEOUT; |
@@ -712,7 +833,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) | |||
712 | * block PC requests to make life easier. | 833 | * block PC requests to make life easier. |
713 | */ | 834 | */ |
714 | if (rq->cmd_flags & REQ_DISCARD) { | 835 | if (rq->cmd_flags & REQ_DISCARD) { |
715 | ret = scsi_setup_discard_cmnd(sdp, rq); | 836 | ret = sd_setup_discard_cmnd(sdp, rq); |
837 | goto out; | ||
838 | } else if (rq->cmd_flags & REQ_WRITE_SAME) { | ||
839 | ret = sd_setup_write_same_cmnd(sdp, rq); | ||
716 | goto out; | 840 | goto out; |
717 | } else if (rq->cmd_flags & REQ_FLUSH) { | 841 | } else if (rq->cmd_flags & REQ_FLUSH) { |
718 | ret = scsi_setup_flush_cmnd(sdp, rq); | 842 | ret = scsi_setup_flush_cmnd(sdp, rq); |
@@ -1482,12 +1606,21 @@ static int sd_done(struct scsi_cmnd *SCpnt) | |||
1482 | unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt); | 1606 | unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt); |
1483 | struct scsi_sense_hdr sshdr; | 1607 | struct scsi_sense_hdr sshdr; |
1484 | struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk); | 1608 | struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk); |
1609 | struct request *req = SCpnt->request; | ||
1485 | int sense_valid = 0; | 1610 | int sense_valid = 0; |
1486 | int sense_deferred = 0; | 1611 | int sense_deferred = 0; |
1487 | unsigned char op = SCpnt->cmnd[0]; | 1612 | unsigned char op = SCpnt->cmnd[0]; |
1613 | unsigned char unmap = SCpnt->cmnd[1] & 8; | ||
1488 | 1614 | ||
1489 | if ((SCpnt->request->cmd_flags & REQ_DISCARD) && !result) | 1615 | if (req->cmd_flags & REQ_DISCARD || req->cmd_flags & REQ_WRITE_SAME) { |
1490 | scsi_set_resid(SCpnt, 0); | 1616 | if (!result) { |
1617 | good_bytes = blk_rq_bytes(req); | ||
1618 | scsi_set_resid(SCpnt, 0); | ||
1619 | } else { | ||
1620 | good_bytes = 0; | ||
1621 | scsi_set_resid(SCpnt, blk_rq_bytes(req)); | ||
1622 | } | ||
1623 | } | ||
1491 | 1624 | ||
1492 | if (result) { | 1625 | if (result) { |
1493 | sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); | 1626 | sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); |
@@ -1536,9 +1669,25 @@ static int sd_done(struct scsi_cmnd *SCpnt) | |||
1536 | if (sshdr.asc == 0x10) /* DIX: Host detected corruption */ | 1669 | if (sshdr.asc == 0x10) /* DIX: Host detected corruption */ |
1537 | good_bytes = sd_completed_bytes(SCpnt); | 1670 | good_bytes = sd_completed_bytes(SCpnt); |
1538 | /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ | 1671 | /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ |
1539 | if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) && | 1672 | if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { |
1540 | (op == UNMAP || op == WRITE_SAME_16 || op == WRITE_SAME)) | 1673 | switch (op) { |
1541 | sd_config_discard(sdkp, SD_LBP_DISABLE); | 1674 | case UNMAP: |
1675 | sd_config_discard(sdkp, SD_LBP_DISABLE); | ||
1676 | break; | ||
1677 | case WRITE_SAME_16: | ||
1678 | case WRITE_SAME: | ||
1679 | if (unmap) | ||
1680 | sd_config_discard(sdkp, SD_LBP_DISABLE); | ||
1681 | else { | ||
1682 | sdkp->device->no_write_same = 1; | ||
1683 | sd_config_write_same(sdkp); | ||
1684 | |||
1685 | good_bytes = 0; | ||
1686 | req->__data_len = blk_rq_bytes(req); | ||
1687 | req->cmd_flags |= REQ_QUIET; | ||
1688 | } | ||
1689 | } | ||
1690 | } | ||
1542 | break; | 1691 | break; |
1543 | default: | 1692 | default: |
1544 | break; | 1693 | break; |
@@ -2374,9 +2523,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) | |||
2374 | if (buffer[3] == 0x3c) { | 2523 | if (buffer[3] == 0x3c) { |
2375 | unsigned int lba_count, desc_count; | 2524 | unsigned int lba_count, desc_count; |
2376 | 2525 | ||
2377 | sdkp->max_ws_blocks = | 2526 | sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]); |
2378 | (u32) min_not_zero(get_unaligned_be64(&buffer[36]), | ||
2379 | (u64)0xffffffff); | ||
2380 | 2527 | ||
2381 | if (!sdkp->lbpme) | 2528 | if (!sdkp->lbpme) |
2382 | goto out; | 2529 | goto out; |
@@ -2469,6 +2616,13 @@ static void sd_read_block_provisioning(struct scsi_disk *sdkp) | |||
2469 | kfree(buffer); | 2616 | kfree(buffer); |
2470 | } | 2617 | } |
2471 | 2618 | ||
2619 | static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer) | ||
2620 | { | ||
2621 | if (scsi_report_opcode(sdkp->device, buffer, SD_BUF_SIZE, | ||
2622 | WRITE_SAME_16)) | ||
2623 | sdkp->ws16 = 1; | ||
2624 | } | ||
2625 | |||
2472 | static int sd_try_extended_inquiry(struct scsi_device *sdp) | 2626 | static int sd_try_extended_inquiry(struct scsi_device *sdp) |
2473 | { | 2627 | { |
2474 | /* | 2628 | /* |
@@ -2528,6 +2682,7 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
2528 | sd_read_write_protect_flag(sdkp, buffer); | 2682 | sd_read_write_protect_flag(sdkp, buffer); |
2529 | sd_read_cache_type(sdkp, buffer); | 2683 | sd_read_cache_type(sdkp, buffer); |
2530 | sd_read_app_tag_own(sdkp, buffer); | 2684 | sd_read_app_tag_own(sdkp, buffer); |
2685 | sd_read_write_same(sdkp, buffer); | ||
2531 | } | 2686 | } |
2532 | 2687 | ||
2533 | sdkp->first_scan = 0; | 2688 | sdkp->first_scan = 0; |
@@ -2545,6 +2700,7 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
2545 | blk_queue_flush(sdkp->disk->queue, flush); | 2700 | blk_queue_flush(sdkp->disk->queue, flush); |
2546 | 2701 | ||
2547 | set_capacity(disk, sdkp->capacity); | 2702 | set_capacity(disk, sdkp->capacity); |
2703 | sd_config_write_same(sdkp); | ||
2548 | kfree(buffer); | 2704 | kfree(buffer); |
2549 | 2705 | ||
2550 | out: | 2706 | out: |
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 47c52a6d733c..74a1e4ca5401 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h | |||
@@ -14,6 +14,7 @@ | |||
14 | #define SD_TIMEOUT (30 * HZ) | 14 | #define SD_TIMEOUT (30 * HZ) |
15 | #define SD_MOD_TIMEOUT (75 * HZ) | 15 | #define SD_MOD_TIMEOUT (75 * HZ) |
16 | #define SD_FLUSH_TIMEOUT (60 * HZ) | 16 | #define SD_FLUSH_TIMEOUT (60 * HZ) |
17 | #define SD_WRITE_SAME_TIMEOUT (120 * HZ) | ||
17 | 18 | ||
18 | /* | 19 | /* |
19 | * Number of allowed retries | 20 | * Number of allowed retries |
@@ -39,6 +40,11 @@ enum { | |||
39 | }; | 40 | }; |
40 | 41 | ||
41 | enum { | 42 | enum { |
43 | SD_MAX_WS10_BLOCKS = 0xffff, | ||
44 | SD_MAX_WS16_BLOCKS = 0x7fffff, | ||
45 | }; | ||
46 | |||
47 | enum { | ||
42 | SD_LBP_FULL = 0, /* Full logical block provisioning */ | 48 | SD_LBP_FULL = 0, /* Full logical block provisioning */ |
43 | SD_LBP_UNMAP, /* Use UNMAP command */ | 49 | SD_LBP_UNMAP, /* Use UNMAP command */ |
44 | SD_LBP_WS16, /* Use WRITE SAME(16) with UNMAP bit */ | 50 | SD_LBP_WS16, /* Use WRITE SAME(16) with UNMAP bit */ |
@@ -77,6 +83,7 @@ struct scsi_disk { | |||
77 | unsigned lbpws : 1; | 83 | unsigned lbpws : 1; |
78 | unsigned lbpws10 : 1; | 84 | unsigned lbpws10 : 1; |
79 | unsigned lbpvpd : 1; | 85 | unsigned lbpvpd : 1; |
86 | unsigned ws16 : 1; | ||
80 | }; | 87 | }; |
81 | #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) | 88 | #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) |
82 | 89 | ||
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 84c2861d6f4d..1ab05234729f 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -35,6 +35,8 @@ | |||
35 | #include <linux/sched.h> | 35 | #include <linux/sched.h> |
36 | #include <linux/delay.h> | 36 | #include <linux/delay.h> |
37 | #include <linux/kthread.h> | 37 | #include <linux/kthread.h> |
38 | #include <linux/ioport.h> | ||
39 | #include <linux/acpi.h> | ||
38 | 40 | ||
39 | static void spidev_release(struct device *dev) | 41 | static void spidev_release(struct device *dev) |
40 | { | 42 | { |
@@ -93,6 +95,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv) | |||
93 | if (of_driver_match_device(dev, drv)) | 95 | if (of_driver_match_device(dev, drv)) |
94 | return 1; | 96 | return 1; |
95 | 97 | ||
98 | /* Then try ACPI */ | ||
99 | if (acpi_driver_match_device(dev, drv)) | ||
100 | return 1; | ||
101 | |||
96 | if (sdrv->id_table) | 102 | if (sdrv->id_table) |
97 | return !!spi_match_id(sdrv->id_table, spi); | 103 | return !!spi_match_id(sdrv->id_table, spi); |
98 | 104 | ||
@@ -888,6 +894,100 @@ static void of_register_spi_devices(struct spi_master *master) | |||
888 | static void of_register_spi_devices(struct spi_master *master) { } | 894 | static void of_register_spi_devices(struct spi_master *master) { } |
889 | #endif | 895 | #endif |
890 | 896 | ||
897 | #ifdef CONFIG_ACPI | ||
898 | static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) | ||
899 | { | ||
900 | struct spi_device *spi = data; | ||
901 | |||
902 | if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { | ||
903 | struct acpi_resource_spi_serialbus *sb; | ||
904 | |||
905 | sb = &ares->data.spi_serial_bus; | ||
906 | if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { | ||
907 | spi->chip_select = sb->device_selection; | ||
908 | spi->max_speed_hz = sb->connection_speed; | ||
909 | |||
910 | if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) | ||
911 | spi->mode |= SPI_CPHA; | ||
912 | if (sb->clock_polarity == ACPI_SPI_START_HIGH) | ||
913 | spi->mode |= SPI_CPOL; | ||
914 | if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) | ||
915 | spi->mode |= SPI_CS_HIGH; | ||
916 | } | ||
917 | } else if (spi->irq < 0) { | ||
918 | struct resource r; | ||
919 | |||
920 | if (acpi_dev_resource_interrupt(ares, 0, &r)) | ||
921 | spi->irq = r.start; | ||
922 | } | ||
923 | |||
924 | /* Always tell the ACPI core to skip this resource */ | ||
925 | return 1; | ||
926 | } | ||
927 | |||
928 | static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, | ||
929 | void *data, void **return_value) | ||
930 | { | ||
931 | struct spi_master *master = data; | ||
932 | struct list_head resource_list; | ||
933 | struct acpi_device *adev; | ||
934 | struct spi_device *spi; | ||
935 | int ret; | ||
936 | |||
937 | if (acpi_bus_get_device(handle, &adev)) | ||
938 | return AE_OK; | ||
939 | if (acpi_bus_get_status(adev) || !adev->status.present) | ||
940 | return AE_OK; | ||
941 | |||
942 | spi = spi_alloc_device(master); | ||
943 | if (!spi) { | ||
944 | dev_err(&master->dev, "failed to allocate SPI device for %s\n", | ||
945 | dev_name(&adev->dev)); | ||
946 | return AE_NO_MEMORY; | ||
947 | } | ||
948 | |||
949 | ACPI_HANDLE_SET(&spi->dev, handle); | ||
950 | spi->irq = -1; | ||
951 | |||
952 | INIT_LIST_HEAD(&resource_list); | ||
953 | ret = acpi_dev_get_resources(adev, &resource_list, | ||
954 | acpi_spi_add_resource, spi); | ||
955 | acpi_dev_free_resource_list(&resource_list); | ||
956 | |||
957 | if (ret < 0 || !spi->max_speed_hz) { | ||
958 | spi_dev_put(spi); | ||
959 | return AE_OK; | ||
960 | } | ||
961 | |||
962 | strlcpy(spi->modalias, dev_name(&adev->dev), sizeof(spi->modalias)); | ||
963 | if (spi_add_device(spi)) { | ||
964 | dev_err(&master->dev, "failed to add SPI device %s from ACPI\n", | ||
965 | dev_name(&adev->dev)); | ||
966 | spi_dev_put(spi); | ||
967 | } | ||
968 | |||
969 | return AE_OK; | ||
970 | } | ||
971 | |||
972 | static void acpi_register_spi_devices(struct spi_master *master) | ||
973 | { | ||
974 | acpi_status status; | ||
975 | acpi_handle handle; | ||
976 | |||
977 | handle = ACPI_HANDLE(&master->dev); | ||
978 | if (!handle) | ||
979 | return; | ||
980 | |||
981 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, | ||
982 | acpi_spi_add_device, NULL, | ||
983 | master, NULL); | ||
984 | if (ACPI_FAILURE(status)) | ||
985 | dev_warn(&master->dev, "failed to enumerate SPI slaves\n"); | ||
986 | } | ||
987 | #else | ||
988 | static inline void acpi_register_spi_devices(struct spi_master *master) {} | ||
989 | #endif /* CONFIG_ACPI */ | ||
990 | |||
891 | static void spi_master_release(struct device *dev) | 991 | static void spi_master_release(struct device *dev) |
892 | { | 992 | { |
893 | struct spi_master *master; | 993 | struct spi_master *master; |
@@ -1023,8 +1123,9 @@ int spi_register_master(struct spi_master *master) | |||
1023 | spi_match_master_to_boardinfo(master, &bi->board_info); | 1123 | spi_match_master_to_boardinfo(master, &bi->board_info); |
1024 | mutex_unlock(&board_lock); | 1124 | mutex_unlock(&board_lock); |
1025 | 1125 | ||
1026 | /* Register devices from the device tree */ | 1126 | /* Register devices from the device tree and ACPI */ |
1027 | of_register_spi_devices(master); | 1127 | of_register_spi_devices(master); |
1128 | acpi_register_spi_devices(master); | ||
1028 | done: | 1129 | done: |
1029 | return status; | 1130 | return status; |
1030 | } | 1131 | } |
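On the consumer side, a slave created by acpi_register_spi_devices() is matched through the driver's acpi_match_table, which the new acpi_driver_match_device() call in spi_match_device() consults. The sketch below shows a minimal spi_driver for such a device; the _HID "ABCD0001" and all names are made-up placeholders.

/*
 * Sketch only: minimal spi_driver bound to an ACPI-enumerated slave.
 * The _HID and driver name are illustrative placeholders.
 */
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/spi/spi.h>

static int example_spi_probe(struct spi_device *spi)
{
	/* chip_select, max_speed_hz and mode were filled from the ACPI
	 * SpiSerialBus resource by acpi_spi_add_resource(). */
	dev_info(&spi->dev, "probed at %u Hz, cs %u\n",
		 spi->max_speed_hz, spi->chip_select);
	return 0;
}

static int example_spi_remove(struct spi_device *spi)
{
	return 0;
}

static const struct acpi_device_id example_acpi_ids[] = {
	{ "ABCD0001", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, example_acpi_ids);

static struct spi_driver example_spi_driver = {
	.driver = {
		.name = "example-spi",
		.owner = THIS_MODULE,
		.acpi_match_table = ACPI_PTR(example_acpi_ids),
	},
	.probe = example_spi_probe,
	.remove = example_spi_remove,
};
module_spi_driver(example_spi_driver);

MODULE_LICENSE("GPL");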
diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h index f2ffd963f1c3..d0cafd637199 100644 --- a/drivers/staging/android/android_alarm.h +++ b/drivers/staging/android/android_alarm.h | |||
@@ -51,12 +51,10 @@ enum android_alarm_return_flags { | |||
51 | #define ANDROID_ALARM_WAIT _IO('a', 1) | 51 | #define ANDROID_ALARM_WAIT _IO('a', 1) |
52 | 52 | ||
53 | #define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size) | 53 | #define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size) |
54 | #define ALARM_IOR(c, type, size) _IOR('a', (c) | ((type) << 4), size) | ||
55 | |||
56 | /* Set alarm */ | 54 | /* Set alarm */ |
57 | #define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec) | 55 | #define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec) |
58 | #define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec) | 56 | #define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec) |
59 | #define ANDROID_ALARM_GET_TIME(type) ALARM_IOR(4, type, struct timespec) | 57 | #define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec) |
60 | #define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec) | 58 | #define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec) |
61 | #define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0))) | 59 | #define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0))) |
62 | #define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4) | 60 | #define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4) |
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c index a5dec1ca1b82..13ee53bd0bf6 100644 --- a/drivers/tty/hvc/hvc_console.c +++ b/drivers/tty/hvc/hvc_console.c | |||
@@ -424,7 +424,6 @@ static void hvc_hangup(struct tty_struct *tty) | |||
424 | { | 424 | { |
425 | struct hvc_struct *hp = tty->driver_data; | 425 | struct hvc_struct *hp = tty->driver_data; |
426 | unsigned long flags; | 426 | unsigned long flags; |
427 | int temp_open_count; | ||
428 | 427 | ||
429 | if (!hp) | 428 | if (!hp) |
430 | return; | 429 | return; |
@@ -444,7 +443,6 @@ static void hvc_hangup(struct tty_struct *tty) | |||
444 | return; | 443 | return; |
445 | } | 444 | } |
446 | 445 | ||
447 | temp_open_count = hp->port.count; | ||
448 | hp->port.count = 0; | 446 | hp->port.count = 0; |
449 | spin_unlock_irqrestore(&hp->port.lock, flags); | 447 | spin_unlock_irqrestore(&hp->port.lock, flags); |
450 | tty_port_tty_set(&hp->port, NULL); | 448 | tty_port_tty_set(&hp->port, NULL); |
@@ -453,11 +451,6 @@ static void hvc_hangup(struct tty_struct *tty) | |||
453 | 451 | ||
454 | if (hp->ops->notifier_hangup) | 452 | if (hp->ops->notifier_hangup) |
455 | hp->ops->notifier_hangup(hp, hp->data); | 453 | hp->ops->notifier_hangup(hp, hp->data); |
456 | |||
457 | while(temp_open_count) { | ||
458 | --temp_open_count; | ||
459 | tty_port_put(&hp->port); | ||
460 | } | ||
461 | } | 454 | } |
462 | 455 | ||
463 | /* | 456 | /* |
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c index 2bc28a59d385..1ab1d2c66de4 100644 --- a/drivers/tty/serial/max310x.c +++ b/drivers/tty/serial/max310x.c | |||
@@ -1239,6 +1239,7 @@ static int __devexit max310x_remove(struct spi_device *spi) | |||
1239 | static const struct spi_device_id max310x_id_table[] = { | 1239 | static const struct spi_device_id max310x_id_table[] = { |
1240 | { "max3107", MAX310X_TYPE_MAX3107 }, | 1240 | { "max3107", MAX310X_TYPE_MAX3107 }, |
1241 | { "max3108", MAX310X_TYPE_MAX3108 }, | 1241 | { "max3108", MAX310X_TYPE_MAX3108 }, |
1242 | { } | ||
1242 | }; | 1243 | }; |
1243 | MODULE_DEVICE_TABLE(spi, max310x_id_table); | 1244 | MODULE_DEVICE_TABLE(spi, max310x_id_table); |
1244 | 1245 | ||
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 1e741bca0265..f034716190ff 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
@@ -2151,8 +2151,15 @@ EXPORT_SYMBOL_GPL(usb_bus_start_enum); | |||
2151 | irqreturn_t usb_hcd_irq (int irq, void *__hcd) | 2151 | irqreturn_t usb_hcd_irq (int irq, void *__hcd) |
2152 | { | 2152 | { |
2153 | struct usb_hcd *hcd = __hcd; | 2153 | struct usb_hcd *hcd = __hcd; |
2154 | unsigned long flags; | ||
2154 | irqreturn_t rc; | 2155 | irqreturn_t rc; |
2155 | 2156 | ||
2157 | /* IRQF_DISABLED doesn't work correctly with shared IRQs | ||
2158 | * when the first handler doesn't use it. So let's just | ||
2159 | * assume it's never used. | ||
2160 | */ | ||
2161 | local_irq_save(flags); | ||
2162 | |||
2156 | if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) | 2163 | if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) |
2157 | rc = IRQ_NONE; | 2164 | rc = IRQ_NONE; |
2158 | else if (hcd->driver->irq(hcd) == IRQ_NONE) | 2165 | else if (hcd->driver->irq(hcd) == IRQ_NONE) |
@@ -2160,6 +2167,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd) | |||
2160 | else | 2167 | else |
2161 | rc = IRQ_HANDLED; | 2168 | rc = IRQ_HANDLED; |
2162 | 2169 | ||
2170 | local_irq_restore(flags); | ||
2163 | return rc; | 2171 | return rc; |
2164 | } | 2172 | } |
2165 | EXPORT_SYMBOL_GPL(usb_hcd_irq); | 2173 | EXPORT_SYMBOL_GPL(usb_hcd_irq); |
@@ -2347,6 +2355,14 @@ static int usb_hcd_request_irqs(struct usb_hcd *hcd, | |||
2347 | int retval; | 2355 | int retval; |
2348 | 2356 | ||
2349 | if (hcd->driver->irq) { | 2357 | if (hcd->driver->irq) { |
2358 | |||
2359 | /* IRQF_DISABLED doesn't work as advertised when used together | ||
2360 | * with IRQF_SHARED. As usb_hcd_irq() will always disable | ||
2361 | * interrupts we can remove it here. | ||
2362 | */ | ||
2363 | if (irqflags & IRQF_SHARED) | ||
2364 | irqflags &= ~IRQF_DISABLED; | ||
2365 | |||
2350 | snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d", | 2366 | snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d", |
2351 | hcd->driver->description, hcd->self.busnum); | 2367 | hcd->driver->description, hcd->self.busnum); |
2352 | retval = request_irq(irqnum, &usb_hcd_irq, irqflags, | 2368 | retval = request_irq(irqnum, &usb_hcd_irq, irqflags, |
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c index e426ad626d74..4bfa78af379c 100644 --- a/drivers/usb/early/ehci-dbgp.c +++ b/drivers/usb/early/ehci-dbgp.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/usb/ehci_def.h> | 20 | #include <linux/usb/ehci_def.h> |
21 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
22 | #include <linux/serial_core.h> | 22 | #include <linux/serial_core.h> |
23 | #include <linux/kconfig.h> | ||
23 | #include <linux/kgdb.h> | 24 | #include <linux/kgdb.h> |
24 | #include <linux/kthread.h> | 25 | #include <linux/kthread.h> |
25 | #include <asm/io.h> | 26 | #include <asm/io.h> |
@@ -614,12 +615,6 @@ err: | |||
614 | return -ENODEV; | 615 | return -ENODEV; |
615 | } | 616 | } |
616 | 617 | ||
617 | int dbgp_external_startup(struct usb_hcd *hcd) | ||
618 | { | ||
619 | return xen_dbgp_external_startup(hcd) ?: _dbgp_external_startup(); | ||
620 | } | ||
621 | EXPORT_SYMBOL_GPL(dbgp_external_startup); | ||
622 | |||
623 | static int ehci_reset_port(int port) | 618 | static int ehci_reset_port(int port) |
624 | { | 619 | { |
625 | u32 portsc; | 620 | u32 portsc; |
@@ -979,6 +974,7 @@ struct console early_dbgp_console = { | |||
979 | .index = -1, | 974 | .index = -1, |
980 | }; | 975 | }; |
981 | 976 | ||
977 | #if IS_ENABLED(CONFIG_USB_EHCI_HCD) | ||
982 | int dbgp_reset_prep(struct usb_hcd *hcd) | 978 | int dbgp_reset_prep(struct usb_hcd *hcd) |
983 | { | 979 | { |
984 | int ret = xen_dbgp_reset_prep(hcd); | 980 | int ret = xen_dbgp_reset_prep(hcd); |
@@ -1007,6 +1003,13 @@ int dbgp_reset_prep(struct usb_hcd *hcd) | |||
1007 | } | 1003 | } |
1008 | EXPORT_SYMBOL_GPL(dbgp_reset_prep); | 1004 | EXPORT_SYMBOL_GPL(dbgp_reset_prep); |
1009 | 1005 | ||
1006 | int dbgp_external_startup(struct usb_hcd *hcd) | ||
1007 | { | ||
1008 | return xen_dbgp_external_startup(hcd) ?: _dbgp_external_startup(); | ||
1009 | } | ||
1010 | EXPORT_SYMBOL_GPL(dbgp_external_startup); | ||
1011 | #endif /* USB_EHCI_HCD */ | ||
1012 | |||
1010 | #ifdef CONFIG_KGDB | 1013 | #ifdef CONFIG_KGDB |
1011 | 1014 | ||
1012 | static char kgdbdbgp_buf[DBGP_MAX_PACKET]; | 1015 | static char kgdbdbgp_buf[DBGP_MAX_PACKET]; |
diff --git a/drivers/usb/host/ehci-ls1x.c b/drivers/usb/host/ehci-ls1x.c index ca759652626b..aa0f328922df 100644 --- a/drivers/usb/host/ehci-ls1x.c +++ b/drivers/usb/host/ehci-ls1x.c | |||
@@ -113,7 +113,7 @@ static int ehci_hcd_ls1x_probe(struct platform_device *pdev) | |||
113 | goto err_put_hcd; | 113 | goto err_put_hcd; |
114 | } | 114 | } |
115 | 115 | ||
116 | ret = usb_add_hcd(hcd, irq, IRQF_SHARED); | 116 | ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); |
117 | if (ret) | 117 | if (ret) |
118 | goto err_put_hcd; | 118 | goto err_put_hcd; |
119 | 119 | ||
diff --git a/drivers/usb/host/ohci-xls.c b/drivers/usb/host/ohci-xls.c index 84201cd1a472..41e378f17c66 100644 --- a/drivers/usb/host/ohci-xls.c +++ b/drivers/usb/host/ohci-xls.c | |||
@@ -56,7 +56,7 @@ static int ohci_xls_probe_internal(const struct hc_driver *driver, | |||
56 | goto err3; | 56 | goto err3; |
57 | } | 57 | } |
58 | 58 | ||
59 | retval = usb_add_hcd(hcd, irq, IRQF_SHARED); | 59 | retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); |
60 | if (retval != 0) | 60 | if (retval != 0) |
61 | goto err4; | 61 | goto err4; |
62 | return retval; | 62 | return retval; |
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index d0b87e7b4abf..b6b84dacc791 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -707,11 +707,12 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
707 | fifo_count = musb_readw(epio, MUSB_RXCOUNT); | 707 | fifo_count = musb_readw(epio, MUSB_RXCOUNT); |
708 | 708 | ||
709 | /* | 709 | /* |
710 | * use mode 1 only if we expect data of at least ep packet_sz | 710 | * Enable Mode 1 on RX transfers only when short_not_ok flag |
711 | * and have not yet received a short packet | 711 | * is set. Currently short_not_ok flag is set only from |
712 | * file_storage and f_mass_storage drivers | ||
712 | */ | 713 | */ |
713 | if ((request->length - request->actual >= musb_ep->packet_sz) && | 714 | |
714 | (fifo_count >= musb_ep->packet_sz)) | 715 | if (request->short_not_ok && fifo_count == musb_ep->packet_sz) |
715 | use_mode_1 = 1; | 716 | use_mode_1 = 1; |
716 | else | 717 | else |
717 | use_mode_1 = 0; | 718 | use_mode_1 = 0; |
@@ -727,6 +728,27 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
727 | c = musb->dma_controller; | 728 | c = musb->dma_controller; |
728 | channel = musb_ep->dma; | 729 | channel = musb_ep->dma; |
729 | 730 | ||
731 | /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in | ||
732 | * mode 0 only. So we do not get endpoint interrupts due to DMA | ||
733 | * completion. We only get interrupts from DMA controller. | ||
734 | * | ||
735 | * We could operate in DMA mode 1 if we knew the size of the transfer | ||
736 | * in advance. For mass storage class, request->length = what the host | ||
737 | * sends, so that'd work. But for pretty much everything else, | ||
738 | * request->length is routinely more than what the host sends. For | ||
739 | * most of these gadgets, the end of transfer is signified either by a | ||
740 | * short packet or by filling the last byte of the buffer. (Sending extra | ||
741 | * data in that last packet should trigger an overflow fault.) But in mode 1, | ||
742 | * we don't get DMA completion interrupt for short packets. | ||
743 | * | ||
744 | * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1), | ||
745 | * to get endpoint interrupt on every DMA req, but that didn't seem | ||
746 | * to work reliably. | ||
747 | * | ||
748 | * REVISIT an updated g_file_storage can set req->short_not_ok, which | ||
749 | * then becomes usable as a runtime "use mode 1" hint... | ||
750 | */ | ||
751 | |||
730 | /* Experimental: Mode1 works with mass storage use cases */ | 752 | /* Experimental: Mode1 works with mass storage use cases */ |
731 | if (use_mode_1) { | 753 | if (use_mode_1) { |
732 | csr |= MUSB_RXCSR_AUTOCLEAR; | 754 | csr |= MUSB_RXCSR_AUTOCLEAR; |
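With this change, RX DMA mode 1 is only considered when the gadget driver sets short_not_ok on the request. A hedged sketch of how a mass-storage-style function driver would queue such a bulk OUT request follows; the endpoint, buffer and completion handler are placeholders, and only transfers whose exact, packet-aligned length is known in advance should set the flag.

/*
 * Sketch only: mark a bulk OUT request so the rxstate() logic above may
 * pick DMA mode 1. Endpoint, buffer and completion handler are placeholders.
 */
#include <linux/gfp.h>
#include <linux/usb/gadget.h>

static int example_queue_bulk_out(struct usb_ep *ep, void *buf,
				  unsigned int len,
				  void (*done)(struct usb_ep *ep,
					       struct usb_request *req))
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->buf = buf;
	req->length = len;		/* exact amount the host will send */
	req->short_not_ok = 1;		/* a short packet is an error here */
	req->complete = done;

	return usb_ep_queue(ep, req, GFP_ATOMIC);
}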
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c index d62a91fedc22..0e62f504410e 100644 --- a/drivers/usb/musb/ux500.c +++ b/drivers/usb/musb/ux500.c | |||
@@ -65,7 +65,7 @@ static int __devinit ux500_probe(struct platform_device *pdev) | |||
65 | struct platform_device *musb; | 65 | struct platform_device *musb; |
66 | struct ux500_glue *glue; | 66 | struct ux500_glue *glue; |
67 | struct clk *clk; | 67 | struct clk *clk; |
68 | 68 | int musbid; | |
69 | int ret = -ENOMEM; | 69 | int ret = -ENOMEM; |
70 | 70 | ||
71 | glue = kzalloc(sizeof(*glue), GFP_KERNEL); | 71 | glue = kzalloc(sizeof(*glue), GFP_KERNEL); |
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig index d8c8a42bff3e..6223062d5d1b 100644 --- a/drivers/usb/otg/Kconfig +++ b/drivers/usb/otg/Kconfig | |||
@@ -58,7 +58,7 @@ config USB_ULPI_VIEWPORT | |||
58 | 58 | ||
59 | config TWL4030_USB | 59 | config TWL4030_USB |
60 | tristate "TWL4030 USB Transceiver Driver" | 60 | tristate "TWL4030 USB Transceiver Driver" |
61 | depends on TWL4030_CORE && REGULATOR_TWL4030 | 61 | depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS |
62 | select USB_OTG_UTILS | 62 | select USB_OTG_UTILS |
63 | help | 63 | help |
64 | Enable this to support the USB OTG transceiver on TWL4030 | 64 | Enable this to support the USB OTG transceiver on TWL4030 |
@@ -68,7 +68,7 @@ config TWL4030_USB | |||
68 | 68 | ||
69 | config TWL6030_USB | 69 | config TWL6030_USB |
70 | tristate "TWL6030 USB Transceiver Driver" | 70 | tristate "TWL6030 USB Transceiver Driver" |
71 | depends on TWL4030_CORE && OMAP_USB2 | 71 | depends on TWL4030_CORE && OMAP_USB2 && USB_MUSB_OMAP2PLUS |
72 | select USB_OTG_UTILS | 72 | select USB_OTG_UTILS |
73 | help | 73 | help |
74 | Enable this to support the USB OTG transceiver on TWL6030 | 74 | Enable this to support the USB OTG transceiver on TWL6030 |
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index 7179b0c5f814..cff8dd5b462d 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c | |||
@@ -2430,7 +2430,7 @@ static void keyspan_release(struct usb_serial *serial) | |||
2430 | static int keyspan_port_probe(struct usb_serial_port *port) | 2430 | static int keyspan_port_probe(struct usb_serial_port *port) |
2431 | { | 2431 | { |
2432 | struct usb_serial *serial = port->serial; | 2432 | struct usb_serial *serial = port->serial; |
2433 | struct keyspan_port_private *s_priv; | 2433 | struct keyspan_serial_private *s_priv; |
2434 | struct keyspan_port_private *p_priv; | 2434 | struct keyspan_port_private *p_priv; |
2435 | const struct keyspan_device_details *d_details; | 2435 | const struct keyspan_device_details *d_details; |
2436 | struct callbacks *cback; | 2436 | struct callbacks *cback; |
@@ -2445,7 +2445,6 @@ static int keyspan_port_probe(struct usb_serial_port *port) | |||
2445 | if (!p_priv) | 2445 | if (!p_priv) |
2446 | return -ENOMEM; | 2446 | return -ENOMEM; |
2447 | 2447 | ||
2448 | s_priv = usb_get_serial_data(port->serial); | ||
2449 | p_priv->device_details = d_details; | 2448 | p_priv->device_details = d_details; |
2450 | 2449 | ||
2451 | /* Setup values for the various callback routines */ | 2450 | /* Setup values for the various callback routines */ |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 5dee7d61241e..edc64bb6f457 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -158,6 +158,7 @@ static void option_instat_callback(struct urb *urb); | |||
158 | #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001 | 158 | #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001 |
159 | #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000 | 159 | #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000 |
160 | #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 | 160 | #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 |
161 | #define NOVATELWIRELESS_PRODUCT_E362 0x9010 | ||
161 | #define NOVATELWIRELESS_PRODUCT_G1 0xA001 | 162 | #define NOVATELWIRELESS_PRODUCT_G1 0xA001 |
162 | #define NOVATELWIRELESS_PRODUCT_G1_M 0xA002 | 163 | #define NOVATELWIRELESS_PRODUCT_G1_M 0xA002 |
163 | #define NOVATELWIRELESS_PRODUCT_G2 0xA010 | 164 | #define NOVATELWIRELESS_PRODUCT_G2 0xA010 |
@@ -193,6 +194,9 @@ static void option_instat_callback(struct urb *urb); | |||
193 | #define DELL_PRODUCT_5730_MINICARD_TELUS 0x8181 | 194 | #define DELL_PRODUCT_5730_MINICARD_TELUS 0x8181 |
194 | #define DELL_PRODUCT_5730_MINICARD_VZW 0x8182 | 195 | #define DELL_PRODUCT_5730_MINICARD_VZW 0x8182 |
195 | 196 | ||
197 | #define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */ | ||
198 | #define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */ | ||
199 | |||
196 | #define KYOCERA_VENDOR_ID 0x0c88 | 200 | #define KYOCERA_VENDOR_ID 0x0c88 |
197 | #define KYOCERA_PRODUCT_KPC650 0x17da | 201 | #define KYOCERA_PRODUCT_KPC650 0x17da |
198 | #define KYOCERA_PRODUCT_KPC680 0x180a | 202 | #define KYOCERA_PRODUCT_KPC680 0x180a |
@@ -283,6 +287,7 @@ static void option_instat_callback(struct urb *urb); | |||
283 | /* ALCATEL PRODUCTS */ | 287 | /* ALCATEL PRODUCTS */ |
284 | #define ALCATEL_VENDOR_ID 0x1bbb | 288 | #define ALCATEL_VENDOR_ID 0x1bbb |
285 | #define ALCATEL_PRODUCT_X060S_X200 0x0000 | 289 | #define ALCATEL_PRODUCT_X060S_X200 0x0000 |
290 | #define ALCATEL_PRODUCT_X220_X500D 0x0017 | ||
286 | 291 | ||
287 | #define PIRELLI_VENDOR_ID 0x1266 | 292 | #define PIRELLI_VENDOR_ID 0x1266 |
288 | #define PIRELLI_PRODUCT_C100_1 0x1002 | 293 | #define PIRELLI_PRODUCT_C100_1 0x1002 |
@@ -706,6 +711,7 @@ static const struct usb_device_id option_ids[] = { | |||
706 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) }, | 711 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) }, |
707 | /* Novatel Ovation MC551 a.k.a. Verizon USB551L */ | 712 | /* Novatel Ovation MC551 a.k.a. Verizon USB551L */ |
708 | { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) }, | 713 | { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) }, |
714 | { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) }, | ||
709 | 715 | ||
710 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, | 716 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, |
711 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, | 717 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, |
@@ -728,6 +734,8 @@ static const struct usb_device_id option_ids[] = { | |||
728 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ | 734 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ |
729 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ | 735 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ |
730 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ | 736 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ |
737 | { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) }, | ||
738 | { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) }, | ||
731 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ | 739 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ |
732 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, | 740 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, |
733 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, | 741 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, |
@@ -1157,6 +1165,7 @@ static const struct usb_device_id option_ids[] = { | |||
1157 | { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), | 1165 | { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), |
1158 | .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist | 1166 | .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist |
1159 | }, | 1167 | }, |
1168 | { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) }, | ||
1160 | { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, | 1169 | { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, |
1161 | { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, | 1170 | { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, |
1162 | { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), | 1171 | { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), |
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index 61a73ad1a187..a3e9c095f0d8 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c | |||
@@ -455,9 +455,6 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port, | |||
455 | struct usb_serial *serial = port->serial; | 455 | struct usb_serial *serial = port->serial; |
456 | struct urb *urb; | 456 | struct urb *urb; |
457 | 457 | ||
458 | if (endpoint == -1) | ||
459 | return NULL; /* endpoint not needed */ | ||
460 | |||
461 | urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */ | 458 | urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */ |
462 | if (urb == NULL) { | 459 | if (urb == NULL) { |
463 | dev_dbg(&serial->interface->dev, | 460 | dev_dbg(&serial->interface->dev, |
@@ -489,6 +486,9 @@ int usb_wwan_port_probe(struct usb_serial_port *port) | |||
489 | init_usb_anchor(&portdata->delayed); | 486 | init_usb_anchor(&portdata->delayed); |
490 | 487 | ||
491 | for (i = 0; i < N_IN_URB; i++) { | 488 | for (i = 0; i < N_IN_URB; i++) { |
489 | if (!port->bulk_in_size) | ||
490 | break; | ||
491 | |||
492 | buffer = (u8 *)__get_free_page(GFP_KERNEL); | 492 | buffer = (u8 *)__get_free_page(GFP_KERNEL); |
493 | if (!buffer) | 493 | if (!buffer) |
494 | goto bail_out_error; | 494 | goto bail_out_error; |
@@ -502,8 +502,8 @@ int usb_wwan_port_probe(struct usb_serial_port *port) | |||
502 | } | 502 | } |
503 | 503 | ||
504 | for (i = 0; i < N_OUT_URB; i++) { | 504 | for (i = 0; i < N_OUT_URB; i++) { |
505 | if (port->bulk_out_endpointAddress == -1) | 505 | if (!port->bulk_out_size) |
506 | continue; | 506 | break; |
507 | 507 | ||
508 | buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL); | 508 | buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL); |
509 | if (!buffer) | 509 | if (!buffer) |
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index a3d54366afcc..92f35abee92d 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c | |||
@@ -186,6 +186,12 @@ static int slave_configure(struct scsi_device *sdev) | |||
186 | /* Some devices don't handle VPD pages correctly */ | 186 | /* Some devices don't handle VPD pages correctly */ |
187 | sdev->skip_vpd_pages = 1; | 187 | sdev->skip_vpd_pages = 1; |
188 | 188 | ||
189 | /* Do not attempt to use REPORT SUPPORTED OPERATION CODES */ | ||
190 | sdev->no_report_opcodes = 1; | ||
191 | |||
192 | /* Do not attempt to use WRITE SAME */ | ||
193 | sdev->no_write_same = 1; | ||
194 | |||
189 | /* Some disks return the total number of blocks in response | 195 | /* Some disks return the total number of blocks in response |
190 | * to READ CAPACITY rather than the highest block number. | 196 | * to READ CAPACITY rather than the highest block number. |
191 | * If this device makes that mistake, tell the sd driver. */ | 197 | * If this device makes that mistake, tell the sd driver. */ |
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c index d64ac3842884..bee92846cfab 100644 --- a/drivers/video/omap2/dss/dsi.c +++ b/drivers/video/omap2/dss/dsi.c | |||
@@ -365,11 +365,20 @@ struct platform_device *dsi_get_dsidev_from_id(int module) | |||
365 | struct omap_dss_output *out; | 365 | struct omap_dss_output *out; |
366 | enum omap_dss_output_id id; | 366 | enum omap_dss_output_id id; |
367 | 367 | ||
368 | id = module == 0 ? OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2; | 368 | switch (module) { |
369 | case 0: | ||
370 | id = OMAP_DSS_OUTPUT_DSI1; | ||
371 | break; | ||
372 | case 1: | ||
373 | id = OMAP_DSS_OUTPUT_DSI2; | ||
374 | break; | ||
375 | default: | ||
376 | return NULL; | ||
377 | } | ||
369 | 378 | ||
370 | out = omap_dss_get_output(id); | 379 | out = omap_dss_get_output(id); |
371 | 380 | ||
372 | return out->pdev; | 381 | return out ? out->pdev : NULL; |
373 | } | 382 | } |
374 | 383 | ||
375 | static inline void dsi_write_reg(struct platform_device *dsidev, | 384 | static inline void dsi_write_reg(struct platform_device *dsidev, |
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c index 2ab1c3e96553..5f6eea801b06 100644 --- a/drivers/video/omap2/dss/dss.c +++ b/drivers/video/omap2/dss/dss.c | |||
@@ -697,11 +697,15 @@ static int dss_get_clocks(void) | |||
697 | 697 | ||
698 | dss.dss_clk = clk; | 698 | dss.dss_clk = clk; |
699 | 699 | ||
700 | clk = clk_get(NULL, dss.feat->clk_name); | 700 | if (dss.feat->clk_name) { |
701 | if (IS_ERR(clk)) { | 701 | clk = clk_get(NULL, dss.feat->clk_name); |
702 | DSSERR("Failed to get %s\n", dss.feat->clk_name); | 702 | if (IS_ERR(clk)) { |
703 | r = PTR_ERR(clk); | 703 | DSSERR("Failed to get %s\n", dss.feat->clk_name); |
704 | goto err; | 704 | r = PTR_ERR(clk); |
705 | goto err; | ||
706 | } | ||
707 | } else { | ||
708 | clk = NULL; | ||
705 | } | 709 | } |
706 | 710 | ||
707 | dss.dpll4_m4_ck = clk; | 711 | dss.dpll4_m4_ck = clk; |
@@ -805,10 +809,10 @@ static int __init dss_init_features(struct device *dev) | |||
805 | 809 | ||
806 | if (cpu_is_omap24xx()) | 810 | if (cpu_is_omap24xx()) |
807 | src = &omap24xx_dss_feats; | 811 | src = &omap24xx_dss_feats; |
808 | else if (cpu_is_omap34xx()) | ||
809 | src = &omap34xx_dss_feats; | ||
810 | else if (cpu_is_omap3630()) | 812 | else if (cpu_is_omap3630()) |
811 | src = &omap3630_dss_feats; | 813 | src = &omap3630_dss_feats; |
814 | else if (cpu_is_omap34xx()) | ||
815 | src = &omap34xx_dss_feats; | ||
812 | else if (cpu_is_omap44xx()) | 816 | else if (cpu_is_omap44xx()) |
813 | src = &omap44xx_dss_feats; | 817 | src = &omap44xx_dss_feats; |
814 | else if (soc_is_omap54xx()) | 818 | else if (soc_is_omap54xx()) |
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c index a48a7dd75b33..8c9b8b3b7f77 100644 --- a/drivers/video/omap2/dss/hdmi.c +++ b/drivers/video/omap2/dss/hdmi.c | |||
@@ -644,8 +644,10 @@ static void hdmi_dump_regs(struct seq_file *s) | |||
644 | { | 644 | { |
645 | mutex_lock(&hdmi.lock); | 645 | mutex_lock(&hdmi.lock); |
646 | 646 | ||
647 | if (hdmi_runtime_get()) | 647 | if (hdmi_runtime_get()) { |
648 | mutex_unlock(&hdmi.lock); | ||
648 | return; | 649 | return; |
650 | } | ||
649 | 651 | ||
650 | hdmi.ip_data.ops->dump_wrapper(&hdmi.ip_data, s); | 652 | hdmi.ip_data.ops->dump_wrapper(&hdmi.ip_data, s); |
651 | hdmi.ip_data.ops->dump_pll(&hdmi.ip_data, s); | 653 | hdmi.ip_data.ops->dump_pll(&hdmi.ip_data, s); |
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c index 606b89f12351..d630b26a005c 100644 --- a/drivers/video/omap2/omapfb/omapfb-ioctl.c +++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c | |||
@@ -787,7 +787,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg) | |||
787 | 787 | ||
788 | case OMAPFB_WAITFORVSYNC: | 788 | case OMAPFB_WAITFORVSYNC: |
789 | DBG("ioctl WAITFORVSYNC\n"); | 789 | DBG("ioctl WAITFORVSYNC\n"); |
790 | if (!display && !display->output && !display->output->manager) { | 790 | if (!display || !display->output || !display->output->manager) { |
791 | r = -EINVAL; | 791 | r = -EINVAL; |
792 | break; | 792 | break; |
793 | } | 793 | } |
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 8adb9cc267f9..71f5c459b088 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c | |||
@@ -361,13 +361,13 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) | |||
361 | down_write(&mm->mmap_sem); | 361 | down_write(&mm->mmap_sem); |
362 | 362 | ||
363 | vma = find_vma(mm, m.addr); | 363 | vma = find_vma(mm, m.addr); |
364 | ret = -EINVAL; | ||
365 | if (!vma || | 364 | if (!vma || |
366 | vma->vm_ops != &privcmd_vm_ops || | 365 | vma->vm_ops != &privcmd_vm_ops || |
367 | (m.addr != vma->vm_start) || | 366 | (m.addr != vma->vm_start) || |
368 | ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) || | 367 | ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) || |
369 | !privcmd_enforce_singleshot_mapping(vma)) { | 368 | !privcmd_enforce_singleshot_mapping(vma)) { |
370 | up_write(&mm->mmap_sem); | 369 | up_write(&mm->mmap_sem); |
370 | ret = -EINVAL; | ||
371 | goto out; | 371 | goto out; |
372 | } | 372 | } |
373 | 373 | ||
@@ -383,12 +383,16 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) | |||
383 | 383 | ||
384 | up_write(&mm->mmap_sem); | 384 | up_write(&mm->mmap_sem); |
385 | 385 | ||
386 | if (state.global_error && (version == 1)) { | 386 | if (version == 1) { |
387 | /* Write back errors in second pass. */ | 387 | if (state.global_error) { |
388 | state.user_mfn = (xen_pfn_t *)m.arr; | 388 | /* Write back errors in second pass. */ |
389 | state.err = err_array; | 389 | state.user_mfn = (xen_pfn_t *)m.arr; |
390 | ret = traverse_pages(m.num, sizeof(xen_pfn_t), | 390 | state.err = err_array; |
391 | &pagelist, mmap_return_errors_v1, &state); | 391 | ret = traverse_pages(m.num, sizeof(xen_pfn_t), |
392 | &pagelist, mmap_return_errors_v1, &state); | ||
393 | } else | ||
394 | ret = 0; | ||
395 | |||
392 | } else if (version == 2) { | 396 | } else if (version == 2) { |
393 | ret = __copy_to_user(m.err, err_array, m.num * sizeof(int)); | 397 | ret = __copy_to_user(m.err, err_array, m.num * sizeof(int)); |
394 | if (ret) | 398 | if (ret) |
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c index 7320a66e958f..22548f56197b 100644 --- a/fs/ext3/balloc.c +++ b/fs/ext3/balloc.c | |||
@@ -2101,8 +2101,9 @@ int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range) | |||
2101 | end = start + (range->len >> sb->s_blocksize_bits) - 1; | 2101 | end = start + (range->len >> sb->s_blocksize_bits) - 1; |
2102 | minlen = range->minlen >> sb->s_blocksize_bits; | 2102 | minlen = range->minlen >> sb->s_blocksize_bits; |
2103 | 2103 | ||
2104 | if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb)) || | 2104 | if (minlen > EXT3_BLOCKS_PER_GROUP(sb) || |
2105 | unlikely(start >= max_blks)) | 2105 | start >= max_blks || |
2106 | range->len < sb->s_blocksize) | ||
2106 | return -EINVAL; | 2107 | return -EINVAL; |
2107 | if (end >= max_blks) | 2108 | if (end >= max_blks) |
2108 | end = max_blks - 1; | 2109 | end = max_blks - 1; |
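The added check means ext3 now rejects FITRIM ranges shorter than one filesystem block with -EINVAL instead of silently doing nothing. The userspace sketch below issues a whole-filesystem trim; the /mnt/ext3 mount point is a placeholder.

/*
 * Sketch only: trim all free space on a mounted ext3 filesystem with
 * FITRIM. The mount point is a placeholder.
 */
#include <fcntl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct fstrim_range range;
	int fd = open("/mnt/ext3", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&range, 0, sizeof(range));
	range.len = (unsigned long long)-1;	/* whole filesystem */
	range.minlen = 0;

	if (ioctl(fd, FITRIM, &range))
		perror("FITRIM");
	else
		printf("trimmed %llu bytes\n",
		       (unsigned long long)range.len);

	close(fd);
	return 0;
}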
@@ -685,7 +685,6 @@ void do_close_on_exec(struct files_struct *files) | |||
685 | struct fdtable *fdt; | 685 | struct fdtable *fdt; |
686 | 686 | ||
687 | /* exec unshares first */ | 687 | /* exec unshares first */ |
688 | BUG_ON(atomic_read(&files->count) != 1); | ||
689 | spin_lock(&files->file_lock); | 688 | spin_lock(&files->file_lock); |
690 | for (i = 0; ; i++) { | 689 | for (i = 0; ; i++) { |
691 | unsigned long set; | 690 | unsigned long set; |
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index 60ef3fb707ff..1506673c087e 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c | |||
@@ -138,33 +138,39 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, | |||
138 | struct page *pg; | 138 | struct page *pg; |
139 | struct inode *inode = mapping->host; | 139 | struct inode *inode = mapping->host; |
140 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); | 140 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); |
141 | struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); | ||
142 | struct jffs2_raw_inode ri; | ||
143 | uint32_t alloc_len = 0; | ||
141 | pgoff_t index = pos >> PAGE_CACHE_SHIFT; | 144 | pgoff_t index = pos >> PAGE_CACHE_SHIFT; |
142 | uint32_t pageofs = index << PAGE_CACHE_SHIFT; | 145 | uint32_t pageofs = index << PAGE_CACHE_SHIFT; |
143 | int ret = 0; | 146 | int ret = 0; |
144 | 147 | ||
148 | jffs2_dbg(1, "%s()\n", __func__); | ||
149 | |||
150 | if (pageofs > inode->i_size) { | ||
151 | ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, | ||
152 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | ||
153 | if (ret) | ||
154 | return ret; | ||
155 | } | ||
156 | |||
157 | mutex_lock(&f->sem); | ||
145 | pg = grab_cache_page_write_begin(mapping, index, flags); | 158 | pg = grab_cache_page_write_begin(mapping, index, flags); |
146 | if (!pg) | 159 | if (!pg) { |
160 | if (alloc_len) | ||
161 | jffs2_complete_reservation(c); | ||
162 | mutex_unlock(&f->sem); | ||
147 | return -ENOMEM; | 163 | return -ENOMEM; |
164 | } | ||
148 | *pagep = pg; | 165 | *pagep = pg; |
149 | 166 | ||
150 | jffs2_dbg(1, "%s()\n", __func__); | 167 | if (alloc_len) { |
151 | |||
152 | if (pageofs > inode->i_size) { | ||
153 | /* Make new hole frag from old EOF to new page */ | 168 | /* Make new hole frag from old EOF to new page */ |
154 | struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); | ||
155 | struct jffs2_raw_inode ri; | ||
156 | struct jffs2_full_dnode *fn; | 169 | struct jffs2_full_dnode *fn; |
157 | uint32_t alloc_len; | ||
158 | 170 | ||
159 | jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", | 171 | jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", |
160 | (unsigned int)inode->i_size, pageofs); | 172 | (unsigned int)inode->i_size, pageofs); |
161 | 173 | ||
162 | ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, | ||
163 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | ||
164 | if (ret) | ||
165 | goto out_page; | ||
166 | |||
167 | mutex_lock(&f->sem); | ||
168 | memset(&ri, 0, sizeof(ri)); | 174 | memset(&ri, 0, sizeof(ri)); |
169 | 175 | ||
170 | ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | 176 | ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); |
@@ -191,7 +197,6 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, | |||
191 | if (IS_ERR(fn)) { | 197 | if (IS_ERR(fn)) { |
192 | ret = PTR_ERR(fn); | 198 | ret = PTR_ERR(fn); |
193 | jffs2_complete_reservation(c); | 199 | jffs2_complete_reservation(c); |
194 | mutex_unlock(&f->sem); | ||
195 | goto out_page; | 200 | goto out_page; |
196 | } | 201 | } |
197 | ret = jffs2_add_full_dnode_to_inode(c, f, fn); | 202 | ret = jffs2_add_full_dnode_to_inode(c, f, fn); |
@@ -206,12 +211,10 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, | |||
206 | jffs2_mark_node_obsolete(c, fn->raw); | 211 | jffs2_mark_node_obsolete(c, fn->raw); |
207 | jffs2_free_full_dnode(fn); | 212 | jffs2_free_full_dnode(fn); |
208 | jffs2_complete_reservation(c); | 213 | jffs2_complete_reservation(c); |
209 | mutex_unlock(&f->sem); | ||
210 | goto out_page; | 214 | goto out_page; |
211 | } | 215 | } |
212 | jffs2_complete_reservation(c); | 216 | jffs2_complete_reservation(c); |
213 | inode->i_size = pageofs; | 217 | inode->i_size = pageofs; |
214 | mutex_unlock(&f->sem); | ||
215 | } | 218 | } |
216 | 219 | ||
217 | /* | 220 | /* |
@@ -220,18 +223,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, | |||
220 | * case of a short-copy. | 223 | * case of a short-copy. |
221 | */ | 224 | */ |
222 | if (!PageUptodate(pg)) { | 225 | if (!PageUptodate(pg)) { |
223 | mutex_lock(&f->sem); | ||
224 | ret = jffs2_do_readpage_nolock(inode, pg); | 226 | ret = jffs2_do_readpage_nolock(inode, pg); |
225 | mutex_unlock(&f->sem); | ||
226 | if (ret) | 227 | if (ret) |
227 | goto out_page; | 228 | goto out_page; |
228 | } | 229 | } |
230 | mutex_unlock(&f->sem); | ||
229 | jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags); | 231 | jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags); |
230 | return ret; | 232 | return ret; |
231 | 233 | ||
232 | out_page: | 234 | out_page: |
233 | unlock_page(pg); | 235 | unlock_page(pg); |
234 | page_cache_release(pg); | 236 | page_cache_release(pg); |
237 | mutex_unlock(&f->sem); | ||
235 | return ret; | 238 | return ret; |
236 | } | 239 | } |
237 | 240 | ||
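Condensing the jffs2_write_begin() hunk above: space for a possible hole frag is now reserved before any lock is taken, f->sem is held across the rest of the function, and the failure paths after the page is grabbed funnel through out_page, which now also drops f->sem. A restatement of the patched flow, with the raw-inode setup and error labels elided:

    uint32_t alloc_len = 0;

    if (pageofs > inode->i_size) {
            /* reserve before taking f->sem or the page lock */
            ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
                                      ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
            if (ret)
                    return ret;
    }

    mutex_lock(&f->sem);                               /* held until return */
    pg = grab_cache_page_write_begin(mapping, index, flags);
    /* if alloc_len != 0: write the hole frag, then jffs2_complete_reservation() */
    if (!PageUptodate(pg))
            ret = jffs2_do_readpage_nolock(inode, pg); /* f->sem already held */
    mutex_unlock(&f->sem);
    return ret;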
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 721d692fa8d4..6fcaeb8c902e 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c | |||
@@ -258,7 +258,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, | |||
258 | if (ret) | 258 | if (ret) |
259 | goto out_close_fd; | 259 | goto out_close_fd; |
260 | 260 | ||
261 | fd_install(fd, f); | 261 | if (fd != FAN_NOFD) |
262 | fd_install(fd, f); | ||
262 | return fanotify_event_metadata.event_len; | 263 | return fanotify_event_metadata.event_len; |
263 | 264 | ||
264 | out_close_fd: | 265 | out_close_fd: |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 144a96732dd7..3c231adf8450 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -873,6 +873,113 @@ static const struct file_operations proc_environ_operations = { | |||
873 | .release = mem_release, | 873 | .release = mem_release, |
874 | }; | 874 | }; |
875 | 875 | ||
876 | static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count, | ||
877 | loff_t *ppos) | ||
878 | { | ||
879 | struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode); | ||
880 | char buffer[PROC_NUMBUF]; | ||
881 | int oom_adj = OOM_ADJUST_MIN; | ||
882 | size_t len; | ||
883 | unsigned long flags; | ||
884 | |||
885 | if (!task) | ||
886 | return -ESRCH; | ||
887 | if (lock_task_sighand(task, &flags)) { | ||
888 | if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX) | ||
889 | oom_adj = OOM_ADJUST_MAX; | ||
890 | else | ||
891 | oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) / | ||
892 | OOM_SCORE_ADJ_MAX; | ||
893 | unlock_task_sighand(task, &flags); | ||
894 | } | ||
895 | put_task_struct(task); | ||
896 | len = snprintf(buffer, sizeof(buffer), "%d\n", oom_adj); | ||
897 | return simple_read_from_buffer(buf, count, ppos, buffer, len); | ||
898 | } | ||
899 | |||
900 | static ssize_t oom_adj_write(struct file *file, const char __user *buf, | ||
901 | size_t count, loff_t *ppos) | ||
902 | { | ||
903 | struct task_struct *task; | ||
904 | char buffer[PROC_NUMBUF]; | ||
905 | int oom_adj; | ||
906 | unsigned long flags; | ||
907 | int err; | ||
908 | |||
909 | memset(buffer, 0, sizeof(buffer)); | ||
910 | if (count > sizeof(buffer) - 1) | ||
911 | count = sizeof(buffer) - 1; | ||
912 | if (copy_from_user(buffer, buf, count)) { | ||
913 | err = -EFAULT; | ||
914 | goto out; | ||
915 | } | ||
916 | |||
917 | err = kstrtoint(strstrip(buffer), 0, &oom_adj); | ||
918 | if (err) | ||
919 | goto out; | ||
920 | if ((oom_adj < OOM_ADJUST_MIN || oom_adj > OOM_ADJUST_MAX) && | ||
921 | oom_adj != OOM_DISABLE) { | ||
922 | err = -EINVAL; | ||
923 | goto out; | ||
924 | } | ||
925 | |||
926 | task = get_proc_task(file->f_path.dentry->d_inode); | ||
927 | if (!task) { | ||
928 | err = -ESRCH; | ||
929 | goto out; | ||
930 | } | ||
931 | |||
932 | task_lock(task); | ||
933 | if (!task->mm) { | ||
934 | err = -EINVAL; | ||
935 | goto err_task_lock; | ||
936 | } | ||
937 | |||
938 | if (!lock_task_sighand(task, &flags)) { | ||
939 | err = -ESRCH; | ||
940 | goto err_task_lock; | ||
941 | } | ||
942 | |||
943 | /* | ||
944 | * Scale /proc/pid/oom_score_adj appropriately ensuring that a maximum | ||
945 | * value is always attainable. | ||
946 | */ | ||
947 | if (oom_adj == OOM_ADJUST_MAX) | ||
948 | oom_adj = OOM_SCORE_ADJ_MAX; | ||
949 | else | ||
950 | oom_adj = (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE; | ||
951 | |||
952 | if (oom_adj < task->signal->oom_score_adj && | ||
953 | !capable(CAP_SYS_RESOURCE)) { | ||
954 | err = -EACCES; | ||
955 | goto err_sighand; | ||
956 | } | ||
957 | |||
958 | /* | ||
959 | * /proc/pid/oom_adj is provided for legacy purposes, ask users to use | ||
960 | * /proc/pid/oom_score_adj instead. | ||
961 | */ | ||
962 | printk_once(KERN_WARNING "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n", | ||
963 | current->comm, task_pid_nr(current), task_pid_nr(task), | ||
964 | task_pid_nr(task)); | ||
965 | |||
966 | task->signal->oom_score_adj = oom_adj; | ||
967 | trace_oom_score_adj_update(task); | ||
968 | err_sighand: | ||
969 | unlock_task_sighand(task, &flags); | ||
970 | err_task_lock: | ||
971 | task_unlock(task); | ||
972 | put_task_struct(task); | ||
973 | out: | ||
974 | return err < 0 ? err : count; | ||
975 | } | ||
976 | |||
977 | static const struct file_operations proc_oom_adj_operations = { | ||
978 | .read = oom_adj_read, | ||
979 | .write = oom_adj_write, | ||
980 | .llseek = generic_file_llseek, | ||
981 | }; | ||
982 | |||
876 | static ssize_t oom_score_adj_read(struct file *file, char __user *buf, | 983 | static ssize_t oom_score_adj_read(struct file *file, char __user *buf, |
877 | size_t count, loff_t *ppos) | 984 | size_t count, loff_t *ppos) |
878 | { | 985 | { |
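The legacy oom_adj file added back above stores nothing of its own; writes are scaled into oom_score_adj and reads scale back. A small stand-alone sketch of that arithmetic, assuming the historical constants from include/linux/oom.h (OOM_DISABLE = -17, OOM_ADJUST_MIN = -16, OOM_ADJUST_MAX = 15, OOM_SCORE_ADJ_MAX = 1000); it also shows that round trips are not always exact because of integer truncation:

    #include <stdio.h>

    #define OOM_DISABLE        (-17)   /* assumed historical values */
    #define OOM_ADJUST_MIN     (-16)
    #define OOM_ADJUST_MAX       15
    #define OOM_SCORE_ADJ_MAX  1000

    static int adj_to_score_adj(int oom_adj)           /* mirrors oom_adj_write() */
    {
            if (oom_adj == OOM_ADJUST_MAX)
                    return OOM_SCORE_ADJ_MAX;
            return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
    }

    static int score_adj_to_adj(int oom_score_adj)     /* mirrors oom_adj_read() */
    {
            if (oom_score_adj == OOM_SCORE_ADJ_MAX)
                    return OOM_ADJUST_MAX;
            return (oom_score_adj * -OOM_DISABLE) / OOM_SCORE_ADJ_MAX;
    }

    int main(void)
    {
            int adj;

            for (adj = OOM_ADJUST_MIN; adj <= OOM_ADJUST_MAX; adj++)
                    printf("oom_adj %3d -> oom_score_adj %5d -> oom_adj %3d\n",
                           adj, adj_to_score_adj(adj),
                           score_adj_to_adj(adj_to_score_adj(adj)));
            return 0;
    }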
@@ -2598,6 +2705,7 @@ static const struct pid_entry tgid_base_stuff[] = { | |||
2598 | REG("cgroup", S_IRUGO, proc_cgroup_operations), | 2705 | REG("cgroup", S_IRUGO, proc_cgroup_operations), |
2599 | #endif | 2706 | #endif |
2600 | INF("oom_score", S_IRUGO, proc_oom_score), | 2707 | INF("oom_score", S_IRUGO, proc_oom_score), |
2708 | REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations), | ||
2601 | REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations), | 2709 | REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations), |
2602 | #ifdef CONFIG_AUDITSYSCALL | 2710 | #ifdef CONFIG_AUDITSYSCALL |
2603 | REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations), | 2711 | REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations), |
@@ -2964,6 +3072,7 @@ static const struct pid_entry tid_base_stuff[] = { | |||
2964 | REG("cgroup", S_IRUGO, proc_cgroup_operations), | 3072 | REG("cgroup", S_IRUGO, proc_cgroup_operations), |
2965 | #endif | 3073 | #endif |
2966 | INF("oom_score", S_IRUGO, proc_oom_score), | 3074 | INF("oom_score", S_IRUGO, proc_oom_score), |
3075 | REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations), | ||
2967 | REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations), | 3076 | REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations), |
2968 | #ifdef CONFIG_AUDITSYSCALL | 3077 | #ifdef CONFIG_AUDITSYSCALL |
2969 | REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations), | 3078 | REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations), |
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index a40da07e93d6..947fbe06c3b1 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c | |||
@@ -161,6 +161,7 @@ static void pstore_console_write(struct console *con, const char *s, unsigned c) | |||
161 | 161 | ||
162 | while (s < e) { | 162 | while (s < e) { |
163 | unsigned long flags; | 163 | unsigned long flags; |
164 | u64 id; | ||
164 | 165 | ||
165 | if (c > psinfo->bufsize) | 166 | if (c > psinfo->bufsize) |
166 | c = psinfo->bufsize; | 167 | c = psinfo->bufsize; |
@@ -172,7 +173,7 @@ static void pstore_console_write(struct console *con, const char *s, unsigned c) | |||
172 | spin_lock_irqsave(&psinfo->buf_lock, flags); | 173 | spin_lock_irqsave(&psinfo->buf_lock, flags); |
173 | } | 174 | } |
174 | memcpy(psinfo->buf, s, c); | 175 | memcpy(psinfo->buf, s, c); |
175 | psinfo->write(PSTORE_TYPE_CONSOLE, 0, NULL, 0, c, psinfo); | 176 | psinfo->write(PSTORE_TYPE_CONSOLE, 0, &id, 0, c, psinfo); |
176 | spin_unlock_irqrestore(&psinfo->buf_lock, flags); | 177 | spin_unlock_irqrestore(&psinfo->buf_lock, flags); |
177 | s += c; | 178 | s += c; |
178 | c = e - s; | 179 | c = e - s; |
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index f27f01a98aa2..d83736fbc26c 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c | |||
@@ -1782,8 +1782,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, | |||
1782 | 1782 | ||
1783 | BUG_ON(!th->t_trans_id); | 1783 | BUG_ON(!th->t_trans_id); |
1784 | 1784 | ||
1785 | dquot_initialize(inode); | 1785 | reiserfs_write_unlock(inode->i_sb); |
1786 | err = dquot_alloc_inode(inode); | 1786 | err = dquot_alloc_inode(inode); |
1787 | reiserfs_write_lock(inode->i_sb); | ||
1787 | if (err) | 1788 | if (err) |
1788 | goto out_end_trans; | 1789 | goto out_end_trans; |
1789 | if (!dir->i_nlink) { | 1790 | if (!dir->i_nlink) { |
@@ -1979,8 +1980,10 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, | |||
1979 | 1980 | ||
1980 | out_end_trans: | 1981 | out_end_trans: |
1981 | journal_end(th, th->t_super, th->t_blocks_allocated); | 1982 | journal_end(th, th->t_super, th->t_blocks_allocated); |
1983 | reiserfs_write_unlock(inode->i_sb); | ||
1982 | /* Drop can be outside and it needs more credits so it's better to have it outside */ | 1984 | /* Drop can be outside and it needs more credits so it's better to have it outside */ |
1983 | dquot_drop(inode); | 1985 | dquot_drop(inode); |
1986 | reiserfs_write_lock(inode->i_sb); | ||
1984 | inode->i_flags |= S_NOQUOTA; | 1987 | inode->i_flags |= S_NOQUOTA; |
1985 | make_bad_inode(inode); | 1988 | make_bad_inode(inode); |
1986 | 1989 | ||
@@ -3103,10 +3106,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr) | |||
3103 | /* must be turned off for recursive notify_change calls */ | 3106 | /* must be turned off for recursive notify_change calls */ |
3104 | ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID); | 3107 | ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID); |
3105 | 3108 | ||
3106 | depth = reiserfs_write_lock_once(inode->i_sb); | ||
3107 | if (is_quota_modification(inode, attr)) | 3109 | if (is_quota_modification(inode, attr)) |
3108 | dquot_initialize(inode); | 3110 | dquot_initialize(inode); |
3109 | 3111 | depth = reiserfs_write_lock_once(inode->i_sb); | |
3110 | if (attr->ia_valid & ATTR_SIZE) { | 3112 | if (attr->ia_valid & ATTR_SIZE) { |
3111 | /* version 2 items will be caught by the s_maxbytes check | 3113 | /* version 2 items will be caught by the s_maxbytes check |
3112 | ** done for us in vmtruncate | 3114 | ** done for us in vmtruncate |
@@ -3170,7 +3172,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr) | |||
3170 | error = journal_begin(&th, inode->i_sb, jbegin_count); | 3172 | error = journal_begin(&th, inode->i_sb, jbegin_count); |
3171 | if (error) | 3173 | if (error) |
3172 | goto out; | 3174 | goto out; |
3175 | reiserfs_write_unlock_once(inode->i_sb, depth); | ||
3173 | error = dquot_transfer(inode, attr); | 3176 | error = dquot_transfer(inode, attr); |
3177 | depth = reiserfs_write_lock_once(inode->i_sb); | ||
3174 | if (error) { | 3178 | if (error) { |
3175 | journal_end(&th, inode->i_sb, jbegin_count); | 3179 | journal_end(&th, inode->i_sb, jbegin_count); |
3176 | goto out; | 3180 | goto out; |
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index f8afa4b162b8..2f40a4c70a4d 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c | |||
@@ -1968,7 +1968,9 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree | |||
1968 | key2type(&(key->on_disk_key))); | 1968 | key2type(&(key->on_disk_key))); |
1969 | #endif | 1969 | #endif |
1970 | 1970 | ||
1971 | reiserfs_write_unlock(inode->i_sb); | ||
1971 | retval = dquot_alloc_space_nodirty(inode, pasted_size); | 1972 | retval = dquot_alloc_space_nodirty(inode, pasted_size); |
1973 | reiserfs_write_lock(inode->i_sb); | ||
1972 | if (retval) { | 1974 | if (retval) { |
1973 | pathrelse(search_path); | 1975 | pathrelse(search_path); |
1974 | return retval; | 1976 | return retval; |
@@ -2061,9 +2063,11 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, | |||
2061 | "reiserquota insert_item(): allocating %u id=%u type=%c", | 2063 | "reiserquota insert_item(): allocating %u id=%u type=%c", |
2062 | quota_bytes, inode->i_uid, head2type(ih)); | 2064 | quota_bytes, inode->i_uid, head2type(ih)); |
2063 | #endif | 2065 | #endif |
2066 | reiserfs_write_unlock(inode->i_sb); | ||
2064 | /* We can't dirty inode here. It would be immediately written but | 2067 | /* We can't dirty inode here. It would be immediately written but |
2065 | * appropriate stat item isn't inserted yet... */ | 2068 | * appropriate stat item isn't inserted yet... */ |
2066 | retval = dquot_alloc_space_nodirty(inode, quota_bytes); | 2069 | retval = dquot_alloc_space_nodirty(inode, quota_bytes); |
2070 | reiserfs_write_lock(inode->i_sb); | ||
2067 | if (retval) { | 2071 | if (retval) { |
2068 | pathrelse(path); | 2072 | pathrelse(path); |
2069 | return retval; | 2073 | return retval; |
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 1078ae179993..418bdc3a57da 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c | |||
@@ -298,7 +298,9 @@ static int finish_unfinished(struct super_block *s) | |||
298 | retval = remove_save_link_only(s, &save_link_key, 0); | 298 | retval = remove_save_link_only(s, &save_link_key, 0); |
299 | continue; | 299 | continue; |
300 | } | 300 | } |
301 | reiserfs_write_unlock(s); | ||
301 | dquot_initialize(inode); | 302 | dquot_initialize(inode); |
303 | reiserfs_write_lock(s); | ||
302 | 304 | ||
303 | if (truncate && S_ISDIR(inode->i_mode)) { | 305 | if (truncate && S_ISDIR(inode->i_mode)) { |
304 | /* We got a truncate request for a dir which is impossible. | 306 | /* We got a truncate request for a dir which is impossible. |
@@ -1335,7 +1337,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) | |||
1335 | kfree(qf_names[i]); | 1337 | kfree(qf_names[i]); |
1336 | #endif | 1338 | #endif |
1337 | err = -EINVAL; | 1339 | err = -EINVAL; |
1338 | goto out_err; | 1340 | goto out_unlock; |
1339 | } | 1341 | } |
1340 | #ifdef CONFIG_QUOTA | 1342 | #ifdef CONFIG_QUOTA |
1341 | handle_quota_files(s, qf_names, &qfmt); | 1343 | handle_quota_files(s, qf_names, &qfmt); |
@@ -1379,7 +1381,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) | |||
1379 | if (blocks) { | 1381 | if (blocks) { |
1380 | err = reiserfs_resize(s, blocks); | 1382 | err = reiserfs_resize(s, blocks); |
1381 | if (err != 0) | 1383 | if (err != 0) |
1382 | goto out_err; | 1384 | goto out_unlock; |
1383 | } | 1385 | } |
1384 | 1386 | ||
1385 | if (*mount_flags & MS_RDONLY) { | 1387 | if (*mount_flags & MS_RDONLY) { |
@@ -1389,9 +1391,15 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) | |||
1389 | /* it is read-only already */ | 1391 | /* it is read-only already */ |
1390 | goto out_ok; | 1392 | goto out_ok; |
1391 | 1393 | ||
1394 | /* | ||
1395 | * Drop write lock. Quota will retake it when needed and lock | ||
1396 | * ordering requires calling dquot_suspend() without it. | ||
1397 | */ | ||
1398 | reiserfs_write_unlock(s); | ||
1392 | err = dquot_suspend(s, -1); | 1399 | err = dquot_suspend(s, -1); |
1393 | if (err < 0) | 1400 | if (err < 0) |
1394 | goto out_err; | 1401 | goto out_err; |
1402 | reiserfs_write_lock(s); | ||
1395 | 1403 | ||
1396 | /* try to remount file system with read-only permissions */ | 1404 | /* try to remount file system with read-only permissions */ |
1397 | if (sb_umount_state(rs) == REISERFS_VALID_FS | 1405 | if (sb_umount_state(rs) == REISERFS_VALID_FS |
@@ -1401,7 +1409,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) | |||
1401 | 1409 | ||
1402 | err = journal_begin(&th, s, 10); | 1410 | err = journal_begin(&th, s, 10); |
1403 | if (err) | 1411 | if (err) |
1404 | goto out_err; | 1412 | goto out_unlock; |
1405 | 1413 | ||
1406 | /* Mounting a rw partition read-only. */ | 1414 | /* Mounting a rw partition read-only. */ |
1407 | reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); | 1415 | reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); |
@@ -1416,7 +1424,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) | |||
1416 | 1424 | ||
1417 | if (reiserfs_is_journal_aborted(journal)) { | 1425 | if (reiserfs_is_journal_aborted(journal)) { |
1418 | err = journal->j_errno; | 1426 | err = journal->j_errno; |
1419 | goto out_err; | 1427 | goto out_unlock; |
1420 | } | 1428 | } |
1421 | 1429 | ||
1422 | handle_data_mode(s, mount_options); | 1430 | handle_data_mode(s, mount_options); |
@@ -1425,7 +1433,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) | |||
1425 | s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */ | 1433 | s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */ |
1426 | err = journal_begin(&th, s, 10); | 1434 | err = journal_begin(&th, s, 10); |
1427 | if (err) | 1435 | if (err) |
1428 | goto out_err; | 1436 | goto out_unlock; |
1429 | 1437 | ||
1430 | /* Mount a partition which is read-only, read-write */ | 1438 | /* Mount a partition which is read-only, read-write */ |
1431 | reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); | 1439 | reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); |
@@ -1442,10 +1450,16 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) | |||
1442 | SB_JOURNAL(s)->j_must_wait = 1; | 1450 | SB_JOURNAL(s)->j_must_wait = 1; |
1443 | err = journal_end(&th, s, 10); | 1451 | err = journal_end(&th, s, 10); |
1444 | if (err) | 1452 | if (err) |
1445 | goto out_err; | 1453 | goto out_unlock; |
1446 | 1454 | ||
1447 | if (!(*mount_flags & MS_RDONLY)) { | 1455 | if (!(*mount_flags & MS_RDONLY)) { |
1456 | /* | ||
1457 | * Drop write lock. Quota will retake it when needed and lock | ||
1458 | * ordering requires calling dquot_resume() without it. | ||
1459 | */ | ||
1460 | reiserfs_write_unlock(s); | ||
1448 | dquot_resume(s, -1); | 1461 | dquot_resume(s, -1); |
1462 | reiserfs_write_lock(s); | ||
1449 | finish_unfinished(s); | 1463 | finish_unfinished(s); |
1450 | reiserfs_xattr_init(s, *mount_flags); | 1464 | reiserfs_xattr_init(s, *mount_flags); |
1451 | } | 1465 | } |
@@ -1455,9 +1469,10 @@ out_ok: | |||
1455 | reiserfs_write_unlock(s); | 1469 | reiserfs_write_unlock(s); |
1456 | return 0; | 1470 | return 0; |
1457 | 1471 | ||
1472 | out_unlock: | ||
1473 | reiserfs_write_unlock(s); | ||
1458 | out_err: | 1474 | out_err: |
1459 | kfree(new_opts); | 1475 | kfree(new_opts); |
1460 | reiserfs_write_unlock(s); | ||
1461 | return err; | 1476 | return err; |
1462 | } | 1477 | } |
1463 | 1478 | ||
@@ -2095,13 +2110,15 @@ static int reiserfs_write_dquot(struct dquot *dquot) | |||
2095 | REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); | 2110 | REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); |
2096 | if (ret) | 2111 | if (ret) |
2097 | goto out; | 2112 | goto out; |
2113 | reiserfs_write_unlock(dquot->dq_sb); | ||
2098 | ret = dquot_commit(dquot); | 2114 | ret = dquot_commit(dquot); |
2115 | reiserfs_write_lock(dquot->dq_sb); | ||
2099 | err = | 2116 | err = |
2100 | journal_end(&th, dquot->dq_sb, | 2117 | journal_end(&th, dquot->dq_sb, |
2101 | REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); | 2118 | REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); |
2102 | if (!ret && err) | 2119 | if (!ret && err) |
2103 | ret = err; | 2120 | ret = err; |
2104 | out: | 2121 | out: |
2105 | reiserfs_write_unlock(dquot->dq_sb); | 2122 | reiserfs_write_unlock(dquot->dq_sb); |
2106 | return ret; | 2123 | return ret; |
2107 | } | 2124 | } |
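This hunk and the quota hunks around it all apply the same shape: the reiserfs per-superblock write lock is dropped around the dquot_* call and re-taken before the transaction is closed, presumably for the same lock-ordering reason the remount hunks state for dquot_suspend()/dquot_resume(). Distilled from reiserfs_write_dquot():

    ret = journal_begin(&th, dquot->dq_sb, REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
    if (ret)
            goto out;
    reiserfs_write_unlock(dquot->dq_sb);   /* quota code runs without the write lock */
    ret = dquot_commit(dquot);
    reiserfs_write_lock(dquot->dq_sb);     /* re-taken before journal_end() */
    err = journal_end(&th, dquot->dq_sb, REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));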
@@ -2117,13 +2134,15 @@ static int reiserfs_acquire_dquot(struct dquot *dquot) | |||
2117 | REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb)); | 2134 | REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb)); |
2118 | if (ret) | 2135 | if (ret) |
2119 | goto out; | 2136 | goto out; |
2137 | reiserfs_write_unlock(dquot->dq_sb); | ||
2120 | ret = dquot_acquire(dquot); | 2138 | ret = dquot_acquire(dquot); |
2139 | reiserfs_write_lock(dquot->dq_sb); | ||
2121 | err = | 2140 | err = |
2122 | journal_end(&th, dquot->dq_sb, | 2141 | journal_end(&th, dquot->dq_sb, |
2123 | REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb)); | 2142 | REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb)); |
2124 | if (!ret && err) | 2143 | if (!ret && err) |
2125 | ret = err; | 2144 | ret = err; |
2126 | out: | 2145 | out: |
2127 | reiserfs_write_unlock(dquot->dq_sb); | 2146 | reiserfs_write_unlock(dquot->dq_sb); |
2128 | return ret; | 2147 | return ret; |
2129 | } | 2148 | } |
@@ -2137,19 +2156,21 @@ static int reiserfs_release_dquot(struct dquot *dquot) | |||
2137 | ret = | 2156 | ret = |
2138 | journal_begin(&th, dquot->dq_sb, | 2157 | journal_begin(&th, dquot->dq_sb, |
2139 | REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb)); | 2158 | REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb)); |
2159 | reiserfs_write_unlock(dquot->dq_sb); | ||
2140 | if (ret) { | 2160 | if (ret) { |
2141 | /* Release dquot anyway to avoid endless cycle in dqput() */ | 2161 | /* Release dquot anyway to avoid endless cycle in dqput() */ |
2142 | dquot_release(dquot); | 2162 | dquot_release(dquot); |
2143 | goto out; | 2163 | goto out; |
2144 | } | 2164 | } |
2145 | ret = dquot_release(dquot); | 2165 | ret = dquot_release(dquot); |
2166 | reiserfs_write_lock(dquot->dq_sb); | ||
2146 | err = | 2167 | err = |
2147 | journal_end(&th, dquot->dq_sb, | 2168 | journal_end(&th, dquot->dq_sb, |
2148 | REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb)); | 2169 | REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb)); |
2149 | if (!ret && err) | 2170 | if (!ret && err) |
2150 | ret = err; | 2171 | ret = err; |
2151 | out: | ||
2152 | reiserfs_write_unlock(dquot->dq_sb); | 2172 | reiserfs_write_unlock(dquot->dq_sb); |
2173 | out: | ||
2153 | return ret; | 2174 | return ret; |
2154 | } | 2175 | } |
2155 | 2176 | ||
@@ -2174,11 +2195,13 @@ static int reiserfs_write_info(struct super_block *sb, int type) | |||
2174 | ret = journal_begin(&th, sb, 2); | 2195 | ret = journal_begin(&th, sb, 2); |
2175 | if (ret) | 2196 | if (ret) |
2176 | goto out; | 2197 | goto out; |
2198 | reiserfs_write_unlock(sb); | ||
2177 | ret = dquot_commit_info(sb, type); | 2199 | ret = dquot_commit_info(sb, type); |
2200 | reiserfs_write_lock(sb); | ||
2178 | err = journal_end(&th, sb, 2); | 2201 | err = journal_end(&th, sb, 2); |
2179 | if (!ret && err) | 2202 | if (!ret && err) |
2180 | ret = err; | 2203 | ret = err; |
2181 | out: | 2204 | out: |
2182 | reiserfs_write_unlock(sb); | 2205 | reiserfs_write_unlock(sb); |
2183 | return ret; | 2206 | return ret; |
2184 | } | 2207 | } |
@@ -2203,8 +2226,11 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, | |||
2203 | struct reiserfs_transaction_handle th; | 2226 | struct reiserfs_transaction_handle th; |
2204 | int opt = type == USRQUOTA ? REISERFS_USRQUOTA : REISERFS_GRPQUOTA; | 2227 | int opt = type == USRQUOTA ? REISERFS_USRQUOTA : REISERFS_GRPQUOTA; |
2205 | 2228 | ||
2206 | if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt))) | 2229 | reiserfs_write_lock(sb); |
2207 | return -EINVAL; | 2230 | if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt))) { |
2231 | err = -EINVAL; | ||
2232 | goto out; | ||
2233 | } | ||
2208 | 2234 | ||
2209 | /* Quotafile not on the same filesystem? */ | 2235 | /* Quotafile not on the same filesystem? */ |
2210 | if (path->dentry->d_sb != sb) { | 2236 | if (path->dentry->d_sb != sb) { |
@@ -2246,8 +2272,10 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, | |||
2246 | if (err) | 2272 | if (err) |
2247 | goto out; | 2273 | goto out; |
2248 | } | 2274 | } |
2249 | err = dquot_quota_on(sb, type, format_id, path); | 2275 | reiserfs_write_unlock(sb); |
2276 | return dquot_quota_on(sb, type, format_id, path); | ||
2250 | out: | 2277 | out: |
2278 | reiserfs_write_unlock(sb); | ||
2251 | return err; | 2279 | return err; |
2252 | } | 2280 | } |
2253 | 2281 | ||
@@ -2320,7 +2348,9 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type, | |||
2320 | tocopy = sb->s_blocksize - offset < towrite ? | 2348 | tocopy = sb->s_blocksize - offset < towrite ? |
2321 | sb->s_blocksize - offset : towrite; | 2349 | sb->s_blocksize - offset : towrite; |
2322 | tmp_bh.b_state = 0; | 2350 | tmp_bh.b_state = 0; |
2351 | reiserfs_write_lock(sb); | ||
2323 | err = reiserfs_get_block(inode, blk, &tmp_bh, GET_BLOCK_CREATE); | 2352 | err = reiserfs_get_block(inode, blk, &tmp_bh, GET_BLOCK_CREATE); |
2353 | reiserfs_write_unlock(sb); | ||
2324 | if (err) | 2354 | if (err) |
2325 | goto out; | 2355 | goto out; |
2326 | if (offset || tocopy != sb->s_blocksize) | 2356 | if (offset || tocopy != sb->s_blocksize) |
@@ -2336,10 +2366,12 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type, | |||
2336 | flush_dcache_page(bh->b_page); | 2366 | flush_dcache_page(bh->b_page); |
2337 | set_buffer_uptodate(bh); | 2367 | set_buffer_uptodate(bh); |
2338 | unlock_buffer(bh); | 2368 | unlock_buffer(bh); |
2369 | reiserfs_write_lock(sb); | ||
2339 | reiserfs_prepare_for_journal(sb, bh, 1); | 2370 | reiserfs_prepare_for_journal(sb, bh, 1); |
2340 | journal_mark_dirty(current->journal_info, sb, bh); | 2371 | journal_mark_dirty(current->journal_info, sb, bh); |
2341 | if (!journal_quota) | 2372 | if (!journal_quota) |
2342 | reiserfs_add_ordered_list(inode, bh); | 2373 | reiserfs_add_ordered_list(inode, bh); |
2374 | reiserfs_write_unlock(sb); | ||
2343 | brelse(bh); | 2375 | brelse(bh); |
2344 | offset = 0; | 2376 | offset = 0; |
2345 | towrite -= tocopy; | 2377 | towrite -= tocopy; |
diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c index 28ec13af28d9..2dcf3d473fec 100644 --- a/fs/ubifs/find.c +++ b/fs/ubifs/find.c | |||
@@ -681,8 +681,16 @@ int ubifs_find_free_leb_for_idx(struct ubifs_info *c) | |||
681 | if (!lprops) { | 681 | if (!lprops) { |
682 | lprops = ubifs_fast_find_freeable(c); | 682 | lprops = ubifs_fast_find_freeable(c); |
683 | if (!lprops) { | 683 | if (!lprops) { |
684 | ubifs_assert(c->freeable_cnt == 0); | 684 | /* |
685 | if (c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) { | 685 | * The first condition means the following: go scan the |
686 | * LPT if there are uncategorized lprops, which means | ||
687 | * there may be freeable LEBs there (UBIFS does not | ||
688 | * store the information about freeable LEBs in the | ||
689 | * master node). | ||
690 | */ | ||
691 | if (c->in_a_category_cnt != c->main_lebs || | ||
692 | c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) { | ||
693 | ubifs_assert(c->freeable_cnt == 0); | ||
686 | lprops = scan_for_leb_for_idx(c); | 694 | lprops = scan_for_leb_for_idx(c); |
687 | if (IS_ERR(lprops)) { | 695 | if (IS_ERR(lprops)) { |
688 | err = PTR_ERR(lprops); | 696 | err = PTR_ERR(lprops); |
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index e5a2a35a46dc..46190a7c42a6 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c | |||
@@ -300,8 +300,11 @@ void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, | |||
300 | default: | 300 | default: |
301 | ubifs_assert(0); | 301 | ubifs_assert(0); |
302 | } | 302 | } |
303 | |||
303 | lprops->flags &= ~LPROPS_CAT_MASK; | 304 | lprops->flags &= ~LPROPS_CAT_MASK; |
304 | lprops->flags |= cat; | 305 | lprops->flags |= cat; |
306 | c->in_a_category_cnt += 1; | ||
307 | ubifs_assert(c->in_a_category_cnt <= c->main_lebs); | ||
305 | } | 308 | } |
306 | 309 | ||
307 | /** | 310 | /** |
@@ -334,6 +337,9 @@ static void ubifs_remove_from_cat(struct ubifs_info *c, | |||
334 | default: | 337 | default: |
335 | ubifs_assert(0); | 338 | ubifs_assert(0); |
336 | } | 339 | } |
340 | |||
341 | c->in_a_category_cnt -= 1; | ||
342 | ubifs_assert(c->in_a_category_cnt >= 0); | ||
337 | } | 343 | } |
338 | 344 | ||
339 | /** | 345 | /** |
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 5486346d0a3f..d133c276fe05 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h | |||
@@ -1183,6 +1183,8 @@ struct ubifs_debug_info; | |||
1183 | * @freeable_list: list of freeable non-index LEBs (free + dirty == @leb_size) | 1183 | * @freeable_list: list of freeable non-index LEBs (free + dirty == @leb_size) |
1184 | * @frdi_idx_list: list of freeable index LEBs (free + dirty == @leb_size) | 1184 | * @frdi_idx_list: list of freeable index LEBs (free + dirty == @leb_size) |
1185 | * @freeable_cnt: number of freeable LEBs in @freeable_list | 1185 | * @freeable_cnt: number of freeable LEBs in @freeable_list |
1186 | * @in_a_category_cnt: count of lprops which are in a certain category, which | ||
1187 | * basically means that they were loaded from the flash | ||
1186 | * | 1188 | * |
1187 | * @ltab_lnum: LEB number of LPT's own lprops table | 1189 | * @ltab_lnum: LEB number of LPT's own lprops table |
1188 | * @ltab_offs: offset of LPT's own lprops table | 1190 | * @ltab_offs: offset of LPT's own lprops table |
@@ -1412,6 +1414,7 @@ struct ubifs_info { | |||
1412 | struct list_head freeable_list; | 1414 | struct list_head freeable_list; |
1413 | struct list_head frdi_idx_list; | 1415 | struct list_head frdi_idx_list; |
1414 | int freeable_cnt; | 1416 | int freeable_cnt; |
1417 | int in_a_category_cnt; | ||
1415 | 1418 | ||
1416 | int ltab_lnum; | 1419 | int ltab_lnum; |
1417 | int ltab_offs; | 1420 | int ltab_offs; |
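Read together, the three UBIFS hunks add a single counter: ubifs_add_to_cat() and ubifs_remove_from_cat() keep @in_a_category_cnt in step with how many lprops have been categorized, and ubifs_find_free_leb_for_idx() compares it with @main_lebs to decide whether an LPT scan may still find a freeable LEB (freeable LEBs are not recorded in the master node). Condensed:

    /* lprops.c: bookkeeping */
    c->in_a_category_cnt += 1;      /* ubifs_add_to_cat(),      asserted <= c->main_lebs */
    c->in_a_category_cnt -= 1;      /* ubifs_remove_from_cat(), asserted >= 0            */

    /* find.c: scan the LPT if some lprops are still uncategorized, or if
     * untaken empty LEBs remain */
    if (c->in_a_category_cnt != c->main_lebs ||
        c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) {
            ubifs_assert(c->freeable_cnt == 0);
            lprops = scan_for_leb_for_idx(c);
    }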
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index e562dd43f41f..e57e2daa357c 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c | |||
@@ -481,11 +481,17 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh) | |||
481 | * | 481 | * |
482 | * The fix is two passes across the ioend list - one to start writeback on the | 482 | * The fix is two passes across the ioend list - one to start writeback on the |
483 | * buffer_heads, and then submit them for I/O on the second pass. | 483 | * buffer_heads, and then submit them for I/O on the second pass. |
484 | * | ||
485 | * If @fail is non-zero, it means that we have a situation where some part of | ||
486 | * the submission process has failed after we have marked paged for writeback | ||
487 | * and unlocked them. In this situation, we need to fail the ioend chain rather | ||
488 | * than submit it to IO. This typically only happens on a filesystem shutdown. | ||
484 | */ | 489 | */ |
485 | STATIC void | 490 | STATIC void |
486 | xfs_submit_ioend( | 491 | xfs_submit_ioend( |
487 | struct writeback_control *wbc, | 492 | struct writeback_control *wbc, |
488 | xfs_ioend_t *ioend) | 493 | xfs_ioend_t *ioend, |
494 | int fail) | ||
489 | { | 495 | { |
490 | xfs_ioend_t *head = ioend; | 496 | xfs_ioend_t *head = ioend; |
491 | xfs_ioend_t *next; | 497 | xfs_ioend_t *next; |
@@ -506,6 +512,18 @@ xfs_submit_ioend( | |||
506 | next = ioend->io_list; | 512 | next = ioend->io_list; |
507 | bio = NULL; | 513 | bio = NULL; |
508 | 514 | ||
515 | /* | ||
516 | * If we are failing the IO now, just mark the ioend with an | ||
517 | * error and finish it. This will run IO completion immediately | ||
518 | * as there is only one reference to the ioend at this point in | ||
519 | * time. | ||
520 | */ | ||
521 | if (fail) { | ||
522 | ioend->io_error = -fail; | ||
523 | xfs_finish_ioend(ioend); | ||
524 | continue; | ||
525 | } | ||
526 | |||
509 | for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) { | 527 | for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) { |
510 | 528 | ||
511 | if (!bio) { | 529 | if (!bio) { |
@@ -1060,7 +1078,18 @@ xfs_vm_writepage( | |||
1060 | 1078 | ||
1061 | xfs_start_page_writeback(page, 1, count); | 1079 | xfs_start_page_writeback(page, 1, count); |
1062 | 1080 | ||
1063 | if (ioend && imap_valid) { | 1081 | /* if there is no IO to be submitted for this page, we are done */ |
1082 | if (!ioend) | ||
1083 | return 0; | ||
1084 | |||
1085 | ASSERT(iohead); | ||
1086 | |||
1087 | /* | ||
1088 | * Any errors from this point onwards need to be reported through the IO | ||
1089 | * completion path as we have marked the initial page as under writeback | ||
1090 | * and unlocked it. | ||
1091 | */ | ||
1092 | if (imap_valid) { | ||
1064 | xfs_off_t end_index; | 1093 | xfs_off_t end_index; |
1065 | 1094 | ||
1066 | end_index = imap.br_startoff + imap.br_blockcount; | 1095 | end_index = imap.br_startoff + imap.br_blockcount; |
@@ -1079,20 +1108,15 @@ xfs_vm_writepage( | |||
1079 | wbc, end_index); | 1108 | wbc, end_index); |
1080 | } | 1109 | } |
1081 | 1110 | ||
1082 | if (iohead) { | ||
1083 | /* | ||
1084 | * Reserve log space if we might write beyond the on-disk | ||
1085 | * inode size. | ||
1086 | */ | ||
1087 | if (ioend->io_type != XFS_IO_UNWRITTEN && | ||
1088 | xfs_ioend_is_append(ioend)) { | ||
1089 | err = xfs_setfilesize_trans_alloc(ioend); | ||
1090 | if (err) | ||
1091 | goto error; | ||
1092 | } | ||
1093 | 1111 | ||
1094 | xfs_submit_ioend(wbc, iohead); | 1112 | /* |
1095 | } | 1113 | * Reserve log space if we might write beyond the on-disk inode size. |
1114 | */ | ||
1115 | err = 0; | ||
1116 | if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend)) | ||
1117 | err = xfs_setfilesize_trans_alloc(ioend); | ||
1118 | |||
1119 | xfs_submit_ioend(wbc, iohead, err); | ||
1096 | 1120 | ||
1097 | return 0; | 1121 | return 0; |
1098 | 1122 | ||
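The two xfs_aops.c hunks work as a pair: once the page has been marked for writeback and unlocked, xfs_vm_writepage() may no longer bail out, so a failure from xfs_setfilesize_trans_alloc() is handed to xfs_submit_ioend() as @fail, and the ioend chain is completed with that error instead of being submitted. Condensed:

    /* xfs_vm_writepage(): errors from here on go through IO completion */
    err = 0;
    if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
            err = xfs_setfilesize_trans_alloc(ioend);
    xfs_submit_ioend(wbc, iohead, err);

    /* xfs_submit_ioend(): a non-zero @fail marks each ioend and finishes it */
    if (fail) {
            ioend->io_error = -fail;
            xfs_finish_ioend(ioend);   /* only one reference left, completion runs now */
            continue;
    }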
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c index d330111ca738..70eec1829776 100644 --- a/fs/xfs/xfs_attr_leaf.c +++ b/fs/xfs/xfs_attr_leaf.c | |||
@@ -1291,6 +1291,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, | |||
1291 | leaf2 = blk2->bp->b_addr; | 1291 | leaf2 = blk2->bp->b_addr; |
1292 | ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); | 1292 | ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); |
1293 | ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); | 1293 | ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); |
1294 | ASSERT(leaf2->hdr.count == 0); | ||
1294 | args = state->args; | 1295 | args = state->args; |
1295 | 1296 | ||
1296 | trace_xfs_attr_leaf_rebalance(args); | 1297 | trace_xfs_attr_leaf_rebalance(args); |
@@ -1361,6 +1362,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, | |||
1361 | * I assert that since all callers pass in an empty | 1362 | * I assert that since all callers pass in an empty |
1362 | * second buffer, this code should never execute. | 1363 | * second buffer, this code should never execute. |
1363 | */ | 1364 | */ |
1365 | ASSERT(0); | ||
1364 | 1366 | ||
1365 | /* | 1367 | /* |
1366 | * Figure the total bytes to be added to the destination leaf. | 1368 | * Figure the total bytes to be added to the destination leaf. |
@@ -1422,10 +1424,24 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, | |||
1422 | args->index2 = 0; | 1424 | args->index2 = 0; |
1423 | args->blkno2 = blk2->blkno; | 1425 | args->blkno2 = blk2->blkno; |
1424 | } else { | 1426 | } else { |
1427 | /* | ||
1428 | * On a double leaf split, the original attr location | ||
1429 | * is already stored in blkno2/index2, so don't | ||
1430 | * overwrite it, otherwise we corrupt the tree. | ||
1431 | */ | ||
1425 | blk2->index = blk1->index | 1432 | blk2->index = blk1->index |
1426 | - be16_to_cpu(leaf1->hdr.count); | 1433 | - be16_to_cpu(leaf1->hdr.count); |
1427 | args->index = args->index2 = blk2->index; | 1434 | args->index = blk2->index; |
1428 | args->blkno = args->blkno2 = blk2->blkno; | 1435 | args->blkno = blk2->blkno; |
1436 | if (!state->extravalid) { | ||
1437 | /* | ||
1438 | * set the new attr location to match the old | ||
1439 | * one and let the higher level split code | ||
1440 | * decide where in the leaf to place it. | ||
1441 | */ | ||
1442 | args->index2 = blk2->index; | ||
1443 | args->blkno2 = blk2->blkno; | ||
1444 | } | ||
1429 | } | 1445 | } |
1430 | } else { | 1446 | } else { |
1431 | ASSERT(state->inleaf == 1); | 1447 | ASSERT(state->inleaf == 1); |
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 933b7930b863..4b0b8dd1b7b0 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -1197,9 +1197,14 @@ xfs_buf_bio_end_io( | |||
1197 | { | 1197 | { |
1198 | xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; | 1198 | xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; |
1199 | 1199 | ||
1200 | xfs_buf_ioerror(bp, -error); | 1200 | /* |
1201 | * don't overwrite existing errors - otherwise we can lose errors on | ||
1202 | * buffers that require multiple bios to complete. | ||
1203 | */ | ||
1204 | if (!bp->b_error) | ||
1205 | xfs_buf_ioerror(bp, -error); | ||
1201 | 1206 | ||
1202 | if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) | 1207 | if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) |
1203 | invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); | 1208 | invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); |
1204 | 1209 | ||
1205 | _xfs_buf_ioend(bp, 1); | 1210 | _xfs_buf_ioend(bp, 1); |
@@ -1279,6 +1284,11 @@ next_chunk: | |||
1279 | if (size) | 1284 | if (size) |
1280 | goto next_chunk; | 1285 | goto next_chunk; |
1281 | } else { | 1286 | } else { |
1287 | /* | ||
1288 | * This is guaranteed not to be the last io reference count | ||
1289 | * because the caller (xfs_buf_iorequest) holds a count itself. | ||
1290 | */ | ||
1291 | atomic_dec(&bp->b_io_remaining); | ||
1282 | xfs_buf_ioerror(bp, EIO); | 1292 | xfs_buf_ioerror(bp, EIO); |
1283 | bio_put(bio); | 1293 | bio_put(bio); |
1284 | } | 1294 | } |
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h index 03f14856bd09..0943457e0fa5 100644 --- a/include/acpi/acconfig.h +++ b/include/acpi/acconfig.h | |||
@@ -241,6 +241,7 @@ | |||
241 | *****************************************************************************/ | 241 | *****************************************************************************/ |
242 | 242 | ||
243 | #define ACPI_DEBUGGER_MAX_ARGS 8 /* Must be max method args + 1 */ | 243 | #define ACPI_DEBUGGER_MAX_ARGS 8 /* Must be max method args + 1 */ |
244 | #define ACPI_DB_LINE_BUFFER_SIZE 512 | ||
244 | 245 | ||
245 | #define ACPI_DEBUGGER_COMMAND_PROMPT '-' | 246 | #define ACPI_DEBUGGER_COMMAND_PROMPT '-' |
246 | #define ACPI_DEBUGGER_EXECUTE_PROMPT '%' | 247 | #define ACPI_DEBUGGER_EXECUTE_PROMPT '%' |
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h index 19503449814f..6c3890e02140 100644 --- a/include/acpi/acexcep.h +++ b/include/acpi/acexcep.h | |||
@@ -122,7 +122,7 @@ | |||
122 | #define AE_CODE_TBL_MAX 0x0005 | 122 | #define AE_CODE_TBL_MAX 0x0005 |
123 | 123 | ||
124 | /* | 124 | /* |
125 | * AML exceptions. These are caused by problems with | 125 | * AML exceptions. These are caused by problems with |
126 | * the actual AML byte stream | 126 | * the actual AML byte stream |
127 | */ | 127 | */ |
128 | #define AE_AML_BAD_OPCODE (acpi_status) (0x0001 | AE_CODE_AML) | 128 | #define AE_AML_BAD_OPCODE (acpi_status) (0x0001 | AE_CODE_AML) |
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h index 745dd24e3cb5..7665df663284 100644 --- a/include/acpi/acnames.h +++ b/include/acpi/acnames.h | |||
@@ -50,6 +50,7 @@ | |||
50 | #define METHOD_NAME__HID "_HID" | 50 | #define METHOD_NAME__HID "_HID" |
51 | #define METHOD_NAME__CID "_CID" | 51 | #define METHOD_NAME__CID "_CID" |
52 | #define METHOD_NAME__UID "_UID" | 52 | #define METHOD_NAME__UID "_UID" |
53 | #define METHOD_NAME__SUB "_SUB" | ||
53 | #define METHOD_NAME__ADR "_ADR" | 54 | #define METHOD_NAME__ADR "_ADR" |
54 | #define METHOD_NAME__INI "_INI" | 55 | #define METHOD_NAME__INI "_INI" |
55 | #define METHOD_NAME__STA "_STA" | 56 | #define METHOD_NAME__STA "_STA" |
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 6f385e5909db..7ced5dc20dd3 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h | |||
@@ -201,6 +201,7 @@ struct acpi_device_power_flags { | |||
201 | struct acpi_device_power_state { | 201 | struct acpi_device_power_state { |
202 | struct { | 202 | struct { |
203 | u8 valid:1; | 203 | u8 valid:1; |
204 | u8 os_accessible:1; | ||
204 | u8 explicit_set:1; /* _PSx present? */ | 205 | u8 explicit_set:1; /* _PSx present? */ |
205 | u8 reserved:6; | 206 | u8 reserved:6; |
206 | } flags; | 207 | } flags; |
@@ -339,6 +340,7 @@ acpi_status acpi_bus_get_status_handle(acpi_handle handle, | |||
339 | unsigned long long *sta); | 340 | unsigned long long *sta); |
340 | int acpi_bus_get_status(struct acpi_device *device); | 341 | int acpi_bus_get_status(struct acpi_device *device); |
341 | int acpi_bus_set_power(acpi_handle handle, int state); | 342 | int acpi_bus_set_power(acpi_handle handle, int state); |
343 | int acpi_device_set_power(struct acpi_device *device, int state); | ||
342 | int acpi_bus_update_power(acpi_handle handle, int *state_p); | 344 | int acpi_bus_update_power(acpi_handle handle, int *state_p); |
343 | bool acpi_bus_power_manageable(acpi_handle handle); | 345 | bool acpi_bus_power_manageable(acpi_handle handle); |
344 | bool acpi_bus_can_wakeup(acpi_handle handle); | 346 | bool acpi_bus_can_wakeup(acpi_handle handle); |
@@ -410,36 +412,100 @@ acpi_handle acpi_get_child(acpi_handle, u64); | |||
410 | int acpi_is_root_bridge(acpi_handle); | 412 | int acpi_is_root_bridge(acpi_handle); |
411 | acpi_handle acpi_get_pci_rootbridge_handle(unsigned int, unsigned int); | 413 | acpi_handle acpi_get_pci_rootbridge_handle(unsigned int, unsigned int); |
412 | struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); | 414 | struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); |
413 | #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)((dev)->archdata.acpi_handle)) | 415 | #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev)) |
414 | 416 | ||
415 | int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state); | 417 | int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state); |
416 | int acpi_disable_wakeup_device_power(struct acpi_device *dev); | 418 | int acpi_disable_wakeup_device_power(struct acpi_device *dev); |
417 | 419 | ||
418 | #ifdef CONFIG_PM | 420 | #ifdef CONFIG_PM |
421 | acpi_status acpi_add_pm_notifier(struct acpi_device *adev, | ||
422 | acpi_notify_handler handler, void *context); | ||
423 | acpi_status acpi_remove_pm_notifier(struct acpi_device *adev, | ||
424 | acpi_notify_handler handler); | ||
425 | int acpi_device_power_state(struct device *dev, struct acpi_device *adev, | ||
426 | u32 target_state, int d_max_in, int *d_min_p); | ||
419 | int acpi_pm_device_sleep_state(struct device *, int *, int); | 427 | int acpi_pm_device_sleep_state(struct device *, int *, int); |
420 | #else | 428 | #else |
421 | static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m) | 429 | static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev, |
430 | acpi_notify_handler handler, | ||
431 | void *context) | ||
432 | { | ||
433 | return AE_SUPPORT; | ||
434 | } | ||
435 | static inline acpi_status acpi_remove_pm_notifier(struct acpi_device *adev, | ||
436 | acpi_notify_handler handler) | ||
437 | { | ||
438 | return AE_SUPPORT; | ||
439 | } | ||
440 | static inline int __acpi_device_power_state(int m, int *p) | ||
422 | { | 441 | { |
423 | if (p) | 442 | if (p) |
424 | *p = ACPI_STATE_D0; | 443 | *p = ACPI_STATE_D0; |
425 | return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3) ? m : ACPI_STATE_D0; | 444 | return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3) ? m : ACPI_STATE_D0; |
426 | } | 445 | } |
446 | static inline int acpi_device_power_state(struct device *dev, | ||
447 | struct acpi_device *adev, | ||
448 | u32 target_state, int d_max_in, | ||
449 | int *d_min_p) | ||
450 | { | ||
451 | return __acpi_device_power_state(d_max_in, d_min_p); | ||
452 | } | ||
453 | static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m) | ||
454 | { | ||
455 | return __acpi_device_power_state(m, p); | ||
456 | } | ||
427 | #endif | 457 | #endif |
428 | 458 | ||
429 | #ifdef CONFIG_PM_SLEEP | 459 | #ifdef CONFIG_PM_RUNTIME |
460 | int __acpi_device_run_wake(struct acpi_device *, bool); | ||
430 | int acpi_pm_device_run_wake(struct device *, bool); | 461 | int acpi_pm_device_run_wake(struct device *, bool); |
431 | int acpi_pm_device_sleep_wake(struct device *, bool); | ||
432 | #else | 462 | #else |
463 | static inline int __acpi_device_run_wake(struct acpi_device *adev, bool en) | ||
464 | { | ||
465 | return -ENODEV; | ||
466 | } | ||
433 | static inline int acpi_pm_device_run_wake(struct device *dev, bool enable) | 467 | static inline int acpi_pm_device_run_wake(struct device *dev, bool enable) |
434 | { | 468 | { |
435 | return -ENODEV; | 469 | return -ENODEV; |
436 | } | 470 | } |
471 | #endif | ||
472 | |||
473 | #ifdef CONFIG_PM_SLEEP | ||
474 | int __acpi_device_sleep_wake(struct acpi_device *, u32, bool); | ||
475 | int acpi_pm_device_sleep_wake(struct device *, bool); | ||
476 | #else | ||
477 | static inline int __acpi_device_sleep_wake(struct acpi_device *adev, | ||
478 | u32 target_state, bool enable) | ||
479 | { | ||
480 | return -ENODEV; | ||
481 | } | ||
437 | static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable) | 482 | static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable) |
438 | { | 483 | { |
439 | return -ENODEV; | 484 | return -ENODEV; |
440 | } | 485 | } |
441 | #endif | 486 | #endif |
442 | 487 | ||
488 | #ifdef CONFIG_ACPI_SLEEP | ||
489 | u32 acpi_target_system_state(void); | ||
490 | #else | ||
491 | static inline u32 acpi_target_system_state(void) { return ACPI_STATE_S0; } | ||
492 | #endif | ||
493 | |||
494 | static inline bool acpi_device_power_manageable(struct acpi_device *adev) | ||
495 | { | ||
496 | return adev->flags.power_manageable; | ||
497 | } | ||
498 | |||
499 | static inline bool acpi_device_can_wakeup(struct acpi_device *adev) | ||
500 | { | ||
501 | return adev->wakeup.flags.valid; | ||
502 | } | ||
503 | |||
504 | static inline bool acpi_device_can_poweroff(struct acpi_device *adev) | ||
505 | { | ||
506 | return adev->power.states[ACPI_STATE_D3_COLD].flags.os_accessible; | ||
507 | } | ||
508 | |||
443 | #else /* CONFIG_ACPI */ | 509 | #else /* CONFIG_ACPI */ |
444 | 510 | ||
445 | static inline int register_acpi_bus_type(void *bus) { return 0; } | 511 | static inline int register_acpi_bus_type(void *bus) { return 0; } |
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h index 1222ba93d80a..43152742b46f 100644 --- a/include/acpi/acpiosxf.h +++ b/include/acpi/acpiosxf.h | |||
@@ -1,7 +1,6 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Name: acpiosxf.h - All interfaces to the OS Services Layer (OSL). These | 3 | * Name: acpiosxf.h - All interfaces to the OS Services Layer (OSL). These |
5 | * interfaces must be implemented by OSL to interface the | 4 | * interfaces must be implemented by OSL to interface the |
6 | * ACPI components to the host operating system. | 5 | * ACPI components to the host operating system. |
7 | * | 6 | * |
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 8b891dbead66..3d88395d4d6f 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /****************************************************************************** | 1 | /****************************************************************************** |
3 | * | 2 | * |
4 | * Name: acpixf.h - External interfaces to the ACPI subsystem | 3 | * Name: acpixf.h - External interfaces to the ACPI subsystem |
@@ -47,7 +46,7 @@ | |||
47 | 46 | ||
48 | /* Current ACPICA subsystem version in YYYYMMDD format */ | 47 | /* Current ACPICA subsystem version in YYYYMMDD format */ |
49 | 48 | ||
50 | #define ACPI_CA_VERSION 0x20120913 | 49 | #define ACPI_CA_VERSION 0x20121018 |
51 | 50 | ||
52 | #include <acpi/acconfig.h> | 51 | #include <acpi/acconfig.h> |
53 | #include <acpi/actypes.h> | 52 | #include <acpi/actypes.h> |
@@ -178,8 +177,7 @@ acpi_status acpi_unload_table_id(acpi_owner_id id); | |||
178 | 177 | ||
179 | acpi_status | 178 | acpi_status |
180 | acpi_get_table_header(acpi_string signature, | 179 | acpi_get_table_header(acpi_string signature, |
181 | u32 instance, | 180 | u32 instance, struct acpi_table_header *out_table_header); |
182 | struct acpi_table_header *out_table_header); | ||
183 | 181 | ||
184 | acpi_status | 182 | acpi_status |
185 | acpi_get_table_with_size(acpi_string signature, | 183 | acpi_get_table_with_size(acpi_string signature, |
@@ -190,8 +188,7 @@ acpi_get_table(acpi_string signature, | |||
190 | u32 instance, struct acpi_table_header **out_table); | 188 | u32 instance, struct acpi_table_header **out_table); |
191 | 189 | ||
192 | acpi_status | 190 | acpi_status |
193 | acpi_get_table_by_index(u32 table_index, | 191 | acpi_get_table_by_index(u32 table_index, struct acpi_table_header **out_table); |
194 | struct acpi_table_header **out_table); | ||
195 | 192 | ||
196 | acpi_status | 193 | acpi_status |
197 | acpi_install_table_handler(acpi_tbl_handler handler, void *context); | 194 | acpi_install_table_handler(acpi_tbl_handler handler, void *context); |
@@ -274,7 +271,7 @@ acpi_install_initialization_handler(acpi_init_handler handler, u32 function); | |||
274 | 271 | ||
275 | ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status | 272 | ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status |
276 | acpi_install_global_event_handler | 273 | acpi_install_global_event_handler |
277 | (ACPI_GBL_EVENT_HANDLER handler, void *context)) | 274 | (acpi_gbl_event_handler handler, void *context)) |
278 | 275 | ||
279 | ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status | 276 | ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status |
280 | acpi_install_fixed_event_handler(u32 | 277 | acpi_install_fixed_event_handler(u32 |
@@ -300,10 +297,9 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status | |||
300 | u32 gpe_number, | 297 | u32 gpe_number, |
301 | acpi_gpe_handler | 298 | acpi_gpe_handler |
302 | address)) | 299 | address)) |
303 | acpi_status | 300 | acpi_status acpi_install_notify_handler(acpi_handle device, u32 handler_type, |
304 | acpi_install_notify_handler(acpi_handle device, | 301 | acpi_notify_handler handler, |
305 | u32 handler_type, | 302 | void *context); |
306 | acpi_notify_handler handler, void *context); | ||
307 | 303 | ||
308 | acpi_status | 304 | acpi_status |
309 | acpi_remove_notify_handler(acpi_handle device, | 305 | acpi_remove_notify_handler(acpi_handle device, |
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h index 8c61b5fe42a4..6585141e4b97 100644 --- a/include/acpi/actbl3.h +++ b/include/acpi/actbl3.h | |||
@@ -277,10 +277,10 @@ struct acpi_table_gtdt { | |||
277 | ******************************************************************************/ | 277 | ******************************************************************************/ |
278 | 278 | ||
279 | #define ACPI_MPST_CHANNEL_INFO \ | 279 | #define ACPI_MPST_CHANNEL_INFO \ |
280 | u16 reserved1; \ | ||
281 | u8 channel_id; \ | 280 | u8 channel_id; \ |
282 | u8 reserved2; \ | 281 | u8 reserved1[3]; \ |
283 | u16 power_node_count; | 282 | u16 power_node_count; \ |
283 | u16 reserved2; | ||
284 | 284 | ||
285 | /* Main table */ | 285 | /* Main table */ |
286 | 286 | ||
@@ -304,9 +304,8 @@ struct acpi_mpst_power_node { | |||
304 | u32 length; | 304 | u32 length; |
305 | u64 range_address; | 305 | u64 range_address; |
306 | u64 range_length; | 306 | u64 range_length; |
307 | u8 num_power_states; | 307 | u32 num_power_states; |
308 | u8 num_physical_components; | 308 | u32 num_physical_components; |
309 | u16 reserved2; | ||
310 | }; | 309 | }; |
311 | 310 | ||
312 | /* Values for Flags field above */ | 311 | /* Values for Flags field above */ |
@@ -332,10 +331,11 @@ struct acpi_mpst_component { | |||
332 | 331 | ||
333 | struct acpi_mpst_data_hdr { | 332 | struct acpi_mpst_data_hdr { |
334 | u16 characteristics_count; | 333 | u16 characteristics_count; |
334 | u16 reserved; | ||
335 | }; | 335 | }; |
336 | 336 | ||
337 | struct acpi_mpst_power_data { | 337 | struct acpi_mpst_power_data { |
338 | u8 revision; | 338 | u8 structure_id; |
339 | u8 flags; | 339 | u8 flags; |
340 | u16 reserved1; | 340 | u16 reserved1; |
341 | u32 average_power; | 341 | u32 average_power; |
@@ -356,10 +356,10 @@ struct acpi_mpst_shared { | |||
356 | u32 signature; | 356 | u32 signature; |
357 | u16 pcc_command; | 357 | u16 pcc_command; |
358 | u16 pcc_status; | 358 | u16 pcc_status; |
359 | u16 command_register; | 359 | u32 command_register; |
360 | u16 status_register; | 360 | u32 status_register; |
361 | u16 power_state_id; | 361 | u32 power_state_id; |
362 | u16 power_node_id; | 362 | u32 power_node_id; |
363 | u64 energy_consumed; | 363 | u64 energy_consumed; |
364 | u64 average_power; | 364 | u64 average_power; |
365 | }; | 365 | }; |
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index a85bae968262..4f43f1fba132 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h | |||
@@ -453,10 +453,14 @@ typedef u64 acpi_integer; | |||
453 | #define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i) | 453 | #define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i) |
454 | #define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i) | 454 | #define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i) |
455 | 455 | ||
456 | /* Optimizations for 4-character (32-bit) acpi_name manipulation */ | ||
457 | |||
456 | #ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED | 458 | #ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED |
457 | #define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b))) | 459 | #define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b))) |
460 | #define ACPI_MOVE_NAME(dest,src) (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src))) | ||
458 | #else | 461 | #else |
459 | #define ACPI_COMPARE_NAME(a,b) (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE)) | 462 | #define ACPI_COMPARE_NAME(a,b) (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE)) |
463 | #define ACPI_MOVE_NAME(dest,src) (ACPI_STRNCPY (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAME_SIZE)) | ||
460 | #endif | 464 | #endif |
461 | 465 | ||
462 | /******************************************************************************* | 466 | /******************************************************************************* |
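ACPI_MOVE_NAME pairs with the existing ACPI_COMPARE_NAME: both treat a 4-character ACPI name as one 32-bit word where misaligned accesses are allowed, and fall back to ACPI_STRNCMP/ACPI_STRNCPY otherwise. A hedged usage sketch (the local buffer is illustrative; ACPI_NAME_SIZE is 4 and neither macro appends a NUL):

    char name[ACPI_NAME_SIZE + 1];

    ACPI_MOVE_NAME(name, table_header->signature);    /* 4-byte copy */
    name[ACPI_NAME_SIZE] = '\0';

    if (ACPI_COMPARE_NAME(table_header->signature, "DSDT")) {
            /* single 32-bit compare on capable platforms */
    }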
@@ -796,11 +800,11 @@ typedef u8 acpi_adr_space_type; | |||
796 | 800 | ||
797 | /* Sleep function dispatch */ | 801 | /* Sleep function dispatch */ |
798 | 802 | ||
799 | typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state); | 803 | typedef acpi_status(*acpi_sleep_function) (u8 sleep_state); |
800 | 804 | ||
801 | struct acpi_sleep_functions { | 805 | struct acpi_sleep_functions { |
802 | ACPI_SLEEP_FUNCTION legacy_function; | 806 | acpi_sleep_function legacy_function; |
803 | ACPI_SLEEP_FUNCTION extended_function; | 807 | acpi_sleep_function extended_function; |
804 | }; | 808 | }; |
805 | 809 | ||
806 | /* | 810 | /* |
@@ -922,7 +926,8 @@ struct acpi_system_info { | |||
922 | /* | 926 | /* |
923 | * Types specific to the OS service interfaces | 927 | * Types specific to the OS service interfaces |
924 | */ | 928 | */ |
925 | typedef u32(ACPI_SYSTEM_XFACE * acpi_osd_handler) (void *context); | 929 | typedef u32 |
930 | (ACPI_SYSTEM_XFACE * acpi_osd_handler) (void *context); | ||
926 | 931 | ||
927 | typedef void | 932 | typedef void |
928 | (ACPI_SYSTEM_XFACE * acpi_osd_exec_callback) (void *context); | 933 | (ACPI_SYSTEM_XFACE * acpi_osd_exec_callback) (void *context); |
@@ -931,14 +936,15 @@ typedef void | |||
931 | * Various handlers and callback procedures | 936 | * Various handlers and callback procedures |
932 | */ | 937 | */ |
933 | typedef | 938 | typedef |
934 | void (*ACPI_GBL_EVENT_HANDLER) (u32 event_type, | 939 | void (*acpi_gbl_event_handler) (u32 event_type, |
935 | acpi_handle device, | 940 | acpi_handle device, |
936 | u32 event_number, void *context); | 941 | u32 event_number, void *context); |
937 | 942 | ||
938 | #define ACPI_EVENT_TYPE_GPE 0 | 943 | #define ACPI_EVENT_TYPE_GPE 0 |
939 | #define ACPI_EVENT_TYPE_FIXED 1 | 944 | #define ACPI_EVENT_TYPE_FIXED 1 |
940 | 945 | ||
941 | typedef u32(*acpi_event_handler) (void *context); | 946 | typedef |
947 | u32(*acpi_event_handler) (void *context); | ||
942 | 948 | ||
943 | typedef | 949 | typedef |
944 | u32 (*acpi_gpe_handler) (acpi_handle gpe_device, u32 gpe_number, void *context); | 950 | u32 (*acpi_gpe_handler) (acpi_handle gpe_device, u32 gpe_number, void *context); |
@@ -1018,17 +1024,17 @@ u32 (*acpi_interface_handler) (acpi_string interface_name, u32 supported); | |||
1018 | 1024 | ||
1019 | #define ACPI_UUID_LENGTH 16 | 1025 | #define ACPI_UUID_LENGTH 16 |
1020 | 1026 | ||
1021 | /* Structures used for device/processor HID, UID, CID */ | 1027 | /* Structures used for device/processor HID, UID, CID, and SUB */ |
1022 | 1028 | ||
1023 | struct acpica_device_id { | 1029 | struct acpi_pnp_device_id { |
1024 | u32 length; /* Length of string + null */ | 1030 | u32 length; /* Length of string + null */ |
1025 | char *string; | 1031 | char *string; |
1026 | }; | 1032 | }; |
1027 | 1033 | ||
1028 | struct acpica_device_id_list { | 1034 | struct acpi_pnp_device_id_list { |
1029 | u32 count; /* Number of IDs in Ids array */ | 1035 | u32 count; /* Number of IDs in Ids array */ |
1030 | u32 list_size; /* Size of list, including ID strings */ | 1036 | u32 list_size; /* Size of list, including ID strings */ |
1031 | struct acpica_device_id ids[1]; /* ID array */ | 1037 | struct acpi_pnp_device_id ids[1]; /* ID array */ |
1032 | }; | 1038 | }; |
1033 | 1039 | ||
1034 | /* | 1040 | /* |
@@ -1046,9 +1052,10 @@ struct acpi_device_info { | |||
1046 | u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */ | 1052 | u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */ |
1047 | u32 current_status; /* _STA value */ | 1053 | u32 current_status; /* _STA value */ |
1048 | u64 address; /* _ADR value */ | 1054 | u64 address; /* _ADR value */ |
1049 | struct acpica_device_id hardware_id; /* _HID value */ | 1055 | struct acpi_pnp_device_id hardware_id; /* _HID value */ |
1050 | struct acpica_device_id unique_id; /* _UID value */ | 1056 | struct acpi_pnp_device_id unique_id; /* _UID value */ |
1051 | struct acpica_device_id_list compatible_id_list; /* _CID list <must be last> */ | 1057 | struct acpi_pnp_device_id subsystem_id; /* _SUB value */ |
1058 | struct acpi_pnp_device_id_list compatible_id_list; /* _CID list <must be last> */ | ||
1052 | }; | 1059 | }; |
1053 | 1060 | ||
1054 | /* Values for Flags field above (acpi_get_object_info) */ | 1061 | /* Values for Flags field above (acpi_get_object_info) */ |
@@ -1061,11 +1068,12 @@ struct acpi_device_info { | |||
1061 | #define ACPI_VALID_ADR 0x02 | 1068 | #define ACPI_VALID_ADR 0x02 |
1062 | #define ACPI_VALID_HID 0x04 | 1069 | #define ACPI_VALID_HID 0x04 |
1063 | #define ACPI_VALID_UID 0x08 | 1070 | #define ACPI_VALID_UID 0x08 |
1064 | #define ACPI_VALID_CID 0x10 | 1071 | #define ACPI_VALID_SUB 0x10 |
1065 | #define ACPI_VALID_SXDS 0x20 | 1072 | #define ACPI_VALID_CID 0x20 |
1066 | #define ACPI_VALID_SXWS 0x40 | 1073 | #define ACPI_VALID_SXDS 0x40 |
1074 | #define ACPI_VALID_SXWS 0x80 | ||
1067 | 1075 | ||
1068 | /* Flags for _STA method */ | 1076 | /* Flags for _STA return value (current_status above) */ |
1069 | 1077 | ||
1070 | #define ACPI_STA_DEVICE_PRESENT 0x01 | 1078 | #define ACPI_STA_DEVICE_PRESENT 0x01 |
1071 | #define ACPI_STA_DEVICE_ENABLED 0x02 | 1079 | #define ACPI_STA_DEVICE_ENABLED 0x02 |
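With _SUB support added, struct acpi_device_info gains a subsystem_id entry and the ACPI_VALID_* bits are renumbered to make room for ACPI_VALID_SUB. A sketch of how a caller might test the new bit, assuming the usual acpi_get_object_info() pattern; the function name here is made up.

        static void example_show_sub(acpi_handle handle)
        {
                struct acpi_device_info *info;

                if (ACPI_FAILURE(acpi_get_object_info(handle, &info)))
                        return;

                if (info->valid & ACPI_VALID_SUB)       /* _SUB string is present */
                        pr_info("_SUB: %s\n", info->subsystem_id.string);

                kfree(info);
        }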
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index af1cbaf535ed..c5c35e629426 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
@@ -210,6 +210,7 @@ | |||
210 | {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ | 210 | {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ |
211 | {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ | 211 | {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ |
212 | {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ | 212 | {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ |
213 | {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ | ||
213 | {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ | 214 | {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ |
214 | {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ | 215 | {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ |
215 | {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 216 | {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 9201ac1f0511..c33fa3ce9b7c 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -25,7 +25,9 @@ | |||
25 | #ifndef _LINUX_ACPI_H | 25 | #ifndef _LINUX_ACPI_H |
26 | #define _LINUX_ACPI_H | 26 | #define _LINUX_ACPI_H |
27 | 27 | ||
28 | #include <linux/errno.h> | ||
28 | #include <linux/ioport.h> /* for struct resource */ | 29 | #include <linux/ioport.h> /* for struct resource */ |
30 | #include <linux/device.h> | ||
29 | 31 | ||
30 | #ifdef CONFIG_ACPI | 32 | #ifdef CONFIG_ACPI |
31 | 33 | ||
@@ -250,6 +252,26 @@ extern int pnpacpi_disabled; | |||
250 | 252 | ||
251 | #define PXM_INVAL (-1) | 253 | #define PXM_INVAL (-1) |
252 | 254 | ||
255 | bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res); | ||
256 | bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res); | ||
257 | bool acpi_dev_resource_address_space(struct acpi_resource *ares, | ||
258 | struct resource *res); | ||
259 | bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, | ||
260 | struct resource *res); | ||
261 | unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable); | ||
262 | bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, | ||
263 | struct resource *res); | ||
264 | |||
265 | struct resource_list_entry { | ||
266 | struct list_head node; | ||
267 | struct resource res; | ||
268 | }; | ||
269 | |||
270 | void acpi_dev_free_resource_list(struct list_head *list); | ||
271 | int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, | ||
272 | int (*preproc)(struct acpi_resource *, void *), | ||
273 | void *preproc_data); | ||
274 | |||
253 | int acpi_check_resource_conflict(const struct resource *res); | 275 | int acpi_check_resource_conflict(const struct resource *res); |
254 | 276 | ||
255 | int acpi_check_region(resource_size_t start, resource_size_t n, | 277 | int acpi_check_region(resource_size_t start, resource_size_t n, |
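The new resource helpers convert ACPI resource objects into generic struct resource entries collected on a list of struct resource_list_entry. A minimal sketch of a consumer, assuming the NULL-preproc form that converts every resource the helpers understand; the function name is hypothetical.

        static void example_list_resources(struct acpi_device *adev)
        {
                struct resource_list_entry *rentry;
                LIST_HEAD(res_list);
                int count;

                count = acpi_dev_get_resources(adev, &res_list, NULL, NULL);
                if (count < 0)
                        return;         /* a negative value means _CRS evaluation failed */

                list_for_each_entry(rentry, &res_list, node)
                        dev_info(&adev->dev, "resource: %pR\n", &rentry->res);

                acpi_dev_free_resource_list(&res_list);
        }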
@@ -368,6 +390,17 @@ extern int acpi_nvs_register(__u64 start, __u64 size); | |||
368 | extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), | 390 | extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), |
369 | void *data); | 391 | void *data); |
370 | 392 | ||
393 | const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, | ||
394 | const struct device *dev); | ||
395 | |||
396 | static inline bool acpi_driver_match_device(struct device *dev, | ||
397 | const struct device_driver *drv) | ||
398 | { | ||
399 | return !!acpi_match_device(drv->acpi_match_table, dev); | ||
400 | } | ||
401 | |||
402 | #define ACPI_PTR(_ptr) (_ptr) | ||
403 | |||
371 | #else /* !CONFIG_ACPI */ | 404 | #else /* !CONFIG_ACPI */ |
372 | 405 | ||
373 | #define acpi_disabled 1 | 406 | #define acpi_disabled 1 |
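acpi_driver_match_device() keys off the driver's acpi_match_table (added to struct device_driver further down in this series), and ACPI_PTR() compiles the table reference away when CONFIG_ACPI is off. A sketch of a platform driver wired up this way; the "ABCD0001" _HID and all example_* names are invented for illustration.

        static const struct acpi_device_id example_acpi_ids[] = {
                { "ABCD0001", 0 },      /* hypothetical _HID */
                { }
        };

        static int example_probe(struct platform_device *pdev)
        {
                const struct acpi_device_id *id;

                id = acpi_match_device(example_acpi_ids, &pdev->dev);
                if (!id)
                        return -ENODEV;

                /* id->driver_data can select per-device behaviour */
                return 0;
        }

        static struct platform_driver example_driver = {
                .probe  = example_probe,
                .driver = {
                        .name                   = "example",
                        .acpi_match_table       = ACPI_PTR(example_acpi_ids),
                },
        };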
@@ -422,6 +455,22 @@ static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), | |||
422 | return 0; | 455 | return 0; |
423 | } | 456 | } |
424 | 457 | ||
458 | struct acpi_device_id; | ||
459 | |||
460 | static inline const struct acpi_device_id *acpi_match_device( | ||
461 | const struct acpi_device_id *ids, const struct device *dev) | ||
462 | { | ||
463 | return NULL; | ||
464 | } | ||
465 | |||
466 | static inline bool acpi_driver_match_device(struct device *dev, | ||
467 | const struct device_driver *drv) | ||
468 | { | ||
469 | return false; | ||
470 | } | ||
471 | |||
472 | #define ACPI_PTR(_ptr) (NULL) | ||
473 | |||
425 | #endif /* !CONFIG_ACPI */ | 474 | #endif /* !CONFIG_ACPI */ |
426 | 475 | ||
427 | #ifdef CONFIG_ACPI | 476 | #ifdef CONFIG_ACPI |
@@ -434,6 +483,43 @@ acpi_status acpi_os_prepare_sleep(u8 sleep_state, | |||
434 | #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) | 483 | #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) |
435 | #endif | 484 | #endif |
436 | 485 | ||
486 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM_RUNTIME) | ||
487 | int acpi_dev_runtime_suspend(struct device *dev); | ||
488 | int acpi_dev_runtime_resume(struct device *dev); | ||
489 | int acpi_subsys_runtime_suspend(struct device *dev); | ||
490 | int acpi_subsys_runtime_resume(struct device *dev); | ||
491 | #else | ||
492 | static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; } | ||
493 | static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; } | ||
494 | static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } | ||
495 | static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } | ||
496 | #endif | ||
497 | |||
498 | #ifdef CONFIG_ACPI_SLEEP | ||
499 | int acpi_dev_suspend_late(struct device *dev); | ||
500 | int acpi_dev_resume_early(struct device *dev); | ||
501 | int acpi_subsys_prepare(struct device *dev); | ||
502 | int acpi_subsys_suspend_late(struct device *dev); | ||
503 | int acpi_subsys_resume_early(struct device *dev); | ||
504 | #else | ||
505 | static inline int acpi_dev_suspend_late(struct device *dev) { return 0; } | ||
506 | static inline int acpi_dev_resume_early(struct device *dev) { return 0; } | ||
507 | static inline int acpi_subsys_prepare(struct device *dev) { return 0; } | ||
508 | static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; } | ||
509 | static inline int acpi_subsys_resume_early(struct device *dev) { return 0; } | ||
510 | #endif | ||
511 | |||
512 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM) | ||
513 | int acpi_dev_pm_attach(struct device *dev, bool power_on); | ||
514 | void acpi_dev_pm_detach(struct device *dev, bool power_off); | ||
515 | #else | ||
516 | static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) | ||
517 | { | ||
518 | return -ENODEV; | ||
519 | } | ||
520 | static inline void acpi_dev_pm_detach(struct device *dev, bool power_off) {} | ||
521 | #endif | ||
522 | |||
437 | #ifdef CONFIG_ACPI | 523 | #ifdef CONFIG_ACPI |
438 | __printf(3, 4) | 524 | __printf(3, 4) |
439 | void acpi_handle_printk(const char *level, acpi_handle handle, | 525 | void acpi_handle_printk(const char *level, acpi_handle handle, |
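acpi_dev_pm_attach()/acpi_dev_pm_detach() let a driver hand power-state handling of its device over to ACPI when a companion ACPI object exists. A hedged sketch of a probe/remove pair using them; the names are illustrative and error handling is reduced to the minimum.

        static int example_pm_probe(struct platform_device *pdev)
        {
                int ret;

                /* power the device up and bind it to ACPI power management */
                ret = acpi_dev_pm_attach(&pdev->dev, true);
                if (ret)
                        return ret;     /* -ENODEV when there is no ACPI companion */

                /* ...normal probing... */
                return 0;
        }

        static int example_pm_remove(struct platform_device *pdev)
        {
                acpi_dev_pm_detach(&pdev->dev, true);   /* and power it off */
                return 0;
        }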
diff --git a/include/linux/acpi_gpio.h b/include/linux/acpi_gpio.h new file mode 100644 index 000000000000..91615a389b65 --- /dev/null +++ b/include/linux/acpi_gpio.h | |||
@@ -0,0 +1,19 @@ | |||
1 | #ifndef _LINUX_ACPI_GPIO_H_ | ||
2 | #define _LINUX_ACPI_GPIO_H_ | ||
3 | |||
4 | #include <linux/errno.h> | ||
5 | |||
6 | #ifdef CONFIG_GPIO_ACPI | ||
7 | |||
8 | int acpi_get_gpio(char *path, int pin); | ||
9 | |||
10 | #else /* CONFIG_GPIO_ACPI */ | ||
11 | |||
12 | static inline int acpi_get_gpio(char *path, int pin) | ||
13 | { | ||
14 | return -ENODEV; | ||
15 | } | ||
16 | |||
17 | #endif /* CONFIG_GPIO_ACPI */ | ||
18 | |||
19 | #endif /* _LINUX_ACPI_GPIO_H_ */ | ||
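acpi_get_gpio() translates a GPIO controller's ACPI path plus a pin index into a Linux GPIO number, and stubs out to -ENODEV when CONFIG_GPIO_ACPI is disabled. A short sketch in which the controller path and pin number are entirely made up.

        static int example_request_acpi_gpio(void)
        {
                int gpio;

                gpio = acpi_get_gpio("\\_SB.GPO0", 16); /* hypothetical controller path, pin 16 */
                if (gpio < 0)
                        return gpio;

                return gpio_request(gpio, "example-pin");
        }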
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index c12731582920..f9f5e9eeb9dd 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h | |||
@@ -335,8 +335,8 @@ const char *__clk_get_name(struct clk *clk); | |||
335 | struct clk_hw *__clk_get_hw(struct clk *clk); | 335 | struct clk_hw *__clk_get_hw(struct clk *clk); |
336 | u8 __clk_get_num_parents(struct clk *clk); | 336 | u8 __clk_get_num_parents(struct clk *clk); |
337 | struct clk *__clk_get_parent(struct clk *clk); | 337 | struct clk *__clk_get_parent(struct clk *clk); |
338 | inline int __clk_get_enable_count(struct clk *clk); | 338 | int __clk_get_enable_count(struct clk *clk); |
339 | inline int __clk_get_prepare_count(struct clk *clk); | 339 | int __clk_get_prepare_count(struct clk *clk); |
340 | unsigned long __clk_get_rate(struct clk *clk); | 340 | unsigned long __clk_get_rate(struct clk *clk); |
341 | unsigned long __clk_get_flags(struct clk *clk); | 341 | unsigned long __clk_get_flags(struct clk *clk); |
342 | int __clk_is_enabled(struct clk *clk); | 342 | int __clk_is_enabled(struct clk *clk); |
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index b60f6ba01d0c..a55b88eaf96a 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #ifndef _LINUX_CPUFREQ_H | 11 | #ifndef _LINUX_CPUFREQ_H |
12 | #define _LINUX_CPUFREQ_H | 12 | #define _LINUX_CPUFREQ_H |
13 | 13 | ||
14 | #include <asm/cputime.h> | ||
14 | #include <linux/mutex.h> | 15 | #include <linux/mutex.h> |
15 | #include <linux/notifier.h> | 16 | #include <linux/notifier.h> |
16 | #include <linux/threads.h> | 17 | #include <linux/threads.h> |
@@ -22,6 +23,8 @@ | |||
22 | #include <asm/div64.h> | 23 | #include <asm/div64.h> |
23 | 24 | ||
24 | #define CPUFREQ_NAME_LEN 16 | 25 | #define CPUFREQ_NAME_LEN 16 |
26 | /* Print length for names. Extra 1 space for accommodating '\n' in prints */ | ||
27 | #define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1) | ||
25 | 28 | ||
26 | 29 | ||
27 | /********************************************************************* | 30 | /********************************************************************* |
@@ -404,6 +407,4 @@ void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, | |||
404 | unsigned int cpu); | 407 | unsigned int cpu); |
405 | 408 | ||
406 | void cpufreq_frequency_table_put_attr(unsigned int cpu); | 409 | void cpufreq_frequency_table_put_attr(unsigned int cpu); |
407 | |||
408 | |||
409 | #endif /* _LINUX_CPUFREQ_H */ | 410 | #endif /* _LINUX_CPUFREQ_H */ |
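CPUFREQ_NAME_PLEN simply reserves one byte beyond CPUFREQ_NAME_LEN so that sysfs show routines can append a newline without truncating the name. A one-line illustration; the show helper itself is hypothetical.

        static ssize_t example_show_governor(struct cpufreq_policy *policy, char *buf)
        {
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", policy->governor->name);
        }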
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 279b1eaa8b73..3711b34dc4f9 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
@@ -82,13 +82,6 @@ cpuidle_set_statedata(struct cpuidle_state_usage *st_usage, void *data) | |||
82 | st_usage->driver_data = data; | 82 | st_usage->driver_data = data; |
83 | } | 83 | } |
84 | 84 | ||
85 | struct cpuidle_state_kobj { | ||
86 | struct cpuidle_state *state; | ||
87 | struct cpuidle_state_usage *state_usage; | ||
88 | struct completion kobj_unregister; | ||
89 | struct kobject kobj; | ||
90 | }; | ||
91 | |||
92 | struct cpuidle_device { | 85 | struct cpuidle_device { |
93 | unsigned int registered:1; | 86 | unsigned int registered:1; |
94 | unsigned int enabled:1; | 87 | unsigned int enabled:1; |
@@ -98,7 +91,7 @@ struct cpuidle_device { | |||
98 | int state_count; | 91 | int state_count; |
99 | struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; | 92 | struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; |
100 | struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; | 93 | struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; |
101 | 94 | struct cpuidle_driver_kobj *kobj_driver; | |
102 | struct list_head device_list; | 95 | struct list_head device_list; |
103 | struct kobject kobj; | 96 | struct kobject kobj; |
104 | struct completion kobj_unregister; | 97 | struct completion kobj_unregister; |
@@ -131,6 +124,7 @@ static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) | |||
131 | struct cpuidle_driver { | 124 | struct cpuidle_driver { |
132 | const char *name; | 125 | const char *name; |
133 | struct module *owner; | 126 | struct module *owner; |
127 | int refcnt; | ||
134 | 128 | ||
135 | unsigned int power_specified:1; | 129 | unsigned int power_specified:1; |
136 | /* set to 1 to use the core cpuidle time keeping (for all states). */ | 130 | /* set to 1 to use the core cpuidle time keeping (for all states). */ |
@@ -163,6 +157,10 @@ extern int cpuidle_wrap_enter(struct cpuidle_device *dev, | |||
163 | struct cpuidle_driver *drv, int index)); | 157 | struct cpuidle_driver *drv, int index)); |
164 | extern int cpuidle_play_dead(void); | 158 | extern int cpuidle_play_dead(void); |
165 | 159 | ||
160 | extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); | ||
161 | extern int cpuidle_register_cpu_driver(struct cpuidle_driver *drv, int cpu); | ||
162 | extern void cpuidle_unregister_cpu_driver(struct cpuidle_driver *drv, int cpu); | ||
163 | |||
166 | #else | 164 | #else |
167 | static inline void disable_cpuidle(void) { } | 165 | static inline void disable_cpuidle(void) { } |
168 | static inline int cpuidle_idle_call(void) { return -ENODEV; } | 166 | static inline int cpuidle_idle_call(void) { return -ENODEV; } |
@@ -189,7 +187,6 @@ static inline int cpuidle_wrap_enter(struct cpuidle_device *dev, | |||
189 | struct cpuidle_driver *drv, int index)) | 187 | struct cpuidle_driver *drv, int index)) |
190 | { return -ENODEV; } | 188 | { return -ENODEV; } |
191 | static inline int cpuidle_play_dead(void) {return -ENODEV; } | 189 | static inline int cpuidle_play_dead(void) {return -ENODEV; } |
192 | |||
193 | #endif | 190 | #endif |
194 | 191 | ||
195 | #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED | 192 | #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED |
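cpuidle_get_cpu_driver() and the per-CPU register/unregister helpers prepare cpuidle for drivers that differ from one CPU to another. A minimal sketch of a lookup, assuming a valid registered device; the function name is made up.

        static void example_report_idle_driver(struct cpuidle_device *dev)
        {
                struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

                if (drv)
                        pr_info("cpu%u idle driver: %s\n", dev->cpu, drv->name);
        }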
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 281c72a3b9d5..e83ef39b3bea 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h | |||
@@ -25,12 +25,12 @@ struct devfreq; | |||
25 | * struct devfreq_dev_status - Data given from devfreq user device to | 25 | * struct devfreq_dev_status - Data given from devfreq user device to |
26 | * governors. Represents the performance | 26 | * governors. Represents the performance |
27 | * statistics. | 27 | * statistics. |
28 | * @total_time The total time represented by this instance of | 28 | * @total_time: The total time represented by this instance of |
29 | * devfreq_dev_status | 29 | * devfreq_dev_status |
30 | * @busy_time The time that the device was working among the | 30 | * @busy_time: The time that the device was working among the |
31 | * total_time. | 31 | * total_time. |
32 | * @current_frequency The operating frequency. | 32 | * @current_frequency: The operating frequency. |
33 | * @private_data An entry not specified by the devfreq framework. | 33 | * @private_data: An entry not specified by the devfreq framework. |
34 | * A device and a specific governor may have their | 34 | * A device and a specific governor may have their |
35 | * own protocol with private_data. However, because | 35 | * own protocol with private_data. However, because |
36 | * this is governor-specific, a governor using this | 36 | * this is governor-specific, a governor using this |
@@ -54,23 +54,27 @@ struct devfreq_dev_status { | |||
54 | 54 | ||
55 | /** | 55 | /** |
56 | * struct devfreq_dev_profile - Devfreq's user device profile | 56 | * struct devfreq_dev_profile - Devfreq's user device profile |
57 | * @initial_freq The operating frequency when devfreq_add_device() is | 57 | * @initial_freq: The operating frequency when devfreq_add_device() is |
58 | * called. | 58 | * called. |
59 | * @polling_ms The polling interval in ms. 0 disables polling. | 59 | * @polling_ms: The polling interval in ms. 0 disables polling. |
60 | * @target The device should set its operating frequency at | 60 | * @target: The device should set its operating frequency at |
61 | * freq or lowest-upper-than-freq value. If freq is | 61 | * freq or lowest-upper-than-freq value. If freq is |
62 | * higher than any operable frequency, set maximum. | 62 | * higher than any operable frequency, set maximum. |
63 | * Before returning, target function should set | 63 | * Before returning, target function should set |
64 | * freq at the current frequency. | 64 | * freq at the current frequency. |
65 | * The "flags" parameter's possible values are | 65 | * The "flags" parameter's possible values are |
66 | * explained above with "DEVFREQ_FLAG_*" macros. | 66 | * explained above with "DEVFREQ_FLAG_*" macros. |
67 | * @get_dev_status The device should provide the current performance | 67 | * @get_dev_status: The device should provide the current performance |
68 | * status to devfreq, which is used by governors. | 68 | * status to devfreq, which is used by governors. |
69 | * @exit An optional callback that is called when devfreq | 69 | * @get_cur_freq: The device should provide the current frequency |
70 | * at which it is operating. | ||
71 | * @exit: An optional callback that is called when devfreq | ||
70 | * is removing the devfreq object due to error or | 72 | * is removing the devfreq object due to error or |
71 | * from devfreq_remove_device() call. If the user | 73 | * from devfreq_remove_device() call. If the user |
72 | * has registered devfreq->nb at a notifier-head, | 74 | * has registered devfreq->nb at a notifier-head, |
73 | * this is the time to unregister it. | 75 | * this is the time to unregister it. |
76 | * @freq_table: Optional list of frequencies to support statistics. | ||
77 | * @max_state: The size of freq_table. | ||
74 | */ | 78 | */ |
75 | struct devfreq_dev_profile { | 79 | struct devfreq_dev_profile { |
76 | unsigned long initial_freq; | 80 | unsigned long initial_freq; |
@@ -79,63 +83,63 @@ struct devfreq_dev_profile { | |||
79 | int (*target)(struct device *dev, unsigned long *freq, u32 flags); | 83 | int (*target)(struct device *dev, unsigned long *freq, u32 flags); |
80 | int (*get_dev_status)(struct device *dev, | 84 | int (*get_dev_status)(struct device *dev, |
81 | struct devfreq_dev_status *stat); | 85 | struct devfreq_dev_status *stat); |
86 | int (*get_cur_freq)(struct device *dev, unsigned long *freq); | ||
82 | void (*exit)(struct device *dev); | 87 | void (*exit)(struct device *dev); |
88 | |||
89 | unsigned int *freq_table; | ||
90 | unsigned int max_state; | ||
83 | }; | 91 | }; |
84 | 92 | ||
85 | /** | 93 | /** |
86 | * struct devfreq_governor - Devfreq policy governor | 94 | * struct devfreq_governor - Devfreq policy governor |
87 | * @name Governor's name | 95 | * @node: list node - contains registered devfreq governors |
88 | * @get_target_freq Returns desired operating frequency for the device. | 96 | * @name: Governor's name |
97 | * @get_target_freq: Returns desired operating frequency for the device. | ||
89 | * Basically, get_target_freq will run | 98 | * Basically, get_target_freq will run |
90 | * devfreq_dev_profile.get_dev_status() to get the | 99 | * devfreq_dev_profile.get_dev_status() to get the |
91 | * status of the device (load = busy_time / total_time). | 100 | * status of the device (load = busy_time / total_time). |
92 | * If no_central_polling is set, this callback is called | 101 | * If no_central_polling is set, this callback is called |
93 | * only with update_devfreq() notified by OPP. | 102 | * only with update_devfreq() notified by OPP. |
94 | * @init Called when the devfreq is being attached to a device | 103 | * @event_handler: Callback for devfreq core framework to notify events |
95 | * @exit Called when the devfreq is being removed from a | 104 | * to governors. Events include per device governor |
96 | * device. Governor should stop any internal routines | 105 | * init and exit, opp changes out of devfreq, suspend |
97 | * before return because related data may be | 106 | * and resume of per device devfreq during device idle. |
98 | * freed after exit(). | ||
99 | * @no_central_polling Do not use devfreq's central polling mechanism. | ||
100 | * When this is set, devfreq will not call | ||
101 | * get_target_freq with devfreq_monitor(). However, | ||
102 | * devfreq will call get_target_freq with | ||
103 | * devfreq_update() notified by OPP framework. | ||
104 | * | 107 | * |
105 | * Note that the callbacks are called with devfreq->lock locked by devfreq. | 108 | * Note that the callbacks are called with devfreq->lock locked by devfreq. |
106 | */ | 109 | */ |
107 | struct devfreq_governor { | 110 | struct devfreq_governor { |
111 | struct list_head node; | ||
112 | |||
108 | const char name[DEVFREQ_NAME_LEN]; | 113 | const char name[DEVFREQ_NAME_LEN]; |
109 | int (*get_target_freq)(struct devfreq *this, unsigned long *freq); | 114 | int (*get_target_freq)(struct devfreq *this, unsigned long *freq); |
110 | int (*init)(struct devfreq *this); | 115 | int (*event_handler)(struct devfreq *devfreq, |
111 | void (*exit)(struct devfreq *this); | 116 | unsigned int event, void *data); |
112 | const bool no_central_polling; | ||
113 | }; | 117 | }; |
114 | 118 | ||
115 | /** | 119 | /** |
116 | * struct devfreq - Device devfreq structure | 120 | * struct devfreq - Device devfreq structure |
117 | * @node list node - contains the devices with devfreq that have been | 121 | * @node: list node - contains the devices with devfreq that have been |
118 | * registered. | 122 | * registered. |
119 | * @lock a mutex to protect accessing devfreq. | 123 | * @lock: a mutex to protect accessing devfreq. |
120 | * @dev device registered by devfreq class. dev.parent is the device | 124 | * @dev: device registered by devfreq class. dev.parent is the device |
121 | * using devfreq. | 125 | * using devfreq. |
122 | * @profile device-specific devfreq profile | 126 | * @profile: device-specific devfreq profile |
123 | * @governor method how to choose frequency based on the usage. | 127 | * @governor: method how to choose frequency based on the usage. |
124 | * @nb notifier block used to notify devfreq object that it should | 128 | * @governor_name: devfreq governor name for use with this devfreq |
129 | * @nb: notifier block used to notify devfreq object that it should | ||
125 | * reevaluate operable frequencies. Devfreq users may use | 130 | * reevaluate operable frequencies. Devfreq users may use |
126 | * devfreq.nb to the corresponding register notifier call chain. | 131 | * devfreq.nb to the corresponding register notifier call chain. |
127 | * @polling_jiffies interval in jiffies. | 132 | * @work: delayed work for load monitoring. |
128 | * @previous_freq previously configured frequency value. | 133 | * @previous_freq: previously configured frequency value. |
129 | * @next_polling the number of remaining jiffies to poll with | 134 | * @data: Private data of the governor. The devfreq framework does not |
130 | * "devfreq_monitor" executions to reevaluate | ||
131 | * frequency/voltage of the device. Set by | ||
132 | * profile's polling_ms interval. | ||
133 | * @data Private data of the governor. The devfreq framework does not | ||
134 | * touch this. | 135 | * touch this. |
135 | * @being_removed a flag to mark that this object is being removed in | 136 | * @min_freq: Limit minimum frequency requested by user (0: none) |
136 | * order to prevent trying to remove the object multiple times. | 137 | * @max_freq: Limit maximum frequency requested by user (0: none) |
137 | * @min_freq Limit minimum frequency requested by user (0: none) | 138 | * @stop_polling: devfreq polling status of a device. |
138 | * @max_freq Limit maximum frequency requested by user (0: none) | 139 | * @total_trans: Number of devfreq transitions |
140 | * @trans_table: Statistics of devfreq transitions | ||
141 | * @time_in_state: Statistics of devfreq states | ||
142 | * @last_stat_updated: The last time stat updated | ||
139 | * | 143 | * |
140 | * This structure stores the devfreq information for a given device. | 144 | * This structure stores the devfreq information for a given device. |
141 | * | 145 | * |
@@ -152,26 +156,33 @@ struct devfreq { | |||
152 | struct device dev; | 156 | struct device dev; |
153 | struct devfreq_dev_profile *profile; | 157 | struct devfreq_dev_profile *profile; |
154 | const struct devfreq_governor *governor; | 158 | const struct devfreq_governor *governor; |
159 | char governor_name[DEVFREQ_NAME_LEN]; | ||
155 | struct notifier_block nb; | 160 | struct notifier_block nb; |
161 | struct delayed_work work; | ||
156 | 162 | ||
157 | unsigned long polling_jiffies; | ||
158 | unsigned long previous_freq; | 163 | unsigned long previous_freq; |
159 | unsigned int next_polling; | ||
160 | 164 | ||
161 | void *data; /* private data for governors */ | 165 | void *data; /* private data for governors */ |
162 | 166 | ||
163 | bool being_removed; | ||
164 | |||
165 | unsigned long min_freq; | 167 | unsigned long min_freq; |
166 | unsigned long max_freq; | 168 | unsigned long max_freq; |
169 | bool stop_polling; | ||
170 | |||
171 | /* information for device frequency transition */ | ||
172 | unsigned int total_trans; | ||
173 | unsigned int *trans_table; | ||
174 | unsigned long *time_in_state; | ||
175 | unsigned long last_stat_updated; | ||
167 | }; | 176 | }; |
168 | 177 | ||
169 | #if defined(CONFIG_PM_DEVFREQ) | 178 | #if defined(CONFIG_PM_DEVFREQ) |
170 | extern struct devfreq *devfreq_add_device(struct device *dev, | 179 | extern struct devfreq *devfreq_add_device(struct device *dev, |
171 | struct devfreq_dev_profile *profile, | 180 | struct devfreq_dev_profile *profile, |
172 | const struct devfreq_governor *governor, | 181 | const char *governor_name, |
173 | void *data); | 182 | void *data); |
174 | extern int devfreq_remove_device(struct devfreq *devfreq); | 183 | extern int devfreq_remove_device(struct devfreq *devfreq); |
184 | extern int devfreq_suspend_device(struct devfreq *devfreq); | ||
185 | extern int devfreq_resume_device(struct devfreq *devfreq); | ||
175 | 186 | ||
176 | /* Helper functions for devfreq user device driver with OPP. */ | 187 | /* Helper functions for devfreq user device driver with OPP. */ |
177 | extern struct opp *devfreq_recommended_opp(struct device *dev, | 188 | extern struct opp *devfreq_recommended_opp(struct device *dev, |
@@ -181,23 +192,13 @@ extern int devfreq_register_opp_notifier(struct device *dev, | |||
181 | extern int devfreq_unregister_opp_notifier(struct device *dev, | 192 | extern int devfreq_unregister_opp_notifier(struct device *dev, |
182 | struct devfreq *devfreq); | 193 | struct devfreq *devfreq); |
183 | 194 | ||
184 | #ifdef CONFIG_DEVFREQ_GOV_POWERSAVE | 195 | #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) |
185 | extern const struct devfreq_governor devfreq_powersave; | ||
186 | #endif | ||
187 | #ifdef CONFIG_DEVFREQ_GOV_PERFORMANCE | ||
188 | extern const struct devfreq_governor devfreq_performance; | ||
189 | #endif | ||
190 | #ifdef CONFIG_DEVFREQ_GOV_USERSPACE | ||
191 | extern const struct devfreq_governor devfreq_userspace; | ||
192 | #endif | ||
193 | #ifdef CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND | ||
194 | extern const struct devfreq_governor devfreq_simple_ondemand; | ||
195 | /** | 196 | /** |
196 | * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq | 197 | * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq |
197 | * and devfreq_add_device | 198 | * and devfreq_add_device |
198 | * @ upthreshold If the load is over this value, the frequency jumps. | 199 | * @upthreshold: If the load is over this value, the frequency jumps. |
199 | * Specify 0 to use the default. Valid value = 0 to 100. | 200 | * Specify 0 to use the default. Valid value = 0 to 100. |
200 | * @ downdifferential If the load is under upthreshold - downdifferential, | 201 | * @downdifferential: If the load is under upthreshold - downdifferential, |
201 | * the governor may consider slowing the frequency down. | 202 | * the governor may consider slowing the frequency down. |
202 | * Specify 0 to use the default. Valid value = 0 to 100. | 203 | * Specify 0 to use the default. Valid value = 0 to 100. |
203 | * downdifferential < upthreshold must hold. | 204 | * downdifferential < upthreshold must hold. |
@@ -214,7 +215,7 @@ struct devfreq_simple_ondemand_data { | |||
214 | #else /* !CONFIG_PM_DEVFREQ */ | 215 | #else /* !CONFIG_PM_DEVFREQ */ |
215 | static struct devfreq *devfreq_add_device(struct device *dev, | 216 | static struct devfreq *devfreq_add_device(struct device *dev, |
216 | struct devfreq_dev_profile *profile, | 217 | struct devfreq_dev_profile *profile, |
217 | struct devfreq_governor *governor, | 218 | const char *governor_name, |
218 | void *data) | 219 | void *data) |
219 | { | 220 | { |
220 | return NULL; | 221 | return NULL; |
@@ -225,6 +226,16 @@ static int devfreq_remove_device(struct devfreq *devfreq) | |||
225 | return 0; | 226 | return 0; |
226 | } | 227 | } |
227 | 228 | ||
229 | static int devfreq_suspend_device(struct devfreq *devfreq) | ||
230 | { | ||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | static int devfreq_resume_device(struct devfreq *devfreq) | ||
235 | { | ||
236 | return 0; | ||
237 | } | ||
238 | |||
228 | static struct opp *devfreq_recommended_opp(struct device *dev, | 239 | static struct opp *devfreq_recommended_opp(struct device *dev, |
229 | unsigned long *freq, u32 flags) | 240 | unsigned long *freq, u32 flags) |
230 | { | 241 | { |
@@ -243,11 +254,6 @@ static int devfreq_unregister_opp_notifier(struct device *dev, | |||
243 | return -EINVAL; | 254 | return -EINVAL; |
244 | } | 255 | } |
245 | 256 | ||
246 | #define devfreq_powersave NULL | ||
247 | #define devfreq_performance NULL | ||
248 | #define devfreq_userspace NULL | ||
249 | #define devfreq_simple_ondemand NULL | ||
250 | |||
251 | #endif /* CONFIG_PM_DEVFREQ */ | 257 | #endif /* CONFIG_PM_DEVFREQ */ |
252 | 258 | ||
253 | #endif /* __LINUX_DEVFREQ_H__ */ | 259 | #endif /* __LINUX_DEVFREQ_H__ */ |
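The devfreq interface now takes the governor by name, and a profile may carry get_cur_freq() plus a freq_table/max_state pair that feeds the new trans_stat statistics. A hedged sketch of a driver registering with the simple_ondemand governor; every example_* symbol and all frequency values are invented, and the callbacks are reduced to stubs.

        static unsigned int example_freqs[] = { 100000000, 200000000, 400000000 };

        static int example_target(struct device *dev, unsigned long *freq, u32 flags)
        {
                /* program the hardware, then write back the frequency actually set */
                return 0;
        }

        static int example_get_dev_status(struct device *dev,
                                          struct devfreq_dev_status *stat)
        {
                stat->busy_time = 0;            /* fill in real load counters here */
                stat->total_time = 1;
                stat->current_frequency = 400000000;
                return 0;
        }

        static int example_get_cur_freq(struct device *dev, unsigned long *freq)
        {
                *freq = 400000000;              /* placeholder */
                return 0;
        }

        static struct devfreq_dev_profile example_profile = {
                .initial_freq   = 400000000,
                .polling_ms     = 100,
                .target         = example_target,
                .get_dev_status = example_get_dev_status,
                .get_cur_freq   = example_get_cur_freq,
                .freq_table     = example_freqs,        /* enables trans_stat */
                .max_state      = ARRAY_SIZE(example_freqs),
        };

        static int example_devfreq_probe(struct platform_device *pdev)
        {
                struct devfreq *df;

                df = devfreq_add_device(&pdev->dev, &example_profile,
                                        "simple_ondemand", NULL);
                return IS_ERR(df) ? PTR_ERR(df) : 0;
        }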
diff --git a/include/linux/device.h b/include/linux/device.h index 86ef6ab553b1..05292e488346 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -190,6 +190,7 @@ extern struct klist *bus_get_device_klist(struct bus_type *bus); | |||
190 | * @mod_name: Used for built-in modules. | 190 | * @mod_name: Used for built-in modules. |
191 | * @suppress_bind_attrs: Disables bind/unbind via sysfs. | 191 | * @suppress_bind_attrs: Disables bind/unbind via sysfs. |
192 | * @of_match_table: The open firmware table. | 192 | * @of_match_table: The open firmware table. |
193 | * @acpi_match_table: The ACPI match table. | ||
193 | * @probe: Called to query the existence of a specific device, | 194 | * @probe: Called to query the existence of a specific device, |
194 | * whether this driver can work with it, and bind the driver | 195 | * whether this driver can work with it, and bind the driver |
195 | * to a specific device. | 196 | * to a specific device. |
@@ -223,6 +224,7 @@ struct device_driver { | |||
223 | bool suppress_bind_attrs; /* disables bind/unbind via sysfs */ | 224 | bool suppress_bind_attrs; /* disables bind/unbind via sysfs */ |
224 | 225 | ||
225 | const struct of_device_id *of_match_table; | 226 | const struct of_device_id *of_match_table; |
227 | const struct acpi_device_id *acpi_match_table; | ||
226 | 228 | ||
227 | int (*probe) (struct device *dev); | 229 | int (*probe) (struct device *dev); |
228 | int (*remove) (struct device *dev); | 230 | int (*remove) (struct device *dev); |
@@ -576,6 +578,12 @@ struct device_dma_parameters { | |||
576 | unsigned long segment_boundary_mask; | 578 | unsigned long segment_boundary_mask; |
577 | }; | 579 | }; |
578 | 580 | ||
581 | struct acpi_dev_node { | ||
582 | #ifdef CONFIG_ACPI | ||
583 | void *handle; | ||
584 | #endif | ||
585 | }; | ||
586 | |||
579 | /** | 587 | /** |
580 | * struct device - The basic device structure | 588 | * struct device - The basic device structure |
581 | * @parent: The device's "parent" device, the device to which it is attached. | 589 | * @parent: The device's "parent" device, the device to which it is attached. |
@@ -616,6 +624,7 @@ struct device_dma_parameters { | |||
616 | * @dma_mem: Internal for coherent mem override. | 624 | * @dma_mem: Internal for coherent mem override. |
617 | * @archdata: For arch-specific additions. | 625 | * @archdata: For arch-specific additions. |
618 | * @of_node: Associated device tree node. | 626 | * @of_node: Associated device tree node. |
627 | * @acpi_node: Associated ACPI device node. | ||
619 | * @devt: For creating the sysfs "dev". | 628 | * @devt: For creating the sysfs "dev". |
620 | * @id: device instance | 629 | * @id: device instance |
621 | * @devres_lock: Spinlock to protect the resource of the device. | 630 | * @devres_lock: Spinlock to protect the resource of the device. |
@@ -680,6 +689,7 @@ struct device { | |||
680 | struct dev_archdata archdata; | 689 | struct dev_archdata archdata; |
681 | 690 | ||
682 | struct device_node *of_node; /* associated device tree node */ | 691 | struct device_node *of_node; /* associated device tree node */ |
692 | struct acpi_dev_node acpi_node; /* associated ACPI device node */ | ||
683 | 693 | ||
684 | dev_t devt; /* dev_t, creates the sysfs "dev" */ | 694 | dev_t devt; /* dev_t, creates the sysfs "dev" */ |
685 | u32 id; /* device instance */ | 695 | u32 id; /* device instance */ |
@@ -700,6 +710,14 @@ static inline struct device *kobj_to_dev(struct kobject *kobj) | |||
700 | return container_of(kobj, struct device, kobj); | 710 | return container_of(kobj, struct device, kobj); |
701 | } | 711 | } |
702 | 712 | ||
713 | #ifdef CONFIG_ACPI | ||
714 | #define ACPI_HANDLE(dev) ((dev)->acpi_node.handle) | ||
715 | #define ACPI_HANDLE_SET(dev, _handle_) (dev)->acpi_node.handle = (_handle_) | ||
716 | #else | ||
717 | #define ACPI_HANDLE(dev) (NULL) | ||
718 | #define ACPI_HANDLE_SET(dev, _handle_) do { } while (0) | ||
719 | #endif | ||
720 | |||
703 | /* Get the wakeup routines, which depend on struct device */ | 721 | /* Get the wakeup routines, which depend on struct device */ |
704 | #include <linux/pm_wakeup.h> | 722 | #include <linux/pm_wakeup.h> |
705 | 723 | ||
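ACPI_HANDLE() reads the handle stored in the new acpi_node member of struct device and evaluates to NULL when CONFIG_ACPI is off, so drivers can test for an ACPI companion without #ifdefs. A short sketch; the driver name is invented.

        static int example_acpi_probe(struct platform_device *pdev)
        {
                acpi_handle handle = ACPI_HANDLE(&pdev->dev);

                if (!handle)
                        return -ENODEV; /* device was not enumerated from ACPI */

                /* handle can now be passed to acpi_evaluate_object() and friends */
                return 0;
        }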
diff --git a/include/linux/freezer.h b/include/linux/freezer.h index d09af4b67cf1..b90091af5798 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h | |||
@@ -177,6 +177,7 @@ static inline int freeze_kernel_threads(void) { return -ENOSYS; } | |||
177 | static inline void thaw_processes(void) {} | 177 | static inline void thaw_processes(void) {} |
178 | static inline void thaw_kernel_threads(void) {} | 178 | static inline void thaw_kernel_threads(void) {} |
179 | 179 | ||
180 | static inline bool try_to_freeze_nowarn(void) { return false; } | ||
180 | static inline bool try_to_freeze(void) { return false; } | 181 | static inline bool try_to_freeze(void) { return false; } |
181 | 182 | ||
182 | static inline void freezer_do_not_count(void) {} | 183 | static inline void freezer_do_not_count(void) {} |
diff --git a/include/linux/i2c-omap.h b/include/linux/i2c-omap.h index df804ba73e0b..92a0dc75bc74 100644 --- a/include/linux/i2c-omap.h +++ b/include/linux/i2c-omap.h | |||
@@ -34,6 +34,7 @@ struct omap_i2c_bus_platform_data { | |||
34 | u32 clkrate; | 34 | u32 clkrate; |
35 | u32 rev; | 35 | u32 rev; |
36 | u32 flags; | 36 | u32 flags; |
37 | void (*set_mpu_wkup_lat)(struct device *dev, long set); | ||
37 | }; | 38 | }; |
38 | 39 | ||
39 | #endif | 40 | #endif |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 800de224336b..d0c4db7b4872 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
@@ -259,6 +259,7 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data) | |||
259 | * @platform_data: stored in i2c_client.dev.platform_data | 259 | * @platform_data: stored in i2c_client.dev.platform_data |
260 | * @archdata: copied into i2c_client.dev.archdata | 260 | * @archdata: copied into i2c_client.dev.archdata |
261 | * @of_node: pointer to OpenFirmware device node | 261 | * @of_node: pointer to OpenFirmware device node |
262 | * @acpi_node: ACPI device node | ||
262 | * @irq: stored in i2c_client.irq | 263 | * @irq: stored in i2c_client.irq |
263 | * | 264 | * |
264 | * I2C doesn't actually support hardware probing, although controllers and | 265 | * I2C doesn't actually support hardware probing, although controllers and |
@@ -279,6 +280,7 @@ struct i2c_board_info { | |||
279 | void *platform_data; | 280 | void *platform_data; |
280 | struct dev_archdata *archdata; | 281 | struct dev_archdata *archdata; |
281 | struct device_node *of_node; | 282 | struct device_node *of_node; |
283 | struct acpi_dev_node acpi_node; | ||
282 | int irq; | 284 | int irq; |
283 | }; | 285 | }; |
284 | 286 | ||
@@ -501,4 +503,11 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap) | |||
501 | i2c_del_driver) | 503 | i2c_del_driver) |
502 | 504 | ||
503 | #endif /* I2C */ | 505 | #endif /* I2C */ |
506 | |||
507 | #if IS_ENABLED(CONFIG_ACPI_I2C) | ||
508 | extern void acpi_i2c_register_devices(struct i2c_adapter *adap); | ||
509 | #else | ||
510 | static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) {} | ||
511 | #endif | ||
512 | |||
504 | #endif /* _LINUX_I2C_H */ | 513 | #endif /* _LINUX_I2C_H */ |
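acpi_i2c_register_devices() enumerates I2C slaves described in ACPI below a freshly registered adapter, and compiles to a no-op unless CONFIG_ACPI_I2C is set. A sketch of the expected call site in a bus-master driver; the wrapper name is hypothetical.

        static int example_add_adapter(struct i2c_adapter *adap)
        {
                int ret;

                ret = i2c_add_adapter(adap);
                if (ret)
                        return ret;

                acpi_i2c_register_devices(adap);        /* no-op without CONFIG_ACPI_I2C */
                return 0;
        }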
diff --git a/include/linux/mm.h b/include/linux/mm.h index fa0680402738..bcaab4e6fe91 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1684,9 +1684,5 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; } | |||
1684 | static inline bool page_is_guard(struct page *page) { return false; } | 1684 | static inline bool page_is_guard(struct page *page) { return false; } |
1685 | #endif /* CONFIG_DEBUG_PAGEALLOC */ | 1685 | #endif /* CONFIG_DEBUG_PAGEALLOC */ |
1686 | 1686 | ||
1687 | extern void reset_zone_present_pages(void); | ||
1688 | extern void fixup_zone_present_pages(int nid, unsigned long start_pfn, | ||
1689 | unsigned long end_pfn); | ||
1690 | |||
1691 | #endif /* __KERNEL__ */ | 1687 | #endif /* __KERNEL__ */ |
1692 | #endif /* _LINUX_MM_H */ | 1688 | #endif /* _LINUX_MM_H */ |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 50aaca81f63d..a23923ba8263 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -752,7 +752,7 @@ extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, | |||
752 | unsigned long size, | 752 | unsigned long size, |
753 | enum memmap_context context); | 753 | enum memmap_context context); |
754 | 754 | ||
755 | extern void lruvec_init(struct lruvec *lruvec, struct zone *zone); | 755 | extern void lruvec_init(struct lruvec *lruvec); |
756 | 756 | ||
757 | static inline struct zone *lruvec_zone(struct lruvec *lruvec) | 757 | static inline struct zone *lruvec_zone(struct lruvec *lruvec) |
758 | { | 758 | { |
diff --git a/include/linux/of_address.h b/include/linux/of_address.h index e20e3af68fb6..0506eb53519b 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h | |||
@@ -42,10 +42,12 @@ static inline struct device_node *of_find_matching_node_by_address( | |||
42 | { | 42 | { |
43 | return NULL; | 43 | return NULL; |
44 | } | 44 | } |
45 | #ifndef of_iomap | ||
45 | static inline void __iomem *of_iomap(struct device_node *device, int index) | 46 | static inline void __iomem *of_iomap(struct device_node *device, int index) |
46 | { | 47 | { |
47 | return NULL; | 48 | return NULL; |
48 | } | 49 | } |
50 | #endif | ||
49 | static inline const __be32 *of_get_address(struct device_node *dev, int index, | 51 | static inline const __be32 *of_get_address(struct device_node *dev, int index, |
50 | u64 *size, unsigned int *flags) | 52 | u64 *size, unsigned int *flags) |
51 | { | 53 | { |
diff --git a/include/linux/platform_data/omap_ocp2scp.h b/include/linux/platform_data/omap_ocp2scp.h new file mode 100644 index 000000000000..5c6c3939355f --- /dev/null +++ b/include/linux/platform_data/omap_ocp2scp.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * omap_ocp2scp.h -- ocp2scp header file | ||
3 | * | ||
4 | * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * Author: Kishon Vijay Abraham I <kishon@ti.com> | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #ifndef __DRIVERS_OMAP_OCP2SCP_H | ||
20 | #define __DRIVERS_OMAP_OCP2SCP_H | ||
21 | |||
22 | struct omap_ocp2scp_dev { | ||
23 | const char *drv_name; | ||
24 | struct resource *res; | ||
25 | }; | ||
26 | |||
27 | struct omap_ocp2scp_platform_data { | ||
28 | int dev_cnt; | ||
29 | struct omap_ocp2scp_dev **devices; | ||
30 | }; | ||
31 | #endif /* __DRIVERS_OMAP_OCP2SCP_H */ | ||
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 5711e9525a2a..a9ded9a3c175 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
@@ -55,6 +55,7 @@ extern int platform_add_devices(struct platform_device **, int); | |||
55 | 55 | ||
56 | struct platform_device_info { | 56 | struct platform_device_info { |
57 | struct device *parent; | 57 | struct device *parent; |
58 | struct acpi_dev_node acpi_node; | ||
58 | 59 | ||
59 | const char *name; | 60 | const char *name; |
60 | int id; | 61 | int id; |
diff --git a/include/linux/pm.h b/include/linux/pm.h index 007e687c4f69..03d7bb145311 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -546,10 +546,9 @@ struct dev_pm_info { | |||
546 | unsigned long active_jiffies; | 546 | unsigned long active_jiffies; |
547 | unsigned long suspended_jiffies; | 547 | unsigned long suspended_jiffies; |
548 | unsigned long accounting_timestamp; | 548 | unsigned long accounting_timestamp; |
549 | struct dev_pm_qos_request *pq_req; | ||
550 | #endif | 549 | #endif |
551 | struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ | 550 | struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ |
552 | struct pm_qos_constraints *constraints; | 551 | struct dev_pm_qos *qos; |
553 | }; | 552 | }; |
554 | 553 | ||
555 | extern void update_pm_runtime_accounting(struct device *dev); | 554 | extern void update_pm_runtime_accounting(struct device *dev); |
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 9924ea1f22e0..5a95013905c8 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h | |||
@@ -20,6 +20,13 @@ enum { | |||
20 | PM_QOS_NUM_CLASSES, | 20 | PM_QOS_NUM_CLASSES, |
21 | }; | 21 | }; |
22 | 22 | ||
23 | enum pm_qos_flags_status { | ||
24 | PM_QOS_FLAGS_UNDEFINED = -1, | ||
25 | PM_QOS_FLAGS_NONE, | ||
26 | PM_QOS_FLAGS_SOME, | ||
27 | PM_QOS_FLAGS_ALL, | ||
28 | }; | ||
29 | |||
23 | #define PM_QOS_DEFAULT_VALUE -1 | 30 | #define PM_QOS_DEFAULT_VALUE -1 |
24 | 31 | ||
25 | #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) | 32 | #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) |
@@ -27,14 +34,31 @@ enum { | |||
27 | #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 | 34 | #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 |
28 | #define PM_QOS_DEV_LAT_DEFAULT_VALUE 0 | 35 | #define PM_QOS_DEV_LAT_DEFAULT_VALUE 0 |
29 | 36 | ||
37 | #define PM_QOS_FLAG_NO_POWER_OFF (1 << 0) | ||
38 | #define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1) | ||
39 | |||
30 | struct pm_qos_request { | 40 | struct pm_qos_request { |
31 | struct plist_node node; | 41 | struct plist_node node; |
32 | int pm_qos_class; | 42 | int pm_qos_class; |
33 | struct delayed_work work; /* for pm_qos_update_request_timeout */ | 43 | struct delayed_work work; /* for pm_qos_update_request_timeout */ |
34 | }; | 44 | }; |
35 | 45 | ||
46 | struct pm_qos_flags_request { | ||
47 | struct list_head node; | ||
48 | s32 flags; /* Do not change to 64 bit */ | ||
49 | }; | ||
50 | |||
51 | enum dev_pm_qos_req_type { | ||
52 | DEV_PM_QOS_LATENCY = 1, | ||
53 | DEV_PM_QOS_FLAGS, | ||
54 | }; | ||
55 | |||
36 | struct dev_pm_qos_request { | 56 | struct dev_pm_qos_request { |
37 | struct plist_node node; | 57 | enum dev_pm_qos_req_type type; |
58 | union { | ||
59 | struct plist_node pnode; | ||
60 | struct pm_qos_flags_request flr; | ||
61 | } data; | ||
38 | struct device *dev; | 62 | struct device *dev; |
39 | }; | 63 | }; |
40 | 64 | ||
@@ -45,8 +69,8 @@ enum pm_qos_type { | |||
45 | }; | 69 | }; |
46 | 70 | ||
47 | /* | 71 | /* |
48 | * Note: The lockless read path depends on the CPU accessing | 72 | * Note: The lockless read path depends on the CPU accessing target_value |
49 | * target_value atomically. Atomic access is only guaranteed on all CPU | 73 | * or effective_flags atomically. Atomic access is only guaranteed on all CPU |
50 | * types linux supports for 32 bit quantities | 74 | * types linux supports for 32 bit quantities |
51 | */ | 75 | */ |
52 | struct pm_qos_constraints { | 76 | struct pm_qos_constraints { |
@@ -57,6 +81,18 @@ struct pm_qos_constraints { | |||
57 | struct blocking_notifier_head *notifiers; | 81 | struct blocking_notifier_head *notifiers; |
58 | }; | 82 | }; |
59 | 83 | ||
84 | struct pm_qos_flags { | ||
85 | struct list_head list; | ||
86 | s32 effective_flags; /* Do not change to 64 bit */ | ||
87 | }; | ||
88 | |||
89 | struct dev_pm_qos { | ||
90 | struct pm_qos_constraints latency; | ||
91 | struct pm_qos_flags flags; | ||
92 | struct dev_pm_qos_request *latency_req; | ||
93 | struct dev_pm_qos_request *flags_req; | ||
94 | }; | ||
95 | |||
60 | /* Action requested to pm_qos_update_target */ | 96 | /* Action requested to pm_qos_update_target */ |
61 | enum pm_qos_req_action { | 97 | enum pm_qos_req_action { |
62 | PM_QOS_ADD_REQ, /* Add a new request */ | 98 | PM_QOS_ADD_REQ, /* Add a new request */ |
@@ -71,6 +107,9 @@ static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req) | |||
71 | 107 | ||
72 | int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, | 108 | int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, |
73 | enum pm_qos_req_action action, int value); | 109 | enum pm_qos_req_action action, int value); |
110 | bool pm_qos_update_flags(struct pm_qos_flags *pqf, | ||
111 | struct pm_qos_flags_request *req, | ||
112 | enum pm_qos_req_action action, s32 val); | ||
74 | void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class, | 113 | void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class, |
75 | s32 value); | 114 | s32 value); |
76 | void pm_qos_update_request(struct pm_qos_request *req, | 115 | void pm_qos_update_request(struct pm_qos_request *req, |
@@ -86,10 +125,12 @@ int pm_qos_request_active(struct pm_qos_request *req); | |||
86 | s32 pm_qos_read_value(struct pm_qos_constraints *c); | 125 | s32 pm_qos_read_value(struct pm_qos_constraints *c); |
87 | 126 | ||
88 | #ifdef CONFIG_PM | 127 | #ifdef CONFIG_PM |
128 | enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask); | ||
129 | enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask); | ||
89 | s32 __dev_pm_qos_read_value(struct device *dev); | 130 | s32 __dev_pm_qos_read_value(struct device *dev); |
90 | s32 dev_pm_qos_read_value(struct device *dev); | 131 | s32 dev_pm_qos_read_value(struct device *dev); |
91 | int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, | 132 | int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, |
92 | s32 value); | 133 | enum dev_pm_qos_req_type type, s32 value); |
93 | int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value); | 134 | int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value); |
94 | int dev_pm_qos_remove_request(struct dev_pm_qos_request *req); | 135 | int dev_pm_qos_remove_request(struct dev_pm_qos_request *req); |
95 | int dev_pm_qos_add_notifier(struct device *dev, | 136 | int dev_pm_qos_add_notifier(struct device *dev, |
@@ -103,12 +144,19 @@ void dev_pm_qos_constraints_destroy(struct device *dev); | |||
103 | int dev_pm_qos_add_ancestor_request(struct device *dev, | 144 | int dev_pm_qos_add_ancestor_request(struct device *dev, |
104 | struct dev_pm_qos_request *req, s32 value); | 145 | struct dev_pm_qos_request *req, s32 value); |
105 | #else | 146 | #else |
147 | static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, | ||
148 | s32 mask) | ||
149 | { return PM_QOS_FLAGS_UNDEFINED; } | ||
150 | static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, | ||
151 | s32 mask) | ||
152 | { return PM_QOS_FLAGS_UNDEFINED; } | ||
106 | static inline s32 __dev_pm_qos_read_value(struct device *dev) | 153 | static inline s32 __dev_pm_qos_read_value(struct device *dev) |
107 | { return 0; } | 154 | { return 0; } |
108 | static inline s32 dev_pm_qos_read_value(struct device *dev) | 155 | static inline s32 dev_pm_qos_read_value(struct device *dev) |
109 | { return 0; } | 156 | { return 0; } |
110 | static inline int dev_pm_qos_add_request(struct device *dev, | 157 | static inline int dev_pm_qos_add_request(struct device *dev, |
111 | struct dev_pm_qos_request *req, | 158 | struct dev_pm_qos_request *req, |
159 | enum dev_pm_qos_req_type type, | ||
112 | s32 value) | 160 | s32 value) |
113 | { return 0; } | 161 | { return 0; } |
114 | static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req, | 162 | static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req, |
@@ -144,10 +192,31 @@ static inline int dev_pm_qos_add_ancestor_request(struct device *dev, | |||
144 | #ifdef CONFIG_PM_RUNTIME | 192 | #ifdef CONFIG_PM_RUNTIME |
145 | int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); | 193 | int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); |
146 | void dev_pm_qos_hide_latency_limit(struct device *dev); | 194 | void dev_pm_qos_hide_latency_limit(struct device *dev); |
195 | int dev_pm_qos_expose_flags(struct device *dev, s32 value); | ||
196 | void dev_pm_qos_hide_flags(struct device *dev); | ||
197 | int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); | ||
198 | |||
199 | static inline s32 dev_pm_qos_requested_latency(struct device *dev) | ||
200 | { | ||
201 | return dev->power.qos->latency_req->data.pnode.prio; | ||
202 | } | ||
203 | |||
204 | static inline s32 dev_pm_qos_requested_flags(struct device *dev) | ||
205 | { | ||
206 | return dev->power.qos->flags_req->data.flr.flags; | ||
207 | } | ||
147 | #else | 208 | #else |
148 | static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) | 209 | static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) |
149 | { return 0; } | 210 | { return 0; } |
150 | static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} | 211 | static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} |
212 | static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value) | ||
213 | { return 0; } | ||
214 | static inline void dev_pm_qos_hide_flags(struct device *dev) {} | ||
215 | static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set) | ||
216 | { return 0; } | ||
217 | |||
218 | static inline s32 dev_pm_qos_requested_latency(struct device *dev) { return 0; } | ||
219 | static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } | ||
151 | #endif | 220 | #endif |
152 | 221 | ||
153 | #endif | 222 | #endif |
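The pm_qos.h hunk above grows the per-device PM QoS interface with a flags variant next to the existing latency limit. A minimal usage sketch, not taken from this series: it assumes dev_pm_qos_expose_flags() has already been called for the device and that PM_QOS_FLAG_NO_POWER_OFF is one of the flag masks defined alongside this interface (neither assumption is visible in the hunk itself).

#include <linux/device.h>
#include <linux/pm_qos.h>

/* Sketch: assert or clear a per-device PM QoS flag from a driver. */
static int example_block_power_off(struct device *dev, bool block)
{
	/* set == true ORs the mask into the requested flags, false clears it;
	 * a negative return means the update could not be applied. */
	return dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, block);
}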
diff --git a/include/linux/rio.h b/include/linux/rio.h index 4187da511006..a3e784278667 100644 --- a/include/linux/rio.h +++ b/include/linux/rio.h | |||
@@ -275,9 +275,11 @@ struct rio_id_table { | |||
275 | * struct rio_net - RIO network info | 275 | * struct rio_net - RIO network info |
276 | * @node: Node in global list of RIO networks | 276 | * @node: Node in global list of RIO networks |
277 | * @devices: List of devices in this network | 277 | * @devices: List of devices in this network |
278 | * @switches: List of switches in this network | ||
278 | * @mports: List of master ports accessing this network | 279 | * @mports: List of master ports accessing this network |
279 | * @hport: Default port for accessing this network | 280 | * @hport: Default port for accessing this network |
280 | * @id: RIO network ID | 281 | * @id: RIO network ID |
282 | * @destid_table: destID allocation table | ||
281 | */ | 283 | */ |
282 | struct rio_net { | 284 | struct rio_net { |
283 | struct list_head node; /* node in list of networks */ | 285 | struct list_head node; /* node in list of networks */ |
diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h index c64de9dd7631..2f694f3846a9 100644 --- a/include/linux/spi/ads7846.h +++ b/include/linux/spi/ads7846.h | |||
@@ -46,8 +46,9 @@ struct ads7846_platform_data { | |||
46 | u16 debounce_rep; /* additional consecutive good readings | 46 | u16 debounce_rep; /* additional consecutive good readings |
47 | * required after the first two */ | 47 | * required after the first two */ |
48 | int gpio_pendown; /* the GPIO used to decide the pendown | 48 | int gpio_pendown; /* the GPIO used to decide the pendown |
49 | * state if get_pendown_state == NULL | 49 | * state if get_pendown_state == NULL */ |
50 | */ | 50 | int gpio_pendown_debounce; /* platform specific debounce time for |
51 | * the gpio_pendown */ | ||
51 | int (*get_pendown_state)(void); | 52 | int (*get_pendown_state)(void); |
52 | int (*filter_init) (const struct ads7846_platform_data *pdata, | 53 | int (*filter_init) (const struct ads7846_platform_data *pdata, |
53 | void **filter_data); | 54 | void **filter_data); |
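The ads7846 hunk adds a per-board debounce value next to gpio_pendown. A hedged board-file sketch; the GPIO number and debounce value are invented, and the unit is assumed to be whatever gpio_set_debounce() expects (microseconds on most platforms), which the hunk does not spell out.

#include <linux/spi/ads7846.h>

static struct ads7846_platform_data example_ts_pdata = {
	.model			= 7846,	/* pre-existing field */
	.gpio_pendown		= 40,	/* hypothetical board GPIO */
	.gpio_pendown_debounce	= 310,	/* assumed to be in microseconds */
};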
diff --git a/include/linux/tick.h b/include/linux/tick.h index f37fceb69b73..1a6567b48492 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h | |||
@@ -142,4 +142,10 @@ static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } | |||
142 | static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } | 142 | static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } |
143 | # endif /* !NO_HZ */ | 143 | # endif /* !NO_HZ */ |
144 | 144 | ||
145 | # ifdef CONFIG_CPU_IDLE_GOV_MENU | ||
146 | extern void menu_hrtimer_cancel(void); | ||
147 | # else | ||
148 | static inline void menu_hrtimer_cancel(void) {} | ||
149 | # endif /* CONFIG_CPU_IDLE_GOV_MENU */ | ||
150 | |||
145 | #endif | 151 | #endif |
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 6f0ba01afe73..63445ede48bb 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -1351,7 +1351,7 @@ struct xfrm6_tunnel { | |||
1351 | }; | 1351 | }; |
1352 | 1352 | ||
1353 | extern void xfrm_init(void); | 1353 | extern void xfrm_init(void); |
1354 | extern void xfrm4_init(int rt_hash_size); | 1354 | extern void xfrm4_init(void); |
1355 | extern int xfrm_state_init(struct net *net); | 1355 | extern int xfrm_state_init(struct net *net); |
1356 | extern void xfrm_state_fini(struct net *net); | 1356 | extern void xfrm_state_fini(struct net *net); |
1357 | extern void xfrm4_state_init(void); | 1357 | extern void xfrm4_state_init(void); |
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 88fae8d20154..55367b04dc94 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h | |||
@@ -135,6 +135,8 @@ struct scsi_device { | |||
135 | * because we did a bus reset. */ | 135 | * because we did a bus reset. */ |
136 | unsigned use_10_for_rw:1; /* first try 10-byte read / write */ | 136 | unsigned use_10_for_rw:1; /* first try 10-byte read / write */ |
137 | unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */ | 137 | unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */ |
138 | unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */ | ||
139 | unsigned no_write_same:1; /* no WRITE SAME command */ | ||
138 | unsigned skip_ms_page_8:1; /* do not use MODE SENSE page 0x08 */ | 140 | unsigned skip_ms_page_8:1; /* do not use MODE SENSE page 0x08 */ |
139 | unsigned skip_ms_page_3f:1; /* do not use MODE SENSE page 0x3f */ | 141 | unsigned skip_ms_page_3f:1; /* do not use MODE SENSE page 0x3f */ |
140 | unsigned skip_vpd_pages:1; /* do not read VPD pages */ | 142 | unsigned skip_vpd_pages:1; /* do not read VPD pages */ |
@@ -362,6 +364,8 @@ extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout, | |||
362 | int retries, struct scsi_sense_hdr *sshdr); | 364 | int retries, struct scsi_sense_hdr *sshdr); |
363 | extern int scsi_get_vpd_page(struct scsi_device *, u8 page, unsigned char *buf, | 365 | extern int scsi_get_vpd_page(struct scsi_device *, u8 page, unsigned char *buf, |
364 | int buf_len); | 366 | int buf_len); |
367 | extern int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, | ||
368 | unsigned int len, unsigned char opcode); | ||
365 | extern int scsi_device_set_state(struct scsi_device *sdev, | 369 | extern int scsi_device_set_state(struct scsi_device *sdev, |
366 | enum scsi_device_state state); | 370 | enum scsi_device_state state); |
367 | extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, | 371 | extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, |
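scsi_report_opcode() above gives upper-level drivers a way to ask the target whether it advertises a CDB opcode (via REPORT SUPPORTED OPERATION CODES) before using it, with no_report_opcodes/no_write_same as the matching blacklist bits. A hedged caller sketch; the return convention assumed here (1 = advertised, 0 = not, negative = could not be determined) should be checked against the helper's implementation.

#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

static bool example_supports_write_same_16(struct scsi_device *sdev,
					   unsigned char *buf,
					   unsigned int len)
{
	if (sdev->no_report_opcodes)	/* device known not to cope */
		return false;

	return scsi_report_opcode(sdev, buf, len, WRITE_SAME_16) == 1;
}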
diff --git a/include/uapi/linux/oom.h b/include/uapi/linux/oom.h index a49c4afc7060..b29272d621ce 100644 --- a/include/uapi/linux/oom.h +++ b/include/uapi/linux/oom.h | |||
@@ -8,4 +8,13 @@ | |||
8 | #define OOM_SCORE_ADJ_MIN (-1000) | 8 | #define OOM_SCORE_ADJ_MIN (-1000) |
9 | #define OOM_SCORE_ADJ_MAX 1000 | 9 | #define OOM_SCORE_ADJ_MAX 1000 |
10 | 10 | ||
11 | /* | ||
12 | * /proc/<pid>/oom_adj set to -17 protects from the oom killer for legacy | ||
13 | * purposes. | ||
14 | */ | ||
15 | #define OOM_DISABLE (-17) | ||
16 | /* inclusive */ | ||
17 | #define OOM_ADJUST_MIN (-16) | ||
18 | #define OOM_ADJUST_MAX 15 | ||
19 | |||
11 | #endif /* _UAPI__INCLUDE_LINUX_OOM_H */ | 20 | #endif /* _UAPI__INCLUDE_LINUX_OOM_H */ |
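These legacy constants are kept because /proc/<pid>/oom_adj (OOM_ADJUST_MIN..OOM_ADJUST_MAX, plus OOM_DISABLE) still has to be mapped onto the oom_score_adj scale above. A sketch of that mapping as fs/proc is generally understood to perform it, shown only as an illustration rather than a copy of that code; for example oom_adj = 15 becomes 15 * 1000 / 17 = 882.

#include <linux/oom.h>

static inline int example_oom_adj_to_score_adj(int oom_adj)
{
	/* Keep "never kill" exact; scale everything else proportionally. */
	if (oom_adj == OOM_DISABLE)
		return OOM_SCORE_ADJ_MIN;
	return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
}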
diff --git a/kernel/futex.c b/kernel/futex.c index 3717e7b306e0..20ef219bbe9b 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -716,7 +716,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, | |||
716 | struct futex_pi_state **ps, | 716 | struct futex_pi_state **ps, |
717 | struct task_struct *task, int set_waiters) | 717 | struct task_struct *task, int set_waiters) |
718 | { | 718 | { |
719 | int lock_taken, ret, ownerdied = 0; | 719 | int lock_taken, ret, force_take = 0; |
720 | u32 uval, newval, curval, vpid = task_pid_vnr(task); | 720 | u32 uval, newval, curval, vpid = task_pid_vnr(task); |
721 | 721 | ||
722 | retry: | 722 | retry: |
@@ -755,17 +755,15 @@ retry: | |||
755 | newval = curval | FUTEX_WAITERS; | 755 | newval = curval | FUTEX_WAITERS; |
756 | 756 | ||
757 | /* | 757 | /* |
758 | * There are two cases, where a futex might have no owner (the | 758 | * Should we force take the futex? See below. |
759 | * owner TID is 0): OWNER_DIED. We take over the futex in this | ||
760 | * case. We also do an unconditional take over, when the owner | ||
761 | * of the futex died. | ||
762 | * | ||
763 | * This is safe as we are protected by the hash bucket lock ! | ||
764 | */ | 759 | */ |
765 | if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) { | 760 | if (unlikely(force_take)) { |
766 | /* Keep the OWNER_DIED bit */ | 761 | /* |
762 | * Keep the OWNER_DIED and the WAITERS bit and set the | ||
763 | * new TID value. | ||
764 | */ | ||
767 | newval = (curval & ~FUTEX_TID_MASK) | vpid; | 765 | newval = (curval & ~FUTEX_TID_MASK) | vpid; |
768 | ownerdied = 0; | 766 | force_take = 0; |
769 | lock_taken = 1; | 767 | lock_taken = 1; |
770 | } | 768 | } |
771 | 769 | ||
@@ -775,7 +773,7 @@ retry: | |||
775 | goto retry; | 773 | goto retry; |
776 | 774 | ||
777 | /* | 775 | /* |
778 | * We took the lock due to owner died take over. | 776 | * We took the lock due to forced take over. |
779 | */ | 777 | */ |
780 | if (unlikely(lock_taken)) | 778 | if (unlikely(lock_taken)) |
781 | return 1; | 779 | return 1; |
@@ -790,20 +788,25 @@ retry: | |||
790 | switch (ret) { | 788 | switch (ret) { |
791 | case -ESRCH: | 789 | case -ESRCH: |
792 | /* | 790 | /* |
793 | * No owner found for this futex. Check if the | 791 | * We failed to find an owner for this |
794 | * OWNER_DIED bit is set to figure out whether | 792 | * futex. So we have no pi_state to block |
795 | * this is a robust futex or not. | 793 | * on. This can happen in two cases: |
794 | * | ||
795 | * 1) The owner died | ||
796 | * 2) A stale FUTEX_WAITERS bit | ||
797 | * | ||
798 | * Re-read the futex value. | ||
796 | */ | 799 | */ |
797 | if (get_futex_value_locked(&curval, uaddr)) | 800 | if (get_futex_value_locked(&curval, uaddr)) |
798 | return -EFAULT; | 801 | return -EFAULT; |
799 | 802 | ||
800 | /* | 803 | /* |
801 | * We simply start over in case of a robust | 804 | * If the owner died or we have a stale |
802 | * futex. The code above will take the futex | 805 | * WAITERS bit the owner TID in the user space |
803 | * and return happy. | 806 | * futex is 0. |
804 | */ | 807 | */ |
805 | if (curval & FUTEX_OWNER_DIED) { | 808 | if (!(curval & FUTEX_TID_MASK)) { |
806 | ownerdied = 1; | 809 | force_take = 1; |
807 | goto retry; | 810 | goto retry; |
808 | } | 811 | } |
809 | default: | 812 | default: |
diff --git a/kernel/power/main.c b/kernel/power/main.c index f458238109cc..1c16f9167de1 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
@@ -59,7 +59,7 @@ static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr, | |||
59 | { | 59 | { |
60 | unsigned long val; | 60 | unsigned long val; |
61 | 61 | ||
62 | if (strict_strtoul(buf, 10, &val)) | 62 | if (kstrtoul(buf, 10, &val)) |
63 | return -EINVAL; | 63 | return -EINVAL; |
64 | 64 | ||
65 | if (val > 1) | 65 | if (val > 1) |
diff --git a/kernel/power/qos.c b/kernel/power/qos.c index 846bd42c7ed1..9322ff7eaad6 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c | |||
@@ -213,6 +213,69 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, | |||
213 | } | 213 | } |
214 | 214 | ||
215 | /** | 215 | /** |
216 | * pm_qos_flags_remove_req - Remove device PM QoS flags request. | ||
217 | * @pqf: Device PM QoS flags set to remove the request from. | ||
218 | * @req: Request to remove from the set. | ||
219 | */ | ||
220 | static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf, | ||
221 | struct pm_qos_flags_request *req) | ||
222 | { | ||
223 | s32 val = 0; | ||
224 | |||
225 | list_del(&req->node); | ||
226 | list_for_each_entry(req, &pqf->list, node) | ||
227 | val |= req->flags; | ||
228 | |||
229 | pqf->effective_flags = val; | ||
230 | } | ||
231 | |||
232 | /** | ||
233 | * pm_qos_update_flags - Update a set of PM QoS flags. | ||
234 | * @pqf: Set of flags to update. | ||
235 | * @req: Request to add to the set, to modify, or to remove from the set. | ||
236 | * @action: Action to take on the set. | ||
237 | * @val: Value of the request to add or modify. | ||
238 | * | ||
239 | * Update the given set of PM QoS flags and call notifiers if the aggregate | ||
240 | * value has changed. Returns 1 if the aggregate constraint value has changed, | ||
241 | * 0 otherwise. | ||
242 | */ | ||
243 | bool pm_qos_update_flags(struct pm_qos_flags *pqf, | ||
244 | struct pm_qos_flags_request *req, | ||
245 | enum pm_qos_req_action action, s32 val) | ||
246 | { | ||
247 | unsigned long irqflags; | ||
248 | s32 prev_value, curr_value; | ||
249 | |||
250 | spin_lock_irqsave(&pm_qos_lock, irqflags); | ||
251 | |||
252 | prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags; | ||
253 | |||
254 | switch (action) { | ||
255 | case PM_QOS_REMOVE_REQ: | ||
256 | pm_qos_flags_remove_req(pqf, req); | ||
257 | break; | ||
258 | case PM_QOS_UPDATE_REQ: | ||
259 | pm_qos_flags_remove_req(pqf, req); | ||
260 | case PM_QOS_ADD_REQ: | ||
261 | req->flags = val; | ||
262 | INIT_LIST_HEAD(&req->node); | ||
263 | list_add_tail(&req->node, &pqf->list); | ||
264 | pqf->effective_flags |= val; | ||
265 | break; | ||
266 | default: | ||
267 | /* no action */ | ||
268 | ; | ||
269 | } | ||
270 | |||
271 | curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags; | ||
272 | |||
273 | spin_unlock_irqrestore(&pm_qos_lock, irqflags); | ||
274 | |||
275 | return prev_value != curr_value; | ||
276 | } | ||
277 | |||
278 | /** | ||
216 | * pm_qos_request - returns current system wide qos expectation | 279 | * pm_qos_request - returns current system wide qos expectation |
217 | * @pm_qos_class: identification of which qos value is requested | 280 | * @pm_qos_class: identification of which qos value is requested |
218 | * | 281 | * |
@@ -500,7 +563,7 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, | |||
500 | } else { | 563 | } else { |
501 | ascii_value[count] = '\0'; | 564 | ascii_value[count] = '\0'; |
502 | } | 565 | } |
503 | ret = strict_strtoul(ascii_value, 16, &ulval); | 566 | ret = kstrtoul(ascii_value, 16, &ulval); |
504 | if (ret) { | 567 | if (ret) { |
505 | pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret); | 568 | pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret); |
506 | return -EINVAL; | 569 | return -EINVAL; |
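pm_qos_update_flags() above keeps effective_flags equal to the bitwise OR of every request still on the list (adds and updates OR the new value in, removes recompute the OR from scratch), so a flag stays asserted until its last requester drops it. A small sketch of that behaviour; pqf->list is assumed to have been set up with INIT_LIST_HEAD() beforehand.

#include <linux/pm_qos.h>

static void example_flags_aggregation(struct pm_qos_flags *pqf,
				      struct pm_qos_flags_request *a,
				      struct pm_qos_flags_request *b)
{
	pm_qos_update_flags(pqf, a, PM_QOS_ADD_REQ, 0x1);
	pm_qos_update_flags(pqf, b, PM_QOS_ADD_REQ, 0x2);
	/* pqf->effective_flags == 0x3 */

	pm_qos_update_flags(pqf, a, PM_QOS_REMOVE_REQ, 0);
	/* recomputed from the remaining request: pqf->effective_flags == 0x2 */
}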
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 3c9d764eb0d8..7c33ed200410 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
@@ -126,7 +126,7 @@ static int swsusp_extents_insert(unsigned long swap_offset) | |||
126 | 126 | ||
127 | /* Figure out where to put the new node */ | 127 | /* Figure out where to put the new node */ |
128 | while (*new) { | 128 | while (*new) { |
129 | ext = container_of(*new, struct swsusp_extent, node); | 129 | ext = rb_entry(*new, struct swsusp_extent, node); |
130 | parent = *new; | 130 | parent = *new; |
131 | if (swap_offset < ext->start) { | 131 | if (swap_offset < ext->start) { |
132 | /* Try to merge */ | 132 | /* Try to merge */ |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index a40260885265..6f337068dc4c 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -526,6 +526,8 @@ void tick_nohz_irq_exit(void) | |||
526 | if (!ts->inidle) | 526 | if (!ts->inidle) |
527 | return; | 527 | return; |
528 | 528 | ||
529 | /* Cancel the timer because the CPU has already woken up from the C-states */ | ||
530 | menu_hrtimer_cancel(); | ||
529 | __tick_nohz_idle_enter(ts); | 531 | __tick_nohz_idle_enter(ts); |
530 | } | 532 | } |
531 | 533 | ||
@@ -621,6 +623,8 @@ void tick_nohz_idle_exit(void) | |||
621 | 623 | ||
622 | ts->inidle = 0; | 624 | ts->inidle = 0; |
623 | 625 | ||
626 | /* Cancel the timer because the CPU has already woken up from the C-states */ | ||
627 | menu_hrtimer_cancel(); | ||
624 | if (ts->idle_active || ts->tick_stopped) | 628 | if (ts->idle_active || ts->tick_stopped) |
625 | now = ktime_get(); | 629 | now = ktime_get(); |
626 | 630 | ||
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h index 678ce4f1e124..095ab157a521 100644 --- a/lib/mpi/longlong.h +++ b/lib/mpi/longlong.h | |||
@@ -641,7 +641,14 @@ do { \ | |||
641 | ************** MIPS ***************** | 641 | ************** MIPS ***************** |
642 | ***************************************/ | 642 | ***************************************/ |
643 | #if defined(__mips__) && W_TYPE_SIZE == 32 | 643 | #if defined(__mips__) && W_TYPE_SIZE == 32 |
644 | #if __GNUC__ > 2 || __GNUC_MINOR__ >= 7 | 644 | #if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4 |
645 | #define umul_ppmm(w1, w0, u, v) \ | ||
646 | do { \ | ||
647 | UDItype __ll = (UDItype)(u) * (v); \ | ||
648 | w1 = __ll >> 32; \ | ||
649 | w0 = __ll; \ | ||
650 | } while (0) | ||
651 | #elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7 | ||
645 | #define umul_ppmm(w1, w0, u, v) \ | 652 | #define umul_ppmm(w1, w0, u, v) \ |
646 | __asm__ ("multu %2,%3" \ | 653 | __asm__ ("multu %2,%3" \ |
647 | : "=l" ((USItype)(w0)), \ | 654 | : "=l" ((USItype)(w0)), \ |
@@ -666,7 +673,15 @@ do { \ | |||
666 | ************** MIPS/64 ************** | 673 | ************** MIPS/64 ************** |
667 | ***************************************/ | 674 | ***************************************/ |
668 | #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 | 675 | #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 |
669 | #if __GNUC__ > 2 || __GNUC_MINOR__ >= 7 | 676 | #if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4 |
677 | #define umul_ppmm(w1, w0, u, v) \ | ||
678 | do { \ | ||
679 | typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \ | ||
680 | __ll_UTItype __ll = (__ll_UTItype)(u) * (v); \ | ||
681 | w1 = __ll >> 64; \ | ||
682 | w0 = __ll; \ | ||
683 | } while (0) | ||
684 | #elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7 | ||
670 | #define umul_ppmm(w1, w0, u, v) \ | 685 | #define umul_ppmm(w1, w0, u, v) \ |
671 | __asm__ ("dmultu %2,%3" \ | 686 | __asm__ ("dmultu %2,%3" \ |
672 | : "=l" ((UDItype)(w0)), \ | 687 | : "=l" ((UDItype)(w0)), \ |
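Both longlong.h hunks teach umul_ppmm() to use a plain C double-width multiply on GCC >= 4.4 instead of inline asm that names the MIPS hi/lo registers, constraints that newer GCC versions no longer accept. A standalone illustration of the same idea for the 32-bit case, using kernel fixed-width types rather than the UDItype/USItype machinery:

#include <linux/types.h>

/* Split the 64-bit product of two 32-bit operands into high and low words,
 * which is exactly what umul_ppmm(w1, w0, u, v) is specified to produce. */
static inline void example_umul32(u32 u, u32 v, u32 *w1, u32 *w0)
{
	u64 p = (u64)u * v;

	*w1 = (u32)(p >> 32);
	*w0 = (u32)p;
}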
diff --git a/mm/bootmem.c b/mm/bootmem.c index 434be4ae7a04..f468185b3b28 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c | |||
@@ -198,8 +198,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) | |||
198 | int order = ilog2(BITS_PER_LONG); | 198 | int order = ilog2(BITS_PER_LONG); |
199 | 199 | ||
200 | __free_pages_bootmem(pfn_to_page(start), order); | 200 | __free_pages_bootmem(pfn_to_page(start), order); |
201 | fixup_zone_present_pages(page_to_nid(pfn_to_page(start)), | ||
202 | start, start + BITS_PER_LONG); | ||
203 | count += BITS_PER_LONG; | 201 | count += BITS_PER_LONG; |
204 | start += BITS_PER_LONG; | 202 | start += BITS_PER_LONG; |
205 | } else { | 203 | } else { |
@@ -210,9 +208,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) | |||
210 | if (vec & 1) { | 208 | if (vec & 1) { |
211 | page = pfn_to_page(start + off); | 209 | page = pfn_to_page(start + off); |
212 | __free_pages_bootmem(page, 0); | 210 | __free_pages_bootmem(page, 0); |
213 | fixup_zone_present_pages( | ||
214 | page_to_nid(page), | ||
215 | start + off, start + off + 1); | ||
216 | count++; | 211 | count++; |
217 | } | 212 | } |
218 | vec >>= 1; | 213 | vec >>= 1; |
@@ -226,11 +221,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) | |||
226 | pages = bdata->node_low_pfn - bdata->node_min_pfn; | 221 | pages = bdata->node_low_pfn - bdata->node_min_pfn; |
227 | pages = bootmem_bootmap_pages(pages); | 222 | pages = bootmem_bootmap_pages(pages); |
228 | count += pages; | 223 | count += pages; |
229 | while (pages--) { | 224 | while (pages--) |
230 | fixup_zone_present_pages(page_to_nid(page), | ||
231 | page_to_pfn(page), page_to_pfn(page) + 1); | ||
232 | __free_pages_bootmem(page++, 0); | 225 | __free_pages_bootmem(page++, 0); |
233 | } | ||
234 | 226 | ||
235 | bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count); | 227 | bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count); |
236 | 228 | ||
diff --git a/mm/highmem.c b/mm/highmem.c index d517cd16a6eb..2da13a5c50e2 100644 --- a/mm/highmem.c +++ b/mm/highmem.c | |||
@@ -98,7 +98,7 @@ struct page *kmap_to_page(void *vaddr) | |||
98 | { | 98 | { |
99 | unsigned long addr = (unsigned long)vaddr; | 99 | unsigned long addr = (unsigned long)vaddr; |
100 | 100 | ||
101 | if (addr >= PKMAP_ADDR(0) && addr <= PKMAP_ADDR(LAST_PKMAP)) { | 101 | if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) { |
102 | int i = (addr - PKMAP_ADDR(0)) >> PAGE_SHIFT; | 102 | int i = (addr - PKMAP_ADDR(0)) >> PAGE_SHIFT; |
103 | return pte_page(pkmap_page_table[i]); | 103 | return pte_page(pkmap_page_table[i]); |
104 | } | 104 | } |
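The kmap_to_page() fix above is a one-character off-by-one: the pkmap window holds LAST_PKMAP page-sized slots starting at PKMAP_ADDR(0), so PKMAP_ADDR(LAST_PKMAP) is the first address beyond it. With the old inclusive test that address passed the check and produced i = (PKMAP_ADDR(LAST_PKMAP) - PKMAP_ADDR(0)) >> PAGE_SHIFT = LAST_PKMAP, indexing one entry past the end of pkmap_page_table[]; with '<' it now falls through to the ordinary lowmem virt_to_page() path. (This assumes the usual PKMAP_ADDR(nr) = PKMAP_BASE + (nr << PAGE_SHIFT) layout, which is not shown in the hunk.)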
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7acf43bf04a2..dd39ba000b31 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -1055,12 +1055,24 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone, | |||
1055 | struct mem_cgroup *memcg) | 1055 | struct mem_cgroup *memcg) |
1056 | { | 1056 | { |
1057 | struct mem_cgroup_per_zone *mz; | 1057 | struct mem_cgroup_per_zone *mz; |
1058 | struct lruvec *lruvec; | ||
1058 | 1059 | ||
1059 | if (mem_cgroup_disabled()) | 1060 | if (mem_cgroup_disabled()) { |
1060 | return &zone->lruvec; | 1061 | lruvec = &zone->lruvec; |
1062 | goto out; | ||
1063 | } | ||
1061 | 1064 | ||
1062 | mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone)); | 1065 | mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone)); |
1063 | return &mz->lruvec; | 1066 | lruvec = &mz->lruvec; |
1067 | out: | ||
1068 | /* | ||
1069 | * Since a node can be onlined after the mem_cgroup was created, | ||
1070 | * we have to be prepared to initialize lruvec->zone here; | ||
1071 | * and if offlined then reonlined, we need to reinitialize it. | ||
1072 | */ | ||
1073 | if (unlikely(lruvec->zone != zone)) | ||
1074 | lruvec->zone = zone; | ||
1075 | return lruvec; | ||
1064 | } | 1076 | } |
1065 | 1077 | ||
1066 | /* | 1078 | /* |
@@ -1087,9 +1099,12 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) | |||
1087 | struct mem_cgroup_per_zone *mz; | 1099 | struct mem_cgroup_per_zone *mz; |
1088 | struct mem_cgroup *memcg; | 1100 | struct mem_cgroup *memcg; |
1089 | struct page_cgroup *pc; | 1101 | struct page_cgroup *pc; |
1102 | struct lruvec *lruvec; | ||
1090 | 1103 | ||
1091 | if (mem_cgroup_disabled()) | 1104 | if (mem_cgroup_disabled()) { |
1092 | return &zone->lruvec; | 1105 | lruvec = &zone->lruvec; |
1106 | goto out; | ||
1107 | } | ||
1093 | 1108 | ||
1094 | pc = lookup_page_cgroup(page); | 1109 | pc = lookup_page_cgroup(page); |
1095 | memcg = pc->mem_cgroup; | 1110 | memcg = pc->mem_cgroup; |
@@ -1107,7 +1122,16 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) | |||
1107 | pc->mem_cgroup = memcg = root_mem_cgroup; | 1122 | pc->mem_cgroup = memcg = root_mem_cgroup; |
1108 | 1123 | ||
1109 | mz = page_cgroup_zoneinfo(memcg, page); | 1124 | mz = page_cgroup_zoneinfo(memcg, page); |
1110 | return &mz->lruvec; | 1125 | lruvec = &mz->lruvec; |
1126 | out: | ||
1127 | /* | ||
1128 | * Since a node can be onlined after the mem_cgroup was created, | ||
1129 | * we have to be prepared to initialize lruvec->zone here; | ||
1130 | * and if offlined then reonlined, we need to reinitialize it. | ||
1131 | */ | ||
1132 | if (unlikely(lruvec->zone != zone)) | ||
1133 | lruvec->zone = zone; | ||
1134 | return lruvec; | ||
1111 | } | 1135 | } |
1112 | 1136 | ||
1113 | /** | 1137 | /** |
@@ -1452,17 +1476,26 @@ static int mem_cgroup_count_children(struct mem_cgroup *memcg) | |||
1452 | static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg) | 1476 | static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg) |
1453 | { | 1477 | { |
1454 | u64 limit; | 1478 | u64 limit; |
1455 | u64 memsw; | ||
1456 | 1479 | ||
1457 | limit = res_counter_read_u64(&memcg->res, RES_LIMIT); | 1480 | limit = res_counter_read_u64(&memcg->res, RES_LIMIT); |
1458 | limit += total_swap_pages << PAGE_SHIFT; | ||
1459 | 1481 | ||
1460 | memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); | ||
1461 | /* | 1482 | /* |
1462 | * If memsw is finite and limits the amount of swap space available | 1483 | * Do not consider swap space if we cannot swap due to swappiness |
1463 | * to this memcg, return that limit. | ||
1464 | */ | 1484 | */ |
1465 | return min(limit, memsw); | 1485 | if (mem_cgroup_swappiness(memcg)) { |
1486 | u64 memsw; | ||
1487 | |||
1488 | limit += total_swap_pages << PAGE_SHIFT; | ||
1489 | memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); | ||
1490 | |||
1491 | /* | ||
1492 | * If memsw is finite and limits the amount of swap space | ||
1493 | * available to this memcg, return that limit. | ||
1494 | */ | ||
1495 | limit = min(limit, memsw); | ||
1496 | } | ||
1497 | |||
1498 | return limit; | ||
1466 | } | 1499 | } |
1467 | 1500 | ||
1468 | void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, | 1501 | void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, |
@@ -3688,17 +3721,17 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, | |||
3688 | static bool mem_cgroup_force_empty_list(struct mem_cgroup *memcg, | 3721 | static bool mem_cgroup_force_empty_list(struct mem_cgroup *memcg, |
3689 | int node, int zid, enum lru_list lru) | 3722 | int node, int zid, enum lru_list lru) |
3690 | { | 3723 | { |
3691 | struct mem_cgroup_per_zone *mz; | 3724 | struct lruvec *lruvec; |
3692 | unsigned long flags, loop; | 3725 | unsigned long flags, loop; |
3693 | struct list_head *list; | 3726 | struct list_head *list; |
3694 | struct page *busy; | 3727 | struct page *busy; |
3695 | struct zone *zone; | 3728 | struct zone *zone; |
3696 | 3729 | ||
3697 | zone = &NODE_DATA(node)->node_zones[zid]; | 3730 | zone = &NODE_DATA(node)->node_zones[zid]; |
3698 | mz = mem_cgroup_zoneinfo(memcg, node, zid); | 3731 | lruvec = mem_cgroup_zone_lruvec(zone, memcg); |
3699 | list = &mz->lruvec.lists[lru]; | 3732 | list = &lruvec->lists[lru]; |
3700 | 3733 | ||
3701 | loop = mz->lru_size[lru]; | 3734 | loop = mem_cgroup_get_lru_size(lruvec, lru); |
3702 | /* give some margin against EBUSY etc...*/ | 3735 | /* give some margin against EBUSY etc...*/ |
3703 | loop += 256; | 3736 | loop += 256; |
3704 | busy = NULL; | 3737 | busy = NULL; |
@@ -4736,7 +4769,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) | |||
4736 | 4769 | ||
4737 | for (zone = 0; zone < MAX_NR_ZONES; zone++) { | 4770 | for (zone = 0; zone < MAX_NR_ZONES; zone++) { |
4738 | mz = &pn->zoneinfo[zone]; | 4771 | mz = &pn->zoneinfo[zone]; |
4739 | lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]); | 4772 | lruvec_init(&mz->lruvec); |
4740 | mz->usage_in_excess = 0; | 4773 | mz->usage_in_excess = 0; |
4741 | mz->on_tree = false; | 4774 | mz->on_tree = false; |
4742 | mz->memcg = memcg; | 4775 | mz->memcg = memcg; |
diff --git a/mm/memory.c b/mm/memory.c index fb135ba4aba9..221fc9ffcab1 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2527,9 +2527,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2527 | int ret = 0; | 2527 | int ret = 0; |
2528 | int page_mkwrite = 0; | 2528 | int page_mkwrite = 0; |
2529 | struct page *dirty_page = NULL; | 2529 | struct page *dirty_page = NULL; |
2530 | unsigned long mmun_start; /* For mmu_notifiers */ | 2530 | unsigned long mmun_start = 0; /* For mmu_notifiers */ |
2531 | unsigned long mmun_end; /* For mmu_notifiers */ | 2531 | unsigned long mmun_end = 0; /* For mmu_notifiers */ |
2532 | bool mmun_called = false; /* For mmu_notifiers */ | ||
2533 | 2532 | ||
2534 | old_page = vm_normal_page(vma, address, orig_pte); | 2533 | old_page = vm_normal_page(vma, address, orig_pte); |
2535 | if (!old_page) { | 2534 | if (!old_page) { |
@@ -2708,8 +2707,7 @@ gotten: | |||
2708 | goto oom_free_new; | 2707 | goto oom_free_new; |
2709 | 2708 | ||
2710 | mmun_start = address & PAGE_MASK; | 2709 | mmun_start = address & PAGE_MASK; |
2711 | mmun_end = (address & PAGE_MASK) + PAGE_SIZE; | 2710 | mmun_end = mmun_start + PAGE_SIZE; |
2712 | mmun_called = true; | ||
2713 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); | 2711 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); |
2714 | 2712 | ||
2715 | /* | 2713 | /* |
@@ -2778,7 +2776,7 @@ gotten: | |||
2778 | page_cache_release(new_page); | 2776 | page_cache_release(new_page); |
2779 | unlock: | 2777 | unlock: |
2780 | pte_unmap_unlock(page_table, ptl); | 2778 | pte_unmap_unlock(page_table, ptl); |
2781 | if (mmun_called) | 2779 | if (mmun_end > mmun_start) |
2782 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | 2780 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); |
2783 | if (old_page) { | 2781 | if (old_page) { |
2784 | /* | 2782 | /* |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 56b758ae57d2..e4eeacae2b91 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -106,7 +106,6 @@ static void get_page_bootmem(unsigned long info, struct page *page, | |||
106 | void __ref put_page_bootmem(struct page *page) | 106 | void __ref put_page_bootmem(struct page *page) |
107 | { | 107 | { |
108 | unsigned long type; | 108 | unsigned long type; |
109 | struct zone *zone; | ||
110 | 109 | ||
111 | type = (unsigned long) page->lru.next; | 110 | type = (unsigned long) page->lru.next; |
112 | BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE || | 111 | BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE || |
@@ -117,12 +116,6 @@ void __ref put_page_bootmem(struct page *page) | |||
117 | set_page_private(page, 0); | 116 | set_page_private(page, 0); |
118 | INIT_LIST_HEAD(&page->lru); | 117 | INIT_LIST_HEAD(&page->lru); |
119 | __free_pages_bootmem(page, 0); | 118 | __free_pages_bootmem(page, 0); |
120 | |||
121 | zone = page_zone(page); | ||
122 | zone_span_writelock(zone); | ||
123 | zone->present_pages++; | ||
124 | zone_span_writeunlock(zone); | ||
125 | totalram_pages++; | ||
126 | } | 119 | } |
127 | 120 | ||
128 | } | 121 | } |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -334,8 +334,10 @@ void validate_mm(struct mm_struct *mm) | |||
334 | struct vm_area_struct *vma = mm->mmap; | 334 | struct vm_area_struct *vma = mm->mmap; |
335 | while (vma) { | 335 | while (vma) { |
336 | struct anon_vma_chain *avc; | 336 | struct anon_vma_chain *avc; |
337 | vma_lock_anon_vma(vma); | ||
337 | list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) | 338 | list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) |
338 | anon_vma_interval_tree_verify(avc); | 339 | anon_vma_interval_tree_verify(avc); |
340 | vma_unlock_anon_vma(vma); | ||
339 | vma = vma->vm_next; | 341 | vma = vma->vm_next; |
340 | i++; | 342 | i++; |
341 | } | 343 | } |
diff --git a/mm/mmzone.c b/mm/mmzone.c index 3cef80f6ac79..4596d81b89b1 100644 --- a/mm/mmzone.c +++ b/mm/mmzone.c | |||
@@ -87,7 +87,7 @@ int memmap_valid_within(unsigned long pfn, | |||
87 | } | 87 | } |
88 | #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ | 88 | #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ |
89 | 89 | ||
90 | void lruvec_init(struct lruvec *lruvec, struct zone *zone) | 90 | void lruvec_init(struct lruvec *lruvec) |
91 | { | 91 | { |
92 | enum lru_list lru; | 92 | enum lru_list lru; |
93 | 93 | ||
@@ -95,8 +95,4 @@ void lruvec_init(struct lruvec *lruvec, struct zone *zone) | |||
95 | 95 | ||
96 | for_each_lru(lru) | 96 | for_each_lru(lru) |
97 | INIT_LIST_HEAD(&lruvec->lists[lru]); | 97 | INIT_LIST_HEAD(&lruvec->lists[lru]); |
98 | |||
99 | #ifdef CONFIG_MEMCG | ||
100 | lruvec->zone = zone; | ||
101 | #endif | ||
102 | } | 98 | } |
diff --git a/mm/nobootmem.c b/mm/nobootmem.c index 714d5d650470..bd82f6b31411 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c | |||
@@ -116,8 +116,6 @@ static unsigned long __init __free_memory_core(phys_addr_t start, | |||
116 | return 0; | 116 | return 0; |
117 | 117 | ||
118 | __free_pages_memory(start_pfn, end_pfn); | 118 | __free_pages_memory(start_pfn, end_pfn); |
119 | fixup_zone_present_pages(pfn_to_nid(start >> PAGE_SHIFT), | ||
120 | start_pfn, end_pfn); | ||
121 | 119 | ||
122 | return end_pfn - start_pfn; | 120 | return end_pfn - start_pfn; |
123 | } | 121 | } |
@@ -128,7 +126,6 @@ unsigned long __init free_low_memory_core_early(int nodeid) | |||
128 | phys_addr_t start, end, size; | 126 | phys_addr_t start, end, size; |
129 | u64 i; | 127 | u64 i; |
130 | 128 | ||
131 | reset_zone_present_pages(); | ||
132 | for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) | 129 | for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) |
133 | count += __free_memory_core(start, end); | 130 | count += __free_memory_core(start, end); |
134 | 131 | ||
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5b74de6702e0..bcb72c6e2b2d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -1405,7 +1405,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype) | |||
1405 | 1405 | ||
1406 | mt = get_pageblock_migratetype(page); | 1406 | mt = get_pageblock_migratetype(page); |
1407 | if (unlikely(mt != MIGRATE_ISOLATE)) | 1407 | if (unlikely(mt != MIGRATE_ISOLATE)) |
1408 | __mod_zone_freepage_state(zone, -(1UL << order), mt); | 1408 | __mod_zone_freepage_state(zone, -(1UL << alloc_order), mt); |
1409 | 1409 | ||
1410 | if (alloc_order != order) | 1410 | if (alloc_order != order) |
1411 | expand(zone, page, alloc_order, order, | 1411 | expand(zone, page, alloc_order, order, |
@@ -4505,7 +4505,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, | |||
4505 | zone->zone_pgdat = pgdat; | 4505 | zone->zone_pgdat = pgdat; |
4506 | 4506 | ||
4507 | zone_pcp_init(zone); | 4507 | zone_pcp_init(zone); |
4508 | lruvec_init(&zone->lruvec, zone); | 4508 | lruvec_init(&zone->lruvec); |
4509 | if (!size) | 4509 | if (!size) |
4510 | continue; | 4510 | continue; |
4511 | 4511 | ||
@@ -6098,37 +6098,3 @@ void dump_page(struct page *page) | |||
6098 | dump_page_flags(page->flags); | 6098 | dump_page_flags(page->flags); |
6099 | mem_cgroup_print_bad_page(page); | 6099 | mem_cgroup_print_bad_page(page); |
6100 | } | 6100 | } |
6101 | |||
6102 | /* reset zone->present_pages */ | ||
6103 | void reset_zone_present_pages(void) | ||
6104 | { | ||
6105 | struct zone *z; | ||
6106 | int i, nid; | ||
6107 | |||
6108 | for_each_node_state(nid, N_HIGH_MEMORY) { | ||
6109 | for (i = 0; i < MAX_NR_ZONES; i++) { | ||
6110 | z = NODE_DATA(nid)->node_zones + i; | ||
6111 | z->present_pages = 0; | ||
6112 | } | ||
6113 | } | ||
6114 | } | ||
6115 | |||
6116 | /* calculate zone's present pages in buddy system */ | ||
6117 | void fixup_zone_present_pages(int nid, unsigned long start_pfn, | ||
6118 | unsigned long end_pfn) | ||
6119 | { | ||
6120 | struct zone *z; | ||
6121 | unsigned long zone_start_pfn, zone_end_pfn; | ||
6122 | int i; | ||
6123 | |||
6124 | for (i = 0; i < MAX_NR_ZONES; i++) { | ||
6125 | z = NODE_DATA(nid)->node_zones + i; | ||
6126 | zone_start_pfn = z->zone_start_pfn; | ||
6127 | zone_end_pfn = zone_start_pfn + z->spanned_pages; | ||
6128 | |||
6129 | /* if the two regions intersect */ | ||
6130 | if (!(zone_start_pfn >= end_pfn || zone_end_pfn <= start_pfn)) | ||
6131 | z->present_pages += min(end_pfn, zone_end_pfn) - | ||
6132 | max(start_pfn, zone_start_pfn); | ||
6133 | } | ||
6134 | } | ||
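The one-liner in capture_free_page() above is an accounting fix: when a free block of size 1 << order is captured to satisfy a request of alloc_order < order, expand() hands the unused tail straight back to the free lists, so only 1 << alloc_order pages actually stop being free. With illustrative numbers: capturing an order-3 block (8 pages) for an alloc_order of 1 removes 8 pages, expand() re-adds an order-2 and an order-1 block (6 pages), and the net change is 2 pages; charging -(1UL << order) = -8 to the free-page counters would therefore make NR_FREE_PAGES drift low by 6 on every such capture.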
diff --git a/mm/shmem.c b/mm/shmem.c index 67afba5117f2..89341b658bd0 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -643,7 +643,7 @@ static void shmem_evict_inode(struct inode *inode) | |||
643 | kfree(info->symlink); | 643 | kfree(info->symlink); |
644 | 644 | ||
645 | simple_xattrs_free(&info->xattrs); | 645 | simple_xattrs_free(&info->xattrs); |
646 | BUG_ON(inode->i_blocks); | 646 | WARN_ON(inode->i_blocks); |
647 | shmem_free_inode(inode->i_sb); | 647 | shmem_free_inode(inode->i_sb); |
648 | clear_inode(inode); | 648 | clear_inode(inode); |
649 | } | 649 | } |
@@ -1145,8 +1145,20 @@ repeat: | |||
1145 | if (!error) { | 1145 | if (!error) { |
1146 | error = shmem_add_to_page_cache(page, mapping, index, | 1146 | error = shmem_add_to_page_cache(page, mapping, index, |
1147 | gfp, swp_to_radix_entry(swap)); | 1147 | gfp, swp_to_radix_entry(swap)); |
1148 | /* We already confirmed swap, and make no allocation */ | 1148 | /* |
1149 | VM_BUG_ON(error); | 1149 | * We already confirmed swap under page lock, and make |
1150 | * no memory allocation here, so usually no possibility | ||
1151 | * of error; but free_swap_and_cache() only trylocks a | ||
1152 | * page, so it is just possible that the entry has been | ||
1153 | * truncated or holepunched since swap was confirmed. | ||
1154 | * shmem_undo_range() will have done some of the | ||
1155 | * unaccounting, now delete_from_swap_cache() will do | ||
1156 | * the rest (including mem_cgroup_uncharge_swapcache). | ||
1157 | * Reset swap.val? No, leave it so "failed" goes back to | ||
1158 | * "repeat": reading a hole and writing should succeed. | ||
1159 | */ | ||
1160 | if (error) | ||
1161 | delete_from_swap_cache(page); | ||
1150 | } | 1162 | } |
1151 | if (error) | 1163 | if (error) |
1152 | goto failed; | 1164 | goto failed; |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 71cd288b2001..f91a25547ffe 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -1494,9 +1494,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) | |||
1494 | BUG_ON(!current->mm); | 1494 | BUG_ON(!current->mm); |
1495 | 1495 | ||
1496 | pathname = getname(specialfile); | 1496 | pathname = getname(specialfile); |
1497 | err = PTR_ERR(pathname); | ||
1498 | if (IS_ERR(pathname)) | 1497 | if (IS_ERR(pathname)) |
1499 | goto out; | 1498 | return PTR_ERR(pathname); |
1500 | 1499 | ||
1501 | victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0); | 1500 | victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0); |
1502 | err = PTR_ERR(victim); | 1501 | err = PTR_ERR(victim); |
@@ -1608,6 +1607,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) | |||
1608 | out_dput: | 1607 | out_dput: |
1609 | filp_close(victim, NULL); | 1608 | filp_close(victim, NULL); |
1610 | out: | 1609 | out: |
1610 | putname(pathname); | ||
1611 | return err; | 1611 | return err; |
1612 | } | 1612 | } |
1613 | 1613 | ||
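The swapoff hunk above plugs a pathname leak: the name returned by getname() was never released on any exit path. A hedged sketch of the pairing the fix restores, assuming the 3.7-era API in which getname() returns a struct filename that is later passed to file_open_name() and dropped with putname():

#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/fs.h>

static int example_open_by_user_name(const char __user *uname)
{
	struct filename *name = getname(uname);
	struct file *filp;

	if (IS_ERR(name))
		return PTR_ERR(name);

	filp = file_open_name(name, O_RDONLY, 0);
	putname(name);			/* always balance getname() */

	if (IS_ERR(filp))
		return PTR_ERR(filp);

	filp_close(filp, NULL);
	return 0;
}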
diff --git a/mm/vmscan.c b/mm/vmscan.c index 8b055e9379bc..48550c66f1f2 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -1760,28 +1760,6 @@ static bool in_reclaim_compaction(struct scan_control *sc) | |||
1760 | return false; | 1760 | return false; |
1761 | } | 1761 | } |
1762 | 1762 | ||
1763 | #ifdef CONFIG_COMPACTION | ||
1764 | /* | ||
1765 | * If compaction is deferred for sc->order then scale the number of pages | ||
1766 | * reclaimed based on the number of consecutive allocation failures | ||
1767 | */ | ||
1768 | static unsigned long scale_for_compaction(unsigned long pages_for_compaction, | ||
1769 | struct lruvec *lruvec, struct scan_control *sc) | ||
1770 | { | ||
1771 | struct zone *zone = lruvec_zone(lruvec); | ||
1772 | |||
1773 | if (zone->compact_order_failed <= sc->order) | ||
1774 | pages_for_compaction <<= zone->compact_defer_shift; | ||
1775 | return pages_for_compaction; | ||
1776 | } | ||
1777 | #else | ||
1778 | static unsigned long scale_for_compaction(unsigned long pages_for_compaction, | ||
1779 | struct lruvec *lruvec, struct scan_control *sc) | ||
1780 | { | ||
1781 | return pages_for_compaction; | ||
1782 | } | ||
1783 | #endif | ||
1784 | |||
1785 | /* | 1763 | /* |
1786 | * Reclaim/compaction is used for high-order allocation requests. It reclaims | 1764 | * Reclaim/compaction is used for high-order allocation requests. It reclaims |
1787 | * order-0 pages before compacting the zone. should_continue_reclaim() returns | 1765 | * order-0 pages before compacting the zone. should_continue_reclaim() returns |
@@ -1829,9 +1807,6 @@ static inline bool should_continue_reclaim(struct lruvec *lruvec, | |||
1829 | * inactive lists are large enough, continue reclaiming | 1807 | * inactive lists are large enough, continue reclaiming |
1830 | */ | 1808 | */ |
1831 | pages_for_compaction = (2UL << sc->order); | 1809 | pages_for_compaction = (2UL << sc->order); |
1832 | |||
1833 | pages_for_compaction = scale_for_compaction(pages_for_compaction, | ||
1834 | lruvec, sc); | ||
1835 | inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE); | 1810 | inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE); |
1836 | if (nr_swap_pages > 0) | 1811 | if (nr_swap_pages > 0) |
1837 | inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON); | 1812 | inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON); |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index b9a28d2dd3e8..ce0684a1fc83 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -325,6 +325,12 @@ void batadv_interface_rx(struct net_device *soft_iface, | |||
325 | 325 | ||
326 | soft_iface->last_rx = jiffies; | 326 | soft_iface->last_rx = jiffies; |
327 | 327 | ||
328 | /* Let the bridge loop avoidance check the packet. If it will | ||
329 | * not handle it, we can safely push it up. | ||
330 | */ | ||
331 | if (batadv_bla_rx(bat_priv, skb, vid, is_bcast)) | ||
332 | goto out; | ||
333 | |||
328 | if (orig_node) | 334 | if (orig_node) |
329 | batadv_tt_add_temporary_global_entry(bat_priv, orig_node, | 335 | batadv_tt_add_temporary_global_entry(bat_priv, orig_node, |
330 | ethhdr->h_source); | 336 | ethhdr->h_source); |
@@ -332,12 +338,6 @@ void batadv_interface_rx(struct net_device *soft_iface, | |||
332 | if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest)) | 338 | if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest)) |
333 | goto dropped; | 339 | goto dropped; |
334 | 340 | ||
335 | /* Let the bridge loop avoidance check the packet. If will | ||
336 | * not handle it, we can safely push it up. | ||
337 | */ | ||
338 | if (batadv_bla_rx(bat_priv, skb, vid, is_bcast)) | ||
339 | goto out; | ||
340 | |||
341 | netif_rx(skb); | 341 | netif_rx(skb); |
342 | goto out; | 342 | goto out; |
343 | 343 | ||
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 112edd371b2f..baae71585804 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
@@ -769,6 +769,12 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv, | |||
769 | */ | 769 | */ |
770 | tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP; | 770 | tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP; |
771 | 771 | ||
772 | /* the change can carry possible "attribute" flags like the | ||
773 | * TT_CLIENT_WIFI, therefore they have to be copied in the | ||
774 | * client entry | ||
775 | */ | ||
776 | tt_global_entry->common.flags |= flags; | ||
777 | |||
772 | /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only | 778 | /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only |
773 | * one originator left in the list and we previously received a | 779 | * one originator left in the list and we previously received a |
774 | * delete + roaming change for this originator. | 780 | * delete + roaming change for this originator. |
@@ -1496,7 +1502,7 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, | |||
1496 | 1502 | ||
1497 | memcpy(tt_change->addr, tt_common_entry->addr, | 1503 | memcpy(tt_change->addr, tt_common_entry->addr, |
1498 | ETH_ALEN); | 1504 | ETH_ALEN); |
1499 | tt_change->flags = BATADV_NO_FLAGS; | 1505 | tt_change->flags = tt_common_entry->flags; |
1500 | 1506 | ||
1501 | tt_count++; | 1507 | tt_count++; |
1502 | tt_change++; | 1508 | tt_change++; |
@@ -2450,6 +2456,13 @@ bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv, | |||
2450 | { | 2456 | { |
2451 | bool ret = false; | 2457 | bool ret = false; |
2452 | 2458 | ||
2459 | /* if the originator is a backbone node (meaning it belongs to the same | ||
2460 | * LAN as this node) the temporary client must not be added because to | ||
2461 | * reach such a destination the node must use the LAN instead of the mesh | ||
2462 | */ | ||
2463 | if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig)) | ||
2464 | goto out; | ||
2465 | |||
2453 | if (!batadv_tt_global_add(bat_priv, orig_node, addr, | 2466 | if (!batadv_tt_global_add(bat_priv, orig_node, addr, |
2454 | BATADV_TT_CLIENT_TEMP, | 2467 | BATADV_TT_CLIENT_TEMP, |
2455 | atomic_read(&orig_node->last_ttvn))) | 2468 | atomic_read(&orig_node->last_ttvn))) |
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 8a0ce706aebd..a0a2f97b9c62 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c | |||
@@ -1754,11 +1754,11 @@ int hci_register_dev(struct hci_dev *hdev) | |||
1754 | if (hdev->dev_type != HCI_AMP) | 1754 | if (hdev->dev_type != HCI_AMP) |
1755 | set_bit(HCI_AUTO_OFF, &hdev->dev_flags); | 1755 | set_bit(HCI_AUTO_OFF, &hdev->dev_flags); |
1756 | 1756 | ||
1757 | schedule_work(&hdev->power_on); | ||
1758 | |||
1759 | hci_notify(hdev, HCI_DEV_REG); | 1757 | hci_notify(hdev, HCI_DEV_REG); |
1760 | hci_dev_hold(hdev); | 1758 | hci_dev_hold(hdev); |
1761 | 1759 | ||
1760 | schedule_work(&hdev->power_on); | ||
1761 | |||
1762 | return id; | 1762 | return id; |
1763 | 1763 | ||
1764 | err_wqueue: | 1764 | err_wqueue: |
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index aa2ea0a8142c..91de4239da66 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c | |||
@@ -326,7 +326,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, | |||
326 | struct hci_dev *d; | 326 | struct hci_dev *d; |
327 | size_t rp_len; | 327 | size_t rp_len; |
328 | u16 count; | 328 | u16 count; |
329 | int i, err; | 329 | int err; |
330 | 330 | ||
331 | BT_DBG("sock %p", sk); | 331 | BT_DBG("sock %p", sk); |
332 | 332 | ||
@@ -347,9 +347,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, | |||
347 | return -ENOMEM; | 347 | return -ENOMEM; |
348 | } | 348 | } |
349 | 349 | ||
350 | rp->num_controllers = cpu_to_le16(count); | 350 | count = 0; |
351 | |||
352 | i = 0; | ||
353 | list_for_each_entry(d, &hci_dev_list, list) { | 351 | list_for_each_entry(d, &hci_dev_list, list) { |
354 | if (test_bit(HCI_SETUP, &d->dev_flags)) | 352 | if (test_bit(HCI_SETUP, &d->dev_flags)) |
355 | continue; | 353 | continue; |
@@ -357,10 +355,13 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, | |||
357 | if (!mgmt_valid_hdev(d)) | 355 | if (!mgmt_valid_hdev(d)) |
358 | continue; | 356 | continue; |
359 | 357 | ||
360 | rp->index[i++] = cpu_to_le16(d->id); | 358 | rp->index[count++] = cpu_to_le16(d->id); |
361 | BT_DBG("Added hci%u", d->id); | 359 | BT_DBG("Added hci%u", d->id); |
362 | } | 360 | } |
363 | 361 | ||
362 | rp->num_controllers = cpu_to_le16(count); | ||
363 | rp_len = sizeof(*rp) + (2 * count); | ||
364 | |||
364 | read_unlock(&hci_dev_list_lock); | 365 | read_unlock(&hci_dev_list_lock); |
365 | 366 | ||
366 | err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp, | 367 | err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp, |
@@ -1366,6 +1367,7 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1366 | continue; | 1367 | continue; |
1367 | 1368 | ||
1368 | list_del(&match->list); | 1369 | list_del(&match->list); |
1370 | kfree(match); | ||
1369 | found++; | 1371 | found++; |
1370 | } | 1372 | } |
1371 | 1373 | ||
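The read_index_list() change above recomputes both num_controllers and rp_len from the entries actually copied, because controllers still in HCI_SETUP (or rejected by mgmt_valid_hdev()) are skipped inside the loop. Each index is a __le16, so with three registered controllers of which one is still in HCI_SETUP the loop copies two entries, count ends up 2, and the reply is sized and advertised as sizeof(*rp) + 2 * 2 bytes instead of being padded out for all three.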
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 2ac8d50861e0..a5923378bdf0 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c | |||
@@ -267,7 +267,7 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send) | |||
267 | 267 | ||
268 | clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->flags); | 268 | clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->flags); |
269 | mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type, | 269 | mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type, |
270 | hcon->dst_type, reason); | 270 | hcon->dst_type, HCI_ERROR_AUTH_FAILURE); |
271 | 271 | ||
272 | cancel_delayed_work_sync(&conn->security_timer); | 272 | cancel_delayed_work_sync(&conn->security_timer); |
273 | 273 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index bda6d004f9f0..c0946cb2b354 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2818,8 +2818,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, | |||
2818 | if (unlikely(tcpu != next_cpu) && | 2818 | if (unlikely(tcpu != next_cpu) && |
2819 | (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || | 2819 | (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || |
2820 | ((int)(per_cpu(softnet_data, tcpu).input_queue_head - | 2820 | ((int)(per_cpu(softnet_data, tcpu).input_queue_head - |
2821 | rflow->last_qtail)) >= 0)) | 2821 | rflow->last_qtail)) >= 0)) { |
2822 | tcpu = next_cpu; | ||
2822 | rflow = set_rps_cpu(dev, skb, rflow, next_cpu); | 2823 | rflow = set_rps_cpu(dev, skb, rflow, next_cpu); |
2824 | } | ||
2823 | 2825 | ||
2824 | if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { | 2826 | if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { |
2825 | *rflowp = rflow; | 2827 | *rflowp = rflow; |
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index 87cc17db2d56..b079c7bbc157 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c | |||
@@ -319,7 +319,8 @@ int dev_addr_del(struct net_device *dev, const unsigned char *addr, | |||
319 | */ | 319 | */ |
320 | ha = list_first_entry(&dev->dev_addrs.list, | 320 | ha = list_first_entry(&dev->dev_addrs.list, |
321 | struct netdev_hw_addr, list); | 321 | struct netdev_hw_addr, list); |
322 | if (ha->addr == dev->dev_addr && ha->refcount == 1) | 322 | if (!memcmp(ha->addr, addr, dev->addr_len) && |
323 | ha->type == addr_type && ha->refcount == 1) | ||
323 | return -ENOENT; | 324 | return -ENOENT; |
324 | 325 | ||
325 | err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len, | 326 | err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len, |
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index bcf02f608cbf..017a8bacfb27 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -429,6 +429,17 @@ static struct attribute_group netstat_group = { | |||
429 | .name = "statistics", | 429 | .name = "statistics", |
430 | .attrs = netstat_attrs, | 430 | .attrs = netstat_attrs, |
431 | }; | 431 | }; |
432 | |||
433 | #if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211) | ||
434 | static struct attribute *wireless_attrs[] = { | ||
435 | NULL | ||
436 | }; | ||
437 | |||
438 | static struct attribute_group wireless_group = { | ||
439 | .name = "wireless", | ||
440 | .attrs = wireless_attrs, | ||
441 | }; | ||
442 | #endif | ||
432 | #endif /* CONFIG_SYSFS */ | 443 | #endif /* CONFIG_SYSFS */ |
433 | 444 | ||
434 | #ifdef CONFIG_RPS | 445 | #ifdef CONFIG_RPS |
@@ -1409,6 +1420,15 @@ int netdev_register_kobject(struct net_device *net) | |||
1409 | groups++; | 1420 | groups++; |
1410 | 1421 | ||
1411 | *groups++ = &netstat_group; | 1422 | *groups++ = &netstat_group; |
1423 | |||
1424 | #if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211) | ||
1425 | if (net->ieee80211_ptr) | ||
1426 | *groups++ = &wireless_group; | ||
1427 | #if IS_ENABLED(CONFIG_WIRELESS_EXT) | ||
1428 | else if (net->wireless_handlers) | ||
1429 | *groups++ = &wireless_group; | ||
1430 | #endif | ||
1431 | #endif | ||
1412 | #endif /* CONFIG_SYSFS */ | 1432 | #endif /* CONFIG_SYSFS */ |
1413 | 1433 | ||
1414 | error = device_add(dev); | 1434 | error = device_add(dev); |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 5eea4a811042..14bbfcf717ac 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -457,19 +457,28 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
457 | struct inet_sock *inet = inet_sk(sk); | 457 | struct inet_sock *inet = inet_sk(sk); |
458 | int val = 0, err; | 458 | int val = 0, err; |
459 | 459 | ||
460 | if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) | | 460 | switch (optname) { |
461 | (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) | | 461 | case IP_PKTINFO: |
462 | (1<<IP_RETOPTS) | (1<<IP_TOS) | | 462 | case IP_RECVTTL: |
463 | (1<<IP_TTL) | (1<<IP_HDRINCL) | | 463 | case IP_RECVOPTS: |
464 | (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) | | 464 | case IP_RECVTOS: |
465 | (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | | 465 | case IP_RETOPTS: |
466 | (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) | | 466 | case IP_TOS: |
467 | (1<<IP_MINTTL) | (1<<IP_NODEFRAG))) || | 467 | case IP_TTL: |
468 | optname == IP_UNICAST_IF || | 468 | case IP_HDRINCL: |
469 | optname == IP_MULTICAST_TTL || | 469 | case IP_MTU_DISCOVER: |
470 | optname == IP_MULTICAST_ALL || | 470 | case IP_RECVERR: |
471 | optname == IP_MULTICAST_LOOP || | 471 | case IP_ROUTER_ALERT: |
472 | optname == IP_RECVORIGDSTADDR) { | 472 | case IP_FREEBIND: |
473 | case IP_PASSSEC: | ||
474 | case IP_TRANSPARENT: | ||
475 | case IP_MINTTL: | ||
476 | case IP_NODEFRAG: | ||
477 | case IP_UNICAST_IF: | ||
478 | case IP_MULTICAST_TTL: | ||
479 | case IP_MULTICAST_ALL: | ||
480 | case IP_MULTICAST_LOOP: | ||
481 | case IP_RECVORIGDSTADDR: | ||
473 | if (optlen >= sizeof(int)) { | 482 | if (optlen >= sizeof(int)) { |
474 | if (get_user(val, (int __user *) optval)) | 483 | if (get_user(val, (int __user *) optval)) |
475 | return -EFAULT; | 484 | return -EFAULT; |
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 1831092f999f..858fddf6482a 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c | |||
@@ -338,12 +338,17 @@ static int vti_rcv(struct sk_buff *skb) | |||
338 | if (tunnel != NULL) { | 338 | if (tunnel != NULL) { |
339 | struct pcpu_tstats *tstats; | 339 | struct pcpu_tstats *tstats; |
340 | 340 | ||
341 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) | ||
342 | return -1; | ||
343 | |||
341 | tstats = this_cpu_ptr(tunnel->dev->tstats); | 344 | tstats = this_cpu_ptr(tunnel->dev->tstats); |
342 | u64_stats_update_begin(&tstats->syncp); | 345 | u64_stats_update_begin(&tstats->syncp); |
343 | tstats->rx_packets++; | 346 | tstats->rx_packets++; |
344 | tstats->rx_bytes += skb->len; | 347 | tstats->rx_bytes += skb->len; |
345 | u64_stats_update_end(&tstats->syncp); | 348 | u64_stats_update_end(&tstats->syncp); |
346 | 349 | ||
350 | skb->mark = 0; | ||
351 | secpath_reset(skb); | ||
347 | skb->dev = tunnel->dev; | 352 | skb->dev = tunnel->dev; |
348 | return 1; | 353 | return 1; |
349 | } | 354 | } |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index a8c651216fa6..df251424d816 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1785,6 +1785,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res, | |||
1785 | if (dev_out->flags & IFF_LOOPBACK) | 1785 | if (dev_out->flags & IFF_LOOPBACK) |
1786 | flags |= RTCF_LOCAL; | 1786 | flags |= RTCF_LOCAL; |
1787 | 1787 | ||
1788 | do_cache = true; | ||
1788 | if (type == RTN_BROADCAST) { | 1789 | if (type == RTN_BROADCAST) { |
1789 | flags |= RTCF_BROADCAST | RTCF_LOCAL; | 1790 | flags |= RTCF_BROADCAST | RTCF_LOCAL; |
1790 | fi = NULL; | 1791 | fi = NULL; |
@@ -1793,6 +1794,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res, | |||
1793 | if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr, | 1794 | if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr, |
1794 | fl4->flowi4_proto)) | 1795 | fl4->flowi4_proto)) |
1795 | flags &= ~RTCF_LOCAL; | 1796 | flags &= ~RTCF_LOCAL; |
1797 | else | ||
1798 | do_cache = false; | ||
1796 | /* If multicast route do not exist use | 1799 | /* If multicast route do not exist use |
1797 | * default one, but do not gateway in this case. | 1800 | * default one, but do not gateway in this case. |
1798 | * Yes, it is hack. | 1801 | * Yes, it is hack. |
@@ -1802,8 +1805,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res, | |||
1802 | } | 1805 | } |
1803 | 1806 | ||
1804 | fnhe = NULL; | 1807 | fnhe = NULL; |
1805 | do_cache = fi != NULL; | 1808 | do_cache &= fi != NULL; |
1806 | if (fi) { | 1809 | if (do_cache) { |
1807 | struct rtable __rcu **prth; | 1810 | struct rtable __rcu **prth; |
1808 | struct fib_nh *nh = &FIB_RES_NH(*res); | 1811 | struct fib_nh *nh = &FIB_RES_NH(*res); |
1809 | 1812 | ||
@@ -2597,7 +2600,7 @@ int __init ip_rt_init(void) | |||
2597 | pr_err("Unable to create route proc files\n"); | 2600 | pr_err("Unable to create route proc files\n"); |
2598 | #ifdef CONFIG_XFRM | 2601 | #ifdef CONFIG_XFRM |
2599 | xfrm_init(); | 2602 | xfrm_init(); |
2600 | xfrm4_init(ip_rt_max_size); | 2603 | xfrm4_init(); |
2601 | #endif | 2604 | #endif |
2602 | rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL); | 2605 | rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL); |
2603 | 2606 | ||
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 197c0008503c..083092e3aed6 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1212,7 +1212,7 @@ new_segment: | |||
1212 | wait_for_sndbuf: | 1212 | wait_for_sndbuf: |
1213 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); | 1213 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); |
1214 | wait_for_memory: | 1214 | wait_for_memory: |
1215 | if (copied && likely(!tp->repair)) | 1215 | if (copied) |
1216 | tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); | 1216 | tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); |
1217 | 1217 | ||
1218 | if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) | 1218 | if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) |
@@ -1223,7 +1223,7 @@ wait_for_memory: | |||
1223 | } | 1223 | } |
1224 | 1224 | ||
1225 | out: | 1225 | out: |
1226 | if (copied && likely(!tp->repair)) | 1226 | if (copied) |
1227 | tcp_push(sk, flags, mss_now, tp->nonagle); | 1227 | tcp_push(sk, flags, mss_now, tp->nonagle); |
1228 | release_sock(sk); | 1228 | release_sock(sk); |
1229 | return copied + copied_syn; | 1229 | return copied + copied_syn; |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2c2b13a999ea..609ff98aeb47 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5313,11 +5313,6 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, | |||
5313 | goto discard; | 5313 | goto discard; |
5314 | } | 5314 | } |
5315 | 5315 | ||
5316 | /* ts_recent update must be made after we are sure that the packet | ||
5317 | * is in window. | ||
5318 | */ | ||
5319 | tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); | ||
5320 | |||
5321 | /* step 3: check security and precedence [ignored] */ | 5316 | /* step 3: check security and precedence [ignored] */ |
5322 | 5317 | ||
5323 | /* step 4: Check for a SYN | 5318 | /* step 4: Check for a SYN |
@@ -5552,6 +5547,11 @@ step5: | |||
5552 | if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) | 5547 | if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) |
5553 | goto discard; | 5548 | goto discard; |
5554 | 5549 | ||
5550 | /* ts_recent update must be made after we are sure that the packet | ||
5551 | * is in window. | ||
5552 | */ | ||
5553 | tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); | ||
5554 | |||
5555 | tcp_rcv_rtt_measure_ts(sk, skb); | 5555 | tcp_rcv_rtt_measure_ts(sk, skb); |
5556 | 5556 | ||
5557 | /* Process urgent data. */ | 5557 | /* Process urgent data. */ |
@@ -6130,6 +6130,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
6130 | } else | 6130 | } else |
6131 | goto discard; | 6131 | goto discard; |
6132 | 6132 | ||
6133 | /* ts_recent update must be made after we are sure that the packet | ||
6134 | * is in window. | ||
6135 | */ | ||
6136 | tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); | ||
6137 | |||
6133 | /* step 6: check the URG bit */ | 6138 | /* step 6: check the URG bit */ |
6134 | tcp_urg(sk, skb, th); | 6139 | tcp_urg(sk, skb, th); |
6135 | 6140 | ||
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 53bc5847bfa8..f696d7c2e9fa 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -1,7 +1,6 @@ | |||
1 | #include <linux/rcupdate.h> | 1 | #include <linux/rcupdate.h> |
2 | #include <linux/spinlock.h> | 2 | #include <linux/spinlock.h> |
3 | #include <linux/jiffies.h> | 3 | #include <linux/jiffies.h> |
4 | #include <linux/bootmem.h> | ||
5 | #include <linux/module.h> | 4 | #include <linux/module.h> |
6 | #include <linux/cache.h> | 5 | #include <linux/cache.h> |
7 | #include <linux/slab.h> | 6 | #include <linux/slab.h> |
@@ -9,6 +8,7 @@ | |||
9 | #include <linux/tcp.h> | 8 | #include <linux/tcp.h> |
10 | #include <linux/hash.h> | 9 | #include <linux/hash.h> |
11 | #include <linux/tcp_metrics.h> | 10 | #include <linux/tcp_metrics.h> |
11 | #include <linux/vmalloc.h> | ||
12 | 12 | ||
13 | #include <net/inet_connection_sock.h> | 13 | #include <net/inet_connection_sock.h> |
14 | #include <net/net_namespace.h> | 14 | #include <net/net_namespace.h> |
@@ -1034,7 +1034,10 @@ static int __net_init tcp_net_metrics_init(struct net *net) | |||
1034 | net->ipv4.tcp_metrics_hash_log = order_base_2(slots); | 1034 | net->ipv4.tcp_metrics_hash_log = order_base_2(slots); |
1035 | size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log; | 1035 | size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log; |
1036 | 1036 | ||
1037 | net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL); | 1037 | net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); |
1038 | if (!net->ipv4.tcp_metrics_hash) | ||
1039 | net->ipv4.tcp_metrics_hash = vzalloc(size); | ||
1040 | |||
1038 | if (!net->ipv4.tcp_metrics_hash) | 1041 | if (!net->ipv4.tcp_metrics_hash) |
1039 | return -ENOMEM; | 1042 | return -ENOMEM; |
1040 | 1043 | ||
@@ -1055,7 +1058,10 @@ static void __net_exit tcp_net_metrics_exit(struct net *net) | |||
1055 | tm = next; | 1058 | tm = next; |
1056 | } | 1059 | } |
1057 | } | 1060 | } |
1058 | kfree(net->ipv4.tcp_metrics_hash); | 1061 | if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash)) |
1062 | vfree(net->ipv4.tcp_metrics_hash); | ||
1063 | else | ||
1064 | kfree(net->ipv4.tcp_metrics_hash); | ||
1059 | } | 1065 | } |
1060 | 1066 | ||
1061 | static __net_initdata struct pernet_operations tcp_net_metrics_ops = { | 1067 | static __net_initdata struct pernet_operations tcp_net_metrics_ops = { |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index cfe6ffe1c177..2798706cb063 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1986,6 +1986,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, | |||
1986 | tso_segs = tcp_init_tso_segs(sk, skb, mss_now); | 1986 | tso_segs = tcp_init_tso_segs(sk, skb, mss_now); |
1987 | BUG_ON(!tso_segs); | 1987 | BUG_ON(!tso_segs); |
1988 | 1988 | ||
1989 | if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) | ||
1990 | goto repair; /* Skip network transmission */ | ||
1991 | |||
1989 | cwnd_quota = tcp_cwnd_test(tp, skb); | 1992 | cwnd_quota = tcp_cwnd_test(tp, skb); |
1990 | if (!cwnd_quota) | 1993 | if (!cwnd_quota) |
1991 | break; | 1994 | break; |
@@ -2026,6 +2029,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, | |||
2026 | if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) | 2029 | if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) |
2027 | break; | 2030 | break; |
2028 | 2031 | ||
2032 | repair: | ||
2029 | /* Advance the send_head. This one is sent out. | 2033 | /* Advance the send_head. This one is sent out. |
2030 | * This call will increment packets_out. | 2034 | * This call will increment packets_out. |
2031 | */ | 2035 | */ |
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 05c5ab8d983c..3be0ac2c1920 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -279,19 +279,8 @@ static void __exit xfrm4_policy_fini(void) | |||
279 | xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo); | 279 | xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo); |
280 | } | 280 | } |
281 | 281 | ||
282 | void __init xfrm4_init(int rt_max_size) | 282 | void __init xfrm4_init(void) |
283 | { | 283 | { |
284 | /* | ||
285 | * Select a default value for the gc_thresh based on the main route | ||
286 | * table hash size. It seems to me the worst case scenario is when | ||
287 | * we have ipsec operating in transport mode, in which we create a | ||
288 | * dst_entry per socket. The xfrm gc algorithm starts trying to remove | ||
289 | * entries at gc_thresh, and prevents new allocations as 2*gc_thresh | ||
290 | * so lets set an initial xfrm gc_thresh value at the rt_max_size/2. | ||
291 | * That will let us store an ipsec connection per route table entry, | ||
292 | * and start cleaning when were 1/2 full | ||
293 | */ | ||
294 | xfrm4_dst_ops.gc_thresh = rt_max_size/2; | ||
295 | dst_entries_init(&xfrm4_dst_ops); | 284 | dst_entries_init(&xfrm4_dst_ops); |
296 | 285 | ||
297 | xfrm4_state_init(); | 286 | xfrm4_state_init(); |
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index c4f934176cab..30647857a375 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -252,6 +252,7 @@ struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu) | |||
252 | return NULL; | 252 | return NULL; |
253 | dst->ops->update_pmtu(dst, sk, NULL, mtu); | 253 | dst->ops->update_pmtu(dst, sk, NULL, mtu); |
254 | 254 | ||
255 | return inet6_csk_route_socket(sk, &fl6); | 255 | dst = inet6_csk_route_socket(sk, &fl6); |
256 | return IS_ERR(dst) ? NULL : dst; | ||
256 | } | 257 | } |
257 | EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu); | 258 | EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu); |
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index ba6d13d1f1e1..e02faed6d17e 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -827,6 +827,7 @@ pref_skip_coa: | |||
827 | if (val < 0 || val > 255) | 827 | if (val < 0 || val > 255) |
828 | goto e_inval; | 828 | goto e_inval; |
829 | np->min_hopcount = val; | 829 | np->min_hopcount = val; |
830 | retv = 0; | ||
830 | break; | 831 | break; |
831 | case IPV6_DONTFRAG: | 832 | case IPV6_DONTFRAG: |
832 | np->dontfrag = valbool; | 833 | np->dontfrag = valbool; |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 05f3a313db88..7371f676cf41 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2594,6 +2594,9 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy, | |||
2594 | else | 2594 | else |
2595 | local->probe_req_reg--; | 2595 | local->probe_req_reg--; |
2596 | 2596 | ||
2597 | if (!local->open_count) | ||
2598 | break; | ||
2599 | |||
2597 | ieee80211_queue_work(&local->hw, &local->reconfig_filter); | 2600 | ieee80211_queue_work(&local->hw, &local->reconfig_filter); |
2598 | break; | 2601 | break; |
2599 | default: | 2602 | default: |
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index bf87c70ac6c5..c21e33d1abd0 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -1151,10 +1151,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) | |||
1151 | 1151 | ||
1152 | mutex_lock(&sdata->u.ibss.mtx); | 1152 | mutex_lock(&sdata->u.ibss.mtx); |
1153 | 1153 | ||
1154 | sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH; | ||
1155 | memset(sdata->u.ibss.bssid, 0, ETH_ALEN); | ||
1156 | sdata->u.ibss.ssid_len = 0; | ||
1157 | |||
1158 | active_ibss = ieee80211_sta_active_ibss(sdata); | 1154 | active_ibss = ieee80211_sta_active_ibss(sdata); |
1159 | 1155 | ||
1160 | if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) { | 1156 | if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) { |
@@ -1175,6 +1171,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) | |||
1175 | } | 1171 | } |
1176 | } | 1172 | } |
1177 | 1173 | ||
1174 | ifibss->state = IEEE80211_IBSS_MLME_SEARCH; | ||
1175 | memset(ifibss->bssid, 0, ETH_ALEN); | ||
1176 | ifibss->ssid_len = 0; | ||
1177 | |||
1178 | sta_info_flush(sdata->local, sdata); | 1178 | sta_info_flush(sdata->local, sdata); |
1179 | 1179 | ||
1180 | spin_lock_bh(&ifibss->incomplete_lock); | 1180 | spin_lock_bh(&ifibss->incomplete_lock); |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 8c804550465b..156e5835e37f 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1314,6 +1314,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, | |||
1314 | struct net_device *dev); | 1314 | struct net_device *dev); |
1315 | netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | 1315 | netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, |
1316 | struct net_device *dev); | 1316 | struct net_device *dev); |
1317 | void ieee80211_purge_tx_queue(struct ieee80211_hw *hw, | ||
1318 | struct sk_buff_head *skbs); | ||
1317 | 1319 | ||
1318 | /* HT */ | 1320 | /* HT */ |
1319 | void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, | 1321 | void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index c80c4490351c..f57f597972f8 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -871,8 +871,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
871 | local->hw.wiphy->cipher_suites, | 871 | local->hw.wiphy->cipher_suites, |
872 | sizeof(u32) * local->hw.wiphy->n_cipher_suites, | 872 | sizeof(u32) * local->hw.wiphy->n_cipher_suites, |
873 | GFP_KERNEL); | 873 | GFP_KERNEL); |
874 | if (!suites) | 874 | if (!suites) { |
875 | return -ENOMEM; | 875 | result = -ENOMEM; |
876 | goto fail_wiphy_register; | ||
877 | } | ||
876 | for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) { | 878 | for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) { |
877 | u32 suite = local->hw.wiphy->cipher_suites[r]; | 879 | u32 suite = local->hw.wiphy->cipher_suites[r]; |
878 | if (suite == WLAN_CIPHER_SUITE_WEP40 || | 880 | if (suite == WLAN_CIPHER_SUITE_WEP40 || |
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index c4cdbde24fd3..43e60b5a7546 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -917,7 +917,7 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, | |||
917 | struct cfg80211_sched_scan_request *req) | 917 | struct cfg80211_sched_scan_request *req) |
918 | { | 918 | { |
919 | struct ieee80211_local *local = sdata->local; | 919 | struct ieee80211_local *local = sdata->local; |
920 | struct ieee80211_sched_scan_ies sched_scan_ies; | 920 | struct ieee80211_sched_scan_ies sched_scan_ies = {}; |
921 | int ret, i; | 921 | int ret, i; |
922 | 922 | ||
923 | mutex_lock(&local->mtx); | 923 | mutex_lock(&local->mtx); |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 0a4e4c04db89..d2eb64e12353 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -117,8 +117,8 @@ static void free_sta_work(struct work_struct *wk) | |||
117 | 117 | ||
118 | for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { | 118 | for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { |
119 | local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]); | 119 | local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]); |
120 | __skb_queue_purge(&sta->ps_tx_buf[ac]); | 120 | ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]); |
121 | __skb_queue_purge(&sta->tx_filtered[ac]); | 121 | ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]); |
122 | } | 122 | } |
123 | 123 | ||
124 | #ifdef CONFIG_MAC80211_MESH | 124 | #ifdef CONFIG_MAC80211_MESH |
@@ -141,7 +141,7 @@ static void free_sta_work(struct work_struct *wk) | |||
141 | tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]); | 141 | tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]); |
142 | if (!tid_tx) | 142 | if (!tid_tx) |
143 | continue; | 143 | continue; |
144 | __skb_queue_purge(&tid_tx->pending); | 144 | ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending); |
145 | kfree(tid_tx); | 145 | kfree(tid_tx); |
146 | } | 146 | } |
147 | 147 | ||
@@ -961,6 +961,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) | |||
961 | struct ieee80211_local *local = sdata->local; | 961 | struct ieee80211_local *local = sdata->local; |
962 | struct sk_buff_head pending; | 962 | struct sk_buff_head pending; |
963 | int filtered = 0, buffered = 0, ac; | 963 | int filtered = 0, buffered = 0, ac; |
964 | unsigned long flags; | ||
964 | 965 | ||
965 | clear_sta_flag(sta, WLAN_STA_SP); | 966 | clear_sta_flag(sta, WLAN_STA_SP); |
966 | 967 | ||
@@ -976,12 +977,16 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) | |||
976 | for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { | 977 | for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { |
977 | int count = skb_queue_len(&pending), tmp; | 978 | int count = skb_queue_len(&pending), tmp; |
978 | 979 | ||
980 | spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags); | ||
979 | skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending); | 981 | skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending); |
982 | spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags); | ||
980 | tmp = skb_queue_len(&pending); | 983 | tmp = skb_queue_len(&pending); |
981 | filtered += tmp - count; | 984 | filtered += tmp - count; |
982 | count = tmp; | 985 | count = tmp; |
983 | 986 | ||
987 | spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags); | ||
984 | skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending); | 988 | skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending); |
989 | spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags); | ||
985 | tmp = skb_queue_len(&pending); | 990 | tmp = skb_queue_len(&pending); |
986 | buffered += tmp - count; | 991 | buffered += tmp - count; |
987 | } | 992 | } |
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 3af0cc4130f1..101eb88a2b78 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -668,3 +668,12 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
668 | dev_kfree_skb_any(skb); | 668 | dev_kfree_skb_any(skb); |
669 | } | 669 | } |
670 | EXPORT_SYMBOL(ieee80211_free_txskb); | 670 | EXPORT_SYMBOL(ieee80211_free_txskb); |
671 | |||
672 | void ieee80211_purge_tx_queue(struct ieee80211_hw *hw, | ||
673 | struct sk_buff_head *skbs) | ||
674 | { | ||
675 | struct sk_buff *skb; | ||
676 | |||
677 | while ((skb = __skb_dequeue(skbs))) | ||
678 | ieee80211_free_txskb(hw, skb); | ||
679 | } | ||
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index c9bf83f36657..b858ebe41fda 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1358,7 +1358,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) | |||
1358 | if (tx->skb) | 1358 | if (tx->skb) |
1359 | ieee80211_free_txskb(&tx->local->hw, tx->skb); | 1359 | ieee80211_free_txskb(&tx->local->hw, tx->skb); |
1360 | else | 1360 | else |
1361 | __skb_queue_purge(&tx->skbs); | 1361 | ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs); |
1362 | return -1; | 1362 | return -1; |
1363 | } else if (unlikely(res == TX_QUEUED)) { | 1363 | } else if (unlikely(res == TX_QUEUED)) { |
1364 | I802_DEBUG_INC(tx->local->tx_handlers_queued); | 1364 | I802_DEBUG_INC(tx->local->tx_handlers_queued); |
@@ -2120,10 +2120,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
2120 | */ | 2120 | */ |
2121 | void ieee80211_clear_tx_pending(struct ieee80211_local *local) | 2121 | void ieee80211_clear_tx_pending(struct ieee80211_local *local) |
2122 | { | 2122 | { |
2123 | struct sk_buff *skb; | ||
2123 | int i; | 2124 | int i; |
2124 | 2125 | ||
2125 | for (i = 0; i < local->hw.queues; i++) | 2126 | for (i = 0; i < local->hw.queues; i++) { |
2126 | skb_queue_purge(&local->pending[i]); | 2127 | while ((skb = skb_dequeue(&local->pending[i])) != NULL) |
2128 | ieee80211_free_txskb(&local->hw, skb); | ||
2129 | } | ||
2127 | } | 2130 | } |
2128 | 2131 | ||
2129 | /* | 2132 | /* |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 239391807ca9..0151ae33c4cd 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1491,6 +1491,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1491 | list_for_each_entry(sdata, &local->interfaces, list) { | 1491 | list_for_each_entry(sdata, &local->interfaces, list) { |
1492 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | 1492 | if (sdata->vif.type != NL80211_IFTYPE_STATION) |
1493 | continue; | 1493 | continue; |
1494 | if (!sdata->u.mgd.associated) | ||
1495 | continue; | ||
1494 | 1496 | ||
1495 | ieee80211_send_nullfunc(local, sdata, 0); | 1497 | ieee80211_send_nullfunc(local, sdata, 0); |
1496 | } | 1498 | } |
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index ec3dba5dcd62..5c0b78528e55 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -173,6 +173,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
173 | return adtfn(set, &nip, timeout, flags); | 173 | return adtfn(set, &nip, timeout, flags); |
174 | } | 174 | } |
175 | 175 | ||
176 | ip_to = ip; | ||
176 | if (tb[IPSET_ATTR_IP_TO]) { | 177 | if (tb[IPSET_ATTR_IP_TO]) { |
177 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); | 178 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); |
178 | if (ret) | 179 | if (ret) |
@@ -185,8 +186,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
185 | if (!cidr || cidr > 32) | 186 | if (!cidr || cidr > 32) |
186 | return -IPSET_ERR_INVALID_CIDR; | 187 | return -IPSET_ERR_INVALID_CIDR; |
187 | ip_set_mask_from_to(ip, ip_to, cidr); | 188 | ip_set_mask_from_to(ip, ip_to, cidr); |
188 | } else | 189 | } |
189 | ip_to = ip; | ||
190 | 190 | ||
191 | hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1); | 191 | hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1); |
192 | 192 | ||
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 0171f7502fa5..6283351f4eeb 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -162,7 +162,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
162 | const struct ip_set_hash *h = set->data; | 162 | const struct ip_set_hash *h = set->data; |
163 | ipset_adtfn adtfn = set->variant->adt[adt]; | 163 | ipset_adtfn adtfn = set->variant->adt[adt]; |
164 | struct hash_ipport4_elem data = { }; | 164 | struct hash_ipport4_elem data = { }; |
165 | u32 ip, ip_to = 0, p = 0, port, port_to; | 165 | u32 ip, ip_to, p = 0, port, port_to; |
166 | u32 timeout = h->timeout; | 166 | u32 timeout = h->timeout; |
167 | bool with_ports = false; | 167 | bool with_ports = false; |
168 | int ret; | 168 | int ret; |
@@ -210,7 +210,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
210 | return ip_set_eexist(ret, flags) ? 0 : ret; | 210 | return ip_set_eexist(ret, flags) ? 0 : ret; |
211 | } | 211 | } |
212 | 212 | ||
213 | ip = ntohl(data.ip); | 213 | ip_to = ip = ntohl(data.ip); |
214 | if (tb[IPSET_ATTR_IP_TO]) { | 214 | if (tb[IPSET_ATTR_IP_TO]) { |
215 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); | 215 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); |
216 | if (ret) | 216 | if (ret) |
@@ -223,8 +223,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
223 | if (!cidr || cidr > 32) | 223 | if (!cidr || cidr > 32) |
224 | return -IPSET_ERR_INVALID_CIDR; | 224 | return -IPSET_ERR_INVALID_CIDR; |
225 | ip_set_mask_from_to(ip, ip_to, cidr); | 225 | ip_set_mask_from_to(ip, ip_to, cidr); |
226 | } else | 226 | } |
227 | ip_to = ip; | ||
228 | 227 | ||
229 | port_to = port = ntohs(data.port); | 228 | port_to = port = ntohs(data.port); |
230 | if (with_ports && tb[IPSET_ATTR_PORT_TO]) { | 229 | if (with_ports && tb[IPSET_ATTR_PORT_TO]) { |
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 6344ef551ec8..6a21271c8d5a 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -166,7 +166,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
166 | const struct ip_set_hash *h = set->data; | 166 | const struct ip_set_hash *h = set->data; |
167 | ipset_adtfn adtfn = set->variant->adt[adt]; | 167 | ipset_adtfn adtfn = set->variant->adt[adt]; |
168 | struct hash_ipportip4_elem data = { }; | 168 | struct hash_ipportip4_elem data = { }; |
169 | u32 ip, ip_to = 0, p = 0, port, port_to; | 169 | u32 ip, ip_to, p = 0, port, port_to; |
170 | u32 timeout = h->timeout; | 170 | u32 timeout = h->timeout; |
171 | bool with_ports = false; | 171 | bool with_ports = false; |
172 | int ret; | 172 | int ret; |
@@ -218,7 +218,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
218 | return ip_set_eexist(ret, flags) ? 0 : ret; | 218 | return ip_set_eexist(ret, flags) ? 0 : ret; |
219 | } | 219 | } |
220 | 220 | ||
221 | ip = ntohl(data.ip); | 221 | ip_to = ip = ntohl(data.ip); |
222 | if (tb[IPSET_ATTR_IP_TO]) { | 222 | if (tb[IPSET_ATTR_IP_TO]) { |
223 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); | 223 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); |
224 | if (ret) | 224 | if (ret) |
@@ -231,8 +231,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
231 | if (!cidr || cidr > 32) | 231 | if (!cidr || cidr > 32) |
232 | return -IPSET_ERR_INVALID_CIDR; | 232 | return -IPSET_ERR_INVALID_CIDR; |
233 | ip_set_mask_from_to(ip, ip_to, cidr); | 233 | ip_set_mask_from_to(ip, ip_to, cidr); |
234 | } else | 234 | } |
235 | ip_to = ip; | ||
236 | 235 | ||
237 | port_to = port = ntohs(data.port); | 236 | port_to = port = ntohs(data.port); |
238 | if (with_ports && tb[IPSET_ATTR_PORT_TO]) { | 237 | if (with_ports && tb[IPSET_ATTR_PORT_TO]) { |
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index cb71f9a774e7..2d5cd4ee30eb 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -215,8 +215,8 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
215 | const struct ip_set_hash *h = set->data; | 215 | const struct ip_set_hash *h = set->data; |
216 | ipset_adtfn adtfn = set->variant->adt[adt]; | 216 | ipset_adtfn adtfn = set->variant->adt[adt]; |
217 | struct hash_ipportnet4_elem data = { .cidr = HOST_MASK - 1 }; | 217 | struct hash_ipportnet4_elem data = { .cidr = HOST_MASK - 1 }; |
218 | u32 ip, ip_to = 0, p = 0, port, port_to; | 218 | u32 ip, ip_to, p = 0, port, port_to; |
219 | u32 ip2_from = 0, ip2_to, ip2_last, ip2; | 219 | u32 ip2_from, ip2_to, ip2_last, ip2; |
220 | u32 timeout = h->timeout; | 220 | u32 timeout = h->timeout; |
221 | bool with_ports = false; | 221 | bool with_ports = false; |
222 | u8 cidr; | 222 | u8 cidr; |
@@ -286,6 +286,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
286 | return ip_set_eexist(ret, flags) ? 0 : ret; | 286 | return ip_set_eexist(ret, flags) ? 0 : ret; |
287 | } | 287 | } |
288 | 288 | ||
289 | ip_to = ip; | ||
289 | if (tb[IPSET_ATTR_IP_TO]) { | 290 | if (tb[IPSET_ATTR_IP_TO]) { |
290 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); | 291 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); |
291 | if (ret) | 292 | if (ret) |
@@ -306,6 +307,8 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
306 | if (port > port_to) | 307 | if (port > port_to) |
307 | swap(port, port_to); | 308 | swap(port, port_to); |
308 | } | 309 | } |
310 | |||
311 | ip2_to = ip2_from; | ||
309 | if (tb[IPSET_ATTR_IP2_TO]) { | 312 | if (tb[IPSET_ATTR_IP2_TO]) { |
310 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to); | 313 | ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to); |
311 | if (ret) | 314 | if (ret) |
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 8847b4d8be06..701c88a20fea 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -41,7 +41,8 @@ MODULE_DESCRIPTION("cttimeout: Extended Netfilter Connection Tracking timeout tu | |||
41 | static LIST_HEAD(cttimeout_list); | 41 | static LIST_HEAD(cttimeout_list); |
42 | 42 | ||
43 | static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = { | 43 | static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = { |
44 | [CTA_TIMEOUT_NAME] = { .type = NLA_NUL_STRING }, | 44 | [CTA_TIMEOUT_NAME] = { .type = NLA_NUL_STRING, |
45 | .len = CTNL_TIMEOUT_NAME_MAX - 1}, | ||
45 | [CTA_TIMEOUT_L3PROTO] = { .type = NLA_U16 }, | 46 | [CTA_TIMEOUT_L3PROTO] = { .type = NLA_U16 }, |
46 | [CTA_TIMEOUT_L4PROTO] = { .type = NLA_U8 }, | 47 | [CTA_TIMEOUT_L4PROTO] = { .type = NLA_U8 }, |
47 | [CTA_TIMEOUT_DATA] = { .type = NLA_NESTED }, | 48 | [CTA_TIMEOUT_DATA] = { .type = NLA_NESTED }, |
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index cc10d073c338..9e8f4b2801f6 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -1210,7 +1210,7 @@ int nfc_llcp_register_device(struct nfc_dev *ndev) | |||
1210 | local->remote_miu = LLCP_DEFAULT_MIU; | 1210 | local->remote_miu = LLCP_DEFAULT_MIU; |
1211 | local->remote_lto = LLCP_DEFAULT_LTO; | 1211 | local->remote_lto = LLCP_DEFAULT_LTO; |
1212 | 1212 | ||
1213 | list_add(&llcp_devices, &local->list); | 1213 | list_add(&local->list, &llcp_devices); |
1214 | 1214 | ||
1215 | return 0; | 1215 | return 0; |
1216 | } | 1216 | } |
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index c3bea269faf4..9966e7b16451 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -102,7 +102,7 @@ static const struct file_operations sctp_snmp_seq_fops = { | |||
102 | .open = sctp_snmp_seq_open, | 102 | .open = sctp_snmp_seq_open, |
103 | .read = seq_read, | 103 | .read = seq_read, |
104 | .llseek = seq_lseek, | 104 | .llseek = seq_lseek, |
105 | .release = single_release, | 105 | .release = single_release_net, |
106 | }; | 106 | }; |
107 | 107 | ||
108 | /* Set up the proc fs entry for 'snmp' object. */ | 108 | /* Set up the proc fs entry for 'snmp' object. */ |
@@ -251,7 +251,7 @@ static const struct file_operations sctp_eps_seq_fops = { | |||
251 | .open = sctp_eps_seq_open, | 251 | .open = sctp_eps_seq_open, |
252 | .read = seq_read, | 252 | .read = seq_read, |
253 | .llseek = seq_lseek, | 253 | .llseek = seq_lseek, |
254 | .release = seq_release, | 254 | .release = seq_release_net, |
255 | }; | 255 | }; |
256 | 256 | ||
257 | /* Set up the proc fs entry for 'eps' object. */ | 257 | /* Set up the proc fs entry for 'eps' object. */ |
@@ -372,7 +372,7 @@ static const struct file_operations sctp_assocs_seq_fops = { | |||
372 | .open = sctp_assocs_seq_open, | 372 | .open = sctp_assocs_seq_open, |
373 | .read = seq_read, | 373 | .read = seq_read, |
374 | .llseek = seq_lseek, | 374 | .llseek = seq_lseek, |
375 | .release = seq_release, | 375 | .release = seq_release_net, |
376 | }; | 376 | }; |
377 | 377 | ||
378 | /* Set up the proc fs entry for 'assocs' object. */ | 378 | /* Set up the proc fs entry for 'assocs' object. */ |
@@ -517,7 +517,7 @@ static const struct file_operations sctp_remaddr_seq_fops = { | |||
517 | .open = sctp_remaddr_seq_open, | 517 | .open = sctp_remaddr_seq_open, |
518 | .read = seq_read, | 518 | .read = seq_read, |
519 | .llseek = seq_lseek, | 519 | .llseek = seq_lseek, |
520 | .release = seq_release, | 520 | .release = seq_release_net, |
521 | }; | 521 | }; |
522 | 522 | ||
523 | int __net_init sctp_remaddr_proc_init(struct net *net) | 523 | int __net_init sctp_remaddr_proc_init(struct net *net) |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index bcc7d7ee5a51..b75756b05af7 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -141,9 +141,8 @@ static const struct ieee80211_regdomain world_regdom = { | |||
141 | .reg_rules = { | 141 | .reg_rules = { |
142 | /* IEEE 802.11b/g, channels 1..11 */ | 142 | /* IEEE 802.11b/g, channels 1..11 */ |
143 | REG_RULE(2412-10, 2462+10, 40, 6, 20, 0), | 143 | REG_RULE(2412-10, 2462+10, 40, 6, 20, 0), |
144 | /* IEEE 802.11b/g, channels 12..13. No HT40 | 144 | /* IEEE 802.11b/g, channels 12..13. */ |
145 | * channel fits here. */ | 145 | REG_RULE(2467-10, 2472+10, 40, 6, 20, |
146 | REG_RULE(2467-10, 2472+10, 20, 6, 20, | ||
147 | NL80211_RRF_PASSIVE_SCAN | | 146 | NL80211_RRF_PASSIVE_SCAN | |
148 | NL80211_RRF_NO_IBSS), | 147 | NL80211_RRF_NO_IBSS), |
149 | /* IEEE 802.11 channel 14 - Only JP enables | 148 | /* IEEE 802.11 channel 14 - Only JP enables |
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index bd2e09895553..cdd48600e02a 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -12,7 +12,7 @@ extern "C" { | |||
12 | 12 | ||
13 | #include <assert.h> | 13 | #include <assert.h> |
14 | #include <stdio.h> | 14 | #include <stdio.h> |
15 | #include <sys/queue.h> | 15 | #include "list.h" |
16 | #ifndef __cplusplus | 16 | #ifndef __cplusplus |
17 | #include <stdbool.h> | 17 | #include <stdbool.h> |
18 | #endif | 18 | #endif |
@@ -175,12 +175,11 @@ struct menu { | |||
175 | #define MENU_ROOT 0x0002 | 175 | #define MENU_ROOT 0x0002 |
176 | 176 | ||
177 | struct jump_key { | 177 | struct jump_key { |
178 | CIRCLEQ_ENTRY(jump_key) entries; | 178 | struct list_head entries; |
179 | size_t offset; | 179 | size_t offset; |
180 | struct menu *target; | 180 | struct menu *target; |
181 | int index; | 181 | int index; |
182 | }; | 182 | }; |
183 | CIRCLEQ_HEAD(jk_head, jump_key); | ||
184 | 183 | ||
185 | #define JUMP_NB 9 | 184 | #define JUMP_NB 9 |
186 | 185 | ||
diff --git a/scripts/kconfig/list.h b/scripts/kconfig/list.h
new file mode 100644
index 000000000000..0ae730be5f49
--- /dev/null
+++ b/scripts/kconfig/list.h
@@ -0,0 +1,91 @@ | |||
1 | #ifndef LIST_H | ||
2 | #define LIST_H | ||
3 | |||
4 | /* | ||
5 | * Copied from include/linux/... | ||
6 | */ | ||
7 | |||
8 | #undef offsetof | ||
9 | #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) | ||
10 | |||
11 | /** | ||
12 | * container_of - cast a member of a structure out to the containing structure | ||
13 | * @ptr: the pointer to the member. | ||
14 | * @type: the type of the container struct this is embedded in. | ||
15 | * @member: the name of the member within the struct. | ||
16 | * | ||
17 | */ | ||
18 | #define container_of(ptr, type, member) ({ \ | ||
19 | const typeof( ((type *)0)->member ) *__mptr = (ptr); \ | ||
20 | (type *)( (char *)__mptr - offsetof(type,member) );}) | ||
21 | |||
22 | |||
23 | struct list_head { | ||
24 | struct list_head *next, *prev; | ||
25 | }; | ||
26 | |||
27 | |||
28 | #define LIST_HEAD_INIT(name) { &(name), &(name) } | ||
29 | |||
30 | #define LIST_HEAD(name) \ | ||
31 | struct list_head name = LIST_HEAD_INIT(name) | ||
32 | |||
33 | /** | ||
34 | * list_entry - get the struct for this entry | ||
35 | * @ptr: the &struct list_head pointer. | ||
36 | * @type: the type of the struct this is embedded in. | ||
37 | * @member: the name of the list_struct within the struct. | ||
38 | */ | ||
39 | #define list_entry(ptr, type, member) \ | ||
40 | container_of(ptr, type, member) | ||
41 | |||
42 | /** | ||
43 | * list_for_each_entry - iterate over list of given type | ||
44 | * @pos: the type * to use as a loop cursor. | ||
45 | * @head: the head for your list. | ||
46 | * @member: the name of the list_struct within the struct. | ||
47 | */ | ||
48 | #define list_for_each_entry(pos, head, member) \ | ||
49 | for (pos = list_entry((head)->next, typeof(*pos), member); \ | ||
50 | &pos->member != (head); \ | ||
51 | pos = list_entry(pos->member.next, typeof(*pos), member)) | ||
52 | |||
53 | /** | ||
54 | * list_empty - tests whether a list is empty | ||
55 | * @head: the list to test. | ||
56 | */ | ||
57 | static inline int list_empty(const struct list_head *head) | ||
58 | { | ||
59 | return head->next == head; | ||
60 | } | ||
61 | |||
62 | /* | ||
63 | * Insert a new entry between two known consecutive entries. | ||
64 | * | ||
65 | * This is only for internal list manipulation where we know | ||
66 | * the prev/next entries already! | ||
67 | */ | ||
68 | static inline void __list_add(struct list_head *_new, | ||
69 | struct list_head *prev, | ||
70 | struct list_head *next) | ||
71 | { | ||
72 | next->prev = _new; | ||
73 | _new->next = next; | ||
74 | _new->prev = prev; | ||
75 | prev->next = _new; | ||
76 | } | ||
77 | |||
78 | /** | ||
79 | * list_add_tail - add a new entry | ||
80 | * @new: new entry to be added | ||
81 | * @head: list head to add it before | ||
82 | * | ||
83 | * Insert a new entry before the specified head. | ||
84 | * This is useful for implementing queues. | ||
85 | */ | ||
86 | static inline void list_add_tail(struct list_head *_new, struct list_head *head) | ||
87 | { | ||
88 | __list_add(_new, head->prev, head); | ||
89 | } | ||
90 | |||
91 | #endif | ||
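
[Editor's note, not part of the patch: the new scripts/kconfig/list.h above is a minimal, self-contained copy of the kernel's intrusive list API (LIST_HEAD, list_add_tail, list_for_each_entry), which the expr.h/mconf.c/menu.c hunks below adopt in place of the BSD CIRCLEQ macros from <sys/queue.h>, presumably so the host tools no longer depend on that non-standard header. The sketch below is illustrative only; struct example_key and the test program are hypothetical, merely mirroring how struct jump_key embeds a list node. It builds with a host gcc (container_of relies on GCC typeof/statement expressions), e.g. gcc -Iscripts/kconfig example.c]

/* example.c -- hypothetical standalone demo of the list.h API */
#include <stdio.h>
#include "list.h"

struct example_key {
	struct list_head entries;	/* embedded list node, as in struct jump_key */
	int index;
};

int main(void)
{
	LIST_HEAD(head);		/* empty list: head points to itself */
	struct example_key a = { .index = 0 };
	struct example_key b = { .index = 1 };
	struct example_key *pos;

	list_add_tail(&a.entries, &head);	/* queue semantics: append at the tail */
	list_add_tail(&b.entries, &head);

	list_for_each_entry(pos, &head, entries)
		printf("jump target %d\n", pos->index);	/* prints 0, then 1 */

	return 0;
}
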
diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h
index 1d1c08537f1e..ef1a7381f956 100644
--- a/scripts/kconfig/lkc_proto.h
+++ b/scripts/kconfig/lkc_proto.h
@@ -21,9 +21,9 @@ P(menu_get_root_menu,struct menu *,(struct menu *menu)); | |||
21 | P(menu_get_parent_menu,struct menu *,(struct menu *menu)); | 21 | P(menu_get_parent_menu,struct menu *,(struct menu *menu)); |
22 | P(menu_has_help,bool,(struct menu *menu)); | 22 | P(menu_has_help,bool,(struct menu *menu)); |
23 | P(menu_get_help,const char *,(struct menu *menu)); | 23 | P(menu_get_help,const char *,(struct menu *menu)); |
24 | P(get_symbol_str, void, (struct gstr *r, struct symbol *sym, struct jk_head | 24 | P(get_symbol_str, void, (struct gstr *r, struct symbol *sym, struct list_head |
25 | *head)); | 25 | *head)); |
26 | P(get_relations_str, struct gstr, (struct symbol **sym_arr, struct jk_head | 26 | P(get_relations_str, struct gstr, (struct symbol **sym_arr, struct list_head |
27 | *head)); | 27 | *head)); |
28 | P(menu_get_ext_help,void,(struct menu *menu, struct gstr *help)); | 28 | P(menu_get_ext_help,void,(struct menu *menu, struct gstr *help)); |
29 | 29 | ||
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 48f67448af7b..53975cf87608 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -312,7 +312,7 @@ static void set_config_filename(const char *config_filename) | |||
312 | 312 | ||
313 | 313 | ||
314 | struct search_data { | 314 | struct search_data { |
315 | struct jk_head *head; | 315 | struct list_head *head; |
316 | struct menu **targets; | 316 | struct menu **targets; |
317 | int *keys; | 317 | int *keys; |
318 | }; | 318 | }; |
@@ -323,7 +323,7 @@ static void update_text(char *buf, size_t start, size_t end, void *_data) | |||
323 | struct jump_key *pos; | 323 | struct jump_key *pos; |
324 | int k = 0; | 324 | int k = 0; |
325 | 325 | ||
326 | CIRCLEQ_FOREACH(pos, data->head, entries) { | 326 | list_for_each_entry(pos, data->head, entries) { |
327 | if (pos->offset >= start && pos->offset < end) { | 327 | if (pos->offset >= start && pos->offset < end) { |
328 | char header[4]; | 328 | char header[4]; |
329 | 329 | ||
@@ -375,7 +375,7 @@ again: | |||
375 | 375 | ||
376 | sym_arr = sym_re_search(dialog_input); | 376 | sym_arr = sym_re_search(dialog_input); |
377 | do { | 377 | do { |
378 | struct jk_head head = CIRCLEQ_HEAD_INITIALIZER(head); | 378 | LIST_HEAD(head); |
379 | struct menu *targets[JUMP_NB]; | 379 | struct menu *targets[JUMP_NB]; |
380 | int keys[JUMP_NB + 1], i; | 380 | int keys[JUMP_NB + 1], i; |
381 | struct search_data data = { | 381 | struct search_data data = { |
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index a3cade659f89..e98a05c8e508 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -508,7 +508,7 @@ const char *menu_get_help(struct menu *menu) | |||
508 | } | 508 | } |
509 | 509 | ||
510 | static void get_prompt_str(struct gstr *r, struct property *prop, | 510 | static void get_prompt_str(struct gstr *r, struct property *prop, |
511 | struct jk_head *head) | 511 | struct list_head *head) |
512 | { | 512 | { |
513 | int i, j; | 513 | int i, j; |
514 | struct menu *submenu[8], *menu, *location = NULL; | 514 | struct menu *submenu[8], *menu, *location = NULL; |
@@ -544,12 +544,13 @@ static void get_prompt_str(struct gstr *r, struct property *prop, | |||
544 | } else | 544 | } else |
545 | jump->target = location; | 545 | jump->target = location; |
546 | 546 | ||
547 | if (CIRCLEQ_EMPTY(head)) | 547 | if (list_empty(head)) |
548 | jump->index = 0; | 548 | jump->index = 0; |
549 | else | 549 | else |
550 | jump->index = CIRCLEQ_LAST(head)->index + 1; | 550 | jump->index = list_entry(head->prev, struct jump_key, |
551 | entries)->index + 1; | ||
551 | 552 | ||
552 | CIRCLEQ_INSERT_TAIL(head, jump, entries); | 553 | list_add_tail(&jump->entries, head); |
553 | } | 554 | } |
554 | 555 | ||
555 | if (i > 0) { | 556 | if (i > 0) { |
@@ -573,7 +574,8 @@ static void get_prompt_str(struct gstr *r, struct property *prop, | |||
573 | /* | 574 | /* |
574 | * head is optional and may be NULL | 575 | * head is optional and may be NULL |
575 | */ | 576 | */ |
576 | void get_symbol_str(struct gstr *r, struct symbol *sym, struct jk_head *head) | 577 | void get_symbol_str(struct gstr *r, struct symbol *sym, |
578 | struct list_head *head) | ||
577 | { | 579 | { |
578 | bool hit; | 580 | bool hit; |
579 | struct property *prop; | 581 | struct property *prop; |
@@ -612,7 +614,7 @@ void get_symbol_str(struct gstr *r, struct symbol *sym, struct jk_head *head) | |||
612 | str_append(r, "\n\n"); | 614 | str_append(r, "\n\n"); |
613 | } | 615 | } |
614 | 616 | ||
615 | struct gstr get_relations_str(struct symbol **sym_arr, struct jk_head *head) | 617 | struct gstr get_relations_str(struct symbol **sym_arr, struct list_head *head) |
616 | { | 618 | { |
617 | struct symbol *sym; | 619 | struct symbol *sym; |
618 | struct gstr res = str_new(); | 620 | struct gstr res = str_new(); |
diff --git a/scripts/sign-file b/scripts/sign-file
index 87ca59d36e7e..974a20b661b7 100755
--- a/scripts/sign-file
+++ b/scripts/sign-file
@@ -156,12 +156,12 @@ sub asn1_extract($$@) | |||
156 | 156 | ||
157 | if ($l == 0x1) { | 157 | if ($l == 0x1) { |
158 | $len = unpack("C", substr(${$cursor->[2]}, $cursor->[0], 1)); | 158 | $len = unpack("C", substr(${$cursor->[2]}, $cursor->[0], 1)); |
159 | } elsif ($l = 0x2) { | 159 | } elsif ($l == 0x2) { |
160 | $len = unpack("n", substr(${$cursor->[2]}, $cursor->[0], 2)); | 160 | $len = unpack("n", substr(${$cursor->[2]}, $cursor->[0], 2)); |
161 | } elsif ($l = 0x3) { | 161 | } elsif ($l == 0x3) { |
162 | $len = unpack("C", substr(${$cursor->[2]}, $cursor->[0], 1)) << 16; | 162 | $len = unpack("C", substr(${$cursor->[2]}, $cursor->[0], 1)) << 16; |
163 | $len = unpack("n", substr(${$cursor->[2]}, $cursor->[0] + 1, 2)); | 163 | $len = unpack("n", substr(${$cursor->[2]}, $cursor->[0] + 1, 2)); |
164 | } elsif ($l = 0x4) { | 164 | } elsif ($l == 0x4) { |
165 | $len = unpack("N", substr(${$cursor->[2]}, $cursor->[0], 4)); | 165 | $len = unpack("N", substr(${$cursor->[2]}, $cursor->[0], 4)); |
166 | } else { | 166 | } else { |
167 | die $x509, ": ", $cursor->[0], ": ASN.1 element too long (", $l, ")\n"; | 167 | die $x509, ": ", $cursor->[0], ": ASN.1 element too long (", $l, ")\n"; |
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 842c254396db..b08d20c66c2e 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -164,8 +164,8 @@ static void dev_exception_clean(struct dev_cgroup *dev_cgroup) | |||
164 | struct dev_exception_item *ex, *tmp; | 164 | struct dev_exception_item *ex, *tmp; |
165 | 165 | ||
166 | list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) { | 166 | list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) { |
167 | list_del(&ex->list); | 167 | list_del_rcu(&ex->list); |
168 | kfree(ex); | 168 | kfree_rcu(ex, rcu); |
169 | } | 169 | } |
170 | } | 170 | } |
171 | 171 | ||
@@ -298,7 +298,7 @@ static int may_access(struct dev_cgroup *dev_cgroup, | |||
298 | struct dev_exception_item *ex; | 298 | struct dev_exception_item *ex; |
299 | bool match = false; | 299 | bool match = false; |
300 | 300 | ||
301 | list_for_each_entry(ex, &dev_cgroup->exceptions, list) { | 301 | list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) { |
302 | if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK)) | 302 | if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK)) |
303 | continue; | 303 | continue; |
304 | if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR)) | 304 | if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR)) |
@@ -352,6 +352,8 @@ static int parent_has_perm(struct dev_cgroup *childcg, | |||
352 | */ | 352 | */ |
353 | static inline int may_allow_all(struct dev_cgroup *parent) | 353 | static inline int may_allow_all(struct dev_cgroup *parent) |
354 | { | 354 | { |
355 | if (!parent) | ||
356 | return 1; | ||
355 | return parent->behavior == DEVCG_DEFAULT_ALLOW; | 357 | return parent->behavior == DEVCG_DEFAULT_ALLOW; |
356 | } | 358 | } |
357 | 359 | ||
@@ -376,11 +378,14 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup, | |||
376 | int count, rc; | 378 | int count, rc; |
377 | struct dev_exception_item ex; | 379 | struct dev_exception_item ex; |
378 | struct cgroup *p = devcgroup->css.cgroup; | 380 | struct cgroup *p = devcgroup->css.cgroup; |
379 | struct dev_cgroup *parent = cgroup_to_devcgroup(p->parent); | 381 | struct dev_cgroup *parent = NULL; |
380 | 382 | ||
381 | if (!capable(CAP_SYS_ADMIN)) | 383 | if (!capable(CAP_SYS_ADMIN)) |
382 | return -EPERM; | 384 | return -EPERM; |
383 | 385 | ||
386 | if (p->parent) | ||
387 | parent = cgroup_to_devcgroup(p->parent); | ||
388 | |||
384 | memset(&ex, 0, sizeof(ex)); | 389 | memset(&ex, 0, sizeof(ex)); |
385 | b = buffer; | 390 | b = buffer; |
386 | 391 | ||
@@ -391,11 +396,14 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup, | |||
391 | if (!may_allow_all(parent)) | 396 | if (!may_allow_all(parent)) |
392 | return -EPERM; | 397 | return -EPERM; |
393 | dev_exception_clean(devcgroup); | 398 | dev_exception_clean(devcgroup); |
399 | devcgroup->behavior = DEVCG_DEFAULT_ALLOW; | ||
400 | if (!parent) | ||
401 | break; | ||
402 | |||
394 | rc = dev_exceptions_copy(&devcgroup->exceptions, | 403 | rc = dev_exceptions_copy(&devcgroup->exceptions, |
395 | &parent->exceptions); | 404 | &parent->exceptions); |
396 | if (rc) | 405 | if (rc) |
397 | return rc; | 406 | return rc; |
398 | devcgroup->behavior = DEVCG_DEFAULT_ALLOW; | ||
399 | break; | 407 | break; |
400 | case DEVCG_DENY: | 408 | case DEVCG_DENY: |
401 | dev_exception_clean(devcgroup); | 409 | dev_exception_clean(devcgroup); |
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
index 28f911cdd7c7..c5454c0477c3 100644
--- a/security/selinux/netnode.c
+++ b/security/selinux/netnode.c
@@ -174,7 +174,8 @@ static void sel_netnode_insert(struct sel_netnode *node) | |||
174 | if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) { | 174 | if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) { |
175 | struct sel_netnode *tail; | 175 | struct sel_netnode *tail; |
176 | tail = list_entry( | 176 | tail = list_entry( |
177 | rcu_dereference(sel_netnode_hash[idx].list.prev), | 177 | rcu_dereference_protected(sel_netnode_hash[idx].list.prev, |
178 | lockdep_is_held(&sel_netnode_lock)), | ||
178 | struct sel_netnode, list); | 179 | struct sel_netnode, list); |
179 | list_del_rcu(&tail->list); | 180 | list_del_rcu(&tail->list); |
180 | kfree_rcu(tail, rcu); | 181 | kfree_rcu(tail, rcu); |
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index 50169bcfd903..7266020c16cb 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -2581,9 +2581,14 @@ static u8 snd_es1968_tea575x_get_pins(struct snd_tea575x *tea) | |||
2581 | struct es1968 *chip = tea->private_data; | 2581 | struct es1968 *chip = tea->private_data; |
2582 | unsigned long io = chip->io_port + GPIO_DATA; | 2582 | unsigned long io = chip->io_port + GPIO_DATA; |
2583 | u16 val = inw(io); | 2583 | u16 val = inw(io); |
2584 | 2584 | u8 ret; | |
2585 | return (val & STR_DATA) ? TEA575X_DATA : 0 | | 2585 | |
2586 | (val & STR_MOST) ? TEA575X_MOST : 0; | 2586 | ret = 0; |
2587 | if (val & STR_DATA) | ||
2588 | ret |= TEA575X_DATA; | ||
2589 | if (val & STR_MOST) | ||
2590 | ret |= TEA575X_MOST; | ||
2591 | return ret; | ||
2587 | } | 2592 | } |
2588 | 2593 | ||
2589 | static void snd_es1968_tea575x_set_direction(struct snd_tea575x *tea, bool output) | 2594 | static void snd_es1968_tea575x_set_direction(struct snd_tea575x *tea, bool output) |
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index cc2e91d15538..c5806f89be1e 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -767,9 +767,14 @@ static u8 snd_fm801_tea575x_get_pins(struct snd_tea575x *tea) | |||
767 | struct fm801 *chip = tea->private_data; | 767 | struct fm801 *chip = tea->private_data; |
768 | unsigned short reg = inw(FM801_REG(chip, GPIO_CTRL)); | 768 | unsigned short reg = inw(FM801_REG(chip, GPIO_CTRL)); |
769 | struct snd_fm801_tea575x_gpio gpio = *get_tea575x_gpio(chip); | 769 | struct snd_fm801_tea575x_gpio gpio = *get_tea575x_gpio(chip); |
770 | 770 | u8 ret; | |
771 | return (reg & FM801_GPIO_GP(gpio.data)) ? TEA575X_DATA : 0 | | 771 | |
772 | (reg & FM801_GPIO_GP(gpio.most)) ? TEA575X_MOST : 0; | 772 | ret = 0; |
773 | if (reg & FM801_GPIO_GP(gpio.data)) | ||
774 | ret |= TEA575X_DATA; | ||
775 | if (reg & FM801_GPIO_GP(gpio.most)) | ||
776 | ret |= TEA575X_MOST; | ||
777 | return ret; | ||
773 | } | 778 | } |
774 | 779 | ||
775 | static void snd_fm801_tea575x_set_direction(struct snd_tea575x *tea, bool output) | 780 | static void snd_fm801_tea575x_set_direction(struct snd_tea575x *tea, bool output) |
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 70d4848b5cd0..d010de12335e 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -95,6 +95,7 @@ int snd_hda_delete_codec_preset(struct hda_codec_preset_list *preset) | |||
95 | EXPORT_SYMBOL_HDA(snd_hda_delete_codec_preset); | 95 | EXPORT_SYMBOL_HDA(snd_hda_delete_codec_preset); |
96 | 96 | ||
97 | #ifdef CONFIG_PM | 97 | #ifdef CONFIG_PM |
98 | #define codec_in_pm(codec) ((codec)->in_pm) | ||
98 | static void hda_power_work(struct work_struct *work); | 99 | static void hda_power_work(struct work_struct *work); |
99 | static void hda_keep_power_on(struct hda_codec *codec); | 100 | static void hda_keep_power_on(struct hda_codec *codec); |
100 | #define hda_codec_is_power_on(codec) ((codec)->power_on) | 101 | #define hda_codec_is_power_on(codec) ((codec)->power_on) |
@@ -104,6 +105,7 @@ static inline void hda_call_pm_notify(struct hda_bus *bus, bool power_up) | |||
104 | bus->ops.pm_notify(bus, power_up); | 105 | bus->ops.pm_notify(bus, power_up); |
105 | } | 106 | } |
106 | #else | 107 | #else |
108 | #define codec_in_pm(codec) 0 | ||
107 | static inline void hda_keep_power_on(struct hda_codec *codec) {} | 109 | static inline void hda_keep_power_on(struct hda_codec *codec) {} |
108 | #define hda_codec_is_power_on(codec) 1 | 110 | #define hda_codec_is_power_on(codec) 1 |
109 | #define hda_call_pm_notify(bus, state) {} | 111 | #define hda_call_pm_notify(bus, state) {} |
@@ -228,7 +230,7 @@ static int codec_exec_verb(struct hda_codec *codec, unsigned int cmd, | |||
228 | } | 230 | } |
229 | mutex_unlock(&bus->cmd_mutex); | 231 | mutex_unlock(&bus->cmd_mutex); |
230 | snd_hda_power_down(codec); | 232 | snd_hda_power_down(codec); |
231 | if (res && *res == -1 && bus->rirb_error) { | 233 | if (!codec_in_pm(codec) && res && *res == -1 && bus->rirb_error) { |
232 | if (bus->response_reset) { | 234 | if (bus->response_reset) { |
233 | snd_printd("hda_codec: resetting BUS due to " | 235 | snd_printd("hda_codec: resetting BUS due to " |
234 | "fatal communication error\n"); | 236 | "fatal communication error\n"); |
@@ -238,7 +240,7 @@ static int codec_exec_verb(struct hda_codec *codec, unsigned int cmd, | |||
238 | goto again; | 240 | goto again; |
239 | } | 241 | } |
240 | /* clear reset-flag when the communication gets recovered */ | 242 | /* clear reset-flag when the communication gets recovered */ |
241 | if (!err) | 243 | if (!err || codec_in_pm(codec)) |
242 | bus->response_reset = 0; | 244 | bus->response_reset = 0; |
243 | return err; | 245 | return err; |
244 | } | 246 | } |
@@ -3616,6 +3618,8 @@ static unsigned int hda_call_codec_suspend(struct hda_codec *codec, bool in_wq) | |||
3616 | { | 3618 | { |
3617 | unsigned int state; | 3619 | unsigned int state; |
3618 | 3620 | ||
3621 | codec->in_pm = 1; | ||
3622 | |||
3619 | if (codec->patch_ops.suspend) | 3623 | if (codec->patch_ops.suspend) |
3620 | codec->patch_ops.suspend(codec); | 3624 | codec->patch_ops.suspend(codec); |
3621 | hda_cleanup_all_streams(codec); | 3625 | hda_cleanup_all_streams(codec); |
@@ -3630,6 +3634,7 @@ static unsigned int hda_call_codec_suspend(struct hda_codec *codec, bool in_wq) | |||
3630 | codec->power_transition = 0; | 3634 | codec->power_transition = 0; |
3631 | codec->power_jiffies = jiffies; | 3635 | codec->power_jiffies = jiffies; |
3632 | spin_unlock(&codec->power_lock); | 3636 | spin_unlock(&codec->power_lock); |
3637 | codec->in_pm = 0; | ||
3633 | return state; | 3638 | return state; |
3634 | } | 3639 | } |
3635 | 3640 | ||
@@ -3638,6 +3643,8 @@ static unsigned int hda_call_codec_suspend(struct hda_codec *codec, bool in_wq) | |||
3638 | */ | 3643 | */ |
3639 | static void hda_call_codec_resume(struct hda_codec *codec) | 3644 | static void hda_call_codec_resume(struct hda_codec *codec) |
3640 | { | 3645 | { |
3646 | codec->in_pm = 1; | ||
3647 | |||
3641 | /* set as if powered on for avoiding re-entering the resume | 3648 | /* set as if powered on for avoiding re-entering the resume |
3642 | * in the resume / power-save sequence | 3649 | * in the resume / power-save sequence |
3643 | */ | 3650 | */ |
@@ -3656,6 +3663,8 @@ static void hda_call_codec_resume(struct hda_codec *codec) | |||
3656 | snd_hda_codec_resume_cache(codec); | 3663 | snd_hda_codec_resume_cache(codec); |
3657 | } | 3664 | } |
3658 | snd_hda_jack_report_sync(codec); | 3665 | snd_hda_jack_report_sync(codec); |
3666 | |||
3667 | codec->in_pm = 0; | ||
3659 | snd_hda_power_down(codec); /* flag down before returning */ | 3668 | snd_hda_power_down(codec); /* flag down before returning */ |
3660 | } | 3669 | } |
3661 | #endif /* CONFIG_PM */ | 3670 | #endif /* CONFIG_PM */ |
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 507fe8a917b6..4f4e545c0f4b 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -869,6 +869,7 @@ struct hda_codec { | |||
869 | unsigned int power_on :1; /* current (global) power-state */ | 869 | unsigned int power_on :1; /* current (global) power-state */ |
870 | unsigned int d3_stop_clk:1; /* support D3 operation without BCLK */ | 870 | unsigned int d3_stop_clk:1; /* support D3 operation without BCLK */ |
871 | unsigned int pm_down_notified:1; /* PM notified to controller */ | 871 | unsigned int pm_down_notified:1; /* PM notified to controller */ |
872 | unsigned int in_pm:1; /* suspend/resume being performed */ | ||
872 | int power_transition; /* power-state in transition */ | 873 | int power_transition; /* power-state in transition */ |
873 | int power_count; /* current (global) power refcount */ | 874 | int power_count; /* current (global) power refcount */ |
874 | struct delayed_work power_work; /* delayed task for powerdown */ | 875 | struct delayed_work power_work; /* delayed task for powerdown */ |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index cd2dbaf1be78..f9d870e554d9 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -556,6 +556,12 @@ enum { | |||
556 | #define AZX_DCAPS_ALIGN_BUFSIZE (1 << 22) /* buffer size alignment */ | 556 | #define AZX_DCAPS_ALIGN_BUFSIZE (1 << 22) /* buffer size alignment */ |
557 | #define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */ | 557 | #define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */ |
558 | #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */ | 558 | #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */ |
559 | #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ | ||
560 | |||
561 | /* quirks for Intel PCH */ | ||
562 | #define AZX_DCAPS_INTEL_PCH \ | ||
563 | (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \ | ||
564 | AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_PM_RUNTIME) | ||
559 | 565 | ||
560 | /* quirks for ATI SB / AMD Hudson */ | 566 | /* quirks for ATI SB / AMD Hudson */ |
561 | #define AZX_DCAPS_PRESET_ATI_SB \ | 567 | #define AZX_DCAPS_PRESET_ATI_SB \ |
@@ -2433,6 +2439,9 @@ static void azx_power_notify(struct hda_bus *bus, bool power_up) | |||
2433 | { | 2439 | { |
2434 | struct azx *chip = bus->private_data; | 2440 | struct azx *chip = bus->private_data; |
2435 | 2441 | ||
2442 | if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) | ||
2443 | return; | ||
2444 | |||
2436 | if (power_up) | 2445 | if (power_up) |
2437 | pm_runtime_get_sync(&chip->pci->dev); | 2446 | pm_runtime_get_sync(&chip->pci->dev); |
2438 | else | 2447 | else |
@@ -2548,7 +2557,8 @@ static int azx_runtime_suspend(struct device *dev) | |||
2548 | struct snd_card *card = dev_get_drvdata(dev); | 2557 | struct snd_card *card = dev_get_drvdata(dev); |
2549 | struct azx *chip = card->private_data; | 2558 | struct azx *chip = card->private_data; |
2550 | 2559 | ||
2551 | if (!power_save_controller) | 2560 | if (!power_save_controller || |
2561 | !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) | ||
2552 | return -EAGAIN; | 2562 | return -EAGAIN; |
2553 | 2563 | ||
2554 | azx_stop_chip(chip); | 2564 | azx_stop_chip(chip); |
@@ -3429,39 +3439,30 @@ static void __devexit azx_remove(struct pci_dev *pci) | |||
3429 | static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { | 3439 | static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { |
3430 | /* CPT */ | 3440 | /* CPT */ |
3431 | { PCI_DEVICE(0x8086, 0x1c20), | 3441 | { PCI_DEVICE(0x8086, 0x1c20), |
3432 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | | 3442 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, |
3433 | AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, | ||
3434 | /* PBG */ | 3443 | /* PBG */ |
3435 | { PCI_DEVICE(0x8086, 0x1d20), | 3444 | { PCI_DEVICE(0x8086, 0x1d20), |
3436 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | | 3445 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, |
3437 | AZX_DCAPS_BUFSIZE}, | ||
3438 | /* Panther Point */ | 3446 | /* Panther Point */ |
3439 | { PCI_DEVICE(0x8086, 0x1e20), | 3447 | { PCI_DEVICE(0x8086, 0x1e20), |
3440 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | | 3448 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, |
3441 | AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, | ||
3442 | /* Lynx Point */ | 3449 | /* Lynx Point */ |
3443 | { PCI_DEVICE(0x8086, 0x8c20), | 3450 | { PCI_DEVICE(0x8086, 0x8c20), |
3444 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | | 3451 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, |
3445 | AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, | ||
3446 | /* Lynx Point-LP */ | 3452 | /* Lynx Point-LP */ |
3447 | { PCI_DEVICE(0x8086, 0x9c20), | 3453 | { PCI_DEVICE(0x8086, 0x9c20), |
3448 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | | 3454 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, |
3449 | AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, | ||
3450 | /* Lynx Point-LP */ | 3455 | /* Lynx Point-LP */ |
3451 | { PCI_DEVICE(0x8086, 0x9c21), | 3456 | { PCI_DEVICE(0x8086, 0x9c21), |
3452 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | | 3457 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, |
3453 | AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, | ||
3454 | /* Haswell */ | 3458 | /* Haswell */ |
3455 | { PCI_DEVICE(0x8086, 0x0c0c), | 3459 | { PCI_DEVICE(0x8086, 0x0c0c), |
3456 | .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | | 3460 | .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH }, |
3457 | AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, | ||
3458 | { PCI_DEVICE(0x8086, 0x0d0c), | 3461 | { PCI_DEVICE(0x8086, 0x0d0c), |
3459 | .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | | 3462 | .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH }, |
3460 | AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, | ||
3461 | /* 5 Series/3400 */ | 3463 | /* 5 Series/3400 */ |
3462 | { PCI_DEVICE(0x8086, 0x3b56), | 3464 | { PCI_DEVICE(0x8086, 0x3b56), |
3463 | .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | | 3465 | .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH }, |
3464 | AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, | ||
3465 | /* SCH */ | 3466 | /* SCH */ |
3466 | { PCI_DEVICE(0x8086, 0x811b), | 3467 | { PCI_DEVICE(0x8086, 0x811b), |
3467 | .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | | 3468 | .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | |
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index d5f3a26d608d..3bcb67172358 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c | |||
@@ -466,6 +466,7 @@ static int parse_output(struct hda_codec *codec) | |||
466 | memcpy(cfg->speaker_pins, cfg->line_out_pins, | 466 | memcpy(cfg->speaker_pins, cfg->line_out_pins, |
467 | sizeof(cfg->speaker_pins)); | 467 | sizeof(cfg->speaker_pins)); |
468 | cfg->line_outs = 0; | 468 | cfg->line_outs = 0; |
469 | memset(cfg->line_out_pins, 0, sizeof(cfg->line_out_pins)); | ||
469 | } | 470 | } |
470 | 471 | ||
471 | return 0; | 472 | return 0; |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index c0ce3b1f04b4..ad68d223f8af 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -5407,6 +5407,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { | |||
5407 | SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF), | 5407 | SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF), |
5408 | SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF), | 5408 | SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF), |
5409 | SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO), | 5409 | SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO), |
5410 | SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF), | ||
5410 | SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF), | 5411 | SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF), |
5411 | SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF), | 5412 | SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF), |
5412 | SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF), | 5413 | SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF), |
@@ -7064,6 +7065,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = { | |||
7064 | { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 }, | 7065 | { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 }, |
7065 | { .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 }, | 7066 | { .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 }, |
7066 | { .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 }, | 7067 | { .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 }, |
7068 | { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 }, | ||
7067 | { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660", | 7069 | { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660", |
7068 | .patch = patch_alc861 }, | 7070 | .patch = patch_alc861 }, |
7069 | { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd }, | 7071 | { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd }, |
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c index c03b65af3059..054967d8bac2 100644 --- a/sound/soc/codecs/arizona.c +++ b/sound/soc/codecs/arizona.c | |||
@@ -268,7 +268,7 @@ EXPORT_SYMBOL_GPL(arizona_out_ev); | |||
268 | static unsigned int arizona_sysclk_48k_rates[] = { | 268 | static unsigned int arizona_sysclk_48k_rates[] = { |
269 | 6144000, | 269 | 6144000, |
270 | 12288000, | 270 | 12288000, |
271 | 22579200, | 271 | 24576000, |
272 | 49152000, | 272 | 49152000, |
273 | 73728000, | 273 | 73728000, |
274 | 98304000, | 274 | 98304000, |
@@ -278,7 +278,7 @@ static unsigned int arizona_sysclk_48k_rates[] = { | |||
278 | static unsigned int arizona_sysclk_44k1_rates[] = { | 278 | static unsigned int arizona_sysclk_44k1_rates[] = { |
279 | 5644800, | 279 | 5644800, |
280 | 11289600, | 280 | 11289600, |
281 | 24576000, | 281 | 22579200, |
282 | 45158400, | 282 | 45158400, |
283 | 67737600, | 283 | 67737600, |
284 | 90316800, | 284 | 90316800, |
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c index f994af34f552..e3f0a7f3131e 100644 --- a/sound/soc/codecs/cs4271.c +++ b/sound/soc/codecs/cs4271.c | |||
@@ -485,7 +485,7 @@ static int cs4271_probe(struct snd_soc_codec *codec) | |||
485 | gpio_nreset = cs4271plat->gpio_nreset; | 485 | gpio_nreset = cs4271plat->gpio_nreset; |
486 | 486 | ||
487 | if (gpio_nreset >= 0) | 487 | if (gpio_nreset >= 0) |
488 | if (gpio_request(gpio_nreset, "CS4271 Reset")) | 488 | if (devm_gpio_request(codec->dev, gpio_nreset, "CS4271 Reset")) |
489 | gpio_nreset = -EINVAL; | 489 | gpio_nreset = -EINVAL; |
490 | if (gpio_nreset >= 0) { | 490 | if (gpio_nreset >= 0) { |
491 | /* Reset codec */ | 491 | /* Reset codec */ |
@@ -535,15 +535,10 @@ static int cs4271_probe(struct snd_soc_codec *codec) | |||
535 | static int cs4271_remove(struct snd_soc_codec *codec) | 535 | static int cs4271_remove(struct snd_soc_codec *codec) |
536 | { | 536 | { |
537 | struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); | 537 | struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); |
538 | int gpio_nreset; | ||
539 | 538 | ||
540 | gpio_nreset = cs4271->gpio_nreset; | 539 | if (gpio_is_valid(cs4271->gpio_nreset)) |
541 | |||
542 | if (gpio_is_valid(gpio_nreset)) { | ||
543 | /* Set codec to the reset state */ | 540 | /* Set codec to the reset state */ |
544 | gpio_set_value(gpio_nreset, 0); | 541 | gpio_set_value(cs4271->gpio_nreset, 0); |
545 | gpio_free(gpio_nreset); | ||
546 | } | ||
547 | 542 | ||
548 | return 0; | 543 | return 0; |
549 | }; | 544 | }; |
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c index 4d8db3685e96..97a81051e88d 100644 --- a/sound/soc/codecs/cs42l52.c +++ b/sound/soc/codecs/cs42l52.c | |||
@@ -773,7 +773,6 @@ static int cs42l52_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) | |||
773 | { | 773 | { |
774 | struct snd_soc_codec *codec = codec_dai->codec; | 774 | struct snd_soc_codec *codec = codec_dai->codec; |
775 | struct cs42l52_private *cs42l52 = snd_soc_codec_get_drvdata(codec); | 775 | struct cs42l52_private *cs42l52 = snd_soc_codec_get_drvdata(codec); |
776 | int ret = 0; | ||
777 | u8 iface = 0; | 776 | u8 iface = 0; |
778 | 777 | ||
779 | switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { | 778 | switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { |
@@ -822,7 +821,7 @@ static int cs42l52_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) | |||
822 | case SND_SOC_DAIFMT_NB_IF: | 821 | case SND_SOC_DAIFMT_NB_IF: |
823 | break; | 822 | break; |
824 | default: | 823 | default: |
825 | ret = -EINVAL; | 824 | return -EINVAL; |
826 | } | 825 | } |
827 | cs42l52->config.format = iface; | 826 | cs42l52->config.format = iface; |
828 | snd_soc_write(codec, CS42L52_IFACE_CTL1, cs42l52->config.format); | 827 | snd_soc_write(codec, CS42L52_IFACE_CTL1, cs42l52->config.format); |
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c index 1722b586bdba..7394e73fa43c 100644 --- a/sound/soc/codecs/wm5102.c +++ b/sound/soc/codecs/wm5102.c | |||
@@ -42,6 +42,556 @@ static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0); | |||
42 | static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0); | 42 | static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0); |
43 | static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0); | 43 | static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0); |
44 | 44 | ||
45 | static const struct reg_default wm5102_sysclk_reva_patch[] = { | ||
46 | { 0x3000, 0x2225 }, | ||
47 | { 0x3001, 0x3a03 }, | ||
48 | { 0x3002, 0x0225 }, | ||
49 | { 0x3003, 0x0801 }, | ||
50 | { 0x3004, 0x6249 }, | ||
51 | { 0x3005, 0x0c04 }, | ||
52 | { 0x3006, 0x0225 }, | ||
53 | { 0x3007, 0x5901 }, | ||
54 | { 0x3008, 0xe249 }, | ||
55 | { 0x3009, 0x030d }, | ||
56 | { 0x300a, 0x0249 }, | ||
57 | { 0x300b, 0x2c01 }, | ||
58 | { 0x300c, 0xe249 }, | ||
59 | { 0x300d, 0x4342 }, | ||
60 | { 0x300e, 0xe249 }, | ||
61 | { 0x300f, 0x73c0 }, | ||
62 | { 0x3010, 0x4249 }, | ||
63 | { 0x3011, 0x0c00 }, | ||
64 | { 0x3012, 0x0225 }, | ||
65 | { 0x3013, 0x1f01 }, | ||
66 | { 0x3014, 0x0225 }, | ||
67 | { 0x3015, 0x1e01 }, | ||
68 | { 0x3016, 0x0225 }, | ||
69 | { 0x3017, 0xfa00 }, | ||
70 | { 0x3018, 0x0000 }, | ||
71 | { 0x3019, 0xf000 }, | ||
72 | { 0x301a, 0x0000 }, | ||
73 | { 0x301b, 0xf000 }, | ||
74 | { 0x301c, 0x0000 }, | ||
75 | { 0x301d, 0xf000 }, | ||
76 | { 0x301e, 0x0000 }, | ||
77 | { 0x301f, 0xf000 }, | ||
78 | { 0x3020, 0x0000 }, | ||
79 | { 0x3021, 0xf000 }, | ||
80 | { 0x3022, 0x0000 }, | ||
81 | { 0x3023, 0xf000 }, | ||
82 | { 0x3024, 0x0000 }, | ||
83 | { 0x3025, 0xf000 }, | ||
84 | { 0x3026, 0x0000 }, | ||
85 | { 0x3027, 0xf000 }, | ||
86 | { 0x3028, 0x0000 }, | ||
87 | { 0x3029, 0xf000 }, | ||
88 | { 0x302a, 0x0000 }, | ||
89 | { 0x302b, 0xf000 }, | ||
90 | { 0x302c, 0x0000 }, | ||
91 | { 0x302d, 0xf000 }, | ||
92 | { 0x302e, 0x0000 }, | ||
93 | { 0x302f, 0xf000 }, | ||
94 | { 0x3030, 0x0225 }, | ||
95 | { 0x3031, 0x1a01 }, | ||
96 | { 0x3032, 0x0225 }, | ||
97 | { 0x3033, 0x1e00 }, | ||
98 | { 0x3034, 0x0225 }, | ||
99 | { 0x3035, 0x1f00 }, | ||
100 | { 0x3036, 0x6225 }, | ||
101 | { 0x3037, 0xf800 }, | ||
102 | { 0x3038, 0x0000 }, | ||
103 | { 0x3039, 0xf000 }, | ||
104 | { 0x303a, 0x0000 }, | ||
105 | { 0x303b, 0xf000 }, | ||
106 | { 0x303c, 0x0000 }, | ||
107 | { 0x303d, 0xf000 }, | ||
108 | { 0x303e, 0x0000 }, | ||
109 | { 0x303f, 0xf000 }, | ||
110 | { 0x3040, 0x2226 }, | ||
111 | { 0x3041, 0x3a03 }, | ||
112 | { 0x3042, 0x0226 }, | ||
113 | { 0x3043, 0x0801 }, | ||
114 | { 0x3044, 0x6249 }, | ||
115 | { 0x3045, 0x0c06 }, | ||
116 | { 0x3046, 0x0226 }, | ||
117 | { 0x3047, 0x5901 }, | ||
118 | { 0x3048, 0xe249 }, | ||
119 | { 0x3049, 0x030d }, | ||
120 | { 0x304a, 0x0249 }, | ||
121 | { 0x304b, 0x2c01 }, | ||
122 | { 0x304c, 0xe249 }, | ||
123 | { 0x304d, 0x4342 }, | ||
124 | { 0x304e, 0xe249 }, | ||
125 | { 0x304f, 0x73c0 }, | ||
126 | { 0x3050, 0x4249 }, | ||
127 | { 0x3051, 0x0c00 }, | ||
128 | { 0x3052, 0x0226 }, | ||
129 | { 0x3053, 0x1f01 }, | ||
130 | { 0x3054, 0x0226 }, | ||
131 | { 0x3055, 0x1e01 }, | ||
132 | { 0x3056, 0x0226 }, | ||
133 | { 0x3057, 0xfa00 }, | ||
134 | { 0x3058, 0x0000 }, | ||
135 | { 0x3059, 0xf000 }, | ||
136 | { 0x305a, 0x0000 }, | ||
137 | { 0x305b, 0xf000 }, | ||
138 | { 0x305c, 0x0000 }, | ||
139 | { 0x305d, 0xf000 }, | ||
140 | { 0x305e, 0x0000 }, | ||
141 | { 0x305f, 0xf000 }, | ||
142 | { 0x3060, 0x0000 }, | ||
143 | { 0x3061, 0xf000 }, | ||
144 | { 0x3062, 0x0000 }, | ||
145 | { 0x3063, 0xf000 }, | ||
146 | { 0x3064, 0x0000 }, | ||
147 | { 0x3065, 0xf000 }, | ||
148 | { 0x3066, 0x0000 }, | ||
149 | { 0x3067, 0xf000 }, | ||
150 | { 0x3068, 0x0000 }, | ||
151 | { 0x3069, 0xf000 }, | ||
152 | { 0x306a, 0x0000 }, | ||
153 | { 0x306b, 0xf000 }, | ||
154 | { 0x306c, 0x0000 }, | ||
155 | { 0x306d, 0xf000 }, | ||
156 | { 0x306e, 0x0000 }, | ||
157 | { 0x306f, 0xf000 }, | ||
158 | { 0x3070, 0x0226 }, | ||
159 | { 0x3071, 0x1a01 }, | ||
160 | { 0x3072, 0x0226 }, | ||
161 | { 0x3073, 0x1e00 }, | ||
162 | { 0x3074, 0x0226 }, | ||
163 | { 0x3075, 0x1f00 }, | ||
164 | { 0x3076, 0x6226 }, | ||
165 | { 0x3077, 0xf800 }, | ||
166 | { 0x3078, 0x0000 }, | ||
167 | { 0x3079, 0xf000 }, | ||
168 | { 0x307a, 0x0000 }, | ||
169 | { 0x307b, 0xf000 }, | ||
170 | { 0x307c, 0x0000 }, | ||
171 | { 0x307d, 0xf000 }, | ||
172 | { 0x307e, 0x0000 }, | ||
173 | { 0x307f, 0xf000 }, | ||
174 | { 0x3080, 0x2227 }, | ||
175 | { 0x3081, 0x3a03 }, | ||
176 | { 0x3082, 0x0227 }, | ||
177 | { 0x3083, 0x0801 }, | ||
178 | { 0x3084, 0x6255 }, | ||
179 | { 0x3085, 0x0c04 }, | ||
180 | { 0x3086, 0x0227 }, | ||
181 | { 0x3087, 0x5901 }, | ||
182 | { 0x3088, 0xe255 }, | ||
183 | { 0x3089, 0x030d }, | ||
184 | { 0x308a, 0x0255 }, | ||
185 | { 0x308b, 0x2c01 }, | ||
186 | { 0x308c, 0xe255 }, | ||
187 | { 0x308d, 0x4342 }, | ||
188 | { 0x308e, 0xe255 }, | ||
189 | { 0x308f, 0x73c0 }, | ||
190 | { 0x3090, 0x4255 }, | ||
191 | { 0x3091, 0x0c00 }, | ||
192 | { 0x3092, 0x0227 }, | ||
193 | { 0x3093, 0x1f01 }, | ||
194 | { 0x3094, 0x0227 }, | ||
195 | { 0x3095, 0x1e01 }, | ||
196 | { 0x3096, 0x0227 }, | ||
197 | { 0x3097, 0xfa00 }, | ||
198 | { 0x3098, 0x0000 }, | ||
199 | { 0x3099, 0xf000 }, | ||
200 | { 0x309a, 0x0000 }, | ||
201 | { 0x309b, 0xf000 }, | ||
202 | { 0x309c, 0x0000 }, | ||
203 | { 0x309d, 0xf000 }, | ||
204 | { 0x309e, 0x0000 }, | ||
205 | { 0x309f, 0xf000 }, | ||
206 | { 0x30a0, 0x0000 }, | ||
207 | { 0x30a1, 0xf000 }, | ||
208 | { 0x30a2, 0x0000 }, | ||
209 | { 0x30a3, 0xf000 }, | ||
210 | { 0x30a4, 0x0000 }, | ||
211 | { 0x30a5, 0xf000 }, | ||
212 | { 0x30a6, 0x0000 }, | ||
213 | { 0x30a7, 0xf000 }, | ||
214 | { 0x30a8, 0x0000 }, | ||
215 | { 0x30a9, 0xf000 }, | ||
216 | { 0x30aa, 0x0000 }, | ||
217 | { 0x30ab, 0xf000 }, | ||
218 | { 0x30ac, 0x0000 }, | ||
219 | { 0x30ad, 0xf000 }, | ||
220 | { 0x30ae, 0x0000 }, | ||
221 | { 0x30af, 0xf000 }, | ||
222 | { 0x30b0, 0x0227 }, | ||
223 | { 0x30b1, 0x1a01 }, | ||
224 | { 0x30b2, 0x0227 }, | ||
225 | { 0x30b3, 0x1e00 }, | ||
226 | { 0x30b4, 0x0227 }, | ||
227 | { 0x30b5, 0x1f00 }, | ||
228 | { 0x30b6, 0x6227 }, | ||
229 | { 0x30b7, 0xf800 }, | ||
230 | { 0x30b8, 0x0000 }, | ||
231 | { 0x30b9, 0xf000 }, | ||
232 | { 0x30ba, 0x0000 }, | ||
233 | { 0x30bb, 0xf000 }, | ||
234 | { 0x30bc, 0x0000 }, | ||
235 | { 0x30bd, 0xf000 }, | ||
236 | { 0x30be, 0x0000 }, | ||
237 | { 0x30bf, 0xf000 }, | ||
238 | { 0x30c0, 0x2228 }, | ||
239 | { 0x30c1, 0x3a03 }, | ||
240 | { 0x30c2, 0x0228 }, | ||
241 | { 0x30c3, 0x0801 }, | ||
242 | { 0x30c4, 0x6255 }, | ||
243 | { 0x30c5, 0x0c06 }, | ||
244 | { 0x30c6, 0x0228 }, | ||
245 | { 0x30c7, 0x5901 }, | ||
246 | { 0x30c8, 0xe255 }, | ||
247 | { 0x30c9, 0x030d }, | ||
248 | { 0x30ca, 0x0255 }, | ||
249 | { 0x30cb, 0x2c01 }, | ||
250 | { 0x30cc, 0xe255 }, | ||
251 | { 0x30cd, 0x4342 }, | ||
252 | { 0x30ce, 0xe255 }, | ||
253 | { 0x30cf, 0x73c0 }, | ||
254 | { 0x30d0, 0x4255 }, | ||
255 | { 0x30d1, 0x0c00 }, | ||
256 | { 0x30d2, 0x0228 }, | ||
257 | { 0x30d3, 0x1f01 }, | ||
258 | { 0x30d4, 0x0228 }, | ||
259 | { 0x30d5, 0x1e01 }, | ||
260 | { 0x30d6, 0x0228 }, | ||
261 | { 0x30d7, 0xfa00 }, | ||
262 | { 0x30d8, 0x0000 }, | ||
263 | { 0x30d9, 0xf000 }, | ||
264 | { 0x30da, 0x0000 }, | ||
265 | { 0x30db, 0xf000 }, | ||
266 | { 0x30dc, 0x0000 }, | ||
267 | { 0x30dd, 0xf000 }, | ||
268 | { 0x30de, 0x0000 }, | ||
269 | { 0x30df, 0xf000 }, | ||
270 | { 0x30e0, 0x0000 }, | ||
271 | { 0x30e1, 0xf000 }, | ||
272 | { 0x30e2, 0x0000 }, | ||
273 | { 0x30e3, 0xf000 }, | ||
274 | { 0x30e4, 0x0000 }, | ||
275 | { 0x30e5, 0xf000 }, | ||
276 | { 0x30e6, 0x0000 }, | ||
277 | { 0x30e7, 0xf000 }, | ||
278 | { 0x30e8, 0x0000 }, | ||
279 | { 0x30e9, 0xf000 }, | ||
280 | { 0x30ea, 0x0000 }, | ||
281 | { 0x30eb, 0xf000 }, | ||
282 | { 0x30ec, 0x0000 }, | ||
283 | { 0x30ed, 0xf000 }, | ||
284 | { 0x30ee, 0x0000 }, | ||
285 | { 0x30ef, 0xf000 }, | ||
286 | { 0x30f0, 0x0228 }, | ||
287 | { 0x30f1, 0x1a01 }, | ||
288 | { 0x30f2, 0x0228 }, | ||
289 | { 0x30f3, 0x1e00 }, | ||
290 | { 0x30f4, 0x0228 }, | ||
291 | { 0x30f5, 0x1f00 }, | ||
292 | { 0x30f6, 0x6228 }, | ||
293 | { 0x30f7, 0xf800 }, | ||
294 | { 0x30f8, 0x0000 }, | ||
295 | { 0x30f9, 0xf000 }, | ||
296 | { 0x30fa, 0x0000 }, | ||
297 | { 0x30fb, 0xf000 }, | ||
298 | { 0x30fc, 0x0000 }, | ||
299 | { 0x30fd, 0xf000 }, | ||
300 | { 0x30fe, 0x0000 }, | ||
301 | { 0x30ff, 0xf000 }, | ||
302 | { 0x3100, 0x222b }, | ||
303 | { 0x3101, 0x3a03 }, | ||
304 | { 0x3102, 0x222b }, | ||
305 | { 0x3103, 0x5803 }, | ||
306 | { 0x3104, 0xe26f }, | ||
307 | { 0x3105, 0x030d }, | ||
308 | { 0x3106, 0x626f }, | ||
309 | { 0x3107, 0x2c01 }, | ||
310 | { 0x3108, 0xe26f }, | ||
311 | { 0x3109, 0x4342 }, | ||
312 | { 0x310a, 0xe26f }, | ||
313 | { 0x310b, 0x73c0 }, | ||
314 | { 0x310c, 0x026f }, | ||
315 | { 0x310d, 0x0c00 }, | ||
316 | { 0x310e, 0x022b }, | ||
317 | { 0x310f, 0x1f01 }, | ||
318 | { 0x3110, 0x022b }, | ||
319 | { 0x3111, 0x1e01 }, | ||
320 | { 0x3112, 0x022b }, | ||
321 | { 0x3113, 0xfa00 }, | ||
322 | { 0x3114, 0x0000 }, | ||
323 | { 0x3115, 0xf000 }, | ||
324 | { 0x3116, 0x0000 }, | ||
325 | { 0x3117, 0xf000 }, | ||
326 | { 0x3118, 0x0000 }, | ||
327 | { 0x3119, 0xf000 }, | ||
328 | { 0x311a, 0x0000 }, | ||
329 | { 0x311b, 0xf000 }, | ||
330 | { 0x311c, 0x0000 }, | ||
331 | { 0x311d, 0xf000 }, | ||
332 | { 0x311e, 0x0000 }, | ||
333 | { 0x311f, 0xf000 }, | ||
334 | { 0x3120, 0x022b }, | ||
335 | { 0x3121, 0x0a01 }, | ||
336 | { 0x3122, 0x022b }, | ||
337 | { 0x3123, 0x1e00 }, | ||
338 | { 0x3124, 0x022b }, | ||
339 | { 0x3125, 0x1f00 }, | ||
340 | { 0x3126, 0x622b }, | ||
341 | { 0x3127, 0xf800 }, | ||
342 | { 0x3128, 0x0000 }, | ||
343 | { 0x3129, 0xf000 }, | ||
344 | { 0x312a, 0x0000 }, | ||
345 | { 0x312b, 0xf000 }, | ||
346 | { 0x312c, 0x0000 }, | ||
347 | { 0x312d, 0xf000 }, | ||
348 | { 0x312e, 0x0000 }, | ||
349 | { 0x312f, 0xf000 }, | ||
350 | { 0x3130, 0x0000 }, | ||
351 | { 0x3131, 0xf000 }, | ||
352 | { 0x3132, 0x0000 }, | ||
353 | { 0x3133, 0xf000 }, | ||
354 | { 0x3134, 0x0000 }, | ||
355 | { 0x3135, 0xf000 }, | ||
356 | { 0x3136, 0x0000 }, | ||
357 | { 0x3137, 0xf000 }, | ||
358 | { 0x3138, 0x0000 }, | ||
359 | { 0x3139, 0xf000 }, | ||
360 | { 0x313a, 0x0000 }, | ||
361 | { 0x313b, 0xf000 }, | ||
362 | { 0x313c, 0x0000 }, | ||
363 | { 0x313d, 0xf000 }, | ||
364 | { 0x313e, 0x0000 }, | ||
365 | { 0x313f, 0xf000 }, | ||
366 | { 0x3140, 0x0000 }, | ||
367 | { 0x3141, 0xf000 }, | ||
368 | { 0x3142, 0x0000 }, | ||
369 | { 0x3143, 0xf000 }, | ||
370 | { 0x3144, 0x0000 }, | ||
371 | { 0x3145, 0xf000 }, | ||
372 | { 0x3146, 0x0000 }, | ||
373 | { 0x3147, 0xf000 }, | ||
374 | { 0x3148, 0x0000 }, | ||
375 | { 0x3149, 0xf000 }, | ||
376 | { 0x314a, 0x0000 }, | ||
377 | { 0x314b, 0xf000 }, | ||
378 | { 0x314c, 0x0000 }, | ||
379 | { 0x314d, 0xf000 }, | ||
380 | { 0x314e, 0x0000 }, | ||
381 | { 0x314f, 0xf000 }, | ||
382 | { 0x3150, 0x0000 }, | ||
383 | { 0x3151, 0xf000 }, | ||
384 | { 0x3152, 0x0000 }, | ||
385 | { 0x3153, 0xf000 }, | ||
386 | { 0x3154, 0x0000 }, | ||
387 | { 0x3155, 0xf000 }, | ||
388 | { 0x3156, 0x0000 }, | ||
389 | { 0x3157, 0xf000 }, | ||
390 | { 0x3158, 0x0000 }, | ||
391 | { 0x3159, 0xf000 }, | ||
392 | { 0x315a, 0x0000 }, | ||
393 | { 0x315b, 0xf000 }, | ||
394 | { 0x315c, 0x0000 }, | ||
395 | { 0x315d, 0xf000 }, | ||
396 | { 0x315e, 0x0000 }, | ||
397 | { 0x315f, 0xf000 }, | ||
398 | { 0x3160, 0x0000 }, | ||
399 | { 0x3161, 0xf000 }, | ||
400 | { 0x3162, 0x0000 }, | ||
401 | { 0x3163, 0xf000 }, | ||
402 | { 0x3164, 0x0000 }, | ||
403 | { 0x3165, 0xf000 }, | ||
404 | { 0x3166, 0x0000 }, | ||
405 | { 0x3167, 0xf000 }, | ||
406 | { 0x3168, 0x0000 }, | ||
407 | { 0x3169, 0xf000 }, | ||
408 | { 0x316a, 0x0000 }, | ||
409 | { 0x316b, 0xf000 }, | ||
410 | { 0x316c, 0x0000 }, | ||
411 | { 0x316d, 0xf000 }, | ||
412 | { 0x316e, 0x0000 }, | ||
413 | { 0x316f, 0xf000 }, | ||
414 | { 0x3170, 0x0000 }, | ||
415 | { 0x3171, 0xf000 }, | ||
416 | { 0x3172, 0x0000 }, | ||
417 | { 0x3173, 0xf000 }, | ||
418 | { 0x3174, 0x0000 }, | ||
419 | { 0x3175, 0xf000 }, | ||
420 | { 0x3176, 0x0000 }, | ||
421 | { 0x3177, 0xf000 }, | ||
422 | { 0x3178, 0x0000 }, | ||
423 | { 0x3179, 0xf000 }, | ||
424 | { 0x317a, 0x0000 }, | ||
425 | { 0x317b, 0xf000 }, | ||
426 | { 0x317c, 0x0000 }, | ||
427 | { 0x317d, 0xf000 }, | ||
428 | { 0x317e, 0x0000 }, | ||
429 | { 0x317f, 0xf000 }, | ||
430 | { 0x3180, 0x2001 }, | ||
431 | { 0x3181, 0xf101 }, | ||
432 | { 0x3182, 0x0000 }, | ||
433 | { 0x3183, 0xf000 }, | ||
434 | { 0x3184, 0x0000 }, | ||
435 | { 0x3185, 0xf000 }, | ||
436 | { 0x3186, 0x0000 }, | ||
437 | { 0x3187, 0xf000 }, | ||
438 | { 0x3188, 0x0000 }, | ||
439 | { 0x3189, 0xf000 }, | ||
440 | { 0x318a, 0x0000 }, | ||
441 | { 0x318b, 0xf000 }, | ||
442 | { 0x318c, 0x0000 }, | ||
443 | { 0x318d, 0xf000 }, | ||
444 | { 0x318e, 0x0000 }, | ||
445 | { 0x318f, 0xf000 }, | ||
446 | { 0x3190, 0x0000 }, | ||
447 | { 0x3191, 0xf000 }, | ||
448 | { 0x3192, 0x0000 }, | ||
449 | { 0x3193, 0xf000 }, | ||
450 | { 0x3194, 0x0000 }, | ||
451 | { 0x3195, 0xf000 }, | ||
452 | { 0x3196, 0x0000 }, | ||
453 | { 0x3197, 0xf000 }, | ||
454 | { 0x3198, 0x0000 }, | ||
455 | { 0x3199, 0xf000 }, | ||
456 | { 0x319a, 0x0000 }, | ||
457 | { 0x319b, 0xf000 }, | ||
458 | { 0x319c, 0x0000 }, | ||
459 | { 0x319d, 0xf000 }, | ||
460 | { 0x319e, 0x0000 }, | ||
461 | { 0x319f, 0xf000 }, | ||
462 | { 0x31a0, 0x0000 }, | ||
463 | { 0x31a1, 0xf000 }, | ||
464 | { 0x31a2, 0x0000 }, | ||
465 | { 0x31a3, 0xf000 }, | ||
466 | { 0x31a4, 0x0000 }, | ||
467 | { 0x31a5, 0xf000 }, | ||
468 | { 0x31a6, 0x0000 }, | ||
469 | { 0x31a7, 0xf000 }, | ||
470 | { 0x31a8, 0x0000 }, | ||
471 | { 0x31a9, 0xf000 }, | ||
472 | { 0x31aa, 0x0000 }, | ||
473 | { 0x31ab, 0xf000 }, | ||
474 | { 0x31ac, 0x0000 }, | ||
475 | { 0x31ad, 0xf000 }, | ||
476 | { 0x31ae, 0x0000 }, | ||
477 | { 0x31af, 0xf000 }, | ||
478 | { 0x31b0, 0x0000 }, | ||
479 | { 0x31b1, 0xf000 }, | ||
480 | { 0x31b2, 0x0000 }, | ||
481 | { 0x31b3, 0xf000 }, | ||
482 | { 0x31b4, 0x0000 }, | ||
483 | { 0x31b5, 0xf000 }, | ||
484 | { 0x31b6, 0x0000 }, | ||
485 | { 0x31b7, 0xf000 }, | ||
486 | { 0x31b8, 0x0000 }, | ||
487 | { 0x31b9, 0xf000 }, | ||
488 | { 0x31ba, 0x0000 }, | ||
489 | { 0x31bb, 0xf000 }, | ||
490 | { 0x31bc, 0x0000 }, | ||
491 | { 0x31bd, 0xf000 }, | ||
492 | { 0x31be, 0x0000 }, | ||
493 | { 0x31bf, 0xf000 }, | ||
494 | { 0x31c0, 0x0000 }, | ||
495 | { 0x31c1, 0xf000 }, | ||
496 | { 0x31c2, 0x0000 }, | ||
497 | { 0x31c3, 0xf000 }, | ||
498 | { 0x31c4, 0x0000 }, | ||
499 | { 0x31c5, 0xf000 }, | ||
500 | { 0x31c6, 0x0000 }, | ||
501 | { 0x31c7, 0xf000 }, | ||
502 | { 0x31c8, 0x0000 }, | ||
503 | { 0x31c9, 0xf000 }, | ||
504 | { 0x31ca, 0x0000 }, | ||
505 | { 0x31cb, 0xf000 }, | ||
506 | { 0x31cc, 0x0000 }, | ||
507 | { 0x31cd, 0xf000 }, | ||
508 | { 0x31ce, 0x0000 }, | ||
509 | { 0x31cf, 0xf000 }, | ||
510 | { 0x31d0, 0x0000 }, | ||
511 | { 0x31d1, 0xf000 }, | ||
512 | { 0x31d2, 0x0000 }, | ||
513 | { 0x31d3, 0xf000 }, | ||
514 | { 0x31d4, 0x0000 }, | ||
515 | { 0x31d5, 0xf000 }, | ||
516 | { 0x31d6, 0x0000 }, | ||
517 | { 0x31d7, 0xf000 }, | ||
518 | { 0x31d8, 0x0000 }, | ||
519 | { 0x31d9, 0xf000 }, | ||
520 | { 0x31da, 0x0000 }, | ||
521 | { 0x31db, 0xf000 }, | ||
522 | { 0x31dc, 0x0000 }, | ||
523 | { 0x31dd, 0xf000 }, | ||
524 | { 0x31de, 0x0000 }, | ||
525 | { 0x31df, 0xf000 }, | ||
526 | { 0x31e0, 0x0000 }, | ||
527 | { 0x31e1, 0xf000 }, | ||
528 | { 0x31e2, 0x0000 }, | ||
529 | { 0x31e3, 0xf000 }, | ||
530 | { 0x31e4, 0x0000 }, | ||
531 | { 0x31e5, 0xf000 }, | ||
532 | { 0x31e6, 0x0000 }, | ||
533 | { 0x31e7, 0xf000 }, | ||
534 | { 0x31e8, 0x0000 }, | ||
535 | { 0x31e9, 0xf000 }, | ||
536 | { 0x31ea, 0x0000 }, | ||
537 | { 0x31eb, 0xf000 }, | ||
538 | { 0x31ec, 0x0000 }, | ||
539 | { 0x31ed, 0xf000 }, | ||
540 | { 0x31ee, 0x0000 }, | ||
541 | { 0x31ef, 0xf000 }, | ||
542 | { 0x31f0, 0x0000 }, | ||
543 | { 0x31f1, 0xf000 }, | ||
544 | { 0x31f2, 0x0000 }, | ||
545 | { 0x31f3, 0xf000 }, | ||
546 | { 0x31f4, 0x0000 }, | ||
547 | { 0x31f5, 0xf000 }, | ||
548 | { 0x31f6, 0x0000 }, | ||
549 | { 0x31f7, 0xf000 }, | ||
550 | { 0x31f8, 0x0000 }, | ||
551 | { 0x31f9, 0xf000 }, | ||
552 | { 0x31fa, 0x0000 }, | ||
553 | { 0x31fb, 0xf000 }, | ||
554 | { 0x31fc, 0x0000 }, | ||
555 | { 0x31fd, 0xf000 }, | ||
556 | { 0x31fe, 0x0000 }, | ||
557 | { 0x31ff, 0xf000 }, | ||
558 | { 0x024d, 0xff50 }, | ||
559 | { 0x0252, 0xff50 }, | ||
560 | { 0x0259, 0x0112 }, | ||
561 | { 0x025e, 0x0112 }, | ||
562 | }; | ||
563 | |||
564 | static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w, | ||
565 | struct snd_kcontrol *kcontrol, int event) | ||
566 | { | ||
567 | struct snd_soc_codec *codec = w->codec; | ||
568 | struct arizona *arizona = dev_get_drvdata(codec->dev); | ||
569 | struct regmap *regmap = codec->control_data; | ||
570 | const struct reg_default *patch = NULL; | ||
571 | int i, patch_size; | ||
572 | |||
573 | switch (arizona->rev) { | ||
574 | case 0: | ||
575 | patch = wm5102_sysclk_reva_patch; | ||
576 | patch_size = ARRAY_SIZE(wm5102_sysclk_reva_patch); | ||
577 | break; | ||
578 | } | ||
579 | |||
580 | switch (event) { | ||
581 | case SND_SOC_DAPM_POST_PMU: | ||
582 | if (patch) | ||
583 | for (i = 0; i < patch_size; i++) | ||
584 | regmap_write(regmap, patch[i].reg, | ||
585 | patch[i].def); | ||
586 | break; | ||
587 | |||
588 | default: | ||
589 | break; | ||
590 | } | ||
591 | |||
592 | return 0; | ||
593 | } | ||
594 | |||
45 | static const struct snd_kcontrol_new wm5102_snd_controls[] = { | 595 | static const struct snd_kcontrol_new wm5102_snd_controls[] = { |
46 | SOC_SINGLE("IN1 High Performance Switch", ARIZONA_IN1L_CONTROL, | 596 | SOC_SINGLE("IN1 High Performance Switch", ARIZONA_IN1L_CONTROL, |
47 | ARIZONA_IN1_OSR_SHIFT, 1, 0), | 597 | ARIZONA_IN1_OSR_SHIFT, 1, 0), |
@@ -297,7 +847,7 @@ static const struct snd_kcontrol_new wm5102_aec_loopback_mux = | |||
297 | 847 | ||
298 | static const struct snd_soc_dapm_widget wm5102_dapm_widgets[] = { | 848 | static const struct snd_soc_dapm_widget wm5102_dapm_widgets[] = { |
299 | SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT, | 849 | SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT, |
300 | 0, NULL, 0), | 850 | 0, wm5102_sysclk_ev, SND_SOC_DAPM_POST_PMU), |
301 | SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1, | 851 | SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1, |
302 | ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0), | 852 | ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0), |
303 | SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK, | 853 | SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK, |
diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c index 5421fd9fbcb5..4c0a8e496131 100644 --- a/sound/soc/codecs/wm8978.c +++ b/sound/soc/codecs/wm8978.c | |||
@@ -782,7 +782,7 @@ static int wm8978_hw_params(struct snd_pcm_substream *substream, | |||
782 | wm8978->mclk_idx = -1; | 782 | wm8978->mclk_idx = -1; |
783 | f_sel = wm8978->f_mclk; | 783 | f_sel = wm8978->f_mclk; |
784 | } else { | 784 | } else { |
785 | if (!wm8978->f_pllout) { | 785 | if (!wm8978->f_opclk) { |
786 | /* We only enter here, if OPCLK is not used */ | 786 | /* We only enter here, if OPCLK is not used */ |
787 | int ret = wm8978_configure_pll(codec); | 787 | int ret = wm8978_configure_pll(codec); |
788 | if (ret < 0) | 788 | if (ret < 0) |
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c index b9f16598324c..2ba08148655f 100644 --- a/sound/soc/kirkwood/kirkwood-dma.c +++ b/sound/soc/kirkwood/kirkwood-dma.c | |||
@@ -71,7 +71,6 @@ static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id) | |||
71 | printk(KERN_WARNING "%s: got err interrupt 0x%lx\n", | 71 | printk(KERN_WARNING "%s: got err interrupt 0x%lx\n", |
72 | __func__, cause); | 72 | __func__, cause); |
73 | writel(cause, priv->io + KIRKWOOD_ERR_CAUSE); | 73 | writel(cause, priv->io + KIRKWOOD_ERR_CAUSE); |
74 | return IRQ_HANDLED; | ||
75 | } | 74 | } |
76 | 75 | ||
77 | /* we've enabled only bytes interrupts ... */ | 76 | /* we've enabled only bytes interrupts ... */ |
@@ -178,7 +177,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream) | |||
178 | } | 177 | } |
179 | 178 | ||
180 | dram = mv_mbus_dram_info(); | 179 | dram = mv_mbus_dram_info(); |
181 | addr = virt_to_phys(substream->dma_buffer.area); | 180 | addr = substream->dma_buffer.addr; |
182 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { | 181 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { |
183 | prdata->play_stream = substream; | 182 | prdata->play_stream = substream; |
184 | kirkwood_dma_conf_mbus_windows(priv->io, | 183 | kirkwood_dma_conf_mbus_windows(priv->io, |
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c index 542538d10ab7..1d5db484d2df 100644 --- a/sound/soc/kirkwood/kirkwood-i2s.c +++ b/sound/soc/kirkwood/kirkwood-i2s.c | |||
@@ -95,7 +95,7 @@ static inline void kirkwood_set_dco(void __iomem *io, unsigned long rate) | |||
95 | do { | 95 | do { |
96 | cpu_relax(); | 96 | cpu_relax(); |
97 | value = readl(io + KIRKWOOD_DCO_SPCR_STATUS); | 97 | value = readl(io + KIRKWOOD_DCO_SPCR_STATUS); |
98 | value &= KIRKWOOD_DCO_SPCR_STATUS; | 98 | value &= KIRKWOOD_DCO_SPCR_STATUS_DCO_LOCK; |
99 | } while (value == 0); | 99 | } while (value == 0); |
100 | } | 100 | } |
101 | 101 | ||
@@ -180,67 +180,72 @@ static int kirkwood_i2s_play_trigger(struct snd_pcm_substream *substream, | |||
180 | int cmd, struct snd_soc_dai *dai) | 180 | int cmd, struct snd_soc_dai *dai) |
181 | { | 181 | { |
182 | struct kirkwood_dma_data *priv = snd_soc_dai_get_drvdata(dai); | 182 | struct kirkwood_dma_data *priv = snd_soc_dai_get_drvdata(dai); |
183 | unsigned long value; | 183 | uint32_t ctl, value; |
184 | 184 | ||
185 | /* | 185 | ctl = readl(priv->io + KIRKWOOD_PLAYCTL); |
186 | * specs says KIRKWOOD_PLAYCTL must be read 2 times before | 186 | if (ctl & KIRKWOOD_PLAYCTL_PAUSE) { |
187 | * changing it. So read 1 time here and 1 later. | 187 | unsigned timeout = 5000; |
188 | */ | 188 | /* |
189 | value = readl(priv->io + KIRKWOOD_PLAYCTL); | 189 | * The Armada510 spec says that if we enter pause mode, the |
190 | * busy bit must be read back as clear _twice_. Make sure | ||
191 | * we respect that otherwise we get DMA underruns. | ||
192 | */ | ||
193 | do { | ||
194 | value = ctl; | ||
195 | ctl = readl(priv->io + KIRKWOOD_PLAYCTL); | ||
196 | if (!((ctl | value) & KIRKWOOD_PLAYCTL_PLAY_BUSY)) | ||
197 | break; | ||
198 | udelay(1); | ||
199 | } while (timeout--); | ||
200 | |||
201 | if ((ctl | value) & KIRKWOOD_PLAYCTL_PLAY_BUSY) | ||
202 | dev_notice(dai->dev, "timed out waiting for busy to deassert: %08x\n", | ||
203 | ctl); | ||
204 | } | ||
190 | 205 | ||
191 | switch (cmd) { | 206 | switch (cmd) { |
192 | case SNDRV_PCM_TRIGGER_START: | 207 | case SNDRV_PCM_TRIGGER_START: |
193 | /* stop audio, enable interrupts */ | ||
194 | value = readl(priv->io + KIRKWOOD_PLAYCTL); | ||
195 | value |= KIRKWOOD_PLAYCTL_PAUSE; | ||
196 | writel(value, priv->io + KIRKWOOD_PLAYCTL); | ||
197 | |||
198 | value = readl(priv->io + KIRKWOOD_INT_MASK); | 208 | value = readl(priv->io + KIRKWOOD_INT_MASK); |
199 | value |= KIRKWOOD_INT_CAUSE_PLAY_BYTES; | 209 | value |= KIRKWOOD_INT_CAUSE_PLAY_BYTES; |
200 | writel(value, priv->io + KIRKWOOD_INT_MASK); | 210 | writel(value, priv->io + KIRKWOOD_INT_MASK); |
201 | 211 | ||
202 | /* configure audio & enable i2s playback */ | 212 | /* configure audio & enable i2s playback */ |
203 | value = readl(priv->io + KIRKWOOD_PLAYCTL); | 213 | ctl &= ~KIRKWOOD_PLAYCTL_BURST_MASK; |
204 | value &= ~KIRKWOOD_PLAYCTL_BURST_MASK; | 214 | ctl &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE |
205 | value &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE | ||
206 | | KIRKWOOD_PLAYCTL_SPDIF_EN); | 215 | | KIRKWOOD_PLAYCTL_SPDIF_EN); |
207 | 216 | ||
208 | if (priv->burst == 32) | 217 | if (priv->burst == 32) |
209 | value |= KIRKWOOD_PLAYCTL_BURST_32; | 218 | ctl |= KIRKWOOD_PLAYCTL_BURST_32; |
210 | else | 219 | else |
211 | value |= KIRKWOOD_PLAYCTL_BURST_128; | 220 | ctl |= KIRKWOOD_PLAYCTL_BURST_128; |
212 | value |= KIRKWOOD_PLAYCTL_I2S_EN; | 221 | ctl |= KIRKWOOD_PLAYCTL_I2S_EN; |
213 | writel(value, priv->io + KIRKWOOD_PLAYCTL); | 222 | writel(ctl, priv->io + KIRKWOOD_PLAYCTL); |
214 | break; | 223 | break; |
215 | 224 | ||
216 | case SNDRV_PCM_TRIGGER_STOP: | 225 | case SNDRV_PCM_TRIGGER_STOP: |
217 | /* stop audio, disable interrupts */ | 226 | /* stop audio, disable interrupts */ |
218 | value = readl(priv->io + KIRKWOOD_PLAYCTL); | 227 | ctl |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE; |
219 | value |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE; | 228 | writel(ctl, priv->io + KIRKWOOD_PLAYCTL); |
220 | writel(value, priv->io + KIRKWOOD_PLAYCTL); | ||
221 | 229 | ||
222 | value = readl(priv->io + KIRKWOOD_INT_MASK); | 230 | value = readl(priv->io + KIRKWOOD_INT_MASK); |
223 | value &= ~KIRKWOOD_INT_CAUSE_PLAY_BYTES; | 231 | value &= ~KIRKWOOD_INT_CAUSE_PLAY_BYTES; |
224 | writel(value, priv->io + KIRKWOOD_INT_MASK); | 232 | writel(value, priv->io + KIRKWOOD_INT_MASK); |
225 | 233 | ||
226 | /* disable all playbacks */ | 234 | /* disable all playbacks */ |
227 | value = readl(priv->io + KIRKWOOD_PLAYCTL); | 235 | ctl &= ~(KIRKWOOD_PLAYCTL_I2S_EN | KIRKWOOD_PLAYCTL_SPDIF_EN); |
228 | value &= ~(KIRKWOOD_PLAYCTL_I2S_EN | KIRKWOOD_PLAYCTL_SPDIF_EN); | 236 | writel(ctl, priv->io + KIRKWOOD_PLAYCTL); |
229 | writel(value, priv->io + KIRKWOOD_PLAYCTL); | ||
230 | break; | 237 | break; |
231 | 238 | ||
232 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: | 239 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: |
233 | case SNDRV_PCM_TRIGGER_SUSPEND: | 240 | case SNDRV_PCM_TRIGGER_SUSPEND: |
234 | value = readl(priv->io + KIRKWOOD_PLAYCTL); | 241 | ctl |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE; |
235 | value |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE; | 242 | writel(ctl, priv->io + KIRKWOOD_PLAYCTL); |
236 | writel(value, priv->io + KIRKWOOD_PLAYCTL); | ||
237 | break; | 243 | break; |
238 | 244 | ||
239 | case SNDRV_PCM_TRIGGER_RESUME: | 245 | case SNDRV_PCM_TRIGGER_RESUME: |
240 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: | 246 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: |
241 | value = readl(priv->io + KIRKWOOD_PLAYCTL); | 247 | ctl &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE); |
242 | value &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE); | 248 | writel(ctl, priv->io + KIRKWOOD_PLAYCTL); |
243 | writel(value, priv->io + KIRKWOOD_PLAYCTL); | ||
244 | break; | 249 | break; |
245 | 250 | ||
246 | default: | 251 | default: |
@@ -260,11 +265,6 @@ static int kirkwood_i2s_rec_trigger(struct snd_pcm_substream *substream, | |||
260 | 265 | ||
261 | switch (cmd) { | 266 | switch (cmd) { |
262 | case SNDRV_PCM_TRIGGER_START: | 267 | case SNDRV_PCM_TRIGGER_START: |
263 | /* stop audio, enable interrupts */ | ||
264 | value = readl(priv->io + KIRKWOOD_RECCTL); | ||
265 | value |= KIRKWOOD_RECCTL_PAUSE; | ||
266 | writel(value, priv->io + KIRKWOOD_RECCTL); | ||
267 | |||
268 | value = readl(priv->io + KIRKWOOD_INT_MASK); | 268 | value = readl(priv->io + KIRKWOOD_INT_MASK); |
269 | value |= KIRKWOOD_INT_CAUSE_REC_BYTES; | 269 | value |= KIRKWOOD_INT_CAUSE_REC_BYTES; |
270 | writel(value, priv->io + KIRKWOOD_INT_MASK); | 270 | writel(value, priv->io + KIRKWOOD_INT_MASK); |
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c index aa037b292f3d..c294fbb523fc 100644 --- a/sound/soc/mxs/mxs-saif.c +++ b/sound/soc/mxs/mxs-saif.c | |||
@@ -523,16 +523,24 @@ static int mxs_saif_trigger(struct snd_pcm_substream *substream, int cmd, | |||
523 | 523 | ||
524 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { | 524 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { |
525 | /* | 525 | /* |
526 | * write a data to saif data register to trigger | 526 | * write data to saif data register to trigger |
527 | * the transfer | 527 | * the transfer. |
528 | * For 24-bit format the 32-bit FIFO register stores | ||
529 | * only one channel, so we need to write twice. | ||
530 | * This is also safe for the other non 24-bit formats. | ||
528 | */ | 531 | */ |
529 | __raw_writel(0, saif->base + SAIF_DATA); | 532 | __raw_writel(0, saif->base + SAIF_DATA); |
533 | __raw_writel(0, saif->base + SAIF_DATA); | ||
530 | } else { | 534 | } else { |
531 | /* | 535 | /* |
532 | * read a data from saif data register to trigger | 536 | * read data from saif data register to trigger |
533 | * the receive | 537 | * the receive. |
538 | * For 24-bit format the 32-bit FIFO register stores | ||
539 | * only one channel, so we need to read twice. | ||
540 | * This is also safe for the other non 24-bit formats. | ||
534 | */ | 541 | */ |
535 | __raw_readl(saif->base + SAIF_DATA); | 542 | __raw_readl(saif->base + SAIF_DATA); |
543 | __raw_readl(saif->base + SAIF_DATA); | ||
536 | } | 544 | } |
537 | 545 | ||
538 | master_saif->ongoing = 1; | 546 | master_saif->ongoing = 1; |
@@ -812,3 +820,4 @@ module_platform_driver(mxs_saif_driver); | |||
812 | MODULE_AUTHOR("Freescale Semiconductor, Inc."); | 820 | MODULE_AUTHOR("Freescale Semiconductor, Inc."); |
813 | MODULE_DESCRIPTION("MXS ASoC SAIF driver"); | 821 | MODULE_DESCRIPTION("MXS ASoC SAIF driver"); |
814 | MODULE_LICENSE("GPL"); | 822 | MODULE_LICENSE("GPL"); |
823 | MODULE_ALIAS("platform:mxs-saif"); | ||
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig index e7b83179aca2..3c7c3a59ed39 100644 --- a/sound/soc/samsung/Kconfig +++ b/sound/soc/samsung/Kconfig | |||
@@ -207,6 +207,8 @@ config SND_SOC_BELLS | |||
207 | select SND_SOC_WM5102 | 207 | select SND_SOC_WM5102 |
208 | select SND_SOC_WM5110 | 208 | select SND_SOC_WM5110 |
209 | select SND_SOC_WM9081 | 209 | select SND_SOC_WM9081 |
210 | select SND_SOC_WM0010 | ||
211 | select SND_SOC_WM1250_EV1 | ||
210 | 212 | ||
211 | config SND_SOC_LOWLAND | 213 | config SND_SOC_LOWLAND |
212 | tristate "Audio support for Wolfson Lowland" | 214 | tristate "Audio support for Wolfson Lowland" |
diff --git a/sound/soc/samsung/bells.c b/sound/soc/samsung/bells.c index b0d46d63d55e..a2ca1567b9e4 100644 --- a/sound/soc/samsung/bells.c +++ b/sound/soc/samsung/bells.c | |||
@@ -212,7 +212,7 @@ static struct snd_soc_dai_link bells_dai_wm5102[] = { | |||
212 | { | 212 | { |
213 | .name = "Sub", | 213 | .name = "Sub", |
214 | .stream_name = "Sub", | 214 | .stream_name = "Sub", |
215 | .cpu_dai_name = "wm5110-aif3", | 215 | .cpu_dai_name = "wm5102-aif3", |
216 | .codec_dai_name = "wm9081-hifi", | 216 | .codec_dai_name = "wm9081-hifi", |
217 | .codec_name = "wm9081.1-006c", | 217 | .codec_name = "wm9081.1-006c", |
218 | .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | 218 | .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index d1198627fc40..10d21be383f6 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -2786,8 +2786,9 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol, | |||
2786 | val = (ucontrol->value.integer.value[0] + min) & mask; | 2786 | val = (ucontrol->value.integer.value[0] + min) & mask; |
2787 | val = val << shift; | 2787 | val = val << shift; |
2788 | 2788 | ||
2789 | if (snd_soc_update_bits_locked(codec, reg, val_mask, val)) | 2789 | err = snd_soc_update_bits_locked(codec, reg, val_mask, val); |
2790 | return err; | 2790 | if (err < 0) |
2791 | return err; | ||
2791 | 2792 | ||
2792 | if (snd_soc_volsw_is_stereo(mc)) { | 2793 | if (snd_soc_volsw_is_stereo(mc)) { |
2793 | val_mask = mask << rshift; | 2794 | val_mask = mask << rshift; |
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index d0a4be38dc0f..6e35bcae02df 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -3745,7 +3745,7 @@ void snd_soc_dapm_shutdown(struct snd_soc_card *card) | |||
3745 | { | 3745 | { |
3746 | struct snd_soc_codec *codec; | 3746 | struct snd_soc_codec *codec; |
3747 | 3747 | ||
3748 | list_for_each_entry(codec, &card->codec_dev_list, list) { | 3748 | list_for_each_entry(codec, &card->codec_dev_list, card_list) { |
3749 | soc_dapm_shutdown_codec(&codec->dapm); | 3749 | soc_dapm_shutdown_codec(&codec->dapm); |
3750 | if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY) | 3750 | if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY) |
3751 | snd_soc_dapm_set_bias_level(&codec->dapm, | 3751 | snd_soc_dapm_set_bias_level(&codec->dapm, |
diff --git a/sound/usb/card.c b/sound/usb/card.c index 282f0fc9fed1..dbf7999d18b4 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c | |||
@@ -559,9 +559,11 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, | |||
559 | return; | 559 | return; |
560 | 560 | ||
561 | card = chip->card; | 561 | card = chip->card; |
562 | mutex_lock(®ister_mutex); | ||
563 | down_write(&chip->shutdown_rwsem); | 562 | down_write(&chip->shutdown_rwsem); |
564 | chip->shutdown = 1; | 563 | chip->shutdown = 1; |
564 | up_write(&chip->shutdown_rwsem); | ||
565 | |||
566 | mutex_lock(®ister_mutex); | ||
565 | chip->num_interfaces--; | 567 | chip->num_interfaces--; |
566 | if (chip->num_interfaces <= 0) { | 568 | if (chip->num_interfaces <= 0) { |
567 | snd_card_disconnect(card); | 569 | snd_card_disconnect(card); |
@@ -582,11 +584,9 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, | |||
582 | snd_usb_mixer_disconnect(p); | 584 | snd_usb_mixer_disconnect(p); |
583 | } | 585 | } |
584 | usb_chip[chip->index] = NULL; | 586 | usb_chip[chip->index] = NULL; |
585 | up_write(&chip->shutdown_rwsem); | ||
586 | mutex_unlock(®ister_mutex); | 587 | mutex_unlock(®ister_mutex); |
587 | snd_card_free_when_closed(card); | 588 | snd_card_free_when_closed(card); |
588 | } else { | 589 | } else { |
589 | up_write(&chip->shutdown_rwsem); | ||
590 | mutex_unlock(®ister_mutex); | 590 | mutex_unlock(®ister_mutex); |
591 | } | 591 | } |
592 | } | 592 | } |
diff --git a/sound/usb/midi.c b/sound/usb/midi.c index c83f6143c0eb..eeefbce3873c 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c | |||
@@ -148,6 +148,7 @@ struct snd_usb_midi_out_endpoint { | |||
148 | struct snd_usb_midi_out_endpoint* ep; | 148 | struct snd_usb_midi_out_endpoint* ep; |
149 | struct snd_rawmidi_substream *substream; | 149 | struct snd_rawmidi_substream *substream; |
150 | int active; | 150 | int active; |
151 | bool autopm_reference; | ||
151 | uint8_t cable; /* cable number << 4 */ | 152 | uint8_t cable; /* cable number << 4 */ |
152 | uint8_t state; | 153 | uint8_t state; |
153 | #define STATE_UNKNOWN 0 | 154 | #define STATE_UNKNOWN 0 |
@@ -1076,7 +1077,8 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) | |||
1076 | return -ENXIO; | 1077 | return -ENXIO; |
1077 | } | 1078 | } |
1078 | err = usb_autopm_get_interface(umidi->iface); | 1079 | err = usb_autopm_get_interface(umidi->iface); |
1079 | if (err < 0) | 1080 | port->autopm_reference = err >= 0; |
1081 | if (err < 0 && err != -EACCES) | ||
1080 | return -EIO; | 1082 | return -EIO; |
1081 | substream->runtime->private_data = port; | 1083 | substream->runtime->private_data = port; |
1082 | port->state = STATE_UNKNOWN; | 1084 | port->state = STATE_UNKNOWN; |
@@ -1087,9 +1089,11 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) | |||
1087 | static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream) | 1089 | static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream) |
1088 | { | 1090 | { |
1089 | struct snd_usb_midi* umidi = substream->rmidi->private_data; | 1091 | struct snd_usb_midi* umidi = substream->rmidi->private_data; |
1092 | struct usbmidi_out_port *port = substream->runtime->private_data; | ||
1090 | 1093 | ||
1091 | substream_open(substream, 0); | 1094 | substream_open(substream, 0); |
1092 | usb_autopm_put_interface(umidi->iface); | 1095 | if (port->autopm_reference) |
1096 | usb_autopm_put_interface(umidi->iface); | ||
1093 | return 0; | 1097 | return 0; |
1094 | } | 1098 | } |
1095 | 1099 | ||
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 5c12a3fe8c3e..ef6fa24fc473 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c | |||
@@ -459,7 +459,7 @@ static int configure_endpoint(struct snd_usb_substream *subs) | |||
459 | return ret; | 459 | return ret; |
460 | 460 | ||
461 | if (subs->sync_endpoint) | 461 | if (subs->sync_endpoint) |
462 | ret = snd_usb_endpoint_set_params(subs->data_endpoint, | 462 | ret = snd_usb_endpoint_set_params(subs->sync_endpoint, |
463 | subs->pcm_format, | 463 | subs->pcm_format, |
464 | subs->channels, | 464 | subs->channels, |
465 | subs->period_bytes, | 465 | subs->period_bytes, |
diff --git a/tools/power/cpupower/.gitignore b/tools/power/cpupower/.gitignore index 8a83dd2ffc11..d42073f12609 100644 --- a/tools/power/cpupower/.gitignore +++ b/tools/power/cpupower/.gitignore | |||
@@ -20,3 +20,10 @@ utils/cpufreq-set.o | |||
20 | utils/cpufreq-aperf.o | 20 | utils/cpufreq-aperf.o |
21 | cpupower | 21 | cpupower |
22 | bench/cpufreq-bench | 22 | bench/cpufreq-bench |
23 | debug/kernel/Module.symvers | ||
24 | debug/i386/centrino-decode | ||
25 | debug/i386/dump_psb | ||
26 | debug/i386/intel_gsic | ||
27 | debug/i386/powernow-k8-decode | ||
28 | debug/x86_64/centrino-decode | ||
29 | debug/x86_64/powernow-k8-decode | ||
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile index cf397bd26d0c..d875a74a3bdf 100644 --- a/tools/power/cpupower/Makefile +++ b/tools/power/cpupower/Makefile | |||
@@ -253,7 +253,8 @@ clean: | |||
253 | | xargs rm -f | 253 | | xargs rm -f |
254 | -rm -f $(OUTPUT)cpupower | 254 | -rm -f $(OUTPUT)cpupower |
255 | -rm -f $(OUTPUT)libcpupower.so* | 255 | -rm -f $(OUTPUT)libcpupower.so* |
256 | -rm -rf $(OUTPUT)po/*.{gmo,pot} | 256 | -rm -rf $(OUTPUT)po/*.gmo |
257 | -rm -rf $(OUTPUT)po/*.pot | ||
257 | $(MAKE) -C bench O=$(OUTPUT) clean | 258 | $(MAKE) -C bench O=$(OUTPUT) clean |
258 | 259 | ||
259 | 260 | ||
diff --git a/tools/power/cpupower/debug/i386/Makefile b/tools/power/cpupower/debug/i386/Makefile index 3ba158f0e287..c05cc0ac80c7 100644 --- a/tools/power/cpupower/debug/i386/Makefile +++ b/tools/power/cpupower/debug/i386/Makefile | |||
@@ -26,7 +26,10 @@ $(OUTPUT)powernow-k8-decode: powernow-k8-decode.c | |||
26 | all: $(OUTPUT)centrino-decode $(OUTPUT)dump_psb $(OUTPUT)intel_gsic $(OUTPUT)powernow-k8-decode | 26 | all: $(OUTPUT)centrino-decode $(OUTPUT)dump_psb $(OUTPUT)intel_gsic $(OUTPUT)powernow-k8-decode |
27 | 27 | ||
28 | clean: | 28 | clean: |
29 | rm -rf $(OUTPUT){centrino-decode,dump_psb,intel_gsic,powernow-k8-decode} | 29 | rm -rf $(OUTPUT)centrino-decode |
30 | rm -rf $(OUTPUT)dump_psb | ||
31 | rm -rf $(OUTPUT)intel_gsic | ||
32 | rm -rf $(OUTPUT)powernow-k8-decode | ||
30 | 33 | ||
31 | install: | 34 | install: |
32 | $(INSTALL) -d $(DESTDIR)${bindir} | 35 | $(INSTALL) -d $(DESTDIR)${bindir} |
diff --git a/tools/power/cpupower/man/cpupower-monitor.1 b/tools/power/cpupower/man/cpupower-monitor.1 index 1141c2073719..e01c35d13b6e 100644 --- a/tools/power/cpupower/man/cpupower-monitor.1 +++ b/tools/power/cpupower/man/cpupower-monitor.1 | |||
@@ -7,11 +7,11 @@ cpupower\-monitor \- Report processor frequency and idle statistics | |||
7 | .RB "\-l" | 7 | .RB "\-l" |
8 | 8 | ||
9 | .B cpupower monitor | 9 | .B cpupower monitor |
10 | .RB [ "\-m <mon1>," [ "<mon2>,..." ] ] | 10 | .RB [ -c ] [ "\-m <mon1>," [ "<mon2>,..." ] ] |
11 | .RB [ "\-i seconds" ] | 11 | .RB [ "\-i seconds" ] |
12 | .br | 12 | .br |
13 | .B cpupower monitor | 13 | .B cpupower monitor |
14 | .RB [ "\-m <mon1>," [ "<mon2>,..." ] ] | 14 | .RB [ -c ][ "\-m <mon1>," [ "<mon2>,..." ] ] |
15 | .RB command | 15 | .RB command |
16 | .br | 16 | .br |
17 | .SH DESCRIPTION | 17 | .SH DESCRIPTION |
@@ -64,6 +64,17 @@ Only display specific monitors. Use the monitor string(s) provided by \-l option | |||
64 | Measurement interval. | 64 | Measurement interval. |
65 | .RE | 65 | .RE |
66 | .PP | 66 | .PP |
67 | \-c | ||
68 | .RS 4 | ||
69 | Schedule the process on every core before starting and ending the | ||
70 | measurement. This could be needed for the Idle_Stats monitor when | ||
71 | no other MSR-based monitor (which has to run on the measured core) | ||
72 | is running in parallel. It wakes the processors from deeper sleep | ||
73 | states and lets the kernel re-account its cpuidle (C-state) | ||
74 | information before reading the cpuidle timings from | ||
75 | sysfs. | ||
76 | .RE | ||
77 | .PP | ||
67 | command | 78 | command |
68 | .RS 4 | 79 | .RS 4 |
69 | Measure idle and frequency characteristics of an arbitrary command/workload. | 80 | Measure idle and frequency characteristics of an arbitrary command/workload. |
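As a quick usage illustration of the -c option documented above (the option and monitor names are taken from the man page text itself; the exact invocation is only a sketch, not verified against the tool):

    cpupower monitor -c -m Idle_Stats -i 5

Here -c bounces the measuring process across every core around the measurement window, -m restricts the report to the Idle_Stats monitor, and -i sets a 5-second measurement interval.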
diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c index 906895d21cce..93b0aa74ca03 100644 --- a/tools/power/cpupower/utils/helpers/cpuid.c +++ b/tools/power/cpupower/utils/helpers/cpuid.c | |||
@@ -158,6 +158,8 @@ out: | |||
158 | cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO; | 158 | cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO; |
159 | case 0x2A: /* SNB */ | 159 | case 0x2A: /* SNB */ |
160 | case 0x2D: /* SNB Xeon */ | 160 | case 0x2D: /* SNB Xeon */ |
161 | case 0x3A: /* IVB */ | ||
162 | case 0x3E: /* IVB Xeon */ | ||
161 | cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO; | 163 | cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO; |
162 | cpu_info->caps |= CPUPOWER_CAP_IS_SNB; | 164 | cpu_info->caps |= CPUPOWER_CAP_IS_SNB; |
163 | break; | 165 | break; |
diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h index 2eb584cf2f55..aa9e95486a2d 100644 --- a/tools/power/cpupower/utils/helpers/helpers.h +++ b/tools/power/cpupower/utils/helpers/helpers.h | |||
@@ -92,6 +92,14 @@ extern int get_cpu_info(unsigned int cpu, struct cpupower_cpu_info *cpu_info); | |||
92 | extern struct cpupower_cpu_info cpupower_cpu_info; | 92 | extern struct cpupower_cpu_info cpupower_cpu_info; |
93 | /* cpuid and cpuinfo helpers **************************/ | 93 | /* cpuid and cpuinfo helpers **************************/ |
94 | 94 | ||
95 | struct cpuid_core_info { | ||
96 | int pkg; | ||
97 | int core; | ||
98 | int cpu; | ||
99 | |||
100 | /* flags */ | ||
101 | unsigned int is_online:1; | ||
102 | }; | ||
95 | 103 | ||
96 | /* CPU topology/hierarchy parsing ******************/ | 104 | /* CPU topology/hierarchy parsing ******************/ |
97 | struct cpupower_topology { | 105 | struct cpupower_topology { |
@@ -101,18 +109,12 @@ struct cpupower_topology { | |||
101 | unsigned int threads; /* per core */ | 109 | unsigned int threads; /* per core */ |
102 | 110 | ||
103 | /* Array gets mallocated with cores entries, holding per core info */ | 111 | /* Array gets mallocated with cores entries, holding per core info */ |
104 | struct { | 112 | struct cpuid_core_info *core_info; |
105 | int pkg; | ||
106 | int core; | ||
107 | int cpu; | ||
108 | |||
109 | /* flags */ | ||
110 | unsigned int is_online:1; | ||
111 | } *core_info; | ||
112 | }; | 113 | }; |
113 | 114 | ||
114 | extern int get_cpu_topology(struct cpupower_topology *cpu_top); | 115 | extern int get_cpu_topology(struct cpupower_topology *cpu_top); |
115 | extern void cpu_topology_release(struct cpupower_topology cpu_top); | 116 | extern void cpu_topology_release(struct cpupower_topology cpu_top); |
117 | |||
116 | /* CPU topology/hierarchy parsing ******************/ | 118 | /* CPU topology/hierarchy parsing ******************/ |
117 | 119 | ||
118 | /* X86 ONLY ****************************************/ | 120 | /* X86 ONLY ****************************************/ |
diff --git a/tools/power/cpupower/utils/helpers/sysfs.c b/tools/power/cpupower/utils/helpers/sysfs.c index 96e28c124b5c..38ab91629463 100644 --- a/tools/power/cpupower/utils/helpers/sysfs.c +++ b/tools/power/cpupower/utils/helpers/sysfs.c | |||
@@ -37,25 +37,6 @@ unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen) | |||
37 | return (unsigned int) numread; | 37 | return (unsigned int) numread; |
38 | } | 38 | } |
39 | 39 | ||
40 | static unsigned int sysfs_write_file(const char *path, | ||
41 | const char *value, size_t len) | ||
42 | { | ||
43 | int fd; | ||
44 | ssize_t numwrite; | ||
45 | |||
46 | fd = open(path, O_WRONLY); | ||
47 | if (fd == -1) | ||
48 | return 0; | ||
49 | |||
50 | numwrite = write(fd, value, len); | ||
51 | if (numwrite < 1) { | ||
52 | close(fd); | ||
53 | return 0; | ||
54 | } | ||
55 | close(fd); | ||
56 | return (unsigned int) numwrite; | ||
57 | } | ||
58 | |||
59 | /* | 40 | /* |
60 | * Detect whether a CPU is online | 41 | * Detect whether a CPU is online |
61 | * | 42 | * |
diff --git a/tools/power/cpupower/utils/helpers/topology.c b/tools/power/cpupower/utils/helpers/topology.c index 4eae2c47ba48..c13120af519b 100644 --- a/tools/power/cpupower/utils/helpers/topology.c +++ b/tools/power/cpupower/utils/helpers/topology.c | |||
@@ -20,9 +20,8 @@ | |||
20 | #include <helpers/sysfs.h> | 20 | #include <helpers/sysfs.h> |
21 | 21 | ||
22 | /* returns -1 on failure, 0 on success */ | 22 | /* returns -1 on failure, 0 on success */ |
23 | int sysfs_topology_read_file(unsigned int cpu, const char *fname) | 23 | static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *result) |
24 | { | 24 | { |
25 | unsigned long value; | ||
26 | char linebuf[MAX_LINE_LEN]; | 25 | char linebuf[MAX_LINE_LEN]; |
27 | char *endp; | 26 | char *endp; |
28 | char path[SYSFS_PATH_MAX]; | 27 | char path[SYSFS_PATH_MAX]; |
@@ -31,20 +30,12 @@ int sysfs_topology_read_file(unsigned int cpu, const char *fname) | |||
31 | cpu, fname); | 30 | cpu, fname); |
32 | if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0) | 31 | if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0) |
33 | return -1; | 32 | return -1; |
34 | value = strtoul(linebuf, &endp, 0); | 33 | *result = strtol(linebuf, &endp, 0); |
35 | if (endp == linebuf || errno == ERANGE) | 34 | if (endp == linebuf || errno == ERANGE) |
36 | return -1; | 35 | return -1; |
37 | return value; | 36 | return 0; |
38 | } | 37 | } |
39 | 38 | ||
40 | struct cpuid_core_info { | ||
41 | unsigned int pkg; | ||
42 | unsigned int thread; | ||
43 | unsigned int cpu; | ||
44 | /* flags */ | ||
45 | unsigned int is_online:1; | ||
46 | }; | ||
47 | |||
48 | static int __compare(const void *t1, const void *t2) | 39 | static int __compare(const void *t1, const void *t2) |
49 | { | 40 | { |
50 | struct cpuid_core_info *top1 = (struct cpuid_core_info *)t1; | 41 | struct cpuid_core_info *top1 = (struct cpuid_core_info *)t1; |
@@ -53,9 +44,9 @@ static int __compare(const void *t1, const void *t2) | |||
53 | return -1; | 44 | return -1; |
54 | else if (top1->pkg > top2->pkg) | 45 | else if (top1->pkg > top2->pkg) |
55 | return 1; | 46 | return 1; |
56 | else if (top1->thread < top2->thread) | 47 | else if (top1->core < top2->core) |
57 | return -1; | 48 | return -1; |
58 | else if (top1->thread > top2->thread) | 49 | else if (top1->core > top2->core) |
59 | return 1; | 50 | return 1; |
60 | else if (top1->cpu < top2->cpu) | 51 | else if (top1->cpu < top2->cpu) |
61 | return -1; | 52 | return -1; |
@@ -73,28 +64,42 @@ static int __compare(const void *t1, const void *t2) | |||
73 | */ | 64 | */ |
74 | int get_cpu_topology(struct cpupower_topology *cpu_top) | 65 | int get_cpu_topology(struct cpupower_topology *cpu_top) |
75 | { | 66 | { |
76 | int cpu, cpus = sysconf(_SC_NPROCESSORS_CONF); | 67 | int cpu, last_pkg, cpus = sysconf(_SC_NPROCESSORS_CONF); |
77 | 68 | ||
78 | cpu_top->core_info = malloc(sizeof(struct cpupower_topology) * cpus); | 69 | cpu_top->core_info = malloc(sizeof(struct cpuid_core_info) * cpus); |
79 | if (cpu_top->core_info == NULL) | 70 | if (cpu_top->core_info == NULL) |
80 | return -ENOMEM; | 71 | return -ENOMEM; |
81 | cpu_top->pkgs = cpu_top->cores = 0; | 72 | cpu_top->pkgs = cpu_top->cores = 0; |
82 | for (cpu = 0; cpu < cpus; cpu++) { | 73 | for (cpu = 0; cpu < cpus; cpu++) { |
83 | cpu_top->core_info[cpu].cpu = cpu; | 74 | cpu_top->core_info[cpu].cpu = cpu; |
84 | cpu_top->core_info[cpu].is_online = sysfs_is_cpu_online(cpu); | 75 | cpu_top->core_info[cpu].is_online = sysfs_is_cpu_online(cpu); |
85 | cpu_top->core_info[cpu].pkg = | 76 | if(sysfs_topology_read_file( |
86 | sysfs_topology_read_file(cpu, "physical_package_id"); | 77 | cpu, |
87 | if ((int)cpu_top->core_info[cpu].pkg != -1 && | 78 | "physical_package_id", |
88 | cpu_top->core_info[cpu].pkg > cpu_top->pkgs) | 79 | &(cpu_top->core_info[cpu].pkg)) < 0) |
89 | cpu_top->pkgs = cpu_top->core_info[cpu].pkg; | 80 | return -1; |
90 | cpu_top->core_info[cpu].core = | 81 | if(sysfs_topology_read_file( |
91 | sysfs_topology_read_file(cpu, "core_id"); | 82 | cpu, |
83 | "core_id", | ||
84 | &(cpu_top->core_info[cpu].core)) < 0) | ||
85 | return -1; | ||
92 | } | 86 | } |
93 | cpu_top->pkgs++; | ||
94 | 87 | ||
95 | qsort(cpu_top->core_info, cpus, sizeof(struct cpuid_core_info), | 88 | qsort(cpu_top->core_info, cpus, sizeof(struct cpuid_core_info), |
96 | __compare); | 89 | __compare); |
97 | 90 | ||
91 | /* Count the number of distinct pkgs values. This works | ||
92 | because the primary sort of the core_info struct was just | ||
93 | done by pkg value. */ | ||
94 | last_pkg = cpu_top->core_info[0].pkg; | ||
95 | for(cpu = 1; cpu < cpus; cpu++) { | ||
96 | if(cpu_top->core_info[cpu].pkg != last_pkg) { | ||
97 | last_pkg = cpu_top->core_info[cpu].pkg; | ||
98 | cpu_top->pkgs++; | ||
99 | } | ||
100 | } | ||
101 | cpu_top->pkgs++; | ||
102 | |||
98 | /* Intel's cores count is not consecutively numbered, there may | 103 | /* Intel's cores count is not consecutively numbered, there may |
99 | * be a core_id of 3, but none of 2. Assume there always is 0 | 104 | * be a core_id of 3, but none of 2. Assume there always is 0 |
100 | * Get amount of cores by counting duplicates in a package | 105 | * Get amount of cores by counting duplicates in a package |
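[annotation] The get_cpu_topology() rework above reads the pkg and core ids per CPU, qsort()s core_info with pkg as the primary key, and then counts package transitions in the sorted array to obtain the number of packages. The same counting idea in isolation, over a plain int array rather than the cpupower structs (a simplified illustration, not the cpupower code):

#include <stdlib.h>

static int cmp_int(const void *a, const void *b)
{
        int x = *(const int *)a, y = *(const int *)b;

        return (x > y) - (x < y);
}

/* Count distinct values in arr[0..n-1]; sorts the array as a side effect. */
static int count_distinct(int *arr, int n)
{
        int i, count;

        if (n <= 0)
                return 0;

        qsort(arr, n, sizeof(int), cmp_int);
        count = 1;                      /* arr[0] opens the first run */
        for (i = 1; i < n; i++)
                if (arr[i] != arr[i - 1])
                        count++;
        return count;
}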
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c index 0d6571e418db..c4bae9203a69 100644 --- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c +++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c | |||
@@ -39,6 +39,7 @@ static int mode; | |||
39 | static int interval = 1; | 39 | static int interval = 1; |
40 | static char *show_monitors_param; | 40 | static char *show_monitors_param; |
41 | static struct cpupower_topology cpu_top; | 41 | static struct cpupower_topology cpu_top; |
42 | static unsigned int wake_cpus; | ||
42 | 43 | ||
43 | /* ToDo: Document this in the manpage */ | 44 | /* ToDo: Document this in the manpage */ |
44 | static char range_abbr[RANGE_MAX] = { 'T', 'C', 'P', 'M', }; | 45 | static char range_abbr[RANGE_MAX] = { 'T', 'C', 'P', 'M', }; |
@@ -84,7 +85,7 @@ int fill_string_with_spaces(char *s, int n) | |||
84 | void print_header(int topology_depth) | 85 | void print_header(int topology_depth) |
85 | { | 86 | { |
86 | int unsigned mon; | 87 | int unsigned mon; |
87 | int state, need_len, pr_mon_len; | 88 | int state, need_len; |
88 | cstate_t s; | 89 | cstate_t s; |
89 | char buf[128] = ""; | 90 | char buf[128] = ""; |
90 | int percent_width = 4; | 91 | int percent_width = 4; |
@@ -93,7 +94,6 @@ void print_header(int topology_depth) | |||
93 | printf("%s|", buf); | 94 | printf("%s|", buf); |
94 | 95 | ||
95 | for (mon = 0; mon < avail_monitors; mon++) { | 96 | for (mon = 0; mon < avail_monitors; mon++) { |
96 | pr_mon_len = 0; | ||
97 | need_len = monitors[mon]->hw_states_num * (percent_width + 3) | 97 | need_len = monitors[mon]->hw_states_num * (percent_width + 3) |
98 | - 1; | 98 | - 1; |
99 | if (mon != 0) { | 99 | if (mon != 0) { |
@@ -315,16 +315,28 @@ int fork_it(char **argv) | |||
315 | int do_interval_measure(int i) | 315 | int do_interval_measure(int i) |
316 | { | 316 | { |
317 | unsigned int num; | 317 | unsigned int num; |
318 | int cpu; | ||
319 | |||
320 | if (wake_cpus) | ||
321 | for (cpu = 0; cpu < cpu_count; cpu++) | ||
322 | bind_cpu(cpu); | ||
318 | 323 | ||
319 | for (num = 0; num < avail_monitors; num++) { | 324 | for (num = 0; num < avail_monitors; num++) { |
320 | dprint("HW C-state residency monitor: %s - States: %d\n", | 325 | dprint("HW C-state residency monitor: %s - States: %d\n", |
321 | monitors[num]->name, monitors[num]->hw_states_num); | 326 | monitors[num]->name, monitors[num]->hw_states_num); |
322 | monitors[num]->start(); | 327 | monitors[num]->start(); |
323 | } | 328 | } |
329 | |||
324 | sleep(i); | 330 | sleep(i); |
331 | |||
332 | if (wake_cpus) | ||
333 | for (cpu = 0; cpu < cpu_count; cpu++) | ||
334 | bind_cpu(cpu); | ||
335 | |||
325 | for (num = 0; num < avail_monitors; num++) | 336 | for (num = 0; num < avail_monitors; num++) |
326 | monitors[num]->stop(); | 337 | monitors[num]->stop(); |
327 | 338 | ||
339 | |||
328 | return 0; | 340 | return 0; |
329 | } | 341 | } |
330 | 342 | ||
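[annotation] With the new option set, do_interval_measure() briefly binds the process to every CPU both before starting and before stopping the monitors, presumably so that idle CPUs are woken and their per-CPU state can be sampled. That wake loop in isolation, using the bind_cpu() helper declared in cpupower-monitor.h (cpu_count is assumed to be the number of CPUs to touch):

/* Wake each CPU by briefly pinning the current process to it. */
static void wake_all_cpus(int cpu_count)
{
        int cpu;

        for (cpu = 0; cpu < cpu_count; cpu++)
                bind_cpu(cpu);          /* being scheduled there pulls the CPU out of idle */
}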
@@ -333,7 +345,7 @@ static void cmdline(int argc, char *argv[]) | |||
333 | int opt; | 345 | int opt; |
334 | progname = basename(argv[0]); | 346 | progname = basename(argv[0]); |
335 | 347 | ||
336 | while ((opt = getopt(argc, argv, "+li:m:")) != -1) { | 348 | while ((opt = getopt(argc, argv, "+lci:m:")) != -1) { |
337 | switch (opt) { | 349 | switch (opt) { |
338 | case 'l': | 350 | case 'l': |
339 | if (mode) | 351 | if (mode) |
@@ -352,6 +364,9 @@ static void cmdline(int argc, char *argv[]) | |||
352 | mode = show; | 364 | mode = show; |
353 | show_monitors_param = optarg; | 365 | show_monitors_param = optarg; |
354 | break; | 366 | break; |
367 | case 'c': | ||
368 | wake_cpus = 1; | ||
369 | break; | ||
355 | default: | 370 | default: |
356 | print_wrong_arg_exit(); | 371 | print_wrong_arg_exit(); |
357 | } | 372 | } |
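[annotation] The command-line change above follows the usual getopt() pattern for a plain flag: 'c' is added to the option string without a trailing colon (unlike 'i:', which takes an argument) and its case simply sets wake_cpus. A minimal standalone example of that pattern with hypothetical flags, not the real cpupower monitor command line:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
        int opt, wake = 0, interval = 1;

        /* 'c' is a bare flag; 'i:' expects an argument delivered in optarg */
        while ((opt = getopt(argc, argv, "+ci:")) != -1) {
                switch (opt) {
                case 'c':
                        wake = 1;
                        break;
                case 'i':
                        interval = atoi(optarg);
                        break;
                default:
                        exit(EXIT_FAILURE);
                }
        }
        printf("wake=%d interval=%d\n", wake, interval);
        return 0;
}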
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h index 9312ee1f2dbc..9e43f3371fbc 100644 --- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h +++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h | |||
@@ -65,4 +65,21 @@ extern long long timespec_diff_us(struct timespec start, struct timespec end); | |||
65 | "could be inaccurate\n"), mes, ov); \ | 65 | "could be inaccurate\n"), mes, ov); \ |
66 | } | 66 | } |
67 | 67 | ||
68 | |||
69 | /* Taken over from x86info project sources -> return 0 on success */ | ||
70 | #include <sched.h> | ||
71 | #include <sys/types.h> | ||
72 | #include <unistd.h> | ||
73 | static inline int bind_cpu(int cpu) | ||
74 | { | ||
75 | cpu_set_t set; | ||
76 | |||
77 | if (sched_getaffinity(getpid(), sizeof(set), &set) == 0) { | ||
78 | CPU_ZERO(&set); | ||
79 | CPU_SET(cpu, &set); | ||
80 | return sched_setaffinity(getpid(), sizeof(set), &set); | ||
81 | } | ||
82 | return 1; | ||
83 | } | ||
84 | |||
68 | #endif /* __CPUIDLE_INFO_HW__ */ | 85 | #endif /* __CPUIDLE_INFO_HW__ */ |
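[annotation] The new bind_cpu() inline pins the calling process to a single CPU with sched_setaffinity(). In the version above, the mask filled in by sched_getaffinity() is cleared by CPU_ZERO() right away, so that call appears to act only as a sanity check that the affinity syscalls work. Below is a sketch of a variant that keeps the original mask so the caller can restore it afterwards; bind_cpu_save() and restore_affinity() are hypothetical names, not part of cpupower:

#define _GNU_SOURCE
#include <sched.h>
#include <unistd.h>

/* Pin the current process to one CPU, saving the previous mask in *saved.
 * Returns 0 on success, nonzero on failure. */
static int bind_cpu_save(int cpu, cpu_set_t *saved)
{
        cpu_set_t set;

        if (sched_getaffinity(getpid(), sizeof(*saved), saved) != 0)
                return -1;

        CPU_ZERO(&set);
        CPU_SET(cpu, &set);
        return sched_setaffinity(getpid(), sizeof(set), &set);
}

/* Put the affinity mask saved by bind_cpu_save() back in place. */
static int restore_affinity(const cpu_set_t *saved)
{
        return sched_setaffinity(getpid(), sizeof(*saved), saved);
}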
diff --git a/tools/power/cpupower/utils/idle_monitor/snb_idle.c b/tools/power/cpupower/utils/idle_monitor/snb_idle.c index a1bc07cd53e1..a99b43b97d6d 100644 --- a/tools/power/cpupower/utils/idle_monitor/snb_idle.c +++ b/tools/power/cpupower/utils/idle_monitor/snb_idle.c | |||
@@ -150,9 +150,15 @@ static struct cpuidle_monitor *snb_register(void) | |||
150 | || cpupower_cpu_info.family != 6) | 150 | || cpupower_cpu_info.family != 6) |
151 | return NULL; | 151 | return NULL; |
152 | 152 | ||
153 | if (cpupower_cpu_info.model != 0x2A | 153 | switch (cpupower_cpu_info.model) { |
154 | && cpupower_cpu_info.model != 0x2D) | 154 | case 0x2A: /* SNB */ |
155 | case 0x2D: /* SNB Xeon */ | ||
156 | case 0x3A: /* IVB */ | ||
157 | case 0x3E: /* IVB Xeon */ | ||
158 | break; | ||
159 | default: | ||
155 | return NULL; | 160 | return NULL; |
161 | } | ||
156 | 162 | ||
157 | is_valid = calloc(cpu_count, sizeof(int)); | 163 | is_valid = calloc(cpu_count, sizeof(int)); |
158 | for (num = 0; num < SNB_CSTATE_COUNT; num++) { | 164 | for (num = 0; num < SNB_CSTATE_COUNT; num++) { |
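[annotation] The snb_register() change above replaces the two-model check with a switch listing the supported model numbers, now including the Ivy Bridge parts. The same allow-list can also be expressed as a table lookup; this is only an equivalent illustration built from the model numbers in the hunk, not how the monitor actually does it:

/* Models handled by this monitor: SNB, SNB Xeon, IVB, IVB Xeon. */
static const unsigned int supported_models[] = { 0x2A, 0x2D, 0x3A, 0x3E };

static int model_supported(unsigned int model)
{
        unsigned int i;

        for (i = 0; i < sizeof(supported_models) / sizeof(supported_models[0]); i++)
                if (supported_models[i] == model)
                        return 1;
        return 0;
}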
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 2655ae9a3ad8..ea095abbe97e 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c | |||
@@ -206,8 +206,10 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr) | |||
206 | retval = pread(fd, msr, sizeof *msr, offset); | 206 | retval = pread(fd, msr, sizeof *msr, offset); |
207 | close(fd); | 207 | close(fd); |
208 | 208 | ||
209 | if (retval != sizeof *msr) | 209 | if (retval != sizeof *msr) { |
210 | fprintf(stderr, "%s offset 0x%zx read failed\n", pathname, offset); | ||
210 | return -1; | 211 | return -1; |
212 | } | ||
211 | 213 | ||
212 | return 0; | 214 | return 0; |
213 | } | 215 | } |
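[annotation] get_msr() now reports which MSR device and offset failed before returning -1. The underlying pattern is a pread() on the per-CPU msr device node, where the file offset selects the MSR. A self-contained sketch of that pattern, assuming the standard /dev/cpu/<n>/msr interface (names simplified, not the turbostat function itself):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Read one MSR from one CPU; returns 0 on success, -1 on failure. */
static int read_msr(int cpu, off_t offset, unsigned long long *msr)
{
        char pathname[64];
        ssize_t retval;
        int fd;

        snprintf(pathname, sizeof(pathname), "/dev/cpu/%d/msr", cpu);
        fd = open(pathname, O_RDONLY);
        if (fd < 0)
                return -1;

        retval = pread(fd, msr, sizeof(*msr), offset);  /* offset = MSR address */
        close(fd);

        if (retval != sizeof(*msr)) {
                fprintf(stderr, "%s offset 0x%llx read failed\n",
                        pathname, (unsigned long long)offset);
                return -1;
        }
        return 0;
}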
@@ -1101,7 +1103,9 @@ void turbostat_loop() | |||
1101 | 1103 | ||
1102 | restart: | 1104 | restart: |
1103 | retval = for_all_cpus(get_counters, EVEN_COUNTERS); | 1105 | retval = for_all_cpus(get_counters, EVEN_COUNTERS); |
1104 | if (retval) { | 1106 | if (retval < -1) { |
1107 | exit(retval); | ||
1108 | } else if (retval == -1) { | ||
1105 | re_initialize(); | 1109 | re_initialize(); |
1106 | goto restart; | 1110 | goto restart; |
1107 | } | 1111 | } |
@@ -1114,7 +1118,9 @@ restart: | |||
1114 | } | 1118 | } |
1115 | sleep(interval_sec); | 1119 | sleep(interval_sec); |
1116 | retval = for_all_cpus(get_counters, ODD_COUNTERS); | 1120 | retval = for_all_cpus(get_counters, ODD_COUNTERS); |
1117 | if (retval) { | 1121 | if (retval < -1) { |
1122 | exit(retval); | ||
1123 | } else if (retval == -1) { | ||
1118 | re_initialize(); | 1124 | re_initialize(); |
1119 | goto restart; | 1125 | goto restart; |
1120 | } | 1126 | } |
@@ -1126,7 +1132,9 @@ restart: | |||
1126 | flush_stdout(); | 1132 | flush_stdout(); |
1127 | sleep(interval_sec); | 1133 | sleep(interval_sec); |
1128 | retval = for_all_cpus(get_counters, EVEN_COUNTERS); | 1134 | retval = for_all_cpus(get_counters, EVEN_COUNTERS); |
1129 | if (retval) { | 1135 | if (retval < -1) { |
1136 | exit(retval); | ||
1137 | } else if (retval == -1) { | ||
1130 | re_initialize(); | 1138 | re_initialize(); |
1131 | goto restart; | 1139 | goto restart; |
1132 | } | 1140 | } |
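[annotation] The three hunks above give turbostat_loop() a two-level error convention for for_all_cpus(get_counters, ...): a return of -1 is treated as recoverable (re_initialize() and restart the loop), while anything below -1 is fatal and becomes the exit status. The convention condensed into a standalone sketch, with hypothetical stand-ins for the turbostat functions:

#include <stdlib.h>

/* Hypothetical stand-ins for for_all_cpus(get_counters, ...) and re_initialize(). */
static int collect_counters(void) { return 0; }
static void reinitialize(void) { }

/* -1 means "retry after re-initializing"; anything below -1 is fatal. */
static void sample_once(void)
{
        int retval;

restart:
        retval = collect_counters();
        if (retval < -1) {
                exit(retval);
        } else if (retval == -1) {
                reinitialize();
                goto restart;
        }
        /* success: go on to compute and print the deltas */
}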
@@ -1545,8 +1553,11 @@ void turbostat_init() | |||
1545 | int fork_it(char **argv) | 1553 | int fork_it(char **argv) |
1546 | { | 1554 | { |
1547 | pid_t child_pid; | 1555 | pid_t child_pid; |
1556 | int status; | ||
1548 | 1557 | ||
1549 | for_all_cpus(get_counters, EVEN_COUNTERS); | 1558 | status = for_all_cpus(get_counters, EVEN_COUNTERS); |
1559 | if (status) | ||
1560 | exit(status); | ||
1550 | /* clear affinity side-effect of get_counters() */ | 1561 | /* clear affinity side-effect of get_counters() */ |
1551 | sched_setaffinity(0, cpu_present_setsize, cpu_present_set); | 1562 | sched_setaffinity(0, cpu_present_setsize, cpu_present_set); |
1552 | gettimeofday(&tv_even, (struct timezone *)NULL); | 1563 | gettimeofday(&tv_even, (struct timezone *)NULL); |
@@ -1556,7 +1567,6 @@ int fork_it(char **argv) | |||
1556 | /* child */ | 1567 | /* child */ |
1557 | execvp(argv[0], argv); | 1568 | execvp(argv[0], argv); |
1558 | } else { | 1569 | } else { |
1559 | int status; | ||
1560 | 1570 | ||
1561 | /* parent */ | 1571 | /* parent */ |
1562 | if (child_pid == -1) { | 1572 | if (child_pid == -1) { |
@@ -1568,7 +1578,7 @@ int fork_it(char **argv) | |||
1568 | signal(SIGQUIT, SIG_IGN); | 1578 | signal(SIGQUIT, SIG_IGN); |
1569 | if (waitpid(child_pid, &status, 0) == -1) { | 1579 | if (waitpid(child_pid, &status, 0) == -1) { |
1570 | perror("wait"); | 1580 | perror("wait"); |
1571 | exit(1); | 1581 | exit(status); |
1572 | } | 1582 | } |
1573 | } | 1583 | } |
1574 | /* | 1584 | /* |
@@ -1585,7 +1595,7 @@ int fork_it(char **argv) | |||
1585 | 1595 | ||
1586 | fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0); | 1596 | fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0); |
1587 | 1597 | ||
1588 | return 0; | 1598 | return status; |
1589 | } | 1599 | } |
1590 | 1600 | ||
1591 | void cmdline(int argc, char **argv) | 1601 | void cmdline(int argc, char **argv) |
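[annotation] fork_it() now bails out if the initial counter collection fails and returns the status collected by waitpid() instead of a hard-coded 0. One detail worth noting: the value waitpid() stores is the raw wait status, so callers that want the child's actual exit code normally decode it with WIFEXITED()/WEXITSTATUS(), as in this standalone sketch (a hypothetical wrapper, not the turbostat function):

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Run a command, wait for it, and return its exit code (-1 on failure). */
static int run_child(char **argv)
{
        pid_t child_pid;
        int status;

        child_pid = fork();
        if (child_pid == -1) {
                perror("fork");
                return -1;
        }
        if (child_pid == 0) {
                /* child */
                execvp(argv[0], argv);
                _exit(127);             /* exec failed */
        }
        if (waitpid(child_pid, &status, 0) == -1) {
                perror("wait");
                return -1;
        }
        return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
}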
@@ -1594,7 +1604,7 @@ void cmdline(int argc, char **argv) | |||
1594 | 1604 | ||
1595 | progname = argv[0]; | 1605 | progname = argv[0]; |
1596 | 1606 | ||
1597 | while ((opt = getopt(argc, argv, "+pPSvisc:sC:m:M:")) != -1) { | 1607 | while ((opt = getopt(argc, argv, "+pPSvi:sc:sC:m:M:")) != -1) { |
1598 | switch (opt) { | 1608 | switch (opt) { |
1599 | case 'p': | 1609 | case 'p': |
1600 | show_core_only++; | 1610 | show_core_only++; |