author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-28 22:48:26 -0500 |
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-28 22:48:26 -0500 |
commit | 2af78448fff61e13392daf4f770cfbcf9253316a (patch) | |
tree | 6c0494284dd1dd737d5f76ee19c553618e8d0e54 | |
parent | 5e04f4b4290e03deb91b074087ae8d7c169d947d (diff) | |
parent | f5b6d45f8cf688f51140fd21f1da3b90562762a9 (diff) |
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux
Pull thermal management updates from Zhang Rui:
"Highlights:
- introduction of Dove thermal sensor driver.
- introduction of Kirkwood thermal sensor driver.
- introduction of intel_powerclamp thermal cooling device driver.
- add interrupt and DT support for rcar thermal driver.
- add thermal emulation support which allows platform thermal driver
to do software/hardware emulation for thermal issues."
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux: (36 commits)
thermal: rcar: remove __devinitconst
thermal: return an error on failure to register thermal class
Thermal: rename thermal governor Kconfig option to avoid generic naming
thermal: exynos: Use the new thermal trend type for quick cooling action.
Thermal: exynos: Add support for temperature falling interrupt.
Thermal: Dove: Add Themal sensor support for Dove.
thermal: Add support for the thermal sensor on Kirkwood SoCs
thermal: rcar: add Device Tree support
thermal: rcar: remove machine_power_off() from rcar_thermal_notify()
thermal: rcar: add interrupt support
thermal: rcar: add read/write functions for common/priv data
thermal: rcar: multi channel support
thermal: rcar: use mutex lock instead of spin lock
thermal: rcar: enable CPCTL to use hardware TSC deciding
thermal: rcar: use parenthesis on macro
Thermal: fix a build warning when CONFIG_THERMAL_EMULATION cleared
Thermal: fix a wrong comment
thermal: sysfs: Add a new sysfs node emul_temp for thermal emulation
PM: intel_powerclamp: off by one in start_power_clamp()
thermal: exynos: Miscellaneous fixes to support falling threshold interrupt
...
23 files changed, 2337 insertions, 269 deletions
diff --git a/Documentation/devicetree/bindings/thermal/dove-thermal.txt b/Documentation/devicetree/bindings/thermal/dove-thermal.txt new file mode 100644 index 000000000000..6f474677d472 --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/dove-thermal.txt | |||
@@ -0,0 +1,18 @@ | |||
1 | * Dove Thermal | ||
2 | |||
3 | This driver is for Dove SoCs which contain a thermal sensor. | ||
4 | |||
5 | Required properties: | ||
6 | - compatible : "marvell,dove-thermal" | ||
7 | - reg : Address range of the thermal registers | ||
8 | |||
9 | The reg property should contain two ranges. The first is for the | ||
10 | three Thermal Manager registers, while the second range contains the | ||
11 | Thermal Diode Control Registers. | ||
12 | |||
13 | Example: | ||
14 | |||
15 | thermal@10078 { | ||
16 | compatible = "marvell,dove-thermal"; | ||
17 | reg = <0xd001c 0x0c>, <0xd005c 0x08>; | ||
18 | }; | ||
diff --git a/Documentation/devicetree/bindings/thermal/kirkwood-thermal.txt b/Documentation/devicetree/bindings/thermal/kirkwood-thermal.txt new file mode 100644 index 000000000000..8c0f5eb86da7 --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/kirkwood-thermal.txt | |||
@@ -0,0 +1,15 @@ | |||
1 | * Kirkwood Thermal | ||
2 | |||
3 | This binding is for the Kirkwood 88F6282 and 88F6283 SoCs. Other Kirkwood | ||
4 | SoCs do not contain a thermal sensor. | ||
5 | |||
6 | Required properties: | ||
7 | - compatible : "marvell,kirkwood-thermal" | ||
8 | - reg : Address range of the thermal registers | ||
9 | |||
10 | Example: | ||
11 | |||
12 | thermal@10078 { | ||
13 | compatible = "marvell,kirkwood-thermal"; | ||
14 | reg = <0x10078 0x4>; | ||
15 | }; | ||
diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt new file mode 100644 index 000000000000..28ef498a66e5 --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt | |||
@@ -0,0 +1,29 @@ | |||
1 | * Renesas R-Car Thermal | ||
2 | |||
3 | Required properties: | ||
4 | - compatible : "renesas,rcar-thermal" | ||
5 | - reg : Address range of the thermal registers. | ||
6 | The first reg range is treated as the common register | ||
7 | block if "interrupts" is present. | ||
8 | |||
9 | Optional properties: | ||
10 | |||
11 | - interrupts : thermal sensor interrupt (enables interrupt-driven operation) | ||
12 | |||
13 | Example (non interrupt support): | ||
14 | |||
15 | thermal@e61f0100 { | ||
16 | compatible = "renesas,rcar-thermal"; | ||
17 | reg = <0xe61f0100 0x38>; | ||
18 | }; | ||
19 | |||
20 | Example (interrupt support): | ||
21 | |||
22 | thermal@e61f0000 { | ||
23 | compatible = "renesas,rcar-thermal"; | ||
24 | reg = <0xe61f0000 0x14 | ||
25 | 0xe61f0100 0x38 | ||
26 | 0xe61f0200 0x38 | ||
27 | 0xe61f0300 0x38>; | ||
28 | interrupts = <0 69 4>; | ||
29 | }; | ||
diff --git a/Documentation/thermal/exynos_thermal_emulation b/Documentation/thermal/exynos_thermal_emulation new file mode 100644 index 000000000000..b73bbfb697bb --- /dev/null +++ b/Documentation/thermal/exynos_thermal_emulation | |||
@@ -0,0 +1,53 @@ | |||
1 | EXYNOS EMULATION MODE | ||
2 | ======================== | ||
3 | |||
4 | Copyright (C) 2012 Samsung Electronics | ||
5 | |||
6 | Written by Jonghwa Lee <jonghwa3.lee@samsung.com> | ||
7 | |||
8 | Description | ||
9 | ----------- | ||
10 | |||
11 | Exynos 4x12 (4212, 4412) and Exynos 5 series SoCs provide an emulation mode in the thermal | ||
12 | management unit (TMU). Thermal emulation mode supports software debugging of the TMU's | ||
13 | operation: the user can set a temperature manually, and the TMU will report that value | ||
14 | instead of the temperature measured by the sensor. | ||
15 | |||
16 | Enabling the CONFIG_EXYNOS_THERMAL_EMUL option makes this support available. | ||
17 | When it is enabled, a sysfs node named 'emulation' is created under | ||
18 | /sys/bus/platform/devices/'exynos device name'/. | ||
19 | |||
20 | The 'emulation' sysfs node contains the value 0 in its initial state. Writing any | ||
21 | temperature to this node automatically enables emulation mode, and the reported current | ||
22 | temperature is changed to that value. | ||
23 | (The hardware also supports a user-configurable delay before the temperature change | ||
24 | takes effect. However, this node always uses the same delay as real sensing, 938 us.) | ||
25 | |||
26 | Exynos emulation mode requires that changing a value and enabling the mode happen | ||
27 | together: whenever you update the delay or the next temperature, you must enable | ||
28 | emulation mode at the same time (or keep the mode enabled). If you don't, the new value | ||
29 | is not applied and the last successful value keeps being used. That is why | ||
30 | this node gives users the right to change the temperature only. Having just one | ||
31 | interface keeps it simple to use. | ||
32 | |||
33 | Disabling emulation mode only requires writing the value 0 to the sysfs node. | ||
34 | |||
35 | |||
36 | TEMP 120 | | ||
37 | | | ||
38 | 100 | | ||
39 | | | ||
40 | 80 | | ||
41 | | +----------- | ||
42 | 60 | | | | ||
43 | | +-------------| | | ||
44 | 40 | | | | | ||
45 | | | | | | ||
46 | 20 | | | +---------- | ||
47 | | | | | | | ||
48 | 0 |______________|_____________|__________|__________|_________ | ||
49 | A A A A TIME | ||
50 | |<----->| |<----->| |<----->| | | ||
51 | | 938us | | | | | | | ||
52 | emulation : 0 50 | 70 | 20 | 0 | ||
53 | current temp : sensor 50 70 20 sensor | ||
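
As a rough user-space sketch of driving the 'emulation' node described above
(not part of the driver; the helper name is illustrative and the node path is
platform specific, so it is passed in by the caller):

	#include <stdio.h>

	/* Write 'temp' to the 'emulation' node: any non-zero value enables
	 * emulation mode, and 0 disables it again. */
	static int set_emul_temp(const char *node_path, int temp)
	{
		FILE *f = fopen(node_path, "w");

		if (!f)
			return -1;
		fprintf(f, "%d\n", temp);
		return fclose(f);
	}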
diff --git a/Documentation/thermal/intel_powerclamp.txt b/Documentation/thermal/intel_powerclamp.txt new file mode 100644 index 000000000000..332de4a39b5a --- /dev/null +++ b/Documentation/thermal/intel_powerclamp.txt | |||
@@ -0,0 +1,307 @@ | |||
1 | ======================= | ||
2 | INTEL POWERCLAMP DRIVER | ||
3 | ======================= | ||
4 | By: Arjan van de Ven <arjan@linux.intel.com> | ||
5 | Jacob Pan <jacob.jun.pan@linux.intel.com> | ||
6 | |||
7 | Contents: | ||
8 | (*) Introduction | ||
9 | - Goals and Objectives | ||
10 | |||
11 | (*) Theory of Operation | ||
12 | - Idle Injection | ||
13 | - Calibration | ||
14 | |||
15 | (*) Performance Analysis | ||
16 | - Effectiveness and Limitations | ||
17 | - Power vs Performance | ||
18 | - Scalability | ||
19 | - Calibration | ||
20 | - Comparison with Alternative Techniques | ||
21 | |||
22 | (*) Usage and Interfaces | ||
23 | - Generic Thermal Layer (sysfs) | ||
24 | - Kernel APIs (TBD) | ||
25 | |||
26 | ============ | ||
27 | INTRODUCTION | ||
28 | ============ | ||
29 | |||
30 | Consider the situation where a system’s power consumption must be | ||
31 | reduced at runtime, due to power budget, thermal constraint, or noise | ||
32 | level, and where active cooling is not preferred. Software managed | ||
33 | passive power reduction must be performed to prevent the hardware | ||
34 | actions that are designed for catastrophic scenarios. | ||
35 | |||
36 | Currently, P-states, T-states (clock modulation), and CPU offlining | ||
37 | are used for CPU throttling. | ||
38 | |||
39 | On Intel CPUs, C-states provide effective power reduction, but so far | ||
40 | they’re only used opportunistically, based on workload. With the | ||
41 | development of the intel_powerclamp driver, a method of synchronizing | ||
42 | idle injection across all online CPU threads was introduced. The goal | ||
43 | is to achieve forced and controllable C-state residency. | ||
44 | |||
45 | Tests and analysis have been made in the areas of power, performance, | ||
46 | scalability, and user experience. In many cases, a clear advantage is | ||
47 | shown over taking the CPU offline or modulating the CPU clock. | ||
48 | |||
49 | |||
50 | =================== | ||
51 | THEORY OF OPERATION | ||
52 | =================== | ||
53 | |||
54 | Idle Injection | ||
55 | -------------- | ||
56 | |||
57 | On modern Intel processors (Nehalem or later), package level C-state | ||
58 | residency is available in MSRs, thus also available to the kernel. | ||
59 | |||
60 | These MSRs are: | ||
61 | #define MSR_PKG_C2_RESIDENCY 0x60D | ||
62 | #define MSR_PKG_C3_RESIDENCY 0x3F8 | ||
63 | #define MSR_PKG_C6_RESIDENCY 0x3F9 | ||
64 | #define MSR_PKG_C7_RESIDENCY 0x3FA | ||
65 | |||
66 | If the kernel can also inject idle time to the system, then a | ||
67 | closed-loop control system can be established that manages package | ||
68 | level C-state. The intel_powerclamp driver is conceived as such a | ||
69 | control system, where the target set point is a user-selected idle | ||
70 | ratio (based on power reduction), and the error is the difference | ||
71 | between the actual package level C-state residency ratio and the target idle | ||
72 | ratio. | ||
73 | |||
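As a minimal sketch of the control error described above (written in plain
user-space style C, not the driver's actual code), assuming residency_now is
a counter that advances at the same rate as tsc_now, for example one of the
MSR_PKG_C*_RESIDENCY counters listed earlier; all names are illustrative:

	#include <stdint.h>

	static uint64_t last_residency, last_tsc;

	/* Control error in percent: positive means more idle must be injected. */
	static int idle_ratio_error(int target_pct, uint64_t residency_now,
				    uint64_t tsc_now)
	{
		uint64_t window = tsc_now - last_tsc;
		uint64_t idle = residency_now - last_residency;
		int measured_pct = window ? (int)(100 * idle / window) : 0;

		last_residency = residency_now;
		last_tsc = tsc_now;

		return target_pct - measured_pct;
	}
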
74 | Injection is controlled by high priority kernel threads, spawned for | ||
75 | each online CPU. | ||
76 | |||
77 | These kernel threads, with SCHED_FIFO class, are created to perform | ||
78 | clamping actions of controlled duty ratio and duration. Each per-CPU | ||
79 | thread synchronizes its idle time and duration, based on the rounding | ||
80 | of jiffies, so that accumulated errors are prevented and a jittery | ||
81 | effect is avoided. Threads are also bound to the CPU such that they cannot be | ||
82 | migrated, unless the CPU is taken offline. In that case, the threads | ||
83 | belonging to the offlined CPU are terminated immediately. | ||
84 | |||
85 | Running as SCHED_FIFO at relatively high priority also allows such a | ||
86 | scheme to work for both preemptible and non-preemptible kernels. | ||
87 | Alignment of idle time around jiffies ensures scalability for HZ | ||
88 | values. This effect can be better visualized using a Perf timechart. | ||
89 | The following diagram shows the behavior of kernel thread | ||
90 | kidle_inject/cpu. During idle injection, it runs monitor/mwait idle | ||
91 | for a given "duration", then relinquishes the CPU to other tasks, | ||
92 | until the next time interval. | ||
93 | |||
94 | The NOHZ schedule tick is disabled during idle time, but interrupts | ||
95 | are not masked. Tests show that the extra wakeups from scheduler tick | ||
96 | have a dramatic impact on the effectiveness of the powerclamp driver | ||
97 | on large scale systems (Westmere system with 80 processors). | ||
98 | |||
99 | CPU0 | ||
100 | ____________ ____________ | ||
101 | kidle_inject/0 | sleep | mwait | sleep | | ||
102 | _________| |________| |_______ | ||
103 | duration | ||
104 | CPU1 | ||
105 | ____________ ____________ | ||
106 | kidle_inject/1 | sleep | mwait | sleep | | ||
107 | _________| |________| |_______ | ||
108 | ^ | ||
109 | | | ||
110 | | | ||
111 | roundup(jiffies, interval) | ||
112 | |||
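A hedged user-space analogue of the jiffies alignment shown in the diagram
above; the 6 ms interval and the helper name are illustrative only, while the
driver itself works in terms of roundup(jiffies, interval) inside the kernel:

	#include <stdint.h>
	#include <time.h>

	#define INTERVAL_MS 6	/* illustrative injection period */

	/* Sleep until the next multiple of INTERVAL_MS on the monotonic
	 * clock, so that independent per-CPU threads wake up in phase. */
	static void sleep_until_boundary(void)
	{
		struct timespec now, next;
		uint64_t ms, aligned;

		clock_gettime(CLOCK_MONOTONIC, &now);
		ms = (uint64_t)now.tv_sec * 1000 + now.tv_nsec / 1000000;
		aligned = (ms / INTERVAL_MS + 1) * INTERVAL_MS;

		next.tv_sec = aligned / 1000;
		next.tv_nsec = (aligned % 1000) * 1000000;
		clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &next, NULL);
	}
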
113 | Only one CPU is allowed to collect statistics and update global | ||
114 | control parameters. This CPU is referred to as the controlling CPU in | ||
115 | this document. The controlling CPU is elected at runtime, with a | ||
116 | policy that favors BSP, taking into account the possibility of a CPU | ||
117 | hot-plug. | ||
118 | |||
119 | In terms of dynamics of the idle control system, package level idle | ||
120 | time is treated largely as a non-causal system, in that its behavior | ||
121 | cannot be predicted from past or current input. Therefore, the | ||
122 | intel_powerclamp driver attempts to enforce the desired idle time | ||
123 | instantly for the given input (target idle ratio). After injection, | ||
124 | powerclamp monitors the actual idle time over a given window and adjusts | ||
125 | the next injection accordingly to avoid over/under correction. | ||
126 | |||
127 | When used in a causal control system, such as a temperature control, | ||
128 | it is up to the user of this driver to implement algorithms where | ||
129 | past samples and outputs are included in the feedback. For example, a | ||
130 | PID-based thermal controller can use the powerclamp driver to | ||
131 | maintain a desired target temperature, based on integral and | ||
132 | derivative gains of the past samples. | ||
133 | |||
134 | |||
135 | |||
136 | Calibration | ||
137 | ----------- | ||
138 | During scalability testing, it is observed that synchronized actions | ||
139 | among CPUs become challenging as the number of cores grows. This is | ||
140 | also true for the ability of a system to enter package level C-states. | ||
141 | |||
142 | To make sure the intel_powerclamp driver scales well, online | ||
143 | calibration is implemented. The goals for doing such a calibration | ||
144 | are: | ||
145 | |||
146 | a) determine the effective range of idle injection ratio | ||
147 | b) determine the amount of compensation needed at each target ratio | ||
148 | |||
149 | Compensation to each target ratio consists of two parts: | ||
150 | |||
151 | a) steady state error compensation | ||
152 | This is to offset the error occurring when the system can | ||
153 | enter idle without extra wakeups (such as external interrupts). | ||
154 | |||
155 | b) dynamic error compensation | ||
156 | When an excessive amount of wakeups occurs during idle, an | ||
157 | additional idle ratio can be added to quiet interrupts, by | ||
158 | slowing down CPU activities. | ||
159 | |||
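Putting the two parts together, a hedged sketch of how a target ratio could
be compensated; the steady[] table and the doubling rule here are illustrative
stand-ins for the driver's own calibration data:

	/* ratio: requested idle percentage (0..50); steady[] holds the
	 * steady state compensation per percentage point. */
	static int compensated_ratio(int ratio, const int steady[51],
				     int excessive_wakeups)
	{
		int comp = ratio + steady[ratio];	/* a) steady state error */

		if (excessive_wakeups)
			comp *= 2;			/* b) dynamic compensation */

		return comp > 50 ? 50 : comp;		/* injection is capped at 50% */
	}
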
160 | A debugfs file is provided for the user to examine compensation | ||
161 | progress and results, as in this example taken on a Westmere system: | ||
162 | [jacob@nex01 ~]$ cat | ||
163 | /sys/kernel/debug/intel_powerclamp/powerclamp_calib | ||
164 | controlling cpu: 0 | ||
165 | pct confidence steady dynamic (compensation) | ||
166 | 0 0 0 0 | ||
167 | 1 1 0 0 | ||
168 | 2 1 1 0 | ||
169 | 3 3 1 0 | ||
170 | 4 3 1 0 | ||
171 | 5 3 1 0 | ||
172 | 6 3 1 0 | ||
173 | 7 3 1 0 | ||
174 | 8 3 1 0 | ||
175 | ... | ||
176 | 30 3 2 0 | ||
177 | 31 3 2 0 | ||
178 | 32 3 1 0 | ||
179 | 33 3 2 0 | ||
180 | 34 3 1 0 | ||
181 | 35 3 2 0 | ||
182 | 36 3 1 0 | ||
183 | 37 3 2 0 | ||
184 | 38 3 1 0 | ||
185 | 39 3 2 0 | ||
186 | 40 3 3 0 | ||
187 | 41 3 1 0 | ||
188 | 42 3 2 0 | ||
189 | 43 3 1 0 | ||
190 | 44 3 1 0 | ||
191 | 45 3 2 0 | ||
192 | 46 3 3 0 | ||
193 | 47 3 0 0 | ||
194 | 48 3 2 0 | ||
195 | 49 3 3 0 | ||
196 | |||
197 | Calibration occurs during runtime. No offline method is available. | ||
198 | Steady state compensation is used only when confidence levels of all | ||
199 | adjacent ratios have reached a satisfactory level. A confidence level | ||
200 | is accumulated based on clean data collected at runtime. Data | ||
201 | collected during a period without extra interrupts is considered | ||
202 | clean. | ||
203 | |||
204 | To compensate for excessive wakeups during idle, additional | ||
205 | idle time is injected when such a condition is detected. Currently, | ||
206 | we have a simple algorithm to double the injection ratio. A possible | ||
207 | enhancement might be to throttle the offending IRQ, such as delaying | ||
208 | EOI for level triggered interrupts. But it is a challenge to be | ||
209 | non-intrusive to the scheduler or the IRQ core code. | ||
210 | |||
211 | |||
212 | CPU Online/Offline | ||
213 | ------------------ | ||
214 | Per-CPU kernel threads are started/stopped upon receiving | ||
215 | notifications of CPU hotplug activities. The intel_powerclamp driver | ||
216 | keeps track of clamping kernel threads, even after they are migrated | ||
217 | to other CPUs, after a CPU offline event. | ||
218 | |||
219 | |||
220 | ===================== | ||
221 | Performance Analysis | ||
222 | ===================== | ||
223 | This section describes the general performance data collected on | ||
224 | multiple systems, including Westmere (80P) and Ivy Bridge (4P, 8P). | ||
225 | |||
226 | Effectiveness and Limitations | ||
227 | ----------------------------- | ||
228 | The maximum idle injection ratio allowed is capped at 50 | ||
229 | percent. As mentioned earlier, since interrupts are allowed during | ||
230 | forced idle time, excessive interrupts could reduce the | ||
231 | effectiveness. The extreme case would be running ping -f to generate a | ||
232 | flood of network interrupts with little CPU processing in response. In this | ||
233 | case, little can be done by the idle injection threads. In most | ||
234 | normal cases, such as scp of a large file, applications can be throttled | ||
235 | by the powerclamp driver, since slowing down the CPU also slows down | ||
236 | network protocol processing, which in turn reduces interrupts. | ||
237 | |||
238 | When control parameters are changed at runtime by the controlling CPU, | ||
239 | it may take an additional period for the rest of the CPUs to catch up | ||
240 | with the changes. During this time, idle injection is out of sync and | ||
241 | the package cannot enter C-states at the expected ratio. But | ||
242 | this effect is minor, since in most cases the target | ||
243 | ratio is updated much less frequently than the idle injection | ||
244 | frequency. | ||
245 | |||
246 | Scalability | ||
247 | ----------- | ||
248 | Tests also show a minor, but measurable, difference between the 4P/8P | ||
249 | Ivy Bridge system and the 80P Westmere server under 50% idle ratio. | ||
250 | More compensation is needed on Westmere for the same | ||
251 | target idle ratio. The compensation also increases as the idle ratio | ||
252 | gets larger. This is why the calibration code is | ||
253 | needed. | ||
254 | |||
255 | On the IVB 8P system, compared to taking a CPU offline, powerclamp can | ||
256 | achieve up to 40% better performance per watt (measured by a spin | ||
257 | counter summed over per-CPU counting threads spawned for all running | ||
258 | CPUs). | ||
259 | |||
260 | ==================== | ||
261 | Usage and Interfaces | ||
262 | ==================== | ||
263 | The powerclamp driver is registered to the generic thermal layer as a | ||
264 | cooling device. Currently, it’s not bound to any thermal zones. | ||
265 | |||
266 | jacob@chromoly:/sys/class/thermal/cooling_device14$ grep . * | ||
267 | cur_state:0 | ||
268 | max_state:50 | ||
269 | type:intel_powerclamp | ||
270 | |||
271 | Example usage: | ||
272 | - To inject 25% idle time | ||
273 | $ sudo sh -c "echo 25 > /sys/class/thermal/cooling_device80/cur_state" | ||
274 | |||
275 | |||
276 | If the system is not busy and has more than 25% idle time already, | ||
277 | then the powerclamp driver will not start idle injection. Running top | ||
278 | will not show the idle injection kernel threads. | ||
279 | |||
280 | If the system is busy (spin test below) and has less than 25% natural | ||
281 | idle time, the powerclamp kernel threads will do idle injection; they | ||
282 | appear to the scheduler as running tasks. But the overall system idle is | ||
283 | still reflected. In this example, 24.1% idle is shown. This helps the | ||
284 | system admin or user determine the cause of a slowdown when the | ||
285 | powerclamp driver is in action. | ||
286 | |||
287 | |||
288 | Tasks: 197 total, 1 running, 196 sleeping, 0 stopped, 0 zombie | ||
289 | Cpu(s): 71.2%us, 4.7%sy, 0.0%ni, 24.1%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st | ||
290 | Mem: 3943228k total, 1689632k used, 2253596k free, 74960k buffers | ||
291 | Swap: 4087804k total, 0k used, 4087804k free, 945336k cached | ||
292 | |||
293 | PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND | ||
294 | 3352 jacob 20 0 262m 644 428 S 286 0.0 0:17.16 spin | ||
295 | 3341 root -51 0 0 0 0 D 25 0.0 0:01.62 kidle_inject/0 | ||
296 | 3344 root -51 0 0 0 0 D 25 0.0 0:01.60 kidle_inject/3 | ||
297 | 3342 root -51 0 0 0 0 D 25 0.0 0:01.61 kidle_inject/1 | ||
298 | 3343 root -51 0 0 0 0 D 25 0.0 0:01.60 kidle_inject/2 | ||
299 | 2935 jacob 20 0 696m 125m 35m S 5 3.3 0:31.11 firefox | ||
300 | 1546 root 20 0 158m 20m 6640 S 3 0.5 0:26.97 Xorg | ||
301 | 2100 jacob 20 0 1223m 88m 30m S 3 2.3 0:23.68 compiz | ||
302 | |||
303 | Tests have shown that by using the powerclamp driver as a cooling | ||
304 | device, a PID-based userspace thermal controller can | ||
305 | control CPU temperature effectively, when no other thermal influence | ||
306 | is added. For example, an Ultrabook user can compile the kernel while | ||
307 | staying below a chosen temperature (below most active trip points). | ||
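
As a rough illustration of such a controller (not part of the driver), one
PID step could map the temperature error onto the cooling device's 0..50
cur_state range shown above; the gains here are placeholders, not tuned values:

	/* One controller step: returns the cur_state (0..50) to write to
	 * the powerclamp cooling device. */
	static int pid_step(double set_point, double temp_now,
			    double *integral, double *prev_err)
	{
		const double kp = 2.0, ki = 0.05, kd = 0.5;
		double err = temp_now - set_point;	/* positive: too hot */
		double out;

		*integral += err;
		out = kp * err + ki * *integral + kd * (err - *prev_err);
		*prev_err = err;

		if (out < 0.0)
			out = 0.0;
		if (out > 50.0)
			out = 50.0;
		return (int)out;
	}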
diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt index 88c02334e356..6859661c9d31 100644 --- a/Documentation/thermal/sysfs-api.txt +++ b/Documentation/thermal/sysfs-api.txt | |||
@@ -55,6 +55,8 @@ temperature) and throttle appropriate devices. | |||
55 | .get_trip_type: get the type of certain trip point. | 55 | .get_trip_type: get the type of certain trip point. |
56 | .get_trip_temp: get the temperature above which the certain trip point | 56 | .get_trip_temp: get the temperature above which the certain trip point |
57 | will be fired. | 57 | will be fired. |
58 | .set_emul_temp: set the emulation temperature which helps in debugging | ||
59 | different threshold temperature points. | ||
58 | 60 | ||
59 | 1.1.2 void thermal_zone_device_unregister(struct thermal_zone_device *tz) | 61 | 1.1.2 void thermal_zone_device_unregister(struct thermal_zone_device *tz) |
60 | 62 | ||
@@ -153,6 +155,7 @@ Thermal zone device sys I/F, created once it's registered: | |||
153 | |---trip_point_[0-*]_temp: Trip point temperature | 155 | |---trip_point_[0-*]_temp: Trip point temperature |
154 | |---trip_point_[0-*]_type: Trip point type | 156 | |---trip_point_[0-*]_type: Trip point type |
155 | |---trip_point_[0-*]_hyst: Hysteresis value for this trip point | 157 | |---trip_point_[0-*]_hyst: Hysteresis value for this trip point |
158 | |---emul_temp: Emulated temperature set node | ||
156 | 159 | ||
157 | Thermal cooling device sys I/F, created once it's registered: | 160 | Thermal cooling device sys I/F, created once it's registered: |
158 | /sys/class/thermal/cooling_device[0-*]: | 161 | /sys/class/thermal/cooling_device[0-*]: |
@@ -252,6 +255,16 @@ passive | |||
252 | Valid values: 0 (disabled) or greater than 1000 | 255 | Valid values: 0 (disabled) or greater than 1000 |
253 | RW, Optional | 256 | RW, Optional |
254 | 257 | ||
258 | emul_temp | ||
259 | Interface to set an emulated temperature on a thermal zone | ||
260 | (sensor). After setting this temperature, the thermal zone may pass | ||
261 | it to a platform emulation function, if one is registered, or | ||
262 | cache it locally. This is useful for debugging different temperature | ||
263 | thresholds and their associated cooling actions. This is a write-only | ||
264 | node, and writing 0 to it disables emulation. | ||
265 | Unit: millidegree Celsius | ||
266 | WO, Optional | ||
267 | |||
255 | ***************************** | 268 | ***************************** |
256 | * Cooling device attributes * | 269 | * Cooling device attributes * |
257 | ***************************** | 270 | ***************************** |
@@ -329,8 +342,9 @@ The framework includes a simple notification mechanism, in the form of a | |||
329 | netlink event. Netlink socket initialization is done during the _init_ | 342 | netlink event. Netlink socket initialization is done during the _init_ |
330 | of the framework. Drivers which intend to use the notification mechanism | 343 | of the framework. Drivers which intend to use the notification mechanism |
331 | just need to call thermal_generate_netlink_event() with two arguments viz | 344 | just need to call thermal_generate_netlink_event() with two arguments viz |
332 | (originator, event). Typically the originator will be an integer assigned | 345 | (originator, event). The originator is a pointer to struct thermal_zone_device |
333 | to a thermal_zone_device when it registers itself with the framework. The | 346 | from where the event has been originated. An integer which represents the |
347 | thermal zone device will be used in the message to identify the zone. The | ||
334 | event will be one of:{THERMAL_AUX0, THERMAL_AUX1, THERMAL_CRITICAL, | 348 | event will be one of:{THERMAL_AUX0, THERMAL_AUX1, THERMAL_CRITICAL, |
335 | THERMAL_DEV_FAULT}. Notification can be sent when the current temperature | 349 | THERMAL_DEV_FAULT}. Notification can be sent when the current temperature |
336 | crosses any of the configured thresholds. | 350 | crosses any of the configured thresholds. |
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index f84f5c57de35..60308053fdb2 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c | |||
@@ -509,3 +509,4 @@ void local_touch_nmi(void) | |||
509 | { | 509 | { |
510 | __this_cpu_write(last_nmi_rip, 0); | 510 | __this_cpu_write(last_nmi_rip, 0); |
511 | } | 511 | } |
512 | EXPORT_SYMBOL_GPL(local_touch_nmi); | ||
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index c2c77d1ac499..a764f165b589 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig | |||
@@ -29,14 +29,14 @@ choice | |||
29 | 29 | ||
30 | config THERMAL_DEFAULT_GOV_STEP_WISE | 30 | config THERMAL_DEFAULT_GOV_STEP_WISE |
31 | bool "step_wise" | 31 | bool "step_wise" |
32 | select STEP_WISE | 32 | select THERMAL_GOV_STEP_WISE |
33 | help | 33 | help |
34 | Use the step_wise governor as default. This throttles the | 34 | Use the step_wise governor as default. This throttles the |
35 | devices one step at a time. | 35 | devices one step at a time. |
36 | 36 | ||
37 | config THERMAL_DEFAULT_GOV_FAIR_SHARE | 37 | config THERMAL_DEFAULT_GOV_FAIR_SHARE |
38 | bool "fair_share" | 38 | bool "fair_share" |
39 | select FAIR_SHARE | 39 | select THERMAL_GOV_FAIR_SHARE |
40 | help | 40 | help |
41 | Use the fair_share governor as default. This throttles the | 41 | Use the fair_share governor as default. This throttles the |
42 | devices based on their 'contribution' to a zone. The | 42 | devices based on their 'contribution' to a zone. The |
@@ -44,24 +44,24 @@ config THERMAL_DEFAULT_GOV_FAIR_SHARE | |||
44 | 44 | ||
45 | config THERMAL_DEFAULT_GOV_USER_SPACE | 45 | config THERMAL_DEFAULT_GOV_USER_SPACE |
46 | bool "user_space" | 46 | bool "user_space" |
47 | select USER_SPACE | 47 | select THERMAL_GOV_USER_SPACE |
48 | help | 48 | help |
49 | Select this if you want to let the user space manage the | 49 | Select this if you want to let the user space manage the |
50 | platform thermals. | 50 | platform thermals. |
51 | 51 | ||
52 | endchoice | 52 | endchoice |
53 | 53 | ||
54 | config FAIR_SHARE | 54 | config THERMAL_GOV_FAIR_SHARE |
55 | bool "Fair-share thermal governor" | 55 | bool "Fair-share thermal governor" |
56 | help | 56 | help |
57 | Enable this to manage platform thermals using fair-share governor. | 57 | Enable this to manage platform thermals using fair-share governor. |
58 | 58 | ||
59 | config STEP_WISE | 59 | config THERMAL_GOV_STEP_WISE |
60 | bool "Step_wise thermal governor" | 60 | bool "Step_wise thermal governor" |
61 | help | 61 | help |
62 | Enable this to manage platform thermals using a simple linear | 62 | Enable this to manage platform thermals using a simple linear |
63 | 63 | ||
64 | config USER_SPACE | 64 | config THERMAL_GOV_USER_SPACE |
65 | bool "User_space thermal governor" | 65 | bool "User_space thermal governor" |
66 | help | 66 | help |
67 | Enable this to let the user space manage the platform thermals. | 67 | Enable this to let the user space manage the platform thermals. |
@@ -78,6 +78,14 @@ config CPU_THERMAL | |||
78 | and not the ACPI interface. | 78 | and not the ACPI interface. |
79 | If you want this support, you should say Y here. | 79 | If you want this support, you should say Y here. |
80 | 80 | ||
81 | config THERMAL_EMULATION | ||
82 | bool "Thermal emulation mode support" | ||
83 | help | ||
84 | Enable this option to create an emul_temp sysfs node in the thermal zone | ||
85 | directory to support temperature emulation. With the emulation sysfs node, | ||
86 | users can manually input a temperature and test the different trip | ||
87 | threshold behaviour for simulation purposes. | ||
88 | |||
81 | config SPEAR_THERMAL | 89 | config SPEAR_THERMAL |
82 | bool "SPEAr thermal sensor driver" | 90 | bool "SPEAr thermal sensor driver" |
83 | depends on PLAT_SPEAR | 91 | depends on PLAT_SPEAR |
@@ -93,6 +101,14 @@ config RCAR_THERMAL | |||
93 | Enable this to plug the R-Car thermal sensor driver into the Linux | 101 | Enable this to plug the R-Car thermal sensor driver into the Linux |
94 | thermal framework | 102 | thermal framework |
95 | 103 | ||
104 | config KIRKWOOD_THERMAL | ||
105 | tristate "Temperature sensor on Marvell Kirkwood SoCs" | ||
106 | depends on ARCH_KIRKWOOD | ||
107 | depends on OF | ||
108 | help | ||
109 | Support for the Kirkwood thermal sensor driver in the Linux thermal | ||
110 | framework. Only Kirkwood 88F6282 and 88F6283 have this sensor. | ||
111 | |||
96 | config EXYNOS_THERMAL | 112 | config EXYNOS_THERMAL |
97 | tristate "Temperature sensor on Samsung EXYNOS" | 113 | tristate "Temperature sensor on Samsung EXYNOS" |
98 | depends on (ARCH_EXYNOS4 || ARCH_EXYNOS5) | 114 | depends on (ARCH_EXYNOS4 || ARCH_EXYNOS5) |
@@ -101,6 +117,23 @@ config EXYNOS_THERMAL | |||
101 | If you say yes here you get support for TMU (Thermal Management | 117 | If you say yes here you get support for TMU (Thermal Management |
102 | Unit) on SAMSUNG EXYNOS series of SoC. | 118 | Unit) on SAMSUNG EXYNOS series of SoC. |
103 | 119 | ||
120 | config EXYNOS_THERMAL_EMUL | ||
121 | bool "EXYNOS TMU emulation mode support" | ||
122 | depends on EXYNOS_THERMAL | ||
123 | help | ||
124 | Exynos 4412, 4414 and 5 series have an emulation mode on the TMU. | ||
125 | Enabling this option creates a sysfs node in the exynos thermal platform | ||
126 | device directory to support emulation mode. With the emulation mode sysfs | ||
127 | node, you can manually input a temperature to the TMU for simulation purposes. | ||
128 | |||
129 | config DOVE_THERMAL | ||
130 | tristate "Temperature sensor on Marvell Dove SoCs" | ||
131 | depends on ARCH_DOVE | ||
132 | depends on OF | ||
133 | help | ||
134 | Support for the Dove thermal sensor driver in the Linux thermal | ||
135 | framework. | ||
136 | |||
104 | config DB8500_THERMAL | 137 | config DB8500_THERMAL |
105 | bool "DB8500 thermal management" | 138 | bool "DB8500 thermal management" |
106 | depends on ARCH_U8500 | 139 | depends on ARCH_U8500 |
@@ -122,4 +155,14 @@ config DB8500_CPUFREQ_COOLING | |||
122 | bound cpufreq cooling device turns active to set CPU frequency low to | 155 | bound cpufreq cooling device turns active to set CPU frequency low to |
123 | cool down the CPU. | 156 | cool down the CPU. |
124 | 157 | ||
158 | config INTEL_POWERCLAMP | ||
159 | tristate "Intel PowerClamp idle injection driver" | ||
160 | depends on THERMAL | ||
161 | depends on X86 | ||
162 | depends on CPU_SUP_INTEL | ||
163 | help | ||
164 | Enable this to build the Intel PowerClamp idle injection driver. This | ||
165 | enforces idle time, which results in more package C-state residency. The | ||
166 | user interface is exposed via the generic thermal framework. | ||
167 | |||
125 | endif | 168 | endif |
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index d8da683245fc..d3a2b38c31e8 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile | |||
@@ -5,9 +5,9 @@ | |||
5 | obj-$(CONFIG_THERMAL) += thermal_sys.o | 5 | obj-$(CONFIG_THERMAL) += thermal_sys.o |
6 | 6 | ||
7 | # governors | 7 | # governors |
8 | obj-$(CONFIG_FAIR_SHARE) += fair_share.o | 8 | obj-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o |
9 | obj-$(CONFIG_STEP_WISE) += step_wise.o | 9 | obj-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o |
10 | obj-$(CONFIG_USER_SPACE) += user_space.o | 10 | obj-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o |
11 | 11 | ||
12 | # cpufreq cooling | 12 | # cpufreq cooling |
13 | obj-$(CONFIG_CPU_THERMAL) += cpu_cooling.o | 13 | obj-$(CONFIG_CPU_THERMAL) += cpu_cooling.o |
@@ -15,6 +15,10 @@ obj-$(CONFIG_CPU_THERMAL) += cpu_cooling.o | |||
15 | # platform thermal drivers | 15 | # platform thermal drivers |
16 | obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o | 16 | obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o |
17 | obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o | 17 | obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o |
18 | obj-$(CONFIG_KIRKWOOD_THERMAL) += kirkwood_thermal.o | ||
18 | obj-$(CONFIG_EXYNOS_THERMAL) += exynos_thermal.o | 19 | obj-$(CONFIG_EXYNOS_THERMAL) += exynos_thermal.o |
20 | obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o | ||
19 | obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o | 21 | obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o |
20 | obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o | 22 | obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o |
23 | obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o | ||
24 | |||
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index c33fa5315d6b..8dc44cbb3e09 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c | |||
@@ -111,8 +111,8 @@ static int is_cpufreq_valid(int cpu) | |||
111 | /** | 111 | /** |
112 | * get_cpu_frequency - get the absolute value of frequency from level. | 112 | * get_cpu_frequency - get the absolute value of frequency from level. |
113 | * @cpu: cpu for which frequency is fetched. | 113 | * @cpu: cpu for which frequency is fetched. |
114 | * @level: level of frequency of the CPU | 114 | * @level: level of frequency, equals cooling state of cpu cooling device |
115 | * e.g level=1 --> 1st MAX FREQ, LEVEL=2 ---> 2nd MAX FREQ, .... etc | 115 | * e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc |
116 | */ | 116 | */ |
117 | static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level) | 117 | static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level) |
118 | { | 118 | { |
diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c index 4cf8e72af90a..21419851fc02 100644 --- a/drivers/thermal/db8500_cpufreq_cooling.c +++ b/drivers/thermal/db8500_cpufreq_cooling.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/cpufreq.h> | 21 | #include <linux/cpufreq.h> |
22 | #include <linux/err.h> | 22 | #include <linux/err.h> |
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/of.h> | ||
24 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
25 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
26 | 27 | ||
@@ -73,15 +74,13 @@ static const struct of_device_id db8500_cpufreq_cooling_match[] = { | |||
73 | { .compatible = "stericsson,db8500-cpufreq-cooling" }, | 74 | { .compatible = "stericsson,db8500-cpufreq-cooling" }, |
74 | {}, | 75 | {}, |
75 | }; | 76 | }; |
76 | #else | ||
77 | #define db8500_cpufreq_cooling_match NULL | ||
78 | #endif | 77 | #endif |
79 | 78 | ||
80 | static struct platform_driver db8500_cpufreq_cooling_driver = { | 79 | static struct platform_driver db8500_cpufreq_cooling_driver = { |
81 | .driver = { | 80 | .driver = { |
82 | .owner = THIS_MODULE, | 81 | .owner = THIS_MODULE, |
83 | .name = "db8500-cpufreq-cooling", | 82 | .name = "db8500-cpufreq-cooling", |
84 | .of_match_table = db8500_cpufreq_cooling_match, | 83 | .of_match_table = of_match_ptr(db8500_cpufreq_cooling_match), |
85 | }, | 84 | }, |
86 | .probe = db8500_cpufreq_cooling_probe, | 85 | .probe = db8500_cpufreq_cooling_probe, |
87 | .suspend = db8500_cpufreq_cooling_suspend, | 86 | .suspend = db8500_cpufreq_cooling_suspend, |
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c index ec71ade3e317..61ce60a35921 100644 --- a/drivers/thermal/db8500_thermal.c +++ b/drivers/thermal/db8500_thermal.c | |||
@@ -508,15 +508,13 @@ static const struct of_device_id db8500_thermal_match[] = { | |||
508 | { .compatible = "stericsson,db8500-thermal" }, | 508 | { .compatible = "stericsson,db8500-thermal" }, |
509 | {}, | 509 | {}, |
510 | }; | 510 | }; |
511 | #else | ||
512 | #define db8500_thermal_match NULL | ||
513 | #endif | 511 | #endif |
514 | 512 | ||
515 | static struct platform_driver db8500_thermal_driver = { | 513 | static struct platform_driver db8500_thermal_driver = { |
516 | .driver = { | 514 | .driver = { |
517 | .owner = THIS_MODULE, | 515 | .owner = THIS_MODULE, |
518 | .name = "db8500-thermal", | 516 | .name = "db8500-thermal", |
519 | .of_match_table = db8500_thermal_match, | 517 | .of_match_table = of_match_ptr(db8500_thermal_match), |
520 | }, | 518 | }, |
521 | .probe = db8500_thermal_probe, | 519 | .probe = db8500_thermal_probe, |
522 | .suspend = db8500_thermal_suspend, | 520 | .suspend = db8500_thermal_suspend, |
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c new file mode 100644 index 000000000000..7b0bfa0e7a9c --- /dev/null +++ b/drivers/thermal/dove_thermal.c | |||
@@ -0,0 +1,209 @@ | |||
1 | /* | ||
2 | * Dove thermal sensor driver | ||
3 | * | ||
4 | * Copyright (C) 2013 Andrew Lunn <andrew@lunn.ch> | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | #include <linux/device.h> | ||
17 | #include <linux/err.h> | ||
18 | #include <linux/io.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/thermal.h> | ||
24 | |||
25 | #define DOVE_THERMAL_TEMP_OFFSET 1 | ||
26 | #define DOVE_THERMAL_TEMP_MASK 0x1FF | ||
27 | |||
28 | /* Dove Thermal Manager Control and Status Register */ | ||
29 | #define PMU_TM_DISABLE_OFFS 0 | ||
30 | #define PMU_TM_DISABLE_MASK (0x1 << PMU_TM_DISABLE_OFFS) | ||
31 | |||
32 | /* Dove Thermal Diode Control 0 Register */ | ||
33 | #define PMU_TDC0_SW_RST_MASK (0x1 << 1) | ||
34 | #define PMU_TDC0_SEL_VCAL_OFFS 5 | ||
35 | #define PMU_TDC0_SEL_VCAL_MASK (0x3 << PMU_TDC0_SEL_VCAL_OFFS) | ||
36 | #define PMU_TDC0_REF_CAL_CNT_OFFS 11 | ||
37 | #define PMU_TDC0_REF_CAL_CNT_MASK (0x1FF << PMU_TDC0_REF_CAL_CNT_OFFS) | ||
38 | #define PMU_TDC0_AVG_NUM_OFFS 25 | ||
39 | #define PMU_TDC0_AVG_NUM_MASK (0x7 << PMU_TDC0_AVG_NUM_OFFS) | ||
40 | |||
41 | /* Dove Thermal Diode Control 1 Register */ | ||
42 | #define PMU_TEMP_DIOD_CTRL1_REG 0x04 | ||
43 | #define PMU_TDC1_TEMP_VALID_MASK (0x1 << 10) | ||
44 | |||
45 | /* Dove Thermal Sensor Dev Structure */ | ||
46 | struct dove_thermal_priv { | ||
47 | void __iomem *sensor; | ||
48 | void __iomem *control; | ||
49 | }; | ||
50 | |||
51 | static int dove_init_sensor(const struct dove_thermal_priv *priv) | ||
52 | { | ||
53 | u32 reg; | ||
54 | u32 i; | ||
55 | |||
56 | /* Configure the Diode Control Register #0 */ | ||
57 | reg = readl_relaxed(priv->control); | ||
58 | |||
59 | /* Use average of 2 */ | ||
60 | reg &= ~PMU_TDC0_AVG_NUM_MASK; | ||
61 | reg |= (0x1 << PMU_TDC0_AVG_NUM_OFFS); | ||
62 | |||
63 | /* Reference calibration value */ | ||
64 | reg &= ~PMU_TDC0_REF_CAL_CNT_MASK; | ||
65 | reg |= (0x0F1 << PMU_TDC0_REF_CAL_CNT_OFFS); | ||
66 | |||
67 | /* Set the high level reference for calibration */ | ||
68 | reg &= ~PMU_TDC0_SEL_VCAL_MASK; | ||
69 | reg |= (0x2 << PMU_TDC0_SEL_VCAL_OFFS); | ||
70 | writel(reg, priv->control); | ||
71 | |||
72 | /* Reset the sensor */ | ||
73 | reg = readl_relaxed(priv->control); | ||
74 | writel((reg | PMU_TDC0_SW_RST_MASK), priv->control); | ||
75 | writel(reg, priv->control); | ||
76 | |||
77 | /* Enable the sensor */ | ||
78 | reg = readl_relaxed(priv->sensor); | ||
79 | reg &= ~PMU_TM_DISABLE_MASK; | ||
80 | writel(reg, priv->sensor); | ||
81 | |||
82 | /* Poll the sensor for the first reading */ | ||
83 | for (i = 0; i < 1000000; i++) { | ||
84 | reg = readl_relaxed(priv->sensor); | ||
85 | if (reg & DOVE_THERMAL_TEMP_MASK) | ||
86 | break; | ||
87 | } | ||
88 | |||
89 | if (i == 1000000) | ||
90 | return -EIO; | ||
91 | |||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | static int dove_get_temp(struct thermal_zone_device *thermal, | ||
96 | unsigned long *temp) | ||
97 | { | ||
98 | unsigned long reg; | ||
99 | struct dove_thermal_priv *priv = thermal->devdata; | ||
100 | |||
101 | /* Valid check */ | ||
102 | reg = readl_relaxed(priv->control + PMU_TEMP_DIOD_CTRL1_REG); | ||
103 | if ((reg & PMU_TDC1_TEMP_VALID_MASK) == 0x0) { | ||
104 | dev_err(&thermal->device, | ||
105 | "Temperature sensor reading not valid\n"); | ||
106 | return -EIO; | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * Calculate temperature. See Section 8.10.1 of 88AP510, | ||
111 | * Documentation/arm/Marvell/README | ||
112 | */ | ||
113 | reg = readl_relaxed(priv->sensor); | ||
114 | reg = (reg >> DOVE_THERMAL_TEMP_OFFSET) & DOVE_THERMAL_TEMP_MASK; | ||
115 | *temp = ((2281638UL - (7298*reg)) / 10); | ||
116 | |||
117 | return 0; | ||
118 | } | ||
119 | |||
120 | static struct thermal_zone_device_ops ops = { | ||
121 | .get_temp = dove_get_temp, | ||
122 | }; | ||
123 | |||
124 | static const struct of_device_id dove_thermal_id_table[] = { | ||
125 | { .compatible = "marvell,dove-thermal" }, | ||
126 | {} | ||
127 | }; | ||
128 | |||
129 | static int dove_thermal_probe(struct platform_device *pdev) | ||
130 | { | ||
131 | struct thermal_zone_device *thermal = NULL; | ||
132 | struct dove_thermal_priv *priv; | ||
133 | struct resource *res; | ||
134 | int ret; | ||
135 | |||
136 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
137 | if (!res) { | ||
138 | dev_err(&pdev->dev, "Failed to get platform resource\n"); | ||
139 | return -ENODEV; | ||
140 | } | ||
141 | |||
142 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); | ||
143 | if (!priv) | ||
144 | return -ENOMEM; | ||
145 | |||
146 | priv->sensor = devm_request_and_ioremap(&pdev->dev, res); | ||
147 | if (!priv->sensor) { | ||
148 | dev_err(&pdev->dev, "Failed to request_ioremap memory\n"); | ||
149 | return -EADDRNOTAVAIL; | ||
150 | } | ||
151 | |||
152 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
153 | if (!res) { | ||
154 | dev_err(&pdev->dev, "Failed to get platform resource\n"); | ||
155 | return -ENODEV; | ||
156 | } | ||
157 | priv->control = devm_request_and_ioremap(&pdev->dev, res); | ||
158 | if (!priv->control) { | ||
159 | dev_err(&pdev->dev, "Failed to request_ioremap memory\n"); | ||
160 | return -EADDRNOTAVAIL; | ||
161 | } | ||
162 | |||
163 | ret = dove_init_sensor(priv); | ||
164 | if (ret) { | ||
165 | dev_err(&pdev->dev, "Failed to initialize sensor\n"); | ||
166 | return ret; | ||
167 | } | ||
168 | |||
169 | thermal = thermal_zone_device_register("dove_thermal", 0, 0, | ||
170 | priv, &ops, NULL, 0, 0); | ||
171 | if (IS_ERR(thermal)) { | ||
172 | dev_err(&pdev->dev, | ||
173 | "Failed to register thermal zone device\n"); | ||
174 | return PTR_ERR(thermal); | ||
175 | } | ||
176 | |||
177 | platform_set_drvdata(pdev, thermal); | ||
178 | |||
179 | return 0; | ||
180 | } | ||
181 | |||
182 | static int dove_thermal_exit(struct platform_device *pdev) | ||
183 | { | ||
184 | struct thermal_zone_device *dove_thermal = | ||
185 | platform_get_drvdata(pdev); | ||
186 | |||
187 | thermal_zone_device_unregister(dove_thermal); | ||
188 | platform_set_drvdata(pdev, NULL); | ||
189 | |||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | MODULE_DEVICE_TABLE(of, dove_thermal_id_table); | ||
194 | |||
195 | static struct platform_driver dove_thermal_driver = { | ||
196 | .probe = dove_thermal_probe, | ||
197 | .remove = dove_thermal_exit, | ||
198 | .driver = { | ||
199 | .name = "dove_thermal", | ||
200 | .owner = THIS_MODULE, | ||
201 | .of_match_table = of_match_ptr(dove_thermal_id_table), | ||
202 | }, | ||
203 | }; | ||
204 | |||
205 | module_platform_driver(dove_thermal_driver); | ||
206 | |||
207 | MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>"); | ||
208 | MODULE_DESCRIPTION("Dove thermal driver"); | ||
209 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c index bada1308318b..e04ebd8671ac 100644 --- a/drivers/thermal/exynos_thermal.c +++ b/drivers/thermal/exynos_thermal.c | |||
@@ -82,7 +82,7 @@ | |||
82 | 82 | ||
83 | #define EXYNOS_TRIMINFO_RELOAD 0x1 | 83 | #define EXYNOS_TRIMINFO_RELOAD 0x1 |
84 | #define EXYNOS_TMU_CLEAR_RISE_INT 0x111 | 84 | #define EXYNOS_TMU_CLEAR_RISE_INT 0x111 |
85 | #define EXYNOS_TMU_CLEAR_FALL_INT (0x111 << 16) | 85 | #define EXYNOS_TMU_CLEAR_FALL_INT (0x111 << 12) |
86 | #define EXYNOS_MUX_ADDR_VALUE 6 | 86 | #define EXYNOS_MUX_ADDR_VALUE 6 |
87 | #define EXYNOS_MUX_ADDR_SHIFT 20 | 87 | #define EXYNOS_MUX_ADDR_SHIFT 20 |
88 | #define EXYNOS_TMU_TRIP_MODE_SHIFT 13 | 88 | #define EXYNOS_TMU_TRIP_MODE_SHIFT 13 |
@@ -94,11 +94,20 @@ | |||
94 | #define SENSOR_NAME_LEN 16 | 94 | #define SENSOR_NAME_LEN 16 |
95 | #define MAX_TRIP_COUNT 8 | 95 | #define MAX_TRIP_COUNT 8 |
96 | #define MAX_COOLING_DEVICE 4 | 96 | #define MAX_COOLING_DEVICE 4 |
97 | #define MAX_THRESHOLD_LEVS 4 | ||
97 | 98 | ||
98 | #define ACTIVE_INTERVAL 500 | 99 | #define ACTIVE_INTERVAL 500 |
99 | #define IDLE_INTERVAL 10000 | 100 | #define IDLE_INTERVAL 10000 |
100 | #define MCELSIUS 1000 | 101 | #define MCELSIUS 1000 |
101 | 102 | ||
103 | #ifdef CONFIG_EXYNOS_THERMAL_EMUL | ||
104 | #define EXYNOS_EMUL_TIME 0x57F0 | ||
105 | #define EXYNOS_EMUL_TIME_SHIFT 16 | ||
106 | #define EXYNOS_EMUL_DATA_SHIFT 8 | ||
107 | #define EXYNOS_EMUL_DATA_MASK 0xFF | ||
108 | #define EXYNOS_EMUL_ENABLE 0x1 | ||
109 | #endif /* CONFIG_EXYNOS_THERMAL_EMUL */ | ||
110 | |||
102 | /* CPU Zone information */ | 111 | /* CPU Zone information */ |
103 | #define PANIC_ZONE 4 | 112 | #define PANIC_ZONE 4 |
104 | #define WARN_ZONE 3 | 113 | #define WARN_ZONE 3 |
@@ -125,6 +134,7 @@ struct exynos_tmu_data { | |||
125 | struct thermal_trip_point_conf { | 134 | struct thermal_trip_point_conf { |
126 | int trip_val[MAX_TRIP_COUNT]; | 135 | int trip_val[MAX_TRIP_COUNT]; |
127 | int trip_count; | 136 | int trip_count; |
137 | u8 trigger_falling; | ||
128 | }; | 138 | }; |
129 | 139 | ||
130 | struct thermal_cooling_conf { | 140 | struct thermal_cooling_conf { |
@@ -174,7 +184,8 @@ static int exynos_set_mode(struct thermal_zone_device *thermal, | |||
174 | 184 | ||
175 | mutex_lock(&th_zone->therm_dev->lock); | 185 | mutex_lock(&th_zone->therm_dev->lock); |
176 | 186 | ||
177 | if (mode == THERMAL_DEVICE_ENABLED) | 187 | if (mode == THERMAL_DEVICE_ENABLED && |
188 | !th_zone->sensor_conf->trip_data.trigger_falling) | ||
178 | th_zone->therm_dev->polling_delay = IDLE_INTERVAL; | 189 | th_zone->therm_dev->polling_delay = IDLE_INTERVAL; |
179 | else | 190 | else |
180 | th_zone->therm_dev->polling_delay = 0; | 191 | th_zone->therm_dev->polling_delay = 0; |
@@ -284,7 +295,7 @@ static int exynos_bind(struct thermal_zone_device *thermal, | |||
284 | case MONITOR_ZONE: | 295 | case MONITOR_ZONE: |
285 | case WARN_ZONE: | 296 | case WARN_ZONE: |
286 | if (thermal_zone_bind_cooling_device(thermal, i, cdev, | 297 | if (thermal_zone_bind_cooling_device(thermal, i, cdev, |
287 | level, level)) { | 298 | level, 0)) { |
288 | pr_err("error binding cdev inst %d\n", i); | 299 | pr_err("error binding cdev inst %d\n", i); |
289 | ret = -EINVAL; | 300 | ret = -EINVAL; |
290 | } | 301 | } |
@@ -362,10 +373,17 @@ static int exynos_get_temp(struct thermal_zone_device *thermal, | |||
362 | static int exynos_get_trend(struct thermal_zone_device *thermal, | 373 | static int exynos_get_trend(struct thermal_zone_device *thermal, |
363 | int trip, enum thermal_trend *trend) | 374 | int trip, enum thermal_trend *trend) |
364 | { | 375 | { |
365 | if (thermal->temperature >= trip) | 376 | int ret; |
366 | *trend = THERMAL_TREND_RAISING; | 377 | unsigned long trip_temp; |
378 | |||
379 | ret = exynos_get_trip_temp(thermal, trip, &trip_temp); | ||
380 | if (ret < 0) | ||
381 | return ret; | ||
382 | |||
383 | if (thermal->temperature >= trip_temp) | ||
384 | *trend = THERMAL_TREND_RAISE_FULL; | ||
367 | else | 385 | else |
368 | *trend = THERMAL_TREND_DROPPING; | 386 | *trend = THERMAL_TREND_DROP_FULL; |
369 | 387 | ||
370 | return 0; | 388 | return 0; |
371 | } | 389 | } |
@@ -413,7 +431,8 @@ static void exynos_report_trigger(void) | |||
413 | break; | 431 | break; |
414 | } | 432 | } |
415 | 433 | ||
416 | if (th_zone->mode == THERMAL_DEVICE_ENABLED) { | 434 | if (th_zone->mode == THERMAL_DEVICE_ENABLED && |
435 | !th_zone->sensor_conf->trip_data.trigger_falling) { | ||
417 | if (i > 0) | 436 | if (i > 0) |
418 | th_zone->therm_dev->polling_delay = ACTIVE_INTERVAL; | 437 | th_zone->therm_dev->polling_delay = ACTIVE_INTERVAL; |
419 | else | 438 | else |
@@ -452,7 +471,8 @@ static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf) | |||
452 | 471 | ||
453 | th_zone->therm_dev = thermal_zone_device_register(sensor_conf->name, | 472 | th_zone->therm_dev = thermal_zone_device_register(sensor_conf->name, |
454 | EXYNOS_ZONE_COUNT, 0, NULL, &exynos_dev_ops, NULL, 0, | 473 | EXYNOS_ZONE_COUNT, 0, NULL, &exynos_dev_ops, NULL, 0, |
455 | IDLE_INTERVAL); | 474 | sensor_conf->trip_data.trigger_falling ? |
475 | 0 : IDLE_INTERVAL); | ||
456 | 476 | ||
457 | if (IS_ERR(th_zone->therm_dev)) { | 477 | if (IS_ERR(th_zone->therm_dev)) { |
458 | pr_err("Failed to register thermal zone device\n"); | 478 | pr_err("Failed to register thermal zone device\n"); |
@@ -559,8 +579,9 @@ static int exynos_tmu_initialize(struct platform_device *pdev) | |||
559 | { | 579 | { |
560 | struct exynos_tmu_data *data = platform_get_drvdata(pdev); | 580 | struct exynos_tmu_data *data = platform_get_drvdata(pdev); |
561 | struct exynos_tmu_platform_data *pdata = data->pdata; | 581 | struct exynos_tmu_platform_data *pdata = data->pdata; |
562 | unsigned int status, trim_info, rising_threshold; | 582 | unsigned int status, trim_info; |
563 | int ret = 0, threshold_code; | 583 | unsigned int rising_threshold = 0, falling_threshold = 0; |
584 | int ret = 0, threshold_code, i, trigger_levs = 0; | ||
564 | 585 | ||
565 | mutex_lock(&data->lock); | 586 | mutex_lock(&data->lock); |
566 | clk_enable(data->clk); | 587 | clk_enable(data->clk); |
@@ -585,6 +606,11 @@ static int exynos_tmu_initialize(struct platform_device *pdev) | |||
585 | (data->temp_error2 != 0)) | 606 | (data->temp_error2 != 0)) |
586 | data->temp_error1 = pdata->efuse_value; | 607 | data->temp_error1 = pdata->efuse_value; |
587 | 608 | ||
609 | /* Count trigger levels to be enabled */ | ||
610 | for (i = 0; i < MAX_THRESHOLD_LEVS; i++) | ||
611 | if (pdata->trigger_levels[i]) | ||
612 | trigger_levs++; | ||
613 | |||
588 | if (data->soc == SOC_ARCH_EXYNOS4210) { | 614 | if (data->soc == SOC_ARCH_EXYNOS4210) { |
589 | /* Write temperature code for threshold */ | 615 | /* Write temperature code for threshold */ |
590 | threshold_code = temp_to_code(data, pdata->threshold); | 616 | threshold_code = temp_to_code(data, pdata->threshold); |
@@ -594,44 +620,38 @@ static int exynos_tmu_initialize(struct platform_device *pdev) | |||
594 | } | 620 | } |
595 | writeb(threshold_code, | 621 | writeb(threshold_code, |
596 | data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP); | 622 | data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP); |
597 | 623 | for (i = 0; i < trigger_levs; i++) | |
598 | writeb(pdata->trigger_levels[0], | 624 | writeb(pdata->trigger_levels[i], |
599 | data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0); | 625 | data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4); |
600 | writeb(pdata->trigger_levels[1], | ||
601 | data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL1); | ||
602 | writeb(pdata->trigger_levels[2], | ||
603 | data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL2); | ||
604 | writeb(pdata->trigger_levels[3], | ||
605 | data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL3); | ||
606 | 626 | ||
607 | writel(EXYNOS4210_TMU_INTCLEAR_VAL, | 627 | writel(EXYNOS4210_TMU_INTCLEAR_VAL, |
608 | data->base + EXYNOS_TMU_REG_INTCLEAR); | 628 | data->base + EXYNOS_TMU_REG_INTCLEAR); |
609 | } else if (data->soc == SOC_ARCH_EXYNOS) { | 629 | } else if (data->soc == SOC_ARCH_EXYNOS) { |
610 | /* Write temperature code for threshold */ | 630 | /* Write temperature code for rising and falling threshold */ |
611 | threshold_code = temp_to_code(data, pdata->trigger_levels[0]); | 631 | for (i = 0; i < trigger_levs; i++) { |
612 | if (threshold_code < 0) { | 632 | threshold_code = temp_to_code(data, |
613 | ret = threshold_code; | 633 | pdata->trigger_levels[i]); |
614 | goto out; | 634 | if (threshold_code < 0) { |
615 | } | 635 | ret = threshold_code; |
616 | rising_threshold = threshold_code; | 636 | goto out; |
617 | threshold_code = temp_to_code(data, pdata->trigger_levels[1]); | 637 | } |
618 | if (threshold_code < 0) { | 638 | rising_threshold |= threshold_code << 8 * i; |
619 | ret = threshold_code; | 639 | if (pdata->threshold_falling) { |
620 | goto out; | 640 | threshold_code = temp_to_code(data, |
621 | } | 641 | pdata->trigger_levels[i] - |
622 | rising_threshold |= (threshold_code << 8); | 642 | pdata->threshold_falling); |
623 | threshold_code = temp_to_code(data, pdata->trigger_levels[2]); | 643 | if (threshold_code > 0) |
624 | if (threshold_code < 0) { | 644 | falling_threshold |= |
625 | ret = threshold_code; | 645 | threshold_code << 8 * i; |
626 | goto out; | 646 | } |
627 | } | 647 | } |
628 | rising_threshold |= (threshold_code << 16); | ||
629 | 648 | ||
630 | writel(rising_threshold, | 649 | writel(rising_threshold, |
631 | data->base + EXYNOS_THD_TEMP_RISE); | 650 | data->base + EXYNOS_THD_TEMP_RISE); |
632 | writel(0, data->base + EXYNOS_THD_TEMP_FALL); | 651 | writel(falling_threshold, |
652 | data->base + EXYNOS_THD_TEMP_FALL); | ||
633 | 653 | ||
634 | writel(EXYNOS_TMU_CLEAR_RISE_INT|EXYNOS_TMU_CLEAR_FALL_INT, | 654 | writel(EXYNOS_TMU_CLEAR_RISE_INT | EXYNOS_TMU_CLEAR_FALL_INT, |
635 | data->base + EXYNOS_TMU_REG_INTCLEAR); | 655 | data->base + EXYNOS_TMU_REG_INTCLEAR); |
636 | } | 656 | } |
637 | out: | 657 | out: |
@@ -664,6 +684,8 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on) | |||
664 | pdata->trigger_level2_en << 8 | | 684 | pdata->trigger_level2_en << 8 | |
665 | pdata->trigger_level1_en << 4 | | 685 | pdata->trigger_level1_en << 4 | |
666 | pdata->trigger_level0_en; | 686 | pdata->trigger_level0_en; |
687 | if (pdata->threshold_falling) | ||
688 | interrupt_en |= interrupt_en << 16; | ||
667 | } else { | 689 | } else { |
668 | con |= EXYNOS_TMU_CORE_OFF; | 690 | con |= EXYNOS_TMU_CORE_OFF; |
669 | interrupt_en = 0; /* Disable all interrupts */ | 691 | interrupt_en = 0; /* Disable all interrupts */ |
@@ -697,20 +719,19 @@ static void exynos_tmu_work(struct work_struct *work) | |||
697 | struct exynos_tmu_data *data = container_of(work, | 719 | struct exynos_tmu_data *data = container_of(work, |
698 | struct exynos_tmu_data, irq_work); | 720 | struct exynos_tmu_data, irq_work); |
699 | 721 | ||
722 | exynos_report_trigger(); | ||
700 | mutex_lock(&data->lock); | 723 | mutex_lock(&data->lock); |
701 | clk_enable(data->clk); | 724 | clk_enable(data->clk); |
702 | |||
703 | |||
704 | if (data->soc == SOC_ARCH_EXYNOS) | 725 | if (data->soc == SOC_ARCH_EXYNOS) |
705 | writel(EXYNOS_TMU_CLEAR_RISE_INT, | 726 | writel(EXYNOS_TMU_CLEAR_RISE_INT | |
727 | EXYNOS_TMU_CLEAR_FALL_INT, | ||
706 | data->base + EXYNOS_TMU_REG_INTCLEAR); | 728 | data->base + EXYNOS_TMU_REG_INTCLEAR); |
707 | else | 729 | else |
708 | writel(EXYNOS4210_TMU_INTCLEAR_VAL, | 730 | writel(EXYNOS4210_TMU_INTCLEAR_VAL, |
709 | data->base + EXYNOS_TMU_REG_INTCLEAR); | 731 | data->base + EXYNOS_TMU_REG_INTCLEAR); |
710 | |||
711 | clk_disable(data->clk); | 732 | clk_disable(data->clk); |
712 | mutex_unlock(&data->lock); | 733 | mutex_unlock(&data->lock); |
713 | exynos_report_trigger(); | 734 | |
714 | enable_irq(data->irq); | 735 | enable_irq(data->irq); |
715 | } | 736 | } |
716 | 737 | ||
@@ -759,6 +780,7 @@ static struct exynos_tmu_platform_data const exynos4210_default_tmu_data = { | |||
759 | 780 | ||
760 | #if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412) | 781 | #if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412) |
761 | static struct exynos_tmu_platform_data const exynos_default_tmu_data = { | 782 | static struct exynos_tmu_platform_data const exynos_default_tmu_data = { |
783 | .threshold_falling = 10, | ||
762 | .trigger_levels[0] = 85, | 784 | .trigger_levels[0] = 85, |
763 | .trigger_levels[1] = 103, | 785 | .trigger_levels[1] = 103, |
764 | .trigger_levels[2] = 110, | 786 | .trigger_levels[2] = 110, |
@@ -800,8 +822,6 @@ static const struct of_device_id exynos_tmu_match[] = { | |||
800 | {}, | 822 | {}, |
801 | }; | 823 | }; |
802 | MODULE_DEVICE_TABLE(of, exynos_tmu_match); | 824 | MODULE_DEVICE_TABLE(of, exynos_tmu_match); |
803 | #else | ||
804 | #define exynos_tmu_match NULL | ||
805 | #endif | 825 | #endif |
806 | 826 | ||
807 | static struct platform_device_id exynos_tmu_driver_ids[] = { | 827 | static struct platform_device_id exynos_tmu_driver_ids[] = { |
@@ -832,6 +852,94 @@ static inline struct exynos_tmu_platform_data *exynos_get_driver_data( | |||
832 | return (struct exynos_tmu_platform_data *) | 852 | return (struct exynos_tmu_platform_data *) |
833 | platform_get_device_id(pdev)->driver_data; | 853 | platform_get_device_id(pdev)->driver_data; |
834 | } | 854 | } |
855 | |||
856 | #ifdef CONFIG_EXYNOS_THERMAL_EMUL | ||
857 | static ssize_t exynos_tmu_emulation_show(struct device *dev, | ||
858 | struct device_attribute *attr, | ||
859 | char *buf) | ||
860 | { | ||
861 | struct platform_device *pdev = container_of(dev, | ||
862 | struct platform_device, dev); | ||
863 | struct exynos_tmu_data *data = platform_get_drvdata(pdev); | ||
864 | unsigned int reg; | ||
865 | u8 temp_code; | ||
866 | int temp = 0; | ||
867 | |||
868 | if (data->soc == SOC_ARCH_EXYNOS4210) | ||
869 | goto out; | ||
870 | |||
871 | mutex_lock(&data->lock); | ||
872 | clk_enable(data->clk); | ||
873 | reg = readl(data->base + EXYNOS_EMUL_CON); | ||
874 | clk_disable(data->clk); | ||
875 | mutex_unlock(&data->lock); | ||
876 | |||
877 | if (reg & EXYNOS_EMUL_ENABLE) { | ||
878 | reg >>= EXYNOS_EMUL_DATA_SHIFT; | ||
879 | temp_code = reg & EXYNOS_EMUL_DATA_MASK; | ||
880 | temp = code_to_temp(data, temp_code); | ||
881 | } | ||
882 | out: | ||
883 | return sprintf(buf, "%d\n", temp * MCELSIUS); | ||
884 | } | ||
885 | |||
886 | static ssize_t exynos_tmu_emulation_store(struct device *dev, | ||
887 | struct device_attribute *attr, | ||
888 | const char *buf, size_t count) | ||
889 | { | ||
890 | struct platform_device *pdev = container_of(dev, | ||
891 | struct platform_device, dev); | ||
892 | struct exynos_tmu_data *data = platform_get_drvdata(pdev); | ||
893 | unsigned int reg; | ||
894 | int temp; | ||
895 | |||
896 | if (data->soc == SOC_ARCH_EXYNOS4210) | ||
897 | goto out; | ||
898 | |||
899 | if (!sscanf(buf, "%d\n", &temp) || temp < 0) | ||
900 | return -EINVAL; | ||
901 | |||
902 | mutex_lock(&data->lock); | ||
903 | clk_enable(data->clk); | ||
904 | |||
905 | reg = readl(data->base + EXYNOS_EMUL_CON); | ||
906 | |||
907 | if (temp) { | ||
908 | /* Both CELSIUS and MCELSIUS types are accepted as input */ | ||
909 | if (temp > MCELSIUS) | ||
910 | temp /= MCELSIUS; | ||
911 | |||
912 | reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) | | ||
913 | (temp_to_code(data, (temp / MCELSIUS)) | ||
914 | << EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE; | ||
915 | } else { | ||
916 | reg &= ~EXYNOS_EMUL_ENABLE; | ||
917 | } | ||
918 | |||
919 | writel(reg, data->base + EXYNOS_EMUL_CON); | ||
920 | |||
921 | clk_disable(data->clk); | ||
922 | mutex_unlock(&data->lock); | ||
923 | |||
924 | out: | ||
925 | return count; | ||
926 | } | ||
927 | |||
928 | static DEVICE_ATTR(emulation, 0644, exynos_tmu_emulation_show, | ||
929 | exynos_tmu_emulation_store); | ||
930 | static int create_emulation_sysfs(struct device *dev) | ||
931 | { | ||
932 | return device_create_file(dev, &dev_attr_emulation); | ||
933 | } | ||
934 | static void remove_emulation_sysfs(struct device *dev) | ||
935 | { | ||
936 | device_remove_file(dev, &dev_attr_emulation); | ||
937 | } | ||
938 | #else | ||
939 | static inline int create_emulation_sysfs(struct device *dev) { return 0; } | ||
940 | static inline void remove_emulation_sysfs(struct device *dev) {} | ||
941 | #endif | ||
942 | |||
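
The emulation node above is a plain shift-and-mask of EXYNOS_EMUL_CON. The sketch below mirrors the show-path decode using placeholder shift/mask values and a hypothetical code_to_temp(); the real EXYNOS_EMUL_* definitions live in the driver's register header. From user space the node takes a plain integer write, and writing 0 clears EXYNOS_EMUL_ENABLE to end emulation.

    #include <stdint.h>
    #include <stdio.h>

    /* placeholder layout, for illustration only */
    #define EMUL_ENABLE     0x1
    #define EMUL_DATA_SHIFT 8
    #define EMUL_DATA_MASK  0xFF

    /* hypothetical inverse of the driver's temp_to_code() calibration */
    static int code_to_temp(uint8_t code)
    {
        return (int)code - 25;
    }

    int main(void)
    {
        uint32_t reg = ((66u + 25u) << EMUL_DATA_SHIFT) | EMUL_ENABLE;
        int temp = 0;

        if (reg & EMUL_ENABLE)
            temp = code_to_temp((reg >> EMUL_DATA_SHIFT) & EMUL_DATA_MASK);

        printf("emulated temperature: %d mC\n", temp * 1000);  /* 66000 */
        return 0;
    }
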
835 | static int exynos_tmu_probe(struct platform_device *pdev) | 943 | static int exynos_tmu_probe(struct platform_device *pdev) |
836 | { | 944 | { |
837 | struct exynos_tmu_data *data; | 945 | struct exynos_tmu_data *data; |
@@ -914,6 +1022,8 @@ static int exynos_tmu_probe(struct platform_device *pdev) | |||
914 | exynos_sensor_conf.trip_data.trip_val[i] = | 1022 | exynos_sensor_conf.trip_data.trip_val[i] = |
915 | pdata->threshold + pdata->trigger_levels[i]; | 1023 | pdata->threshold + pdata->trigger_levels[i]; |
916 | 1024 | ||
1025 | exynos_sensor_conf.trip_data.trigger_falling = pdata->threshold_falling; | ||
1026 | |||
917 | exynos_sensor_conf.cooling_data.freq_clip_count = | 1027 | exynos_sensor_conf.cooling_data.freq_clip_count = |
918 | pdata->freq_tab_count; | 1028 | pdata->freq_tab_count; |
919 | for (i = 0; i < pdata->freq_tab_count; i++) { | 1029 | for (i = 0; i < pdata->freq_tab_count; i++) { |
@@ -928,6 +1038,11 @@ static int exynos_tmu_probe(struct platform_device *pdev) | |||
928 | dev_err(&pdev->dev, "Failed to register thermal interface\n"); | 1038 | dev_err(&pdev->dev, "Failed to register thermal interface\n"); |
929 | goto err_clk; | 1039 | goto err_clk; |
930 | } | 1040 | } |
1041 | |||
1042 | ret = create_emulation_sysfs(&pdev->dev); | ||
1043 | if (ret) | ||
1044 | dev_err(&pdev->dev, "Failed to create emulation mode sysfs node\n"); | ||
1045 | |||
931 | return 0; | 1046 | return 0; |
932 | err_clk: | 1047 | err_clk: |
933 | platform_set_drvdata(pdev, NULL); | 1048 | platform_set_drvdata(pdev, NULL); |
@@ -939,6 +1054,8 @@ static int exynos_tmu_remove(struct platform_device *pdev) | |||
939 | { | 1054 | { |
940 | struct exynos_tmu_data *data = platform_get_drvdata(pdev); | 1055 | struct exynos_tmu_data *data = platform_get_drvdata(pdev); |
941 | 1056 | ||
1057 | remove_emulation_sysfs(&pdev->dev); | ||
1058 | |||
942 | exynos_tmu_control(pdev, false); | 1059 | exynos_tmu_control(pdev, false); |
943 | 1060 | ||
944 | exynos_unregister_thermal(); | 1061 | exynos_unregister_thermal(); |
@@ -980,7 +1097,7 @@ static struct platform_driver exynos_tmu_driver = { | |||
980 | .name = "exynos-tmu", | 1097 | .name = "exynos-tmu", |
981 | .owner = THIS_MODULE, | 1098 | .owner = THIS_MODULE, |
982 | .pm = EXYNOS_TMU_PM, | 1099 | .pm = EXYNOS_TMU_PM, |
983 | .of_match_table = exynos_tmu_match, | 1100 | .of_match_table = of_match_ptr(exynos_tmu_match), |
984 | }, | 1101 | }, |
985 | .probe = exynos_tmu_probe, | 1102 | .probe = exynos_tmu_probe, |
986 | .remove = exynos_tmu_remove, | 1103 | .remove = exynos_tmu_remove, |
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c new file mode 100644 index 000000000000..ab3ed907d2c3 --- /dev/null +++ b/drivers/thermal/intel_powerclamp.c | |||
@@ -0,0 +1,794 @@ | |||
1 | /* | ||
2 | * intel_powerclamp.c - package c-state idle injection | ||
3 | * | ||
4 | * Copyright (c) 2012, Intel Corporation. | ||
5 | * | ||
6 | * Authors: | ||
7 | * Arjan van de Ven <arjan@linux.intel.com> | ||
8 | * Jacob Pan <jacob.jun.pan@linux.intel.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms and conditions of the GNU General Public License, | ||
12 | * version 2, as published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License along with | ||
20 | * this program; if not, write to the Free Software Foundation, Inc., | ||
21 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
22 | * | ||
23 | * | ||
24 | * TODO: | ||
25 | * 1. better handle wakeup from external interrupts, currently a fixed | ||
26 | * compensation is added to clamping duration when excessive amount | ||
27 | * of wakeups are observed during idle time. the reason is that in | ||
28 | * case of external interrupts without need for ack, clamping down | ||
29 | * cpu in non-irq context does not reduce irq. for majority of the | ||
30 | * cases, clamping down cpu does help reduce irq as well, we should | ||
31 | * be able to differentiate the two cases and give a quantitative | ||
32 | * solution for the irqs that we can control. perhaps based on | ||
33 | * get_cpu_iowait_time_us() | ||
34 | * | ||
35 | * 2. synchronization with other hw blocks | ||
36 | * | ||
37 | * | ||
38 | */ | ||
39 | |||
40 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
41 | |||
42 | #include <linux/module.h> | ||
43 | #include <linux/kernel.h> | ||
44 | #include <linux/delay.h> | ||
45 | #include <linux/kthread.h> | ||
46 | #include <linux/freezer.h> | ||
47 | #include <linux/cpu.h> | ||
48 | #include <linux/thermal.h> | ||
49 | #include <linux/slab.h> | ||
50 | #include <linux/tick.h> | ||
51 | #include <linux/debugfs.h> | ||
52 | #include <linux/seq_file.h> | ||
53 | |||
54 | #include <asm/nmi.h> | ||
55 | #include <asm/msr.h> | ||
56 | #include <asm/mwait.h> | ||
57 | #include <asm/cpu_device_id.h> | ||
58 | #include <asm/idle.h> | ||
59 | #include <asm/hardirq.h> | ||
60 | |||
61 | #define MAX_TARGET_RATIO (50U) | ||
62 | /* For each undisturbed clamping period (no extra wake ups during idle time), | ||
63 | * we increment the confidence counter for the given target ratio. | ||
64 | * CONFIDENCE_OK defines the level where runtime calibration results are | ||
65 | * valid. | ||
66 | */ | ||
67 | #define CONFIDENCE_OK (3) | ||
68 | /* Default idle injection duration; the driver adjusts sleep time to meet the | ||
69 | * target idle ratio. Similar to frequency modulation. | ||
70 | */ | ||
71 | #define DEFAULT_DURATION_JIFFIES (6) | ||
72 | |||
73 | static unsigned int target_mwait; | ||
74 | static struct dentry *debug_dir; | ||
75 | |||
76 | /* user selected target */ | ||
77 | static unsigned int set_target_ratio; | ||
78 | static unsigned int current_ratio; | ||
79 | static bool should_skip; | ||
80 | static bool reduce_irq; | ||
81 | static atomic_t idle_wakeup_counter; | ||
82 | static unsigned int control_cpu; /* The cpu assigned to collect stat and update | ||
83 | * control parameters. default to BSP but BSP | ||
84 | * can be offlined. | ||
85 | */ | ||
86 | static bool clamping; | ||
87 | |||
88 | |||
89 | static struct task_struct * __percpu *powerclamp_thread; | ||
90 | static struct thermal_cooling_device *cooling_dev; | ||
91 | static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu | ||
92 | * clamping thread | ||
93 | */ | ||
94 | |||
95 | static unsigned int duration; | ||
96 | static unsigned int pkg_cstate_ratio_cur; | ||
97 | static unsigned int window_size; | ||
98 | |||
99 | static int duration_set(const char *arg, const struct kernel_param *kp) | ||
100 | { | ||
101 | int ret = 0; | ||
102 | unsigned long new_duration; | ||
103 | |||
104 | ret = kstrtoul(arg, 10, &new_duration); | ||
105 | if (ret) | ||
106 | goto exit; | ||
107 | if (new_duration > 25 || new_duration < 6) { | ||
108 | pr_err("Out of recommended range %lu, between 6-25ms\n", | ||
109 | new_duration); | ||
110 | ret = -EINVAL; | ||
111 | } | ||
112 | |||
113 | duration = clamp(new_duration, 6ul, 25ul); | ||
114 | smp_mb(); | ||
115 | |||
116 | exit: | ||
117 | |||
118 | return ret; | ||
119 | } | ||
120 | |||
121 | static struct kernel_param_ops duration_ops = { | ||
122 | .set = duration_set, | ||
123 | .get = param_get_int, | ||
124 | }; | ||
125 | |||
126 | |||
127 | module_param_cb(duration, &duration_ops, &duration, 0644); | ||
128 | MODULE_PARM_DESC(duration, "forced idle time for each attempt in msec."); | ||
129 | |||
130 | struct powerclamp_calibration_data { | ||
131 | unsigned long confidence; /* used for calibration, basically a counter | ||
132 | * gets incremented each time a clamping | ||
133 | * period is completed without extra wakeups | ||
134 | * once that counter reaches a given level, | ||
135 | * compensation is deemed usable. | ||
136 | */ | ||
137 | unsigned long steady_comp; /* steady state compensation used when | ||
138 | * no extra wakeups occurred. | ||
139 | */ | ||
140 | unsigned long dynamic_comp; /* compensate excessive wakeup from idle | ||
141 | * mostly from external interrupts. | ||
142 | */ | ||
143 | }; | ||
144 | |||
145 | static struct powerclamp_calibration_data cal_data[MAX_TARGET_RATIO]; | ||
146 | |||
147 | static int window_size_set(const char *arg, const struct kernel_param *kp) | ||
148 | { | ||
149 | int ret = 0; | ||
150 | unsigned long new_window_size; | ||
151 | |||
152 | ret = kstrtoul(arg, 10, &new_window_size); | ||
153 | if (ret) | ||
154 | goto exit_win; | ||
155 | if (new_window_size > 10 || new_window_size < 2) { | ||
156 | pr_err("Out of recommended window size %lu, between 2-10\n", | ||
157 | new_window_size); | ||
158 | ret = -EINVAL; | ||
159 | } | ||
160 | |||
161 | window_size = clamp(new_window_size, 2ul, 10ul); | ||
162 | smp_mb(); | ||
163 | |||
164 | exit_win: | ||
165 | |||
166 | return ret; | ||
167 | } | ||
168 | |||
169 | static struct kernel_param_ops window_size_ops = { | ||
170 | .set = window_size_set, | ||
171 | .get = param_get_int, | ||
172 | }; | ||
173 | |||
174 | module_param_cb(window_size, &window_size_ops, &window_size, 0644); | ||
175 | MODULE_PARM_DESC(window_size, "sliding window in number of clamping cycles\n" | ||
176 | "\tpowerclamp controls idle ratio within this window. larger\n" | ||
177 | "\twindow size results in slower response time but more smooth\n" | ||
178 | "\tclamping results. default to 2."); | ||
179 | |||
180 | static void find_target_mwait(void) | ||
181 | { | ||
182 | unsigned int eax, ebx, ecx, edx; | ||
183 | unsigned int highest_cstate = 0; | ||
184 | unsigned int highest_subcstate = 0; | ||
185 | int i; | ||
186 | |||
187 | if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) | ||
188 | return; | ||
189 | |||
190 | cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); | ||
191 | |||
192 | if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || | ||
193 | !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) | ||
194 | return; | ||
195 | |||
196 | edx >>= MWAIT_SUBSTATE_SIZE; | ||
197 | for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { | ||
198 | if (edx & MWAIT_SUBSTATE_MASK) { | ||
199 | highest_cstate = i; | ||
200 | highest_subcstate = edx & MWAIT_SUBSTATE_MASK; | ||
201 | } | ||
202 | } | ||
203 | target_mwait = (highest_cstate << MWAIT_SUBSTATE_SIZE) | | ||
204 | (highest_subcstate - 1); | ||
205 | |||
206 | } | ||
207 | |||
208 | static u64 pkg_state_counter(void) | ||
209 | { | ||
210 | u64 val; | ||
211 | u64 count = 0; | ||
212 | |||
213 | static bool skip_c2; | ||
214 | static bool skip_c3; | ||
215 | static bool skip_c6; | ||
216 | static bool skip_c7; | ||
217 | |||
218 | if (!skip_c2) { | ||
219 | if (!rdmsrl_safe(MSR_PKG_C2_RESIDENCY, &val)) | ||
220 | count += val; | ||
221 | else | ||
222 | skip_c2 = true; | ||
223 | } | ||
224 | |||
225 | if (!skip_c3) { | ||
226 | if (!rdmsrl_safe(MSR_PKG_C3_RESIDENCY, &val)) | ||
227 | count += val; | ||
228 | else | ||
229 | skip_c3 = true; | ||
230 | } | ||
231 | |||
232 | if (!skip_c6) { | ||
233 | if (!rdmsrl_safe(MSR_PKG_C6_RESIDENCY, &val)) | ||
234 | count += val; | ||
235 | else | ||
236 | skip_c6 = true; | ||
237 | } | ||
238 | |||
239 | if (!skip_c7) { | ||
240 | if (!rdmsrl_safe(MSR_PKG_C7_RESIDENCY, &val)) | ||
241 | count += val; | ||
242 | else | ||
243 | skip_c7 = true; | ||
244 | } | ||
245 | |||
246 | return count; | ||
247 | } | ||
248 | |||
249 | static void noop_timer(unsigned long foo) | ||
250 | { | ||
251 | /* empty... just the fact that we get the interrupt wakes us up */ | ||
252 | } | ||
253 | |||
254 | static unsigned int get_compensation(int ratio) | ||
255 | { | ||
256 | unsigned int comp = 0; | ||
257 | |||
258 | /* we only use compensation if all adjacent ones are good */ | ||
259 | if (ratio == 1 && | ||
260 | cal_data[ratio].confidence >= CONFIDENCE_OK && | ||
261 | cal_data[ratio + 1].confidence >= CONFIDENCE_OK && | ||
262 | cal_data[ratio + 2].confidence >= CONFIDENCE_OK) { | ||
263 | comp = (cal_data[ratio].steady_comp + | ||
264 | cal_data[ratio + 1].steady_comp + | ||
265 | cal_data[ratio + 2].steady_comp) / 3; | ||
266 | } else if (ratio == MAX_TARGET_RATIO - 1 && | ||
267 | cal_data[ratio].confidence >= CONFIDENCE_OK && | ||
268 | cal_data[ratio - 1].confidence >= CONFIDENCE_OK && | ||
269 | cal_data[ratio - 2].confidence >= CONFIDENCE_OK) { | ||
270 | comp = (cal_data[ratio].steady_comp + | ||
271 | cal_data[ratio - 1].steady_comp + | ||
272 | cal_data[ratio - 2].steady_comp) / 3; | ||
273 | } else if (cal_data[ratio].confidence >= CONFIDENCE_OK && | ||
274 | cal_data[ratio - 1].confidence >= CONFIDENCE_OK && | ||
275 | cal_data[ratio + 1].confidence >= CONFIDENCE_OK) { | ||
276 | comp = (cal_data[ratio].steady_comp + | ||
277 | cal_data[ratio - 1].steady_comp + | ||
278 | cal_data[ratio + 1].steady_comp) / 3; | ||
279 | } | ||
280 | |||
281 | /* REVISIT: simple penalty of double idle injection */ | ||
282 | if (reduce_irq) | ||
283 | comp = ratio; | ||
284 | /* do not exceed limit */ | ||
285 | if (comp + ratio >= MAX_TARGET_RATIO) | ||
286 | comp = MAX_TARGET_RATIO - ratio - 1; | ||
287 | |||
288 | return comp; | ||
289 | } | ||
290 | |||
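
get_compensation() above only trusts a calibration entry once it and its neighbours have reached CONFIDENCE_OK, and then averages their steady-state compensation. A small numeric sketch of that gate-and-average step with made-up calibration data:

    #include <stdio.h>

    #define CONFIDENCE_OK 3

    struct cal { unsigned long confidence, steady_comp; };

    int main(void)
    {
        /* made-up calibration entries around a 25% target ratio */
        struct cal cal_data[] = {
            [24] = { 4, 2 }, [25] = { 5, 3 }, [26] = { 3, 4 },
        };
        int ratio = 25;
        unsigned int comp = 0;

        if (cal_data[ratio].confidence >= CONFIDENCE_OK &&
            cal_data[ratio - 1].confidence >= CONFIDENCE_OK &&
            cal_data[ratio + 1].confidence >= CONFIDENCE_OK)
            comp = (cal_data[ratio].steady_comp +
                    cal_data[ratio - 1].steady_comp +
                    cal_data[ratio + 1].steady_comp) / 3;

        printf("compensation at %d%% target = %u\n", ratio, comp);  /* 3 */
        return 0;
    }
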
291 | static void adjust_compensation(int target_ratio, unsigned int win) | ||
292 | { | ||
293 | int delta; | ||
294 | struct powerclamp_calibration_data *d = &cal_data[target_ratio]; | ||
295 | |||
296 | /* | ||
297 | * do not adjust compensation if the confidence level has already been | ||
298 | * reached, or if there were too many wakeups during the last idle | ||
299 | * injection period; in that case the data cannot be trusted. | ||
300 | */ | ||
301 | if (d->confidence >= CONFIDENCE_OK || | ||
302 | atomic_read(&idle_wakeup_counter) > | ||
303 | win * num_online_cpus()) | ||
304 | return; | ||
305 | |||
306 | delta = set_target_ratio - current_ratio; | ||
307 | /* filter out bad data */ | ||
308 | if (delta >= 0 && delta <= (1+target_ratio/10)) { | ||
309 | if (d->steady_comp) | ||
310 | d->steady_comp = | ||
311 | roundup(delta+d->steady_comp, 2)/2; | ||
312 | else | ||
313 | d->steady_comp = delta; | ||
314 | d->confidence++; | ||
315 | } | ||
316 | } | ||
317 | |||
318 | static bool powerclamp_adjust_controls(unsigned int target_ratio, | ||
319 | unsigned int guard, unsigned int win) | ||
320 | { | ||
321 | static u64 msr_last, tsc_last; | ||
322 | u64 msr_now, tsc_now; | ||
323 | u64 val64; | ||
324 | |||
325 | /* check result for the last window */ | ||
326 | msr_now = pkg_state_counter(); | ||
327 | rdtscll(tsc_now); | ||
328 | |||
329 | /* calculate pkg cstate vs tsc ratio */ | ||
330 | if (!msr_last || !tsc_last) | ||
331 | current_ratio = 1; | ||
332 | else if (tsc_now-tsc_last) { | ||
333 | val64 = 100*(msr_now-msr_last); | ||
334 | do_div(val64, (tsc_now-tsc_last)); | ||
335 | current_ratio = val64; | ||
336 | } | ||
337 | |||
338 | /* update record */ | ||
339 | msr_last = msr_now; | ||
340 | tsc_last = tsc_now; | ||
341 | |||
342 | adjust_compensation(target_ratio, win); | ||
343 | /* | ||
344 | * too many external interrupts, set flag such | ||
345 | * that we can take measure later. | ||
346 | */ | ||
347 | reduce_irq = atomic_read(&idle_wakeup_counter) >= | ||
348 | 2 * win * num_online_cpus(); | ||
349 | |||
350 | atomic_set(&idle_wakeup_counter, 0); | ||
351 | /* if we are above target+guard, skip */ | ||
352 | return set_target_ratio + guard <= current_ratio; | ||
353 | } | ||
354 | |||
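
powerclamp_adjust_controls() above measures the achieved idle ratio as the growth of the summed package C-state residency counters relative to TSC growth, scaled to a percentage. A stand-alone sketch of that arithmetic with made-up counter deltas:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* made-up samples: residency MSRs and TSC both count cycles */
        uint64_t msr_last = 1000000000ULL, msr_now = 1280000000ULL;
        uint64_t tsc_last = 5000000000ULL, tsc_now = 6000000000ULL;
        unsigned int current_ratio = 1;

        if (tsc_now - tsc_last)
            current_ratio = (unsigned int)
                (100 * (msr_now - msr_last) / (tsc_now - tsc_last));

        /* 100 * 280e6 / 1000e6 = 28 -> 28% package C-state residency */
        printf("current_ratio = %u%%\n", current_ratio);
        return 0;
    }
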
355 | static int clamp_thread(void *arg) | ||
356 | { | ||
357 | int cpunr = (unsigned long)arg; | ||
358 | DEFINE_TIMER(wakeup_timer, noop_timer, 0, 0); | ||
359 | static const struct sched_param param = { | ||
360 | .sched_priority = MAX_USER_RT_PRIO/2, | ||
361 | }; | ||
362 | unsigned int count = 0; | ||
363 | unsigned int target_ratio; | ||
364 | |||
365 | set_bit(cpunr, cpu_clamping_mask); | ||
366 | set_freezable(); | ||
367 | init_timer_on_stack(&wakeup_timer); | ||
368 | sched_setscheduler(current, SCHED_FIFO, ¶m); | ||
369 | |||
370 | while (true == clamping && !kthread_should_stop() && | ||
371 | cpu_online(cpunr)) { | ||
372 | int sleeptime; | ||
373 | unsigned long target_jiffies; | ||
374 | unsigned int guard; | ||
375 | unsigned int compensation = 0; | ||
376 | int interval; /* jiffies to sleep for each attempt */ | ||
377 | unsigned int duration_jiffies = msecs_to_jiffies(duration); | ||
378 | unsigned int window_size_now; | ||
379 | |||
380 | try_to_freeze(); | ||
381 | /* | ||
382 | * make sure user selected ratio does not take effect until | ||
383 | * the next round. adjust target_ratio if user has changed | ||
384 | * target such that we can converge quickly. | ||
385 | */ | ||
386 | target_ratio = set_target_ratio; | ||
387 | guard = 1 + target_ratio/20; | ||
388 | window_size_now = window_size; | ||
389 | count++; | ||
390 | |||
391 | /* | ||
392 | * systems may have different ability to enter package level | ||
393 | * c-states, thus we need to compensate the injected idle ratio | ||
394 | * to achieve the actual target reported by the HW. | ||
395 | */ | ||
396 | compensation = get_compensation(target_ratio); | ||
397 | interval = duration_jiffies*100/(target_ratio+compensation); | ||
398 | |||
399 | /* align idle time */ | ||
400 | target_jiffies = roundup(jiffies, interval); | ||
401 | sleeptime = target_jiffies - jiffies; | ||
402 | if (sleeptime <= 0) | ||
403 | sleeptime = 1; | ||
404 | schedule_timeout_interruptible(sleeptime); | ||
405 | /* | ||
406 | * only elected controlling cpu can collect stats and update | ||
407 | * control parameters. | ||
408 | */ | ||
409 | if (cpunr == control_cpu && !(count%window_size_now)) { | ||
410 | should_skip = | ||
411 | powerclamp_adjust_controls(target_ratio, | ||
412 | guard, window_size_now); | ||
413 | smp_mb(); | ||
414 | } | ||
415 | |||
416 | if (should_skip) | ||
417 | continue; | ||
418 | |||
419 | target_jiffies = jiffies + duration_jiffies; | ||
420 | mod_timer(&wakeup_timer, target_jiffies); | ||
421 | if (unlikely(local_softirq_pending())) | ||
422 | continue; | ||
423 | /* | ||
424 | * stop tick sched during idle time, interrupts are still | ||
425 | * allowed. thus jiffies are updated properly. | ||
426 | */ | ||
427 | preempt_disable(); | ||
428 | tick_nohz_idle_enter(); | ||
429 | /* mwait until target jiffies is reached */ | ||
430 | while (time_before(jiffies, target_jiffies)) { | ||
431 | unsigned long ecx = 1; | ||
432 | unsigned long eax = target_mwait; | ||
433 | |||
434 | /* | ||
435 | * REVISIT: may call enter_idle() to notify drivers who | ||
436 | * can save power during cpu idle. same for exit_idle() | ||
437 | */ | ||
438 | local_touch_nmi(); | ||
439 | stop_critical_timings(); | ||
440 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | ||
441 | cpu_relax(); /* allow HT sibling to run */ | ||
442 | __mwait(eax, ecx); | ||
443 | start_critical_timings(); | ||
444 | atomic_inc(&idle_wakeup_counter); | ||
445 | } | ||
446 | tick_nohz_idle_exit(); | ||
447 | preempt_enable_no_resched(); | ||
448 | } | ||
449 | del_timer_sync(&wakeup_timer); | ||
450 | clear_bit(cpunr, cpu_clamping_mask); | ||
451 | |||
452 | return 0; | ||
453 | } | ||
454 | |||
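
The duty cycle in clamp_thread() follows from interval = duration_jiffies * 100 / (target_ratio + compensation): each period is sized so that the fixed injection duration makes up the requested share of it. A worked example with assumed numbers:

    #include <stdio.h>

    int main(void)
    {
        /* assumed: 6-jiffy injection, 25% target, +2 measured compensation */
        unsigned int duration_jiffies = 6;
        unsigned int target_ratio = 25;
        unsigned int compensation = 2;

        unsigned int interval =
            duration_jiffies * 100 / (target_ratio + compensation);

        /* 600 / 27 = 22 jiffies per period, 6 of them forced idle,
         * i.e. roughly 27% injected idle time */
        printf("interval = %u jiffies, injected idle = %u%%\n",
               interval, duration_jiffies * 100 / interval);
        return 0;
    }
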
455 | /* | ||
456 | * 1 HZ polling while clamping is active, useful for userspace | ||
457 | * to monitor actual idle ratio. | ||
458 | */ | ||
459 | static void poll_pkg_cstate(struct work_struct *dummy); | ||
460 | static DECLARE_DELAYED_WORK(poll_pkg_cstate_work, poll_pkg_cstate); | ||
461 | static void poll_pkg_cstate(struct work_struct *dummy) | ||
462 | { | ||
463 | static u64 msr_last; | ||
464 | static u64 tsc_last; | ||
465 | static unsigned long jiffies_last; | ||
466 | |||
467 | u64 msr_now; | ||
468 | unsigned long jiffies_now; | ||
469 | u64 tsc_now; | ||
470 | u64 val64; | ||
471 | |||
472 | msr_now = pkg_state_counter(); | ||
473 | rdtscll(tsc_now); | ||
474 | jiffies_now = jiffies; | ||
475 | |||
476 | /* calculate pkg cstate vs tsc ratio */ | ||
477 | if (!msr_last || !tsc_last) | ||
478 | pkg_cstate_ratio_cur = 1; | ||
479 | else { | ||
480 | if (tsc_now - tsc_last) { | ||
481 | val64 = 100 * (msr_now - msr_last); | ||
482 | do_div(val64, (tsc_now - tsc_last)); | ||
483 | pkg_cstate_ratio_cur = val64; | ||
484 | } | ||
485 | } | ||
486 | |||
487 | /* update record */ | ||
488 | msr_last = msr_now; | ||
489 | jiffies_last = jiffies_now; | ||
490 | tsc_last = tsc_now; | ||
491 | |||
492 | if (true == clamping) | ||
493 | schedule_delayed_work(&poll_pkg_cstate_work, HZ); | ||
494 | } | ||
495 | |||
496 | static int start_power_clamp(void) | ||
497 | { | ||
498 | unsigned long cpu; | ||
499 | struct task_struct *thread; | ||
500 | |||
501 | /* check if pkg cstate counter is completely 0, abort in this case */ | ||
502 | if (!pkg_state_counter()) { | ||
503 | pr_err("pkg cstate counter not functional, abort\n"); | ||
504 | return -EINVAL; | ||
505 | } | ||
506 | |||
507 | set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1); | ||
508 | /* prevent cpu hotplug */ | ||
509 | get_online_cpus(); | ||
510 | |||
511 | /* prefer BSP */ | ||
512 | control_cpu = 0; | ||
513 | if (!cpu_online(control_cpu)) | ||
514 | control_cpu = smp_processor_id(); | ||
515 | |||
516 | clamping = true; | ||
517 | schedule_delayed_work(&poll_pkg_cstate_work, 0); | ||
518 | |||
519 | /* start one thread per online cpu */ | ||
520 | for_each_online_cpu(cpu) { | ||
521 | struct task_struct **p = | ||
522 | per_cpu_ptr(powerclamp_thread, cpu); | ||
523 | |||
524 | thread = kthread_create_on_node(clamp_thread, | ||
525 | (void *) cpu, | ||
526 | cpu_to_node(cpu), | ||
527 | "kidle_inject/%ld", cpu); | ||
528 | /* bind to cpu here */ | ||
529 | if (likely(!IS_ERR(thread))) { | ||
530 | kthread_bind(thread, cpu); | ||
531 | wake_up_process(thread); | ||
532 | *p = thread; | ||
533 | } | ||
534 | |||
535 | } | ||
536 | put_online_cpus(); | ||
537 | |||
538 | return 0; | ||
539 | } | ||
540 | |||
541 | static void end_power_clamp(void) | ||
542 | { | ||
543 | int i; | ||
544 | struct task_struct *thread; | ||
545 | |||
546 | clamping = false; | ||
547 | /* | ||
548 | * make clamping visible to other cpus and give per cpu clamping threads | ||
549 | * some time to exit; otherwise they get killed later. | ||
550 | */ | ||
551 | smp_mb(); | ||
552 | msleep(20); | ||
553 | if (bitmap_weight(cpu_clamping_mask, num_possible_cpus())) { | ||
554 | for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) { | ||
555 | pr_debug("clamping thread for cpu %d alive, kill\n", i); | ||
556 | thread = *per_cpu_ptr(powerclamp_thread, i); | ||
557 | kthread_stop(thread); | ||
558 | } | ||
559 | } | ||
560 | } | ||
561 | |||
562 | static int powerclamp_cpu_callback(struct notifier_block *nfb, | ||
563 | unsigned long action, void *hcpu) | ||
564 | { | ||
565 | unsigned long cpu = (unsigned long)hcpu; | ||
566 | struct task_struct *thread; | ||
567 | struct task_struct **percpu_thread = | ||
568 | per_cpu_ptr(powerclamp_thread, cpu); | ||
569 | |||
570 | if (false == clamping) | ||
571 | goto exit_ok; | ||
572 | |||
573 | switch (action) { | ||
574 | case CPU_ONLINE: | ||
575 | thread = kthread_create_on_node(clamp_thread, | ||
576 | (void *) cpu, | ||
577 | cpu_to_node(cpu), | ||
578 | "kidle_inject/%lu", cpu); | ||
579 | if (likely(!IS_ERR(thread))) { | ||
580 | kthread_bind(thread, cpu); | ||
581 | wake_up_process(thread); | ||
582 | *percpu_thread = thread; | ||
583 | } | ||
584 | /* prefer BSP as controlling CPU */ | ||
585 | if (cpu == 0) { | ||
586 | control_cpu = 0; | ||
587 | smp_mb(); | ||
588 | } | ||
589 | break; | ||
590 | case CPU_DEAD: | ||
591 | if (test_bit(cpu, cpu_clamping_mask)) { | ||
592 | pr_err("cpu %lu dead but powerclamping thread is not\n", | ||
593 | cpu); | ||
594 | kthread_stop(*percpu_thread); | ||
595 | } | ||
596 | if (cpu == control_cpu) { | ||
597 | control_cpu = smp_processor_id(); | ||
598 | smp_mb(); | ||
599 | } | ||
600 | } | ||
601 | |||
602 | exit_ok: | ||
603 | return NOTIFY_OK; | ||
604 | } | ||
605 | |||
606 | static struct notifier_block powerclamp_cpu_notifier = { | ||
607 | .notifier_call = powerclamp_cpu_callback, | ||
608 | }; | ||
609 | |||
610 | static int powerclamp_get_max_state(struct thermal_cooling_device *cdev, | ||
611 | unsigned long *state) | ||
612 | { | ||
613 | *state = MAX_TARGET_RATIO; | ||
614 | |||
615 | return 0; | ||
616 | } | ||
617 | |||
618 | static int powerclamp_get_cur_state(struct thermal_cooling_device *cdev, | ||
619 | unsigned long *state) | ||
620 | { | ||
621 | if (true == clamping) | ||
622 | *state = pkg_cstate_ratio_cur; | ||
623 | else | ||
624 | /* to save power, do not poll idle ratio while not clamping */ | ||
625 | *state = -1; /* indicates invalid state */ | ||
626 | |||
627 | return 0; | ||
628 | } | ||
629 | |||
630 | static int powerclamp_set_cur_state(struct thermal_cooling_device *cdev, | ||
631 | unsigned long new_target_ratio) | ||
632 | { | ||
633 | int ret = 0; | ||
634 | |||
635 | new_target_ratio = clamp(new_target_ratio, 0UL, | ||
636 | (unsigned long) (MAX_TARGET_RATIO-1)); | ||
637 | if (set_target_ratio == 0 && new_target_ratio > 0) { | ||
638 | pr_info("Start idle injection to reduce power\n"); | ||
639 | set_target_ratio = new_target_ratio; | ||
640 | ret = start_power_clamp(); | ||
641 | goto exit_set; | ||
642 | } else if (set_target_ratio > 0 && new_target_ratio == 0) { | ||
643 | pr_info("Stop forced idle injection\n"); | ||
644 | set_target_ratio = 0; | ||
645 | end_power_clamp(); | ||
646 | } else /* adjust currently running */ { | ||
647 | set_target_ratio = new_target_ratio; | ||
648 | /* make new set_target_ratio visible to other cpus */ | ||
649 | smp_mb(); | ||
650 | } | ||
651 | |||
652 | exit_set: | ||
653 | return ret; | ||
654 | } | ||
655 | |||
656 | /* bind to generic thermal layer as cooling device*/ | ||
657 | static struct thermal_cooling_device_ops powerclamp_cooling_ops = { | ||
658 | .get_max_state = powerclamp_get_max_state, | ||
659 | .get_cur_state = powerclamp_get_cur_state, | ||
660 | .set_cur_state = powerclamp_set_cur_state, | ||
661 | }; | ||
662 | |||
663 | /* runs on Nehalem and later */ | ||
664 | static const struct x86_cpu_id intel_powerclamp_ids[] = { | ||
665 | { X86_VENDOR_INTEL, 6, 0x1a}, | ||
666 | { X86_VENDOR_INTEL, 6, 0x1c}, | ||
667 | { X86_VENDOR_INTEL, 6, 0x1e}, | ||
668 | { X86_VENDOR_INTEL, 6, 0x1f}, | ||
669 | { X86_VENDOR_INTEL, 6, 0x25}, | ||
670 | { X86_VENDOR_INTEL, 6, 0x26}, | ||
671 | { X86_VENDOR_INTEL, 6, 0x2a}, | ||
672 | { X86_VENDOR_INTEL, 6, 0x2c}, | ||
673 | { X86_VENDOR_INTEL, 6, 0x2d}, | ||
674 | { X86_VENDOR_INTEL, 6, 0x2e}, | ||
675 | { X86_VENDOR_INTEL, 6, 0x2f}, | ||
676 | { X86_VENDOR_INTEL, 6, 0x3a}, | ||
677 | {} | ||
678 | }; | ||
679 | MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids); | ||
680 | |||
681 | static int powerclamp_probe(void) | ||
682 | { | ||
683 | if (!x86_match_cpu(intel_powerclamp_ids)) { | ||
684 | pr_err("Intel powerclamp does not run on family %d model %d\n", | ||
685 | boot_cpu_data.x86, boot_cpu_data.x86_model); | ||
686 | return -ENODEV; | ||
687 | } | ||
688 | if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC) || | ||
689 | !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) || | ||
690 | !boot_cpu_has(X86_FEATURE_MWAIT) || | ||
691 | !boot_cpu_has(X86_FEATURE_ARAT)) | ||
692 | return -ENODEV; | ||
693 | |||
694 | /* find the deepest mwait value */ | ||
695 | find_target_mwait(); | ||
696 | |||
697 | return 0; | ||
698 | } | ||
699 | |||
700 | static int powerclamp_debug_show(struct seq_file *m, void *unused) | ||
701 | { | ||
702 | int i = 0; | ||
703 | |||
704 | seq_printf(m, "controlling cpu: %d\n", control_cpu); | ||
705 | seq_printf(m, "pct confidence steady dynamic (compensation)\n"); | ||
706 | for (i = 0; i < MAX_TARGET_RATIO; i++) { | ||
707 | seq_printf(m, "%d\t%lu\t%lu\t%lu\n", | ||
708 | i, | ||
709 | cal_data[i].confidence, | ||
710 | cal_data[i].steady_comp, | ||
711 | cal_data[i].dynamic_comp); | ||
712 | } | ||
713 | |||
714 | return 0; | ||
715 | } | ||
716 | |||
717 | static int powerclamp_debug_open(struct inode *inode, | ||
718 | struct file *file) | ||
719 | { | ||
720 | return single_open(file, powerclamp_debug_show, inode->i_private); | ||
721 | } | ||
722 | |||
723 | static const struct file_operations powerclamp_debug_fops = { | ||
724 | .open = powerclamp_debug_open, | ||
725 | .read = seq_read, | ||
726 | .llseek = seq_lseek, | ||
727 | .release = single_release, | ||
728 | .owner = THIS_MODULE, | ||
729 | }; | ||
730 | |||
731 | static inline void powerclamp_create_debug_files(void) | ||
732 | { | ||
733 | debug_dir = debugfs_create_dir("intel_powerclamp", NULL); | ||
734 | if (!debug_dir) | ||
735 | return; | ||
736 | |||
737 | if (!debugfs_create_file("powerclamp_calib", S_IRUGO, debug_dir, | ||
738 | cal_data, &powerclamp_debug_fops)) | ||
739 | goto file_error; | ||
740 | |||
741 | return; | ||
742 | |||
743 | file_error: | ||
744 | debugfs_remove_recursive(debug_dir); | ||
745 | } | ||
746 | |||
747 | static int powerclamp_init(void) | ||
748 | { | ||
749 | int retval; | ||
750 | int bitmap_size; | ||
751 | |||
752 | bitmap_size = BITS_TO_LONGS(num_possible_cpus()) * sizeof(long); | ||
753 | cpu_clamping_mask = kzalloc(bitmap_size, GFP_KERNEL); | ||
754 | if (!cpu_clamping_mask) | ||
755 | return -ENOMEM; | ||
756 | |||
757 | /* probe cpu features and ids here */ | ||
758 | retval = powerclamp_probe(); | ||
759 | if (retval) | ||
760 | return retval; | ||
761 | /* set default limit, maybe adjusted during runtime based on feedback */ | ||
762 | window_size = 2; | ||
763 | register_hotcpu_notifier(&powerclamp_cpu_notifier); | ||
764 | powerclamp_thread = alloc_percpu(struct task_struct *); | ||
765 | cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL, | ||
766 | &powerclamp_cooling_ops); | ||
767 | if (IS_ERR(cooling_dev)) | ||
768 | return -ENODEV; | ||
769 | |||
770 | if (!duration) | ||
771 | duration = jiffies_to_msecs(DEFAULT_DURATION_JIFFIES); | ||
772 | powerclamp_create_debug_files(); | ||
773 | |||
774 | return 0; | ||
775 | } | ||
776 | module_init(powerclamp_init); | ||
777 | |||
778 | static void powerclamp_exit(void) | ||
779 | { | ||
780 | unregister_hotcpu_notifier(&powerclamp_cpu_notifier); | ||
781 | end_power_clamp(); | ||
782 | free_percpu(powerclamp_thread); | ||
783 | thermal_cooling_device_unregister(cooling_dev); | ||
784 | kfree(cpu_clamping_mask); | ||
785 | |||
786 | cancel_delayed_work_sync(&poll_pkg_cstate_work); | ||
787 | debugfs_remove_recursive(debug_dir); | ||
788 | } | ||
789 | module_exit(powerclamp_exit); | ||
790 | |||
791 | MODULE_LICENSE("GPL"); | ||
792 | MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>"); | ||
793 | MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>"); | ||
794 | MODULE_DESCRIPTION("Package Level C-state Idle Injection for Intel CPUs"); | ||
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c new file mode 100644 index 000000000000..65cb4f09e8f6 --- /dev/null +++ b/drivers/thermal/kirkwood_thermal.c | |||
@@ -0,0 +1,134 @@ | |||
1 | /* | ||
2 | * Kirkwood thermal sensor driver | ||
3 | * | ||
4 | * Copyright (C) 2012 Nobuhiro Iwamatsu <iwamatsu@nigauri.org> | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | #include <linux/device.h> | ||
17 | #include <linux/err.h> | ||
18 | #include <linux/io.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/thermal.h> | ||
24 | |||
25 | #define KIRKWOOD_THERMAL_VALID_OFFSET 9 | ||
26 | #define KIRKWOOD_THERMAL_VALID_MASK 0x1 | ||
27 | #define KIRKWOOD_THERMAL_TEMP_OFFSET 10 | ||
28 | #define KIRKWOOD_THERMAL_TEMP_MASK 0x1FF | ||
29 | |||
30 | /* Kirkwood Thermal Sensor Dev Structure */ | ||
31 | struct kirkwood_thermal_priv { | ||
32 | void __iomem *sensor; | ||
33 | }; | ||
34 | |||
35 | static int kirkwood_get_temp(struct thermal_zone_device *thermal, | ||
36 | unsigned long *temp) | ||
37 | { | ||
38 | unsigned long reg; | ||
39 | struct kirkwood_thermal_priv *priv = thermal->devdata; | ||
40 | |||
41 | reg = readl_relaxed(priv->sensor); | ||
42 | |||
43 | /* Valid check */ | ||
44 | if (!(reg >> KIRKWOOD_THERMAL_VALID_OFFSET) & | ||
45 | KIRKWOOD_THERMAL_VALID_MASK) { | ||
46 | dev_err(&thermal->device, | ||
47 | "Temperature sensor reading not valid\n"); | ||
48 | return -EIO; | ||
49 | } | ||
50 | |||
51 | /* | ||
52 | * Calculate temperature. See Section 8.10.1 of the 88AP510 | ||
53 | * datasheet, which has the same sensor. | ||
54 | * Documentation/arm/Marvell/README | ||
55 | */ | ||
56 | reg = (reg >> KIRKWOOD_THERMAL_TEMP_OFFSET) & | ||
57 | KIRKWOOD_THERMAL_TEMP_MASK; | ||
58 | *temp = ((2281638UL - (7298*reg)) / 10); | ||
59 | |||
60 | return 0; | ||
61 | } | ||
62 | |||
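
The conversion above maps the 9-bit raw reading to millidegrees Celsius via (2281638 - 7298 * reg) / 10. A quick stand-alone check with an assumed raw value:

    #include <stdio.h>

    int main(void)
    {
        unsigned long reg = 230;  /* assumed 9-bit raw sensor reading */
        unsigned long temp = (2281638UL - (7298 * reg)) / 10;

        /* 2281638 - 7298 * 230 = 603098; 603098 / 10 = 60309 mC, about 60.3 C */
        printf("raw %lu -> %lu mC\n", reg, temp);
        return 0;
    }
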
63 | static struct thermal_zone_device_ops ops = { | ||
64 | .get_temp = kirkwood_get_temp, | ||
65 | }; | ||
66 | |||
67 | static const struct of_device_id kirkwood_thermal_id_table[] = { | ||
68 | { .compatible = "marvell,kirkwood-thermal" }, | ||
69 | {} | ||
70 | }; | ||
71 | |||
72 | static int kirkwood_thermal_probe(struct platform_device *pdev) | ||
73 | { | ||
74 | struct thermal_zone_device *thermal = NULL; | ||
75 | struct kirkwood_thermal_priv *priv; | ||
76 | struct resource *res; | ||
77 | |||
78 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
79 | if (!res) { | ||
80 | dev_err(&pdev->dev, "Failed to get platform resource\n"); | ||
81 | return -ENODEV; | ||
82 | } | ||
83 | |||
84 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); | ||
85 | if (!priv) | ||
86 | return -ENOMEM; | ||
87 | |||
88 | priv->sensor = devm_request_and_ioremap(&pdev->dev, res); | ||
89 | if (!priv->sensor) { | ||
90 | dev_err(&pdev->dev, "Failed to request_ioremap memory\n"); | ||
91 | return -EADDRNOTAVAIL; | ||
92 | } | ||
93 | |||
94 | thermal = thermal_zone_device_register("kirkwood_thermal", 0, 0, | ||
95 | priv, &ops, NULL, 0, 0); | ||
96 | if (IS_ERR(thermal)) { | ||
97 | dev_err(&pdev->dev, | ||
98 | "Failed to register thermal zone device\n"); | ||
99 | return PTR_ERR(thermal); | ||
100 | } | ||
101 | |||
102 | platform_set_drvdata(pdev, thermal); | ||
103 | |||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | static int kirkwood_thermal_exit(struct platform_device *pdev) | ||
108 | { | ||
109 | struct thermal_zone_device *kirkwood_thermal = | ||
110 | platform_get_drvdata(pdev); | ||
111 | |||
112 | thermal_zone_device_unregister(kirkwood_thermal); | ||
113 | platform_set_drvdata(pdev, NULL); | ||
114 | |||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | MODULE_DEVICE_TABLE(of, kirkwood_thermal_id_table); | ||
119 | |||
120 | static struct platform_driver kirkwood_thermal_driver = { | ||
121 | .probe = kirkwood_thermal_probe, | ||
122 | .remove = kirkwood_thermal_exit, | ||
123 | .driver = { | ||
124 | .name = "kirkwood_thermal", | ||
125 | .owner = THIS_MODULE, | ||
126 | .of_match_table = of_match_ptr(kirkwood_thermal_id_table), | ||
127 | }, | ||
128 | }; | ||
129 | |||
130 | module_platform_driver(kirkwood_thermal_driver); | ||
131 | |||
132 | MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu@nigauri.org>"); | ||
133 | MODULE_DESCRIPTION("kirkwood thermal driver"); | ||
134 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 90db951725da..28f091994013 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c | |||
@@ -19,225 +19,473 @@ | |||
19 | */ | 19 | */ |
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/irq.h> | ||
23 | #include <linux/interrupt.h> | ||
22 | #include <linux/io.h> | 24 | #include <linux/io.h> |
23 | #include <linux/module.h> | 25 | #include <linux/module.h> |
24 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
27 | #include <linux/reboot.h> | ||
25 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
26 | #include <linux/spinlock.h> | 29 | #include <linux/spinlock.h> |
27 | #include <linux/thermal.h> | 30 | #include <linux/thermal.h> |
28 | 31 | ||
29 | #define THSCR 0x2c | 32 | #define IDLE_INTERVAL 5000 |
30 | #define THSSR 0x30 | 33 | |
34 | #define COMMON_STR 0x00 | ||
35 | #define COMMON_ENR 0x04 | ||
36 | #define COMMON_INTMSK 0x0c | ||
37 | |||
38 | #define REG_POSNEG 0x20 | ||
39 | #define REG_FILONOFF 0x28 | ||
40 | #define REG_THSCR 0x2c | ||
41 | #define REG_THSSR 0x30 | ||
42 | #define REG_INTCTRL 0x34 | ||
31 | 43 | ||
32 | /* THSCR */ | 44 | /* THSCR */ |
33 | #define CPTAP 0xf | 45 | #define CPCTL (1 << 12) |
34 | 46 | ||
35 | /* THSSR */ | 47 | /* THSSR */ |
36 | #define CTEMP 0x3f | 48 | #define CTEMP 0x3f |
37 | 49 | ||
38 | 50 | struct rcar_thermal_common { | |
39 | struct rcar_thermal_priv { | ||
40 | void __iomem *base; | 51 | void __iomem *base; |
41 | struct device *dev; | 52 | struct device *dev; |
53 | struct list_head head; | ||
42 | spinlock_t lock; | 54 | spinlock_t lock; |
43 | u32 comp; | ||
44 | }; | 55 | }; |
45 | 56 | ||
57 | struct rcar_thermal_priv { | ||
58 | void __iomem *base; | ||
59 | struct rcar_thermal_common *common; | ||
60 | struct thermal_zone_device *zone; | ||
61 | struct delayed_work work; | ||
62 | struct mutex lock; | ||
63 | struct list_head list; | ||
64 | int id; | ||
65 | int ctemp; | ||
66 | }; | ||
67 | |||
68 | #define rcar_thermal_for_each_priv(pos, common) \ | ||
69 | list_for_each_entry(pos, &common->head, list) | ||
70 | |||
46 | #define MCELSIUS(temp) ((temp) * 1000) | 71 | #define MCELSIUS(temp) ((temp) * 1000) |
47 | #define rcar_zone_to_priv(zone) (zone->devdata) | 72 | #define rcar_zone_to_priv(zone) ((zone)->devdata) |
73 | #define rcar_priv_to_dev(priv) ((priv)->common->dev) | ||
74 | #define rcar_has_irq_support(priv) ((priv)->common->base) | ||
75 | #define rcar_id_to_shift(priv) ((priv)->id * 8) | ||
76 | |||
77 | #ifdef DEBUG | ||
78 | # define rcar_force_update_temp(priv) 1 | ||
79 | #else | ||
80 | # define rcar_force_update_temp(priv) 0 | ||
81 | #endif | ||
48 | 82 | ||
49 | /* | 83 | /* |
50 | * basic functions | 84 | * basic functions |
51 | */ | 85 | */ |
52 | static u32 rcar_thermal_read(struct rcar_thermal_priv *priv, u32 reg) | 86 | #define rcar_thermal_common_read(c, r) \ |
87 | _rcar_thermal_common_read(c, COMMON_ ##r) | ||
88 | static u32 _rcar_thermal_common_read(struct rcar_thermal_common *common, | ||
89 | u32 reg) | ||
53 | { | 90 | { |
54 | unsigned long flags; | 91 | return ioread32(common->base + reg); |
55 | u32 ret; | 92 | } |
56 | |||
57 | spin_lock_irqsave(&priv->lock, flags); | ||
58 | 93 | ||
59 | ret = ioread32(priv->base + reg); | 94 | #define rcar_thermal_common_write(c, r, d) \ |
95 | _rcar_thermal_common_write(c, COMMON_ ##r, d) | ||
96 | static void _rcar_thermal_common_write(struct rcar_thermal_common *common, | ||
97 | u32 reg, u32 data) | ||
98 | { | ||
99 | iowrite32(data, common->base + reg); | ||
100 | } | ||
60 | 101 | ||
61 | spin_unlock_irqrestore(&priv->lock, flags); | 102 | #define rcar_thermal_common_bset(c, r, m, d) \ |
103 | _rcar_thermal_common_bset(c, COMMON_ ##r, m, d) | ||
104 | static void _rcar_thermal_common_bset(struct rcar_thermal_common *common, | ||
105 | u32 reg, u32 mask, u32 data) | ||
106 | { | ||
107 | u32 val; | ||
62 | 108 | ||
63 | return ret; | 109 | val = ioread32(common->base + reg); |
110 | val &= ~mask; | ||
111 | val |= (data & mask); | ||
112 | iowrite32(val, common->base + reg); | ||
64 | } | 113 | } |
65 | 114 | ||
66 | #if 0 /* no user at this point */ | 115 | #define rcar_thermal_read(p, r) _rcar_thermal_read(p, REG_ ##r) |
67 | static void rcar_thermal_write(struct rcar_thermal_priv *priv, | 116 | static u32 _rcar_thermal_read(struct rcar_thermal_priv *priv, u32 reg) |
68 | u32 reg, u32 data) | ||
69 | { | 117 | { |
70 | unsigned long flags; | 118 | return ioread32(priv->base + reg); |
71 | 119 | } | |
72 | spin_lock_irqsave(&priv->lock, flags); | ||
73 | 120 | ||
121 | #define rcar_thermal_write(p, r, d) _rcar_thermal_write(p, REG_ ##r, d) | ||
122 | static void _rcar_thermal_write(struct rcar_thermal_priv *priv, | ||
123 | u32 reg, u32 data) | ||
124 | { | ||
74 | iowrite32(data, priv->base + reg); | 125 | iowrite32(data, priv->base + reg); |
75 | |||
76 | spin_unlock_irqrestore(&priv->lock, flags); | ||
77 | } | 126 | } |
78 | #endif | ||
79 | 127 | ||
80 | static void rcar_thermal_bset(struct rcar_thermal_priv *priv, u32 reg, | 128 | #define rcar_thermal_bset(p, r, m, d) _rcar_thermal_bset(p, REG_ ##r, m, d) |
81 | u32 mask, u32 data) | 129 | static void _rcar_thermal_bset(struct rcar_thermal_priv *priv, u32 reg, |
130 | u32 mask, u32 data) | ||
82 | { | 131 | { |
83 | unsigned long flags; | ||
84 | u32 val; | 132 | u32 val; |
85 | 133 | ||
86 | spin_lock_irqsave(&priv->lock, flags); | ||
87 | |||
88 | val = ioread32(priv->base + reg); | 134 | val = ioread32(priv->base + reg); |
89 | val &= ~mask; | 135 | val &= ~mask; |
90 | val |= (data & mask); | 136 | val |= (data & mask); |
91 | iowrite32(val, priv->base + reg); | 137 | iowrite32(val, priv->base + reg); |
92 | |||
93 | spin_unlock_irqrestore(&priv->lock, flags); | ||
94 | } | 138 | } |
95 | 139 | ||
96 | /* | 140 | /* |
97 | * zone device functions | 141 | * zone device functions |
98 | */ | 142 | */ |
99 | static int rcar_thermal_get_temp(struct thermal_zone_device *zone, | 143 | static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) |
100 | unsigned long *temp) | ||
101 | { | 144 | { |
102 | struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone); | 145 | struct device *dev = rcar_priv_to_dev(priv); |
103 | int val, min, max, tmp; | 146 | int i; |
104 | 147 | int ctemp, old, new; | |
105 | tmp = -200; /* default */ | ||
106 | while (1) { | ||
107 | if (priv->comp < 1 || priv->comp > 12) { | ||
108 | dev_err(priv->dev, | ||
109 | "THSSR invalid data (%d)\n", priv->comp); | ||
110 | priv->comp = 4; /* for next thermal */ | ||
111 | return -EINVAL; | ||
112 | } | ||
113 | 148 | ||
114 | /* | 149 | mutex_lock(&priv->lock); |
115 | * THS comparator offset and the reference temperature | ||
116 | * | ||
117 | * Comparator | reference | Temperature field | ||
118 | * offset | temperature | measurement | ||
119 | * | (degrees C) | (degrees C) | ||
120 | * -------------+---------------+------------------- | ||
121 | * 1 | -45 | -45 to -30 | ||
122 | * 2 | -30 | -30 to -15 | ||
123 | * 3 | -15 | -15 to 0 | ||
124 | * 4 | 0 | 0 to +15 | ||
125 | * 5 | +15 | +15 to +30 | ||
126 | * 6 | +30 | +30 to +45 | ||
127 | * 7 | +45 | +45 to +60 | ||
128 | * 8 | +60 | +60 to +75 | ||
129 | * 9 | +75 | +75 to +90 | ||
130 | * 10 | +90 | +90 to +105 | ||
131 | * 11 | +105 | +105 to +120 | ||
132 | * 12 | +120 | +120 to +135 | ||
133 | */ | ||
134 | 150 | ||
135 | /* calculate thermal limitation */ | 151 | /* |
136 | min = (priv->comp * 15) - 60; | 152 | * TSC decides a value of CPTAP automatically, |
137 | max = min + 15; | 153 | * and this is the condition that validates the interrupt. |
154 | */ | ||
155 | rcar_thermal_bset(priv, THSCR, CPCTL, CPCTL); | ||
138 | 156 | ||
157 | ctemp = 0; | ||
158 | old = ~0; | ||
159 | for (i = 0; i < 128; i++) { | ||
139 | /* | 160 | /* |
140 | * we need to wait 300us after changing comparator offset | 161 | * we need to wait 300us after changing comparator offset |
141 | * to get stable temperature. | 162 | * to get stable temperature. |
142 | * see "Usage Notes" on datasheet | 163 | * see "Usage Notes" on datasheet |
143 | */ | 164 | */ |
144 | rcar_thermal_bset(priv, THSCR, CPTAP, priv->comp); | ||
145 | udelay(300); | 165 | udelay(300); |
146 | 166 | ||
147 | /* calculate current temperature */ | 167 | new = rcar_thermal_read(priv, THSSR) & CTEMP; |
148 | val = rcar_thermal_read(priv, THSSR) & CTEMP; | 168 | if (new == old) { |
149 | val = (val * 5) - 65; | 169 | ctemp = new; |
170 | break; | ||
171 | } | ||
172 | old = new; | ||
173 | } | ||
150 | 174 | ||
151 | dev_dbg(priv->dev, "comp/min/max/val = %d/%d/%d/%d\n", | 175 | if (!ctemp) { |
152 | priv->comp, min, max, val); | 176 | dev_err(dev, "thermal sensor was broken\n"); |
177 | return -EINVAL; | ||
178 | } | ||
153 | 179 | ||
154 | /* | 180 | /* |
155 | * If val is same as min/max, then, | 181 | * enable IRQ |
156 | * it should try again on next comparator. | 182 | */ |
157 | * But the val might be correct temperature. | 183 | if (rcar_has_irq_support(priv)) { |
158 | * Keep it on "tmp" and compare with next val. | 184 | rcar_thermal_write(priv, FILONOFF, 0); |
159 | */ | ||
160 | if (tmp == val) | ||
161 | break; | ||
162 | 185 | ||
163 | if (val <= min) { | 186 | /* enable Rising/Falling edge interrupt */ |
164 | tmp = min; | 187 | rcar_thermal_write(priv, POSNEG, 0x1); |
165 | priv->comp--; /* try again */ | 188 | rcar_thermal_write(priv, INTCTRL, (((ctemp - 0) << 8) | |
166 | } else if (val >= max) { | 189 | ((ctemp - 1) << 0))); |
167 | tmp = max; | 190 | } |
168 | priv->comp++; /* try again */ | 191 | |
169 | } else { | 192 | dev_dbg(dev, "thermal%d %d -> %d\n", priv->id, priv->ctemp, ctemp); |
170 | tmp = val; | 193 | |
171 | break; | 194 | priv->ctemp = ctemp; |
172 | } | 195 | |
196 | mutex_unlock(&priv->lock); | ||
197 | |||
198 | return 0; | ||
199 | } | ||
200 | |||
201 | static int rcar_thermal_get_temp(struct thermal_zone_device *zone, | ||
202 | unsigned long *temp) | ||
203 | { | ||
204 | struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone); | ||
205 | |||
206 | if (!rcar_has_irq_support(priv) || rcar_force_update_temp(priv)) | ||
207 | rcar_thermal_update_temp(priv); | ||
208 | |||
209 | mutex_lock(&priv->lock); | ||
210 | *temp = MCELSIUS((priv->ctemp * 5) - 65); | ||
211 | mutex_unlock(&priv->lock); | ||
212 | |||
213 | return 0; | ||
214 | } | ||
215 | |||
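
Here the 6-bit CTEMP code maps to Celsius as (ctemp * 5) - 65 and is reported in millidegrees through MCELSIUS(). A stand-alone check with an assumed reading:

    #include <stdio.h>

    #define MCELSIUS(temp) ((temp) * 1000)

    int main(void)
    {
        int ctemp = 0x19;  /* assumed 6-bit CTEMP reading (25) */
        long temp = MCELSIUS((ctemp * 5) - 65);

        /* 25 * 5 - 65 = 60 degrees C -> 60000 mC */
        printf("CTEMP %#x -> %ld mC\n", ctemp, temp);
        return 0;
    }
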
216 | static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone, | ||
217 | int trip, enum thermal_trip_type *type) | ||
218 | { | ||
219 | struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone); | ||
220 | struct device *dev = rcar_priv_to_dev(priv); | ||
221 | |||
222 | /* see rcar_thermal_get_temp() */ | ||
223 | switch (trip) { | ||
224 | case 0: /* +90 <= temp */ | ||
225 | *type = THERMAL_TRIP_CRITICAL; | ||
226 | break; | ||
227 | default: | ||
228 | dev_err(dev, "rcar driver trip error\n"); | ||
229 | return -EINVAL; | ||
230 | } | ||
231 | |||
232 | return 0; | ||
233 | } | ||
234 | |||
235 | static int rcar_thermal_get_trip_temp(struct thermal_zone_device *zone, | ||
236 | int trip, unsigned long *temp) | ||
237 | { | ||
238 | struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone); | ||
239 | struct device *dev = rcar_priv_to_dev(priv); | ||
240 | |||
241 | /* see rcar_thermal_get_temp() */ | ||
242 | switch (trip) { | ||
243 | case 0: /* +90 <= temp */ | ||
244 | *temp = MCELSIUS(90); | ||
245 | break; | ||
246 | default: | ||
247 | dev_err(dev, "rcar driver trip error\n"); | ||
248 | return -EINVAL; | ||
249 | } | ||
250 | |||
251 | return 0; | ||
252 | } | ||
253 | |||
254 | static int rcar_thermal_notify(struct thermal_zone_device *zone, | ||
255 | int trip, enum thermal_trip_type type) | ||
256 | { | ||
257 | struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone); | ||
258 | struct device *dev = rcar_priv_to_dev(priv); | ||
259 | |||
260 | switch (type) { | ||
261 | case THERMAL_TRIP_CRITICAL: | ||
262 | /* FIXME */ | ||
263 | dev_warn(dev, "Thermal reached critical temperature\n"); | ||
264 | break; | ||
265 | default: | ||
266 | break; | ||
173 | } | 267 | } |
174 | 268 | ||
175 | *temp = MCELSIUS(tmp); | ||
176 | return 0; | 269 | return 0; |
177 | } | 270 | } |
178 | 271 | ||
179 | static struct thermal_zone_device_ops rcar_thermal_zone_ops = { | 272 | static struct thermal_zone_device_ops rcar_thermal_zone_ops = { |
180 | .get_temp = rcar_thermal_get_temp, | 273 | .get_temp = rcar_thermal_get_temp, |
274 | .get_trip_type = rcar_thermal_get_trip_type, | ||
275 | .get_trip_temp = rcar_thermal_get_trip_temp, | ||
276 | .notify = rcar_thermal_notify, | ||
181 | }; | 277 | }; |
182 | 278 | ||
183 | /* | 279 | /* |
184 | * platform functions | 280 | * interrupt |
185 | */ | 281 | */ |
186 | static int rcar_thermal_probe(struct platform_device *pdev) | 282 | #define rcar_thermal_irq_enable(p) _rcar_thermal_irq_ctrl(p, 1) |
283 | #define rcar_thermal_irq_disable(p) _rcar_thermal_irq_ctrl(p, 0) | ||
284 | static void _rcar_thermal_irq_ctrl(struct rcar_thermal_priv *priv, int enable) | ||
285 | { | ||
286 | struct rcar_thermal_common *common = priv->common; | ||
287 | unsigned long flags; | ||
288 | u32 mask = 0x3 << rcar_id_to_shift(priv); /* enable Rising/Falling */ | ||
289 | |||
290 | spin_lock_irqsave(&common->lock, flags); | ||
291 | |||
292 | rcar_thermal_common_bset(common, INTMSK, mask, enable ? 0 : mask); | ||
293 | |||
294 | spin_unlock_irqrestore(&common->lock, flags); | ||
295 | } | ||
296 | |||
297 | static void rcar_thermal_work(struct work_struct *work) | ||
187 | { | 298 | { |
188 | struct thermal_zone_device *zone; | ||
189 | struct rcar_thermal_priv *priv; | 299 | struct rcar_thermal_priv *priv; |
190 | struct resource *res; | ||
191 | 300 | ||
192 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 301 | priv = container_of(work, struct rcar_thermal_priv, work.work); |
193 | if (!res) { | 302 | |
194 | dev_err(&pdev->dev, "Could not get platform resource\n"); | 303 | rcar_thermal_update_temp(priv); |
195 | return -ENODEV; | 304 | rcar_thermal_irq_enable(priv); |
305 | thermal_zone_device_update(priv->zone); | ||
306 | } | ||
307 | |||
308 | static u32 rcar_thermal_had_changed(struct rcar_thermal_priv *priv, u32 status) | ||
309 | { | ||
310 | struct device *dev = rcar_priv_to_dev(priv); | ||
311 | |||
312 | status = (status >> rcar_id_to_shift(priv)) & 0x3; | ||
313 | |||
314 | if (status & 0x3) { | ||
315 | dev_dbg(dev, "thermal%d %s%s\n", | ||
316 | priv->id, | ||
317 | (status & 0x2) ? "Rising " : "", | ||
318 | (status & 0x1) ? "Falling" : ""); | ||
196 | } | 319 | } |
197 | 320 | ||
198 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); | 321 | return status; |
199 | if (!priv) { | 322 | } |
200 | dev_err(&pdev->dev, "Could not allocate priv\n"); | 323 | |
201 | return -ENOMEM; | 324 | static irqreturn_t rcar_thermal_irq(int irq, void *data) |
325 | { | ||
326 | struct rcar_thermal_common *common = data; | ||
327 | struct rcar_thermal_priv *priv; | ||
328 | unsigned long flags; | ||
329 | u32 status, mask; | ||
330 | |||
331 | spin_lock_irqsave(&common->lock, flags); | ||
332 | |||
333 | mask = rcar_thermal_common_read(common, INTMSK); | ||
334 | status = rcar_thermal_common_read(common, STR); | ||
335 | rcar_thermal_common_write(common, STR, 0x000F0F0F & mask); | ||
336 | |||
337 | spin_unlock_irqrestore(&common->lock, flags); | ||
338 | |||
339 | status = status & ~mask; | ||
340 | |||
341 | /* | ||
342 | * check the status | ||
343 | */ | ||
344 | rcar_thermal_for_each_priv(priv, common) { | ||
345 | if (rcar_thermal_had_changed(priv, status)) { | ||
346 | rcar_thermal_irq_disable(priv); | ||
347 | schedule_delayed_work(&priv->work, | ||
348 | msecs_to_jiffies(300)); | ||
349 | } | ||
202 | } | 350 | } |
203 | 351 | ||
204 | priv->comp = 4; /* basic setup */ | 352 | return IRQ_HANDLED; |
205 | priv->dev = &pdev->dev; | 353 | } |
206 | spin_lock_init(&priv->lock); | 354 | |
207 | priv->base = devm_ioremap_nocache(&pdev->dev, | 355 | /* |
208 | res->start, resource_size(res)); | 356 | * platform functions |
209 | if (!priv->base) { | 357 | */ |
210 | dev_err(&pdev->dev, "Unable to ioremap thermal register\n"); | 358 | static int rcar_thermal_probe(struct platform_device *pdev) |
359 | { | ||
360 | struct rcar_thermal_common *common; | ||
361 | struct rcar_thermal_priv *priv; | ||
362 | struct device *dev = &pdev->dev; | ||
363 | struct resource *res, *irq; | ||
364 | int mres = 0; | ||
365 | int i; | ||
366 | int idle = IDLE_INTERVAL; | ||
367 | |||
368 | common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); | ||
369 | if (!common) { | ||
370 | dev_err(dev, "Could not allocate common\n"); | ||
211 | return -ENOMEM; | 371 | return -ENOMEM; |
212 | } | 372 | } |
213 | 373 | ||
214 | zone = thermal_zone_device_register("rcar_thermal", 0, 0, priv, | 374 | INIT_LIST_HEAD(&common->head); |
215 | &rcar_thermal_zone_ops, NULL, 0, 0); | 375 | spin_lock_init(&common->lock); |
216 | if (IS_ERR(zone)) { | 376 | common->dev = dev; |
217 | dev_err(&pdev->dev, "thermal zone device is NULL\n"); | 377 | |
218 | return PTR_ERR(zone); | 378 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
379 | if (irq) { | ||
380 | int ret; | ||
381 | |||
382 | /* | ||
383 | * platform has IRQ support. | ||
384 | * Then, the driver uses the common registers | ||
385 | */ | ||
386 | res = platform_get_resource(pdev, IORESOURCE_MEM, mres++); | ||
387 | if (!res) { | ||
388 | dev_err(dev, "Could not get platform resource\n"); | ||
389 | return -ENODEV; | ||
390 | } | ||
391 | |||
392 | ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0, | ||
393 | dev_name(dev), common); | ||
394 | if (ret) { | ||
395 | dev_err(dev, "irq request failed\n"); | ||
396 | return ret; | ||
397 | } | ||
398 | |||
399 | /* | ||
400 | * rcar_has_irq_support() will be enabled once common->base is set | ||
401 | */ | ||
402 | common->base = devm_request_and_ioremap(dev, res); | ||
403 | if (!common->base) { | ||
404 | dev_err(dev, "Unable to ioremap thermal register\n"); | ||
405 | return -ENOMEM; | ||
406 | } | ||
407 | |||
408 | /* enable temperature comparison */ | ||
409 | rcar_thermal_common_write(common, ENR, 0x00030303); | ||
410 | |||
411 | idle = 0; /* polling delay is not needed */ | ||
219 | } | 412 | } |
220 | 413 | ||
221 | platform_set_drvdata(pdev, zone); | 414 | for (i = 0;; i++) { |
415 | res = platform_get_resource(pdev, IORESOURCE_MEM, mres++); | ||
416 | if (!res) | ||
417 | break; | ||
418 | |||
419 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); | ||
420 | if (!priv) { | ||
421 | dev_err(dev, "Could not allocate priv\n"); | ||
422 | return -ENOMEM; | ||
423 | } | ||
424 | |||
425 | priv->base = devm_request_and_ioremap(dev, res); | ||
426 | if (!priv->base) { | ||
427 | dev_err(dev, "Unable to ioremap priv register\n"); | ||
428 | return -ENOMEM; | ||
429 | } | ||
222 | 430 | ||
223 | dev_info(&pdev->dev, "proved\n"); | 431 | priv->common = common; |
432 | priv->id = i; | ||
433 | mutex_init(&priv->lock); | ||
434 | INIT_LIST_HEAD(&priv->list); | ||
435 | INIT_DELAYED_WORK(&priv->work, rcar_thermal_work); | ||
436 | rcar_thermal_update_temp(priv); | ||
437 | |||
438 | priv->zone = thermal_zone_device_register("rcar_thermal", | ||
439 | 1, 0, priv, | ||
440 | &rcar_thermal_zone_ops, NULL, 0, | ||
441 | idle); | ||
442 | if (IS_ERR(priv->zone)) { | ||
443 | dev_err(dev, "can't register thermal zone\n"); | ||
444 | goto error_unregister; | ||
445 | } | ||
446 | |||
447 | list_move_tail(&priv->list, &common->head); | ||
448 | |||
449 | if (rcar_has_irq_support(priv)) | ||
450 | rcar_thermal_irq_enable(priv); | ||
451 | } | ||
452 | |||
453 | platform_set_drvdata(pdev, common); | ||
454 | |||
455 | dev_info(dev, "%d sensor probed\n", i); | ||
224 | 456 | ||
225 | return 0; | 457 | return 0; |
458 | |||
459 | error_unregister: | ||
460 | rcar_thermal_for_each_priv(priv, common) | ||
461 | thermal_zone_device_unregister(priv->zone); | ||
462 | |||
463 | return -ENODEV; | ||
226 | } | 464 | } |
227 | 465 | ||
228 | static int rcar_thermal_remove(struct platform_device *pdev) | 466 | static int rcar_thermal_remove(struct platform_device *pdev) |
229 | { | 467 | { |
230 | struct thermal_zone_device *zone = platform_get_drvdata(pdev); | 468 | struct rcar_thermal_common *common = platform_get_drvdata(pdev); |
469 | struct rcar_thermal_priv *priv; | ||
470 | |||
471 | rcar_thermal_for_each_priv(priv, common) | ||
472 | thermal_zone_device_unregister(priv->zone); | ||
231 | 473 | ||
232 | thermal_zone_device_unregister(zone); | ||
233 | platform_set_drvdata(pdev, NULL); | 474 | platform_set_drvdata(pdev, NULL); |
234 | 475 | ||
235 | return 0; | 476 | return 0; |
236 | } | 477 | } |
237 | 478 | ||
479 | static const struct of_device_id rcar_thermal_dt_ids[] = { | ||
480 | { .compatible = "renesas,rcar-thermal", }, | ||
481 | {}, | ||
482 | }; | ||
483 | MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids); | ||
484 | |||
238 | static struct platform_driver rcar_thermal_driver = { | 485 | static struct platform_driver rcar_thermal_driver = { |
239 | .driver = { | 486 | .driver = { |
240 | .name = "rcar_thermal", | 487 | .name = "rcar_thermal", |
488 | .of_match_table = rcar_thermal_dt_ids, | ||
241 | }, | 489 | }, |
242 | .probe = rcar_thermal_probe, | 490 | .probe = rcar_thermal_probe, |
243 | .remove = rcar_thermal_remove, | 491 | .remove = rcar_thermal_remove, |
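
rcar_has_irq_support(), used in the probe loop above but outside this hunk, most likely just reports whether the common register block was mapped, which only happens when an IRQ resource was found; a hedged one-liner:

    /* Assumption: a mapped common block implies interrupt support. */
    #define rcar_has_irq_support(priv)   ((priv)->common->base)

In that case idle is forced to 0, so the thermal core skips periodic polling and relies on the interrupt plus the 300 ms delayed work to refresh each zone.
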
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c index 6b2d8b21aaee..3c5ee5607977 100644 --- a/drivers/thermal/spear_thermal.c +++ b/drivers/thermal/spear_thermal.c | |||
@@ -131,7 +131,7 @@ static int spear_thermal_probe(struct platform_device *pdev) | |||
131 | return -ENOMEM; | 131 | return -ENOMEM; |
132 | } | 132 | } |
133 | 133 | ||
134 | stdev->clk = clk_get(&pdev->dev, NULL); | 134 | stdev->clk = devm_clk_get(&pdev->dev, NULL); |
135 | if (IS_ERR(stdev->clk)) { | 135 | if (IS_ERR(stdev->clk)) { |
136 | dev_err(&pdev->dev, "Can't get clock\n"); | 136 | dev_err(&pdev->dev, "Can't get clock\n"); |
137 | return PTR_ERR(stdev->clk); | 137 | return PTR_ERR(stdev->clk); |
@@ -140,7 +140,7 @@ static int spear_thermal_probe(struct platform_device *pdev) | |||
140 | ret = clk_enable(stdev->clk); | 140 | ret = clk_enable(stdev->clk); |
141 | if (ret) { | 141 | if (ret) { |
142 | dev_err(&pdev->dev, "Can't enable clock\n"); | 142 | dev_err(&pdev->dev, "Can't enable clock\n"); |
143 | goto put_clk; | 143 | return ret; |
144 | } | 144 | } |
145 | 145 | ||
146 | stdev->flags = val; | 146 | stdev->flags = val; |
@@ -163,8 +163,6 @@ static int spear_thermal_probe(struct platform_device *pdev) | |||
163 | 163 | ||
164 | disable_clk: | 164 | disable_clk: |
165 | clk_disable(stdev->clk); | 165 | clk_disable(stdev->clk); |
166 | put_clk: | ||
167 | clk_put(stdev->clk); | ||
168 | 166 | ||
169 | return ret; | 167 | return ret; |
170 | } | 168 | } |
@@ -183,7 +181,6 @@ static int spear_thermal_exit(struct platform_device *pdev) | |||
183 | writel_relaxed(actual_mask & ~stdev->flags, stdev->thermal_base); | 181 | writel_relaxed(actual_mask & ~stdev->flags, stdev->thermal_base); |
184 | 182 | ||
185 | clk_disable(stdev->clk); | 183 | clk_disable(stdev->clk); |
186 | clk_put(stdev->clk); | ||
187 | 184 | ||
188 | return 0; | 185 | return 0; |
189 | } | 186 | } |
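
The SPEAr change is a straight conversion to the managed clock API; a minimal probe sketch of the resulting pattern (function and variable names are illustrative) shows why the put_clk label could be dropped:

    static int example_probe(struct platform_device *pdev)
    {
            struct clk *clk;
            int ret;

            clk = devm_clk_get(&pdev->dev, NULL);   /* auto-released on driver detach */
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            ret = clk_enable(clk);
            if (ret)
                    return ret;                     /* no clk_put() needed on error */

            /* ... remaining setup; later failures only need clk_disable() ... */
            return 0;
    }
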
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c index 0cd5e9fbab1c..407cde3211c1 100644 --- a/drivers/thermal/step_wise.c +++ b/drivers/thermal/step_wise.c | |||
@@ -35,21 +35,54 @@ | |||
35 | * state for this trip point | 35 | * state for this trip point |
36 | * b. if the trend is THERMAL_TREND_DROPPING, use lower cooling | 36 | * b. if the trend is THERMAL_TREND_DROPPING, use lower cooling |
37 | * state for this trip point | 37 | * state for this trip point |
38 | * c. if the trend is THERMAL_TREND_RAISE_FULL, use upper limit | ||
39 | * for this trip point | ||
40 | * d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit | ||
41 | * for this trip point | ||
42 | * If the temperature is lower than a trip point, | ||
43 | * a. if the trend is THERMAL_TREND_RAISING, do nothing | ||
44 | * b. if the trend is THERMAL_TREND_DROPPING, use lower cooling | ||
45 | * state for this trip point, if the cooling state already | ||
46 | * equals lower limit, deactivate the thermal instance | ||
47 | * c. if the trend is THERMAL_TREND_RAISE_FULL, do nothing | ||
48 | * d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit, | ||
49 | * if the cooling state already equals lower limit, | ||
50 | * deactivate the thermal instance | ||
38 | */ | 51 | */ |
39 | static unsigned long get_target_state(struct thermal_instance *instance, | 52 | static unsigned long get_target_state(struct thermal_instance *instance, |
40 | enum thermal_trend trend) | 53 | enum thermal_trend trend, bool throttle) |
41 | { | 54 | { |
42 | struct thermal_cooling_device *cdev = instance->cdev; | 55 | struct thermal_cooling_device *cdev = instance->cdev; |
43 | unsigned long cur_state; | 56 | unsigned long cur_state; |
44 | 57 | ||
45 | cdev->ops->get_cur_state(cdev, &cur_state); | 58 | cdev->ops->get_cur_state(cdev, &cur_state); |
46 | 59 | ||
47 | if (trend == THERMAL_TREND_RAISING) { | 60 | switch (trend) { |
48 | cur_state = cur_state < instance->upper ? | 61 | case THERMAL_TREND_RAISING: |
49 | (cur_state + 1) : instance->upper; | 62 | if (throttle) |
50 | } else if (trend == THERMAL_TREND_DROPPING) { | 63 | cur_state = cur_state < instance->upper ? |
51 | cur_state = cur_state > instance->lower ? | 64 | (cur_state + 1) : instance->upper; |
52 | (cur_state - 1) : instance->lower; | 65 | break; |
66 | case THERMAL_TREND_RAISE_FULL: | ||
67 | if (throttle) | ||
68 | cur_state = instance->upper; | ||
69 | break; | ||
70 | case THERMAL_TREND_DROPPING: | ||
71 | if (cur_state == instance->lower) { | ||
72 | if (!throttle) | ||
73 | cur_state = -1; | ||
74 | } else | ||
75 | cur_state -= 1; | ||
76 | break; | ||
77 | case THERMAL_TREND_DROP_FULL: | ||
78 | if (cur_state == instance->lower) { | ||
79 | if (!throttle) | ||
80 | cur_state = -1; | ||
81 | } else | ||
82 | cur_state = instance->lower; | ||
83 | break; | ||
84 | default: | ||
85 | break; | ||
53 | } | 86 | } |
54 | 87 | ||
55 | return cur_state; | 88 | return cur_state; |
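
The decision table implemented above can be condensed into a standalone illustration (plain integers instead of the kernel structures; -1 stands in for THERMAL_NO_TARGET):

    /* Standalone sketch of the step_wise target selection, not kernel code. */
    enum trend { RAISING, DROPPING, RAISE_FULL, DROP_FULL };

    static long pick_target(long cur, long lower, long upper,
                            enum trend t, int throttle)
    {
            switch (t) {
            case RAISING:                      /* step up, capped at upper */
                    return throttle ? (cur < upper ? cur + 1 : upper) : cur;
            case RAISE_FULL:                   /* jump straight to upper */
                    return throttle ? upper : cur;
            case DROPPING:                     /* step down, deactivate at lower */
                    return (cur == lower) ? (throttle ? cur : -1) : cur - 1;
            case DROP_FULL:                    /* jump to lower, or deactivate */
                    return (cur == lower) ? (throttle ? cur : -1) : lower;
            }
            return cur;
    }

Here throttle is true only while the zone temperature is at or above the trip temperature, which is exactly how thermal_zone_trip_update() below computes it.
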
@@ -66,57 +99,14 @@ static void update_passive_instance(struct thermal_zone_device *tz, | |||
66 | tz->passive += value; | 99 | tz->passive += value; |
67 | } | 100 | } |
68 | 101 | ||
69 | static void update_instance_for_throttle(struct thermal_zone_device *tz, | ||
70 | int trip, enum thermal_trip_type trip_type, | ||
71 | enum thermal_trend trend) | ||
72 | { | ||
73 | struct thermal_instance *instance; | ||
74 | |||
75 | list_for_each_entry(instance, &tz->thermal_instances, tz_node) { | ||
76 | if (instance->trip != trip) | ||
77 | continue; | ||
78 | |||
79 | instance->target = get_target_state(instance, trend); | ||
80 | |||
81 | /* Activate a passive thermal instance */ | ||
82 | if (instance->target == THERMAL_NO_TARGET) | ||
83 | update_passive_instance(tz, trip_type, 1); | ||
84 | |||
85 | instance->cdev->updated = false; /* cdev needs update */ | ||
86 | } | ||
87 | } | ||
88 | |||
89 | static void update_instance_for_dethrottle(struct thermal_zone_device *tz, | ||
90 | int trip, enum thermal_trip_type trip_type) | ||
91 | { | ||
92 | struct thermal_instance *instance; | ||
93 | struct thermal_cooling_device *cdev; | ||
94 | unsigned long cur_state; | ||
95 | |||
96 | list_for_each_entry(instance, &tz->thermal_instances, tz_node) { | ||
97 | if (instance->trip != trip || | ||
98 | instance->target == THERMAL_NO_TARGET) | ||
99 | continue; | ||
100 | |||
101 | cdev = instance->cdev; | ||
102 | cdev->ops->get_cur_state(cdev, &cur_state); | ||
103 | |||
104 | instance->target = cur_state > instance->lower ? | ||
105 | (cur_state - 1) : THERMAL_NO_TARGET; | ||
106 | |||
107 | /* Deactivate a passive thermal instance */ | ||
108 | if (instance->target == THERMAL_NO_TARGET) | ||
109 | update_passive_instance(tz, trip_type, -1); | ||
110 | |||
111 | cdev->updated = false; /* cdev needs update */ | ||
112 | } | ||
113 | } | ||
114 | |||
115 | static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) | 102 | static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) |
116 | { | 103 | { |
117 | long trip_temp; | 104 | long trip_temp; |
118 | enum thermal_trip_type trip_type; | 105 | enum thermal_trip_type trip_type; |
119 | enum thermal_trend trend; | 106 | enum thermal_trend trend; |
107 | struct thermal_instance *instance; | ||
108 | bool throttle = false; | ||
109 | int old_target; | ||
120 | 110 | ||
121 | if (trip == THERMAL_TRIPS_NONE) { | 111 | if (trip == THERMAL_TRIPS_NONE) { |
122 | trip_temp = tz->forced_passive; | 112 | trip_temp = tz->forced_passive; |
@@ -128,12 +118,30 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) | |||
128 | 118 | ||
129 | trend = get_tz_trend(tz, trip); | 119 | trend = get_tz_trend(tz, trip); |
130 | 120 | ||
121 | if (tz->temperature >= trip_temp) | ||
122 | throttle = true; | ||
123 | |||
131 | mutex_lock(&tz->lock); | 124 | mutex_lock(&tz->lock); |
132 | 125 | ||
133 | if (tz->temperature >= trip_temp) | 126 | list_for_each_entry(instance, &tz->thermal_instances, tz_node) { |
134 | update_instance_for_throttle(tz, trip, trip_type, trend); | 127 | if (instance->trip != trip) |
135 | else | 128 | continue; |
136 | update_instance_for_dethrottle(tz, trip, trip_type); | 129 | |
130 | old_target = instance->target; | ||
131 | instance->target = get_target_state(instance, trend, throttle); | ||
132 | |||
133 | /* Activate a passive thermal instance */ | ||
134 | if (old_target == THERMAL_NO_TARGET && | ||
135 | instance->target != THERMAL_NO_TARGET) | ||
136 | update_passive_instance(tz, trip_type, 1); | ||
137 | /* Deactivate a passive thermal instance */ | ||
138 | else if (old_target != THERMAL_NO_TARGET && | ||
139 | instance->target == THERMAL_NO_TARGET) | ||
140 | update_passive_instance(tz, trip_type, -1); | ||
141 | |||
142 | |||
143 | instance->cdev->updated = false; /* cdev needs update */ | ||
144 | } | ||
137 | 145 | ||
138 | mutex_unlock(&tz->lock); | 146 | mutex_unlock(&tz->lock); |
139 | } | 147 | } |
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index 84e95f32cdb6..5b7863a03f98 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c | |||
@@ -32,7 +32,6 @@ | |||
32 | #include <linux/kdev_t.h> | 32 | #include <linux/kdev_t.h> |
33 | #include <linux/idr.h> | 33 | #include <linux/idr.h> |
34 | #include <linux/thermal.h> | 34 | #include <linux/thermal.h> |
35 | #include <linux/spinlock.h> | ||
36 | #include <linux/reboot.h> | 35 | #include <linux/reboot.h> |
37 | #include <net/netlink.h> | 36 | #include <net/netlink.h> |
38 | #include <net/genetlink.h> | 37 | #include <net/genetlink.h> |
@@ -348,8 +347,9 @@ static void handle_critical_trips(struct thermal_zone_device *tz, | |||
348 | tz->ops->notify(tz, trip, trip_type); | 347 | tz->ops->notify(tz, trip, trip_type); |
349 | 348 | ||
350 | if (trip_type == THERMAL_TRIP_CRITICAL) { | 349 | if (trip_type == THERMAL_TRIP_CRITICAL) { |
351 | pr_emerg("Critical temperature reached(%d C),shutting down\n", | 350 | dev_emerg(&tz->device, |
352 | tz->temperature / 1000); | 351 | "critical temperature reached (%d C), shutting down\n",
352 | tz->temperature / 1000); | ||
353 | orderly_poweroff(true); | 353 | orderly_poweroff(true); |
354 | } | 354 | } |
355 | } | 355 | } |
@@ -371,23 +371,57 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip) | |||
371 | monitor_thermal_zone(tz); | 371 | monitor_thermal_zone(tz); |
372 | } | 372 | } |
373 | 373 | ||
374 | static int thermal_zone_get_temp(struct thermal_zone_device *tz, | ||
375 | unsigned long *temp) | ||
376 | { | ||
377 | int ret = 0; | ||
378 | #ifdef CONFIG_THERMAL_EMULATION | ||
379 | int count; | ||
380 | unsigned long crit_temp = -1UL; | ||
381 | enum thermal_trip_type type; | ||
382 | #endif | ||
383 | |||
384 | mutex_lock(&tz->lock); | ||
385 | |||
386 | ret = tz->ops->get_temp(tz, temp); | ||
387 | #ifdef CONFIG_THERMAL_EMULATION | ||
388 | if (!tz->emul_temperature) | ||
389 | goto skip_emul; | ||
390 | |||
391 | for (count = 0; count < tz->trips; count++) { | ||
392 | ret = tz->ops->get_trip_type(tz, count, &type); | ||
393 | if (!ret && type == THERMAL_TRIP_CRITICAL) { | ||
394 | ret = tz->ops->get_trip_temp(tz, count, &crit_temp); | ||
395 | break; | ||
396 | } | ||
397 | } | ||
398 | |||
399 | if (ret) | ||
400 | goto skip_emul; | ||
401 | |||
402 | if (*temp < crit_temp) | ||
403 | *temp = tz->emul_temperature; | ||
404 | skip_emul: | ||
405 | #endif | ||
406 | mutex_unlock(&tz->lock); | ||
407 | return ret; | ||
408 | } | ||
409 | |||
374 | static void update_temperature(struct thermal_zone_device *tz) | 410 | static void update_temperature(struct thermal_zone_device *tz) |
375 | { | 411 | { |
376 | long temp; | 412 | long temp; |
377 | int ret; | 413 | int ret; |
378 | 414 | ||
379 | mutex_lock(&tz->lock); | 415 | ret = thermal_zone_get_temp(tz, &temp); |
380 | |||
381 | ret = tz->ops->get_temp(tz, &temp); | ||
382 | if (ret) { | 416 | if (ret) { |
383 | pr_warn("failed to read out thermal zone %d\n", tz->id); | 417 | dev_warn(&tz->device, "failed to read out thermal zone %d\n", |
384 | goto exit; | 418 | tz->id); |
419 | return; | ||
385 | } | 420 | } |
386 | 421 | ||
422 | mutex_lock(&tz->lock); | ||
387 | tz->last_temperature = tz->temperature; | 423 | tz->last_temperature = tz->temperature; |
388 | tz->temperature = temp; | 424 | tz->temperature = temp; |
389 | |||
390 | exit: | ||
391 | mutex_unlock(&tz->lock); | 425 | mutex_unlock(&tz->lock); |
392 | } | 426 | } |
393 | 427 | ||
@@ -430,10 +464,7 @@ temp_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
430 | long temperature; | 464 | long temperature; |
431 | int ret; | 465 | int ret; |
432 | 466 | ||
433 | if (!tz->ops->get_temp) | 467 | ret = thermal_zone_get_temp(tz, &temperature); |
434 | return -EPERM; | ||
435 | |||
436 | ret = tz->ops->get_temp(tz, &temperature); | ||
437 | 468 | ||
438 | if (ret) | 469 | if (ret) |
439 | return ret; | 470 | return ret; |
@@ -693,6 +724,31 @@ policy_show(struct device *dev, struct device_attribute *devattr, char *buf) | |||
693 | return sprintf(buf, "%s\n", tz->governor->name); | 724 | return sprintf(buf, "%s\n", tz->governor->name); |
694 | } | 725 | } |
695 | 726 | ||
727 | #ifdef CONFIG_THERMAL_EMULATION | ||
728 | static ssize_t | ||
729 | emul_temp_store(struct device *dev, struct device_attribute *attr, | ||
730 | const char *buf, size_t count) | ||
731 | { | ||
732 | struct thermal_zone_device *tz = to_thermal_zone(dev); | ||
733 | int ret = 0; | ||
734 | unsigned long temperature; | ||
735 | |||
736 | if (kstrtoul(buf, 10, &temperature)) | ||
737 | return -EINVAL; | ||
738 | |||
739 | if (!tz->ops->set_emul_temp) { | ||
740 | mutex_lock(&tz->lock); | ||
741 | tz->emul_temperature = temperature; | ||
742 | mutex_unlock(&tz->lock); | ||
743 | } else { | ||
744 | ret = tz->ops->set_emul_temp(tz, temperature); | ||
745 | } | ||
746 | |||
747 | return ret ? ret : count; | ||
748 | } | ||
749 | static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store); | ||
750 | #endif/*CONFIG_THERMAL_EMULATION*/ | ||
751 | |||
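
With CONFIG_THERMAL_EMULATION enabled this creates a write-only emul_temp node; a small userspace sketch that injects a fake reading (the zone path is an example and depends on the platform; root privileges assumed):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/class/thermal/thermal_zone0/emul_temp", "w");

            if (!f)
                    return 1;
            fprintf(f, "60000\n");   /* pretend the sensor reads 60.000 degrees C */
            return fclose(f) ? 1 : 0;
    }

As thermal_zone_get_temp() above shows, the emulated value is only reported while the real temperature stays below the critical trip, so a genuine critical condition still takes precedence.
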
696 | static DEVICE_ATTR(type, 0444, type_show, NULL); | 752 | static DEVICE_ATTR(type, 0444, type_show, NULL); |
697 | static DEVICE_ATTR(temp, 0444, temp_show, NULL); | 753 | static DEVICE_ATTR(temp, 0444, temp_show, NULL); |
698 | static DEVICE_ATTR(mode, 0644, mode_show, mode_store); | 754 | static DEVICE_ATTR(mode, 0644, mode_show, mode_store); |
@@ -835,7 +891,7 @@ temp_input_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
835 | temp_input); | 891 | temp_input); |
836 | struct thermal_zone_device *tz = temp->tz; | 892 | struct thermal_zone_device *tz = temp->tz; |
837 | 893 | ||
838 | ret = tz->ops->get_temp(tz, &temperature); | 894 | ret = thermal_zone_get_temp(tz, &temperature); |
839 | 895 | ||
840 | if (ret) | 896 | if (ret) |
841 | return ret; | 897 | return ret; |
@@ -1522,6 +1578,9 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, | |||
1522 | if (!ops || !ops->get_temp) | 1578 | if (!ops || !ops->get_temp) |
1523 | return ERR_PTR(-EINVAL); | 1579 | return ERR_PTR(-EINVAL); |
1524 | 1580 | ||
1581 | if (trips > 0 && !ops->get_trip_type) | ||
1582 | return ERR_PTR(-EINVAL); | ||
1583 | |||
1525 | tz = kzalloc(sizeof(struct thermal_zone_device), GFP_KERNEL); | 1584 | tz = kzalloc(sizeof(struct thermal_zone_device), GFP_KERNEL); |
1526 | if (!tz) | 1585 | if (!tz) |
1527 | return ERR_PTR(-ENOMEM); | 1586 | return ERR_PTR(-ENOMEM); |
@@ -1585,6 +1644,11 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, | |||
1585 | goto unregister; | 1644 | goto unregister; |
1586 | } | 1645 | } |
1587 | 1646 | ||
1647 | #ifdef CONFIG_THERMAL_EMULATION | ||
1648 | result = device_create_file(&tz->device, &dev_attr_emul_temp); | ||
1649 | if (result) | ||
1650 | goto unregister; | ||
1651 | #endif | ||
1588 | /* Create policy attribute */ | 1652 | /* Create policy attribute */ |
1589 | result = device_create_file(&tz->device, &dev_attr_policy); | 1653 | result = device_create_file(&tz->device, &dev_attr_policy); |
1590 | if (result) | 1654 | if (result) |
@@ -1704,7 +1768,8 @@ static struct genl_multicast_group thermal_event_mcgrp = { | |||
1704 | .name = THERMAL_GENL_MCAST_GROUP_NAME, | 1768 | .name = THERMAL_GENL_MCAST_GROUP_NAME, |
1705 | }; | 1769 | }; |
1706 | 1770 | ||
1707 | int thermal_generate_netlink_event(u32 orig, enum events event) | 1771 | int thermal_generate_netlink_event(struct thermal_zone_device *tz, |
1772 | enum events event) | ||
1708 | { | 1773 | { |
1709 | struct sk_buff *skb; | 1774 | struct sk_buff *skb; |
1710 | struct nlattr *attr; | 1775 | struct nlattr *attr; |
@@ -1714,6 +1779,9 @@ int thermal_generate_netlink_event(u32 orig, enum events event) | |||
1714 | int result; | 1779 | int result; |
1715 | static unsigned int thermal_event_seqnum; | 1780 | static unsigned int thermal_event_seqnum; |
1716 | 1781 | ||
1782 | if (!tz) | ||
1783 | return -EINVAL; | ||
1784 | |||
1717 | /* allocate memory */ | 1785 | /* allocate memory */ |
1718 | size = nla_total_size(sizeof(struct thermal_genl_event)) + | 1786 | size = nla_total_size(sizeof(struct thermal_genl_event)) + |
1719 | nla_total_size(0); | 1787 | nla_total_size(0); |
@@ -1748,7 +1816,7 @@ int thermal_generate_netlink_event(u32 orig, enum events event) | |||
1748 | 1816 | ||
1749 | memset(thermal_event, 0, sizeof(struct thermal_genl_event)); | 1817 | memset(thermal_event, 0, sizeof(struct thermal_genl_event)); |
1750 | 1818 | ||
1751 | thermal_event->orig = orig; | 1819 | thermal_event->orig = tz->id; |
1752 | thermal_event->event = event; | 1820 | thermal_event->event = event; |
1753 | 1821 | ||
1754 | /* send multicast genetlink message */ | 1822 | /* send multicast genetlink message */ |
@@ -1760,7 +1828,7 @@ int thermal_generate_netlink_event(u32 orig, enum events event) | |||
1760 | 1828 | ||
1761 | result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC); | 1829 | result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC); |
1762 | if (result) | 1830 | if (result) |
1763 | pr_info("failed to send netlink event:%d\n", result); | 1831 | dev_err(&tz->device, "Failed to send netlink event:%d", result); |
1764 | 1832 | ||
1765 | return result; | 1833 | return result; |
1766 | } | 1834 | } |
@@ -1800,6 +1868,7 @@ static int __init thermal_init(void) | |||
1800 | idr_destroy(&thermal_cdev_idr); | 1868 | idr_destroy(&thermal_cdev_idr); |
1801 | mutex_destroy(&thermal_idr_lock); | 1869 | mutex_destroy(&thermal_idr_lock); |
1802 | mutex_destroy(&thermal_list_lock); | 1870 | mutex_destroy(&thermal_list_lock); |
1871 | return result; | ||
1803 | } | 1872 | } |
1804 | result = genetlink_init(); | 1873 | result = genetlink_init(); |
1805 | return result; | 1874 | return result; |
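
With the reworked signature a caller passes the zone itself rather than a raw id; a hedged call-site sketch (THERMAL_CRITICAL is used here as an example value from enum events):

    /* Sketch: notify userspace listeners about an event on this zone. */
    static void example_notify_userspace(struct thermal_zone_device *tz)
    {
            int ret = thermal_generate_netlink_event(tz, THERMAL_CRITICAL);

            if (ret)
                    dev_warn(&tz->device, "netlink notification failed: %d\n", ret);
    }
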
diff --git a/include/linux/platform_data/exynos_thermal.h b/include/linux/platform_data/exynos_thermal.h index a7bdb2f63b73..da7e6274b175 100644 --- a/include/linux/platform_data/exynos_thermal.h +++ b/include/linux/platform_data/exynos_thermal.h | |||
@@ -53,6 +53,8 @@ struct freq_clip_table { | |||
53 | * struct exynos_tmu_platform_data | 53 | * struct exynos_tmu_platform_data |
54 | * @threshold: basic temperature for generating interrupt | 54 | * @threshold: basic temperature for generating interrupt |
55 | * 25 <= threshold <= 125 [unit: degree Celsius] | 55 | * 25 <= threshold <= 125 [unit: degree Celsius] |
56 | * @threshold_falling: differential value for setting the threshold | ||
57 | * of temperature falling interrupt. | ||
56 | * @trigger_levels: array for each interrupt levels | 58 | * @trigger_levels: array for each interrupt levels |
57 | * [unit: degree Celsius] | 59 | * [unit: degree Celsius] |
58 | * 0: temperature for trigger_level0 interrupt | 60 | * 0: temperature for trigger_level0 interrupt |
@@ -97,6 +99,7 @@ struct freq_clip_table { | |||
97 | */ | 99 | */ |
98 | struct exynos_tmu_platform_data { | 100 | struct exynos_tmu_platform_data { |
99 | u8 threshold; | 101 | u8 threshold; |
102 | u8 threshold_falling; | ||
100 | u8 trigger_levels[4]; | 103 | u8 trigger_levels[4]; |
101 | bool trigger_level0_en; | 104 | bool trigger_level0_en; |
102 | bool trigger_level1_en; | 105 | bool trigger_level1_en; |
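
The new threshold_falling field supplies the hysteresis below a trigger level at which the falling interrupt fires; a hedged board-file sketch with purely illustrative values:

    static struct exynos_tmu_platform_data example_tmu_pdata = {
            .threshold         = 80,                 /* base temperature, degrees C */
            .threshold_falling = 10,                 /* falling IRQ 10 C below a trigger */
            .trigger_levels    = { 5, 10, 20, 30 },  /* per-level trigger temperatures */
            .trigger_level0_en = true,
            .trigger_level1_en = true,
    };
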
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index fe82022478e7..f0bd7f90a90d 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
@@ -74,6 +74,8 @@ enum thermal_trend { | |||
74 | THERMAL_TREND_STABLE, /* temperature is stable */ | 74 | THERMAL_TREND_STABLE, /* temperature is stable */ |
75 | THERMAL_TREND_RAISING, /* temperature is raising */ | 75 | THERMAL_TREND_RAISING, /* temperature is raising */ |
76 | THERMAL_TREND_DROPPING, /* temperature is dropping */ | 76 | THERMAL_TREND_DROPPING, /* temperature is dropping */ |
77 | THERMAL_TREND_RAISE_FULL, /* apply highest cooling action */ | ||
78 | THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */ | ||
77 | }; | 79 | }; |
78 | 80 | ||
79 | /* Events supported by Thermal Netlink */ | 81 | /* Events supported by Thermal Netlink */ |
@@ -121,6 +123,7 @@ struct thermal_zone_device_ops { | |||
121 | int (*set_trip_hyst) (struct thermal_zone_device *, int, | 123 | int (*set_trip_hyst) (struct thermal_zone_device *, int, |
122 | unsigned long); | 124 | unsigned long); |
123 | int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *); | 125 | int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *); |
126 | int (*set_emul_temp) (struct thermal_zone_device *, unsigned long); | ||
124 | int (*get_trend) (struct thermal_zone_device *, int, | 127 | int (*get_trend) (struct thermal_zone_device *, int, |
125 | enum thermal_trend *); | 128 | enum thermal_trend *); |
126 | int (*notify) (struct thermal_zone_device *, int, | 129 | int (*notify) (struct thermal_zone_device *, int, |
@@ -163,6 +166,7 @@ struct thermal_zone_device { | |||
163 | int polling_delay; | 166 | int polling_delay; |
164 | int temperature; | 167 | int temperature; |
165 | int last_temperature; | 168 | int last_temperature; |
169 | int emul_temperature; | ||
166 | int passive; | 170 | int passive; |
167 | unsigned int forced_passive; | 171 | unsigned int forced_passive; |
168 | const struct thermal_zone_device_ops *ops; | 172 | const struct thermal_zone_device_ops *ops; |
@@ -244,9 +248,11 @@ int thermal_register_governor(struct thermal_governor *); | |||
244 | void thermal_unregister_governor(struct thermal_governor *); | 248 | void thermal_unregister_governor(struct thermal_governor *); |
245 | 249 | ||
246 | #ifdef CONFIG_NET | 250 | #ifdef CONFIG_NET |
247 | extern int thermal_generate_netlink_event(u32 orig, enum events event); | 251 | extern int thermal_generate_netlink_event(struct thermal_zone_device *tz, |
252 | enum events event); | ||
248 | #else | 253 | #else |
249 | static inline int thermal_generate_netlink_event(u32 orig, enum events event) | 254 | static inline int thermal_generate_netlink_event(struct thermal_zone_device *tz,
255 | enum events event) | ||
250 | { | 256 | { |
251 | return 0; | 257 | return 0; |
252 | } | 258 | } |
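
A platform driver that wants to handle emulation itself (for instance by programming a hardware emulation register) can now supply the new callback; a minimal hedged sketch, with the driver-private type and field invented for illustration:

    /* Sketch: remember the emulated value so the driver's get_temp() reports it. */
    static int example_set_emul_temp(struct thermal_zone_device *tz,
                                     unsigned long temp)
    {
            struct example_sensor *sensor = tz->devdata;  /* hypothetical driver state */

            sensor->emul_temp = temp;                     /* millidegrees Celsius */
            return 0;
    }

It is wired up through .set_emul_temp in the driver's thermal_zone_device_ops; when the callback is absent, emul_temp_store() above simply stores the value in tz->emul_temperature and the core applies it in thermal_zone_get_temp().
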
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 314b9ee07edf..a19a39952c1b 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -554,6 +554,7 @@ void tick_nohz_idle_enter(void) | |||
554 | 554 | ||
555 | local_irq_enable(); | 555 | local_irq_enable(); |
556 | } | 556 | } |
557 | EXPORT_SYMBOL_GPL(tick_nohz_idle_enter); | ||
557 | 558 | ||
558 | /** | 559 | /** |
559 | * tick_nohz_irq_exit - update next tick event from interrupt exit | 560 | * tick_nohz_irq_exit - update next tick event from interrupt exit |
@@ -685,6 +686,7 @@ void tick_nohz_idle_exit(void) | |||
685 | 686 | ||
686 | local_irq_enable(); | 687 | local_irq_enable(); |
687 | } | 688 | } |
689 | EXPORT_SYMBOL_GPL(tick_nohz_idle_exit); | ||
688 | 690 | ||
689 | static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now) | 691 | static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now) |
690 | { | 692 | { |