aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/thermal
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-06-25 20:51:55 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-06-25 20:51:55 -0400
commit0db9723cacf4d62bc3685fb15179b39ee4e17679 (patch)
tree7de16280234a3d98d8f7dd95e623ec381fd5af36 /drivers/thermal
parent4570a37169d4b44d316f40b2ccc681dc93fedc7b (diff)
parent111b23cf895b5cbcdc1b2c6580be1bb78a577d05 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux
Pull thermal management updates from Zhang Rui: "Specifics: - enhance Thermal Framework with several new capabilities: * use power estimates * compute weights with relative integers instead of percentages * allow governors to have private data in thermal zones * export thermal zone parameters through sysfs Thanks to the ARM thermal team (Javi, Punit, KP). - introduce a new thermal governor: power allocator. First in kernel closed loop PI(D) controller for thermal control. Thanks to ARM thermal team. - enhance OF thermal to allow thermal zones to have sustainable power HW specification. Thanks to Punit. - introduce thermal driver for Intel Quark SoC x1000platform. Thanks to Ong, Boon Leong. - introduce QPNP PMIC temperature alarm driver. Thanks to Ivan T. I. - introduce thermal driver for Hisilicon hi6220. Thanks to kongxinwei. - enhance Exynos thermal driver to handle Exynos5433 TMU. Thanks to Chanwoo C. - TI thermal driver now has a better implementation for EOCZ bit. From Pavel M. - add id for Skylake processors in int340x processor thermal driver. - a couple of small fixes and cleanups." 
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux: (36 commits) thermal: hisilicon: add new hisilicon thermal sensor driver dt-bindings: Document the hi6220 thermal sensor bindings thermal: of-thermal: add support for reading coefficients property thermal: support slope and offset coefficients thermal: power_allocator: round the division when divvying up power thermal: exynos: Add the support for Exynos5433 TMU thermal: cpu_cooling: Fix power calculation when CPUs are offline thermal: cpu_cooling: Remove cpu_dev update on policy CPU update thermal: export thermal_zone_parameters to sysfs thermal: cpu_cooling: Check memory allocation of power_table ti-soc-thermal: request temperature periodically if hw can't do that itself ti-soc-thermal: implement eocz bit to make driver useful on omap3 cleanup ti-soc-thermal thermal: remove stale THERMAL_POWER_ACTOR select thermal: Default OF created trip points to writable thermal: core: Add Kconfig option to enable writable trips thermal: x86_pkg_temp: drop const for thermal_zone_parameters of: thermal: Introduce sustainable power for a thermal zone thermal: add trace events to the power allocator governor thermal: introduce the Power Allocator governor ...
Diffstat (limited to 'drivers/thermal')
-rw-r--r--drivers/thermal/Kconfig68
-rw-r--r--drivers/thermal/Makefile5
-rw-r--r--drivers/thermal/cpu_cooling.c585
-rw-r--r--drivers/thermal/db8500_thermal.c2
-rw-r--r--drivers/thermal/fair_share.c41
-rw-r--r--drivers/thermal/hisi_thermal.c421
-rw-r--r--drivers/thermal/imx_thermal.c3
-rw-r--r--drivers/thermal/int340x_thermal/processor_thermal_device.c59
-rw-r--r--drivers/thermal/intel_powerclamp.c1
-rw-r--r--drivers/thermal/intel_quark_dts_thermal.c473
-rw-r--r--drivers/thermal/intel_soc_dts_iosf.c478
-rw-r--r--drivers/thermal/intel_soc_dts_iosf.h62
-rw-r--r--drivers/thermal/intel_soc_dts_thermal.c430
-rw-r--r--drivers/thermal/of-thermal.c41
-rw-r--r--drivers/thermal/power_allocator.c539
-rw-r--r--drivers/thermal/qcom-spmi-temp-alarm.c309
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c187
-rw-r--r--drivers/thermal/samsung/exynos_tmu.h1
-rw-r--r--drivers/thermal/thermal_core.c314
-rw-r--r--drivers/thermal/thermal_core.h11
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c104
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c5
-rw-r--r--drivers/thermal/x86_pkg_temp_thermal.c2
23 files changed, 3612 insertions, 529 deletions
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index af40db0df58e..118938ee8552 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -42,6 +42,17 @@ config THERMAL_OF
42 Say 'Y' here if you need to build thermal infrastructure 42 Say 'Y' here if you need to build thermal infrastructure
43 based on device tree. 43 based on device tree.
44 44
45config THERMAL_WRITABLE_TRIPS
46 bool "Enable writable trip points"
47 help
48 This option allows the system integrator to choose whether
49 trip temperatures can be changed from userspace. The
50 writable trips need to be specified when setting up the
51 thermal zone but the choice here takes precedence.
52
53 Say 'Y' here if you would like to allow userspace tools to
54 change trip temperatures.
55
45choice 56choice
46 prompt "Default Thermal governor" 57 prompt "Default Thermal governor"
47 default THERMAL_DEFAULT_GOV_STEP_WISE 58 default THERMAL_DEFAULT_GOV_STEP_WISE
@@ -71,6 +82,14 @@ config THERMAL_DEFAULT_GOV_USER_SPACE
71 Select this if you want to let the user space manage the 82 Select this if you want to let the user space manage the
72 platform thermals. 83 platform thermals.
73 84
85config THERMAL_DEFAULT_GOV_POWER_ALLOCATOR
86 bool "power_allocator"
87 select THERMAL_GOV_POWER_ALLOCATOR
88 help
89 Select this if you want to control temperature based on
90 system and device power allocation. This governor can only
91 operate on cooling devices that implement the power API.
92
74endchoice 93endchoice
75 94
76config THERMAL_GOV_FAIR_SHARE 95config THERMAL_GOV_FAIR_SHARE
@@ -99,6 +118,12 @@ config THERMAL_GOV_USER_SPACE
99 help 118 help
100 Enable this to let the user space manage the platform thermals. 119 Enable this to let the user space manage the platform thermals.
101 120
121config THERMAL_GOV_POWER_ALLOCATOR
122 bool "Power allocator thermal governor"
123 help
124 Enable this to manage platform thermals by dynamically
125 allocating and limiting power to devices.
126
102config CPU_THERMAL 127config CPU_THERMAL
103 bool "generic cpu cooling support" 128 bool "generic cpu cooling support"
104 depends on CPU_FREQ 129 depends on CPU_FREQ
@@ -136,6 +161,14 @@ config THERMAL_EMULATION
136 because userland can easily disable the thermal policy by simply 161 because userland can easily disable the thermal policy by simply
137 flooding this sysfs node with low temperature values. 162 flooding this sysfs node with low temperature values.
138 163
164config HISI_THERMAL
165 tristate "Hisilicon thermal driver"
166 depends on ARCH_HISI && CPU_THERMAL && OF
167 help
168 Enable this to plug hisilicon's thermal sensor driver into the Linux
169 thermal framework. cpufreq is used as the cooling device to throttle
170 CPUs when the passive trip is crossed.
171
139config IMX_THERMAL 172config IMX_THERMAL
140 tristate "Temperature sensor driver for Freescale i.MX SoCs" 173 tristate "Temperature sensor driver for Freescale i.MX SoCs"
141 depends on CPU_THERMAL 174 depends on CPU_THERMAL
@@ -249,9 +282,20 @@ config X86_PKG_TEMP_THERMAL
249 two trip points which can be set by user to get notifications via thermal 282 two trip points which can be set by user to get notifications via thermal
250 notification methods. 283 notification methods.
251 284
285config INTEL_SOC_DTS_IOSF_CORE
286 tristate
287 depends on X86
288 select IOSF_MBI
289 help
290 This is becoming a common feature for Intel SoCs to expose the additional
291 digital temperature sensors (DTSs) using side band interface (IOSF). This
292 implements the common set of helper functions to register, get temperature
293 and get/set thresholds on DTSs.
294
252config INTEL_SOC_DTS_THERMAL 295config INTEL_SOC_DTS_THERMAL
253 tristate "Intel SoCs DTS thermal driver" 296 tristate "Intel SoCs DTS thermal driver"
254 depends on X86 && IOSF_MBI 297 depends on X86
298 select INTEL_SOC_DTS_IOSF_CORE
255 help 299 help
256 Enable this to register Intel SoCs (e.g. Bay Trail) platform digital 300 Enable this to register Intel SoCs (e.g. Bay Trail) platform digital
257 temperature sensor (DTS). These SoCs have two additional DTSs in 301 temperature sensor (DTS). These SoCs have two additional DTSs in
@@ -261,12 +305,23 @@ config INTEL_SOC_DTS_THERMAL
261 notification methods.The other trip is a critical trip point, which 305 notification methods.The other trip is a critical trip point, which
262 was set by the driver based on the TJ MAX temperature. 306 was set by the driver based on the TJ MAX temperature.
263 307
308config INTEL_QUARK_DTS_THERMAL
309 tristate "Intel Quark DTS thermal driver"
310 depends on X86_INTEL_QUARK
311 help
312 Enable this to register Intel Quark SoC (e.g. X1000) platform digital
313 temperature sensor (DTS). For X1000 SoC, it has one on-die DTS.
314 The DTS will be registered as a thermal zone. There are two trip points:
315 hot & critical. The critical trip point default value is set by
316 underlying BIOS/Firmware.
317
264config INT340X_THERMAL 318config INT340X_THERMAL
265 tristate "ACPI INT340X thermal drivers" 319 tristate "ACPI INT340X thermal drivers"
266 depends on X86 && ACPI 320 depends on X86 && ACPI
267 select THERMAL_GOV_USER_SPACE 321 select THERMAL_GOV_USER_SPACE
268 select ACPI_THERMAL_REL 322 select ACPI_THERMAL_REL
269 select ACPI_FAN 323 select ACPI_FAN
324 select INTEL_SOC_DTS_IOSF_CORE
270 help 325 help
271 Newer laptops and tablets that use ACPI may have thermal sensors and 326 Newer laptops and tablets that use ACPI may have thermal sensors and
272 other devices with thermal control capabilities outside the core 327 other devices with thermal control capabilities outside the core
@@ -299,4 +354,15 @@ depends on ARCH_STI && OF
299source "drivers/thermal/st/Kconfig" 354source "drivers/thermal/st/Kconfig"
300endmenu 355endmenu
301 356
357config QCOM_SPMI_TEMP_ALARM
358 tristate "Qualcomm SPMI PMIC Temperature Alarm"
359 depends on OF && SPMI && IIO
360 select REGMAP_SPMI
361 help
362 This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP)
363 PMIC devices. It shows up in sysfs as a thermal sensor with multiple
364 trip points. The temperature reported by the thermal sensor reflects the
365 real time die temperature if an ADC is present or an estimate of the
366 temperature based upon the over temperature stage value.
367
302endif 368endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index fa0dc486790f..535dfee1496f 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -14,6 +14,7 @@ thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o
14thermal_sys-$(CONFIG_THERMAL_GOV_BANG_BANG) += gov_bang_bang.o 14thermal_sys-$(CONFIG_THERMAL_GOV_BANG_BANG) += gov_bang_bang.o
15thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o 15thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o
16thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o 16thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o
17thermal_sys-$(CONFIG_THERMAL_GOV_POWER_ALLOCATOR) += power_allocator.o
17 18
18# cpufreq cooling 19# cpufreq cooling
19thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o 20thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
@@ -22,6 +23,7 @@ thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
22thermal_sys-$(CONFIG_CLOCK_THERMAL) += clock_cooling.o 23thermal_sys-$(CONFIG_CLOCK_THERMAL) += clock_cooling.o
23 24
24# platform thermal drivers 25# platform thermal drivers
26obj-$(CONFIG_QCOM_SPMI_TEMP_ALARM) += qcom-spmi-temp-alarm.o
25obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o 27obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
26obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o 28obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o
27obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o 29obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o
@@ -34,8 +36,11 @@ obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o
34obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o 36obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o
35obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o 37obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
36obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o 38obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o
39obj-$(CONFIG_INTEL_SOC_DTS_IOSF_CORE) += intel_soc_dts_iosf.o
37obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o 40obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o
41obj-$(CONFIG_INTEL_QUARK_DTS_THERMAL) += intel_quark_dts_thermal.o
38obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/ 42obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
39obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/ 43obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
40obj-$(CONFIG_ST_THERMAL) += st/ 44obj-$(CONFIG_ST_THERMAL) += st/
41obj-$(CONFIG_TEGRA_SOCTHERM) += tegra_soctherm.o 45obj-$(CONFIG_TEGRA_SOCTHERM) += tegra_soctherm.o
46obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index f65f0d109fc8..6509c61b9648 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -26,10 +26,13 @@
26#include <linux/thermal.h> 26#include <linux/thermal.h>
27#include <linux/cpufreq.h> 27#include <linux/cpufreq.h>
28#include <linux/err.h> 28#include <linux/err.h>
29#include <linux/pm_opp.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
30#include <linux/cpu.h> 31#include <linux/cpu.h>
31#include <linux/cpu_cooling.h> 32#include <linux/cpu_cooling.h>
32 33
34#include <trace/events/thermal.h>
35
33/* 36/*
34 * Cooling state <-> CPUFreq frequency 37 * Cooling state <-> CPUFreq frequency
35 * 38 *
@@ -45,6 +48,19 @@
45 */ 48 */
46 49
47/** 50/**
51 * struct power_table - frequency to power conversion
52 * @frequency: frequency in KHz
53 * @power: power in mW
54 *
55 * This structure is built when the cooling device registers and helps
56 * in translating frequency to power and viceversa.
57 */
58struct power_table {
59 u32 frequency;
60 u32 power;
61};
62
63/**
48 * struct cpufreq_cooling_device - data for cooling device with cpufreq 64 * struct cpufreq_cooling_device - data for cooling device with cpufreq
49 * @id: unique integer value corresponding to each cpufreq_cooling_device 65 * @id: unique integer value corresponding to each cpufreq_cooling_device
50 * registered. 66 * registered.
@@ -58,6 +74,15 @@
58 * cpufreq frequencies. 74 * cpufreq frequencies.
59 * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device. 75 * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
60 * @node: list_head to link all cpufreq_cooling_device together. 76 * @node: list_head to link all cpufreq_cooling_device together.
77 * @last_load: load measured by the latest call to cpufreq_get_actual_power()
78 * @time_in_idle: previous reading of the absolute time that this cpu was idle
79 * @time_in_idle_timestamp: wall time of the last invocation of
80 * get_cpu_idle_time_us()
81 * @dyn_power_table: array of struct power_table for frequency to power
82 * conversion, sorted in ascending order.
83 * @dyn_power_table_entries: number of entries in the @dyn_power_table array
84 * @cpu_dev: the first cpu_device from @allowed_cpus that has OPPs registered
85 * @plat_get_static_power: callback to calculate the static power
61 * 86 *
62 * This structure is required for keeping information of each registered 87 * This structure is required for keeping information of each registered
63 * cpufreq_cooling_device. 88 * cpufreq_cooling_device.
@@ -71,6 +96,13 @@ struct cpufreq_cooling_device {
71 unsigned int *freq_table; /* In descending order */ 96 unsigned int *freq_table; /* In descending order */
72 struct cpumask allowed_cpus; 97 struct cpumask allowed_cpus;
73 struct list_head node; 98 struct list_head node;
99 u32 last_load;
100 u64 *time_in_idle;
101 u64 *time_in_idle_timestamp;
102 struct power_table *dyn_power_table;
103 int dyn_power_table_entries;
104 struct device *cpu_dev;
105 get_static_t plat_get_static_power;
74}; 106};
75static DEFINE_IDR(cpufreq_idr); 107static DEFINE_IDR(cpufreq_idr);
76static DEFINE_MUTEX(cooling_cpufreq_lock); 108static DEFINE_MUTEX(cooling_cpufreq_lock);
@@ -186,23 +218,237 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
186 unsigned long max_freq = 0; 218 unsigned long max_freq = 0;
187 struct cpufreq_cooling_device *cpufreq_dev; 219 struct cpufreq_cooling_device *cpufreq_dev;
188 220
189 if (event != CPUFREQ_ADJUST) 221 switch (event) {
190 return 0;
191 222
192 mutex_lock(&cooling_cpufreq_lock); 223 case CPUFREQ_ADJUST:
193 list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { 224 mutex_lock(&cooling_cpufreq_lock);
194 if (!cpumask_test_cpu(policy->cpu, 225 list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
195 &cpufreq_dev->allowed_cpus)) 226 if (!cpumask_test_cpu(policy->cpu,
227 &cpufreq_dev->allowed_cpus))
228 continue;
229
230 max_freq = cpufreq_dev->cpufreq_val;
231
232 if (policy->max != max_freq)
233 cpufreq_verify_within_limits(policy, 0,
234 max_freq);
235 }
236 mutex_unlock(&cooling_cpufreq_lock);
237 break;
238 default:
239 return NOTIFY_DONE;
240 }
241
242 return NOTIFY_OK;
243}
244
245/**
246 * build_dyn_power_table() - create a dynamic power to frequency table
247 * @cpufreq_device: the cpufreq cooling device in which to store the table
248 * @capacitance: dynamic power coefficient for these cpus
249 *
250 * Build a dynamic power to frequency table for this cpu and store it
251 * in @cpufreq_device. This table will be used in cpu_power_to_freq() and
252 * cpu_freq_to_power() to convert between power and frequency
253 * efficiently. Power is stored in mW, frequency in KHz. The
254 * resulting table is in ascending order.
255 *
256 * Return: 0 on success, -E* on error.
257 */
258static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
259 u32 capacitance)
260{
261 struct power_table *power_table;
262 struct dev_pm_opp *opp;
263 struct device *dev = NULL;
264 int num_opps = 0, cpu, i, ret = 0;
265 unsigned long freq;
266
267 rcu_read_lock();
268
269 for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
270 dev = get_cpu_device(cpu);
271 if (!dev) {
272 dev_warn(&cpufreq_device->cool_dev->device,
273 "No cpu device for cpu %d\n", cpu);
196 continue; 274 continue;
275 }
276
277 num_opps = dev_pm_opp_get_opp_count(dev);
278 if (num_opps > 0) {
279 break;
280 } else if (num_opps < 0) {
281 ret = num_opps;
282 goto unlock;
283 }
284 }
197 285
198 max_freq = cpufreq_dev->cpufreq_val; 286 if (num_opps == 0) {
287 ret = -EINVAL;
288 goto unlock;
289 }
199 290
200 if (policy->max != max_freq) 291 power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL);
201 cpufreq_verify_within_limits(policy, 0, max_freq); 292 if (!power_table) {
293 ret = -ENOMEM;
294 goto unlock;
202 } 295 }
203 mutex_unlock(&cooling_cpufreq_lock);
204 296
205 return 0; 297 for (freq = 0, i = 0;
298 opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp);
299 freq++, i++) {
300 u32 freq_mhz, voltage_mv;
301 u64 power;
302
303 freq_mhz = freq / 1000000;
304 voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;
305
306 /*
307 * Do the multiplication with MHz and millivolt so as
308 * to not overflow.
309 */
310 power = (u64)capacitance * freq_mhz * voltage_mv * voltage_mv;
311 do_div(power, 1000000000);
312
313 /* frequency is stored in power_table in KHz */
314 power_table[i].frequency = freq / 1000;
315
316 /* power is stored in mW */
317 power_table[i].power = power;
318 }
319
320 if (i == 0) {
321 ret = PTR_ERR(opp);
322 goto unlock;
323 }
324
325 cpufreq_device->cpu_dev = dev;
326 cpufreq_device->dyn_power_table = power_table;
327 cpufreq_device->dyn_power_table_entries = i;
328
329unlock:
330 rcu_read_unlock();
331 return ret;
332}
333
334static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_device,
335 u32 freq)
336{
337 int i;
338 struct power_table *pt = cpufreq_device->dyn_power_table;
339
340 for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++)
341 if (freq < pt[i].frequency)
342 break;
343
344 return pt[i - 1].power;
345}
346
347static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device,
348 u32 power)
349{
350 int i;
351 struct power_table *pt = cpufreq_device->dyn_power_table;
352
353 for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++)
354 if (power < pt[i].power)
355 break;
356
357 return pt[i - 1].frequency;
358}
359
360/**
361 * get_load() - get load for a cpu since last updated
362 * @cpufreq_device: &struct cpufreq_cooling_device for this cpu
363 * @cpu: cpu number
364 *
365 * Return: The average load of cpu @cpu in percentage since this
366 * function was last called.
367 */
368static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu)
369{
370 u32 load;
371 u64 now, now_idle, delta_time, delta_idle;
372
373 now_idle = get_cpu_idle_time(cpu, &now, 0);
374 delta_idle = now_idle - cpufreq_device->time_in_idle[cpu];
375 delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu];
376
377 if (delta_time <= delta_idle)
378 load = 0;
379 else
380 load = div64_u64(100 * (delta_time - delta_idle), delta_time);
381
382 cpufreq_device->time_in_idle[cpu] = now_idle;
383 cpufreq_device->time_in_idle_timestamp[cpu] = now;
384
385 return load;
386}
387
388/**
389 * get_static_power() - calculate the static power consumed by the cpus
390 * @cpufreq_device: struct &cpufreq_cooling_device for this cpu cdev
391 * @tz: thermal zone device in which we're operating
392 * @freq: frequency in KHz
393 * @power: pointer in which to store the calculated static power
394 *
395 * Calculate the static power consumed by the cpus described by
396 * @cpu_actor running at frequency @freq. This function relies on a
397 * platform specific function that should have been provided when the
398 * actor was registered. If it wasn't, the static power is assumed to
399 * be negligible. The calculated static power is stored in @power.
400 *
401 * Return: 0 on success, -E* on failure.
402 */
403static int get_static_power(struct cpufreq_cooling_device *cpufreq_device,
404 struct thermal_zone_device *tz, unsigned long freq,
405 u32 *power)
406{
407 struct dev_pm_opp *opp;
408 unsigned long voltage;
409 struct cpumask *cpumask = &cpufreq_device->allowed_cpus;
410 unsigned long freq_hz = freq * 1000;
411
412 if (!cpufreq_device->plat_get_static_power ||
413 !cpufreq_device->cpu_dev) {
414 *power = 0;
415 return 0;
416 }
417
418 rcu_read_lock();
419
420 opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz,
421 true);
422 voltage = dev_pm_opp_get_voltage(opp);
423
424 rcu_read_unlock();
425
426 if (voltage == 0) {
427 dev_warn_ratelimited(cpufreq_device->cpu_dev,
428 "Failed to get voltage for frequency %lu: %ld\n",
429 freq_hz, IS_ERR(opp) ? PTR_ERR(opp) : 0);
430 return -EINVAL;
431 }
432
433 return cpufreq_device->plat_get_static_power(cpumask, tz->passive_delay,
434 voltage, power);
435}
436
437/**
438 * get_dynamic_power() - calculate the dynamic power
439 * @cpufreq_device: &cpufreq_cooling_device for this cdev
440 * @freq: current frequency
441 *
442 * Return: the dynamic power consumed by the cpus described by
443 * @cpufreq_device.
444 */
445static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_device,
446 unsigned long freq)
447{
448 u32 raw_cpu_power;
449
450 raw_cpu_power = cpu_freq_to_power(cpufreq_device, freq);
451 return (raw_cpu_power * cpufreq_device->last_load) / 100;
206} 452}
207 453
208/* cpufreq cooling device callback functions are defined below */ 454/* cpufreq cooling device callback functions are defined below */
@@ -280,8 +526,205 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
280 return 0; 526 return 0;
281} 527}
282 528
529/**
530 * cpufreq_get_requested_power() - get the current power
531 * @cdev: &thermal_cooling_device pointer
532 * @tz: a valid thermal zone device pointer
533 * @power: pointer in which to store the resulting power
534 *
535 * Calculate the current power consumption of the cpus in milliwatts
536 * and store it in @power. This function should actually calculate
537 * the requested power, but it's hard to get the frequency that
538 * cpufreq would have assigned if there were no thermal limits.
539 * Instead, we calculate the current power on the assumption that the
540 * immediate future will look like the immediate past.
541 *
542 * We use the current frequency and the average load since this
543 * function was last called. In reality, there could have been
544 * multiple opps since this function was last called and that affects
545 * the load calculation. While it's not perfectly accurate, this
546 * simplification is good enough and works. REVISIT this, as more
547 * complex code may be needed if experiments show that it's not
548 * accurate enough.
549 *
550 * Return: 0 on success, -E* if getting the static power failed.
551 */
552static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
553 struct thermal_zone_device *tz,
554 u32 *power)
555{
556 unsigned long freq;
557 int i = 0, cpu, ret;
558 u32 static_power, dynamic_power, total_load = 0;
559 struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
560 u32 *load_cpu = NULL;
561
562 cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask);
563
564 /*
565 * All the CPUs are offline, thus the requested power by
566 * the cdev is 0
567 */
568 if (cpu >= nr_cpu_ids) {
569 *power = 0;
570 return 0;
571 }
572
573 freq = cpufreq_quick_get(cpu);
574
575 if (trace_thermal_power_cpu_get_power_enabled()) {
576 u32 ncpus = cpumask_weight(&cpufreq_device->allowed_cpus);
577
578 load_cpu = devm_kcalloc(&cdev->device, ncpus, sizeof(*load_cpu),
579 GFP_KERNEL);
580 }
581
582 for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
583 u32 load;
584
585 if (cpu_online(cpu))
586 load = get_load(cpufreq_device, cpu);
587 else
588 load = 0;
589
590 total_load += load;
591 if (trace_thermal_power_cpu_limit_enabled() && load_cpu)
592 load_cpu[i] = load;
593
594 i++;
595 }
596
597 cpufreq_device->last_load = total_load;
598
599 dynamic_power = get_dynamic_power(cpufreq_device, freq);
600 ret = get_static_power(cpufreq_device, tz, freq, &static_power);
601 if (ret) {
602 if (load_cpu)
603 devm_kfree(&cdev->device, load_cpu);
604 return ret;
605 }
606
607 if (load_cpu) {
608 trace_thermal_power_cpu_get_power(
609 &cpufreq_device->allowed_cpus,
610 freq, load_cpu, i, dynamic_power, static_power);
611
612 devm_kfree(&cdev->device, load_cpu);
613 }
614
615 *power = static_power + dynamic_power;
616 return 0;
617}
618
619/**
620 * cpufreq_state2power() - convert a cpu cdev state to power consumed
621 * @cdev: &thermal_cooling_device pointer
622 * @tz: a valid thermal zone device pointer
623 * @state: cooling device state to be converted
624 * @power: pointer in which to store the resulting power
625 *
626 * Convert cooling device state @state into power consumption in
627 * milliwatts assuming 100% load. Store the calculated power in
628 * @power.
629 *
630 * Return: 0 on success, -EINVAL if the cooling device state could not
631 * be converted into a frequency or other -E* if there was an error
632 * when calculating the static power.
633 */
634static int cpufreq_state2power(struct thermal_cooling_device *cdev,
635 struct thermal_zone_device *tz,
636 unsigned long state, u32 *power)
637{
638 unsigned int freq, num_cpus;
639 cpumask_t cpumask;
640 u32 static_power, dynamic_power;
641 int ret;
642 struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
643
644 cpumask_and(&cpumask, &cpufreq_device->allowed_cpus, cpu_online_mask);
645 num_cpus = cpumask_weight(&cpumask);
646
647 /* None of our cpus are online, so no power */
648 if (num_cpus == 0) {
649 *power = 0;
650 return 0;
651 }
652
653 freq = cpufreq_device->freq_table[state];
654 if (!freq)
655 return -EINVAL;
656
657 dynamic_power = cpu_freq_to_power(cpufreq_device, freq) * num_cpus;
658 ret = get_static_power(cpufreq_device, tz, freq, &static_power);
659 if (ret)
660 return ret;
661
662 *power = static_power + dynamic_power;
663 return 0;
664}
665
666/**
667 * cpufreq_power2state() - convert power to a cooling device state
668 * @cdev: &thermal_cooling_device pointer
669 * @tz: a valid thermal zone device pointer
670 * @power: power in milliwatts to be converted
671 * @state: pointer in which to store the resulting state
672 *
673 * Calculate a cooling device state for the cpus described by @cdev
674 * that would allow them to consume at most @power mW and store it in
675 * @state. Note that this calculation depends on external factors
676 * such as the cpu load or the current static power. Calling this
677 * function with the same power as input can yield different cooling
678 * device states depending on those external factors.
679 *
680 * Return: 0 on success, -ENODEV if no cpus are online or -EINVAL if
681 * the calculated frequency could not be converted to a valid state.
682 * The latter should not happen unless the frequencies available to
683 * cpufreq have changed since the initialization of the cpu cooling
684 * device.
685 */
686static int cpufreq_power2state(struct thermal_cooling_device *cdev,
687 struct thermal_zone_device *tz, u32 power,
688 unsigned long *state)
689{
690 unsigned int cpu, cur_freq, target_freq;
691 int ret;
692 s32 dyn_power;
693 u32 last_load, normalised_power, static_power;
694 struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
695
696 cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask);
697
698 /* None of our cpus are online */
699 if (cpu >= nr_cpu_ids)
700 return -ENODEV;
701
702 cur_freq = cpufreq_quick_get(cpu);
703 ret = get_static_power(cpufreq_device, tz, cur_freq, &static_power);
704 if (ret)
705 return ret;
706
707 dyn_power = power - static_power;
708 dyn_power = dyn_power > 0 ? dyn_power : 0;
709 last_load = cpufreq_device->last_load ?: 1;
710 normalised_power = (dyn_power * 100) / last_load;
711 target_freq = cpu_power_to_freq(cpufreq_device, normalised_power);
712
713 *state = cpufreq_cooling_get_level(cpu, target_freq);
714 if (*state == THERMAL_CSTATE_INVALID) {
715 dev_warn_ratelimited(&cdev->device,
716 "Failed to convert %dKHz for cpu %d into a cdev state\n",
717 target_freq, cpu);
718 return -EINVAL;
719 }
720
721 trace_thermal_power_cpu_limit(&cpufreq_device->allowed_cpus,
722 target_freq, *state, power);
723 return 0;
724}
725
283/* Bind cpufreq callbacks to thermal cooling device ops */ 726/* Bind cpufreq callbacks to thermal cooling device ops */
284static struct thermal_cooling_device_ops const cpufreq_cooling_ops = { 727static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
285 .get_max_state = cpufreq_get_max_state, 728 .get_max_state = cpufreq_get_max_state,
286 .get_cur_state = cpufreq_get_cur_state, 729 .get_cur_state = cpufreq_get_cur_state,
287 .set_cur_state = cpufreq_set_cur_state, 730 .set_cur_state = cpufreq_set_cur_state,
@@ -311,6 +754,9 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
311 * @np: a valid struct device_node to the cooling device device tree node 754 * @np: a valid struct device_node to the cooling device device tree node
312 * @clip_cpus: cpumask of cpus where the frequency constraints will happen. 755 * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
313 * Normally this should be same as cpufreq policy->related_cpus. 756 * Normally this should be same as cpufreq policy->related_cpus.
757 * @capacitance: dynamic power coefficient for these cpus
758 * @plat_static_func: function to calculate the static power consumed by these
759 * cpus (optional)
314 * 760 *
315 * This interface function registers the cpufreq cooling device with the name 761 * This interface function registers the cpufreq cooling device with the name
316 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq 762 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -322,13 +768,14 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
322 */ 768 */
323static struct thermal_cooling_device * 769static struct thermal_cooling_device *
324__cpufreq_cooling_register(struct device_node *np, 770__cpufreq_cooling_register(struct device_node *np,
325 const struct cpumask *clip_cpus) 771 const struct cpumask *clip_cpus, u32 capacitance,
772 get_static_t plat_static_func)
326{ 773{
327 struct thermal_cooling_device *cool_dev; 774 struct thermal_cooling_device *cool_dev;
328 struct cpufreq_cooling_device *cpufreq_dev; 775 struct cpufreq_cooling_device *cpufreq_dev;
329 char dev_name[THERMAL_NAME_LENGTH]; 776 char dev_name[THERMAL_NAME_LENGTH];
330 struct cpufreq_frequency_table *pos, *table; 777 struct cpufreq_frequency_table *pos, *table;
331 unsigned int freq, i; 778 unsigned int freq, i, num_cpus;
332 int ret; 779 int ret;
333 780
334 table = cpufreq_frequency_get_table(cpumask_first(clip_cpus)); 781 table = cpufreq_frequency_get_table(cpumask_first(clip_cpus));
@@ -341,6 +788,23 @@ __cpufreq_cooling_register(struct device_node *np,
341 if (!cpufreq_dev) 788 if (!cpufreq_dev)
342 return ERR_PTR(-ENOMEM); 789 return ERR_PTR(-ENOMEM);
343 790
791 num_cpus = cpumask_weight(clip_cpus);
792 cpufreq_dev->time_in_idle = kcalloc(num_cpus,
793 sizeof(*cpufreq_dev->time_in_idle),
794 GFP_KERNEL);
795 if (!cpufreq_dev->time_in_idle) {
796 cool_dev = ERR_PTR(-ENOMEM);
797 goto free_cdev;
798 }
799
800 cpufreq_dev->time_in_idle_timestamp =
801 kcalloc(num_cpus, sizeof(*cpufreq_dev->time_in_idle_timestamp),
802 GFP_KERNEL);
803 if (!cpufreq_dev->time_in_idle_timestamp) {
804 cool_dev = ERR_PTR(-ENOMEM);
805 goto free_time_in_idle;
806 }
807
344 /* Find max levels */ 808 /* Find max levels */
345 cpufreq_for_each_valid_entry(pos, table) 809 cpufreq_for_each_valid_entry(pos, table)
346 cpufreq_dev->max_level++; 810 cpufreq_dev->max_level++;
@@ -349,7 +813,7 @@ __cpufreq_cooling_register(struct device_node *np,
349 cpufreq_dev->max_level, GFP_KERNEL); 813 cpufreq_dev->max_level, GFP_KERNEL);
350 if (!cpufreq_dev->freq_table) { 814 if (!cpufreq_dev->freq_table) {
351 cool_dev = ERR_PTR(-ENOMEM); 815 cool_dev = ERR_PTR(-ENOMEM);
352 goto free_cdev; 816 goto free_time_in_idle_timestamp;
353 } 817 }
354 818
355 /* max_level is an index, not a counter */ 819 /* max_level is an index, not a counter */
@@ -357,6 +821,20 @@ __cpufreq_cooling_register(struct device_node *np,
357 821
358 cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus); 822 cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
359 823
824 if (capacitance) {
825 cpufreq_cooling_ops.get_requested_power =
826 cpufreq_get_requested_power;
827 cpufreq_cooling_ops.state2power = cpufreq_state2power;
828 cpufreq_cooling_ops.power2state = cpufreq_power2state;
829 cpufreq_dev->plat_get_static_power = plat_static_func;
830
831 ret = build_dyn_power_table(cpufreq_dev, capacitance);
832 if (ret) {
833 cool_dev = ERR_PTR(ret);
834 goto free_table;
835 }
836 }
837
360 ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); 838 ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
361 if (ret) { 839 if (ret) {
362 cool_dev = ERR_PTR(ret); 840 cool_dev = ERR_PTR(ret);
@@ -402,6 +880,10 @@ remove_idr:
402 release_idr(&cpufreq_idr, cpufreq_dev->id); 880 release_idr(&cpufreq_idr, cpufreq_dev->id);
403free_table: 881free_table:
404 kfree(cpufreq_dev->freq_table); 882 kfree(cpufreq_dev->freq_table);
883free_time_in_idle_timestamp:
884 kfree(cpufreq_dev->time_in_idle_timestamp);
885free_time_in_idle:
886 kfree(cpufreq_dev->time_in_idle);
405free_cdev: 887free_cdev:
406 kfree(cpufreq_dev); 888 kfree(cpufreq_dev);
407 889
@@ -422,7 +904,7 @@ free_cdev:
422struct thermal_cooling_device * 904struct thermal_cooling_device *
423cpufreq_cooling_register(const struct cpumask *clip_cpus) 905cpufreq_cooling_register(const struct cpumask *clip_cpus)
424{ 906{
425 return __cpufreq_cooling_register(NULL, clip_cpus); 907 return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL);
426} 908}
427EXPORT_SYMBOL_GPL(cpufreq_cooling_register); 909EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
428 910
@@ -446,11 +928,78 @@ of_cpufreq_cooling_register(struct device_node *np,
446 if (!np) 928 if (!np)
447 return ERR_PTR(-EINVAL); 929 return ERR_PTR(-EINVAL);
448 930
449 return __cpufreq_cooling_register(np, clip_cpus); 931 return __cpufreq_cooling_register(np, clip_cpus, 0, NULL);
450} 932}
451EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register); 933EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
452 934
453/** 935/**
936 * cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
937 * @clip_cpus: cpumask of cpus where the frequency constraints will happen
938 * @capacitance: dynamic power coefficient for these cpus
939 * @plat_static_func: function to calculate the static power consumed by these
940 * cpus (optional)
941 *
942 * This interface function registers the cpufreq cooling device with
943 * the name "thermal-cpufreq-%x". This api can support multiple
944 * instances of cpufreq cooling devices. Using this function, the
945 * cooling device will implement the power extensions by using a
946 * simple cpu power model. The cpus must have registered their OPPs
947 * using the OPP library.
948 *
949 * An optional @plat_static_func may be provided to calculate the
950 * static power consumed by these cpus. If the platform's static
951 * power consumption is unknown or negligible, make it NULL.
952 *
953 * Return: a valid struct thermal_cooling_device pointer on success,
954 * on failure, it returns a corresponding ERR_PTR().
955 */
956struct thermal_cooling_device *
957cpufreq_power_cooling_register(const struct cpumask *clip_cpus, u32 capacitance,
958 get_static_t plat_static_func)
959{
960 return __cpufreq_cooling_register(NULL, clip_cpus, capacitance,
961 plat_static_func);
962}
963EXPORT_SYMBOL(cpufreq_power_cooling_register);
964
965/**
966 * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
967 * @np: a valid struct device_node to the cooling device device tree node
968 * @clip_cpus: cpumask of cpus where the frequency constraints will happen
969 * @capacitance: dynamic power coefficient for these cpus
970 * @plat_static_func: function to calculate the static power consumed by these
971 * cpus (optional)
972 *
973 * This interface function registers the cpufreq cooling device with
974 * the name "thermal-cpufreq-%x". This api can support multiple
975 * instances of cpufreq cooling devices. Using this API, the cpufreq
976 * cooling device will be linked to the device tree node provided.
977 * Using this function, the cooling device will implement the power
978 * extensions by using a simple cpu power model. The cpus must have
979 * registered their OPPs using the OPP library.
980 *
981 * An optional @plat_static_func may be provided to calculate the
982 * static power consumed by these cpus. If the platform's static
983 * power consumption is unknown or negligible, make it NULL.
984 *
985 * Return: a valid struct thermal_cooling_device pointer on success,
986 * on failure, it returns a corresponding ERR_PTR().
987 */
988struct thermal_cooling_device *
989of_cpufreq_power_cooling_register(struct device_node *np,
990 const struct cpumask *clip_cpus,
991 u32 capacitance,
992 get_static_t plat_static_func)
993{
994 if (!np)
995 return ERR_PTR(-EINVAL);
996
997 return __cpufreq_cooling_register(np, clip_cpus, capacitance,
998 plat_static_func);
999}
1000EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
1001
1002/**
454 * cpufreq_cooling_unregister - function to remove cpufreq cooling device. 1003 * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
455 * @cdev: thermal cooling device pointer. 1004 * @cdev: thermal cooling device pointer.
456 * 1005 *
@@ -475,6 +1024,8 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
475 1024
476 thermal_cooling_device_unregister(cpufreq_dev->cool_dev); 1025 thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
477 release_idr(&cpufreq_idr, cpufreq_dev->id); 1026 release_idr(&cpufreq_idr, cpufreq_dev->id);
1027 kfree(cpufreq_dev->time_in_idle_timestamp);
1028 kfree(cpufreq_dev->time_in_idle);
478 kfree(cpufreq_dev->freq_table); 1029 kfree(cpufreq_dev->freq_table);
479 kfree(cpufreq_dev); 1030 kfree(cpufreq_dev);
480} 1031}
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index 20adfbe27df1..2fb273c4baa9 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -76,7 +76,7 @@ static int db8500_cdev_bind(struct thermal_zone_device *thermal,
76 upper = lower = i > max_state ? max_state : i; 76 upper = lower = i > max_state ? max_state : i;
77 77
78 ret = thermal_zone_bind_cooling_device(thermal, i, cdev, 78 ret = thermal_zone_bind_cooling_device(thermal, i, cdev,
79 upper, lower); 79 upper, lower, THERMAL_WEIGHT_DEFAULT);
80 80
81 dev_info(&cdev->device, "%s bind to %d: %d-%s\n", cdev->type, 81 dev_info(&cdev->device, "%s bind to %d: %d-%s\n", cdev->type,
82 i, ret, ret ? "fail" : "succeed"); 82 i, ret, ret ? "fail" : "succeed");
diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c
index 6e0a3fbfae86..c2c10bbe24d6 100644
--- a/drivers/thermal/fair_share.c
+++ b/drivers/thermal/fair_share.c
@@ -59,17 +59,17 @@ static int get_trip_level(struct thermal_zone_device *tz)
59} 59}
60 60
61static long get_target_state(struct thermal_zone_device *tz, 61static long get_target_state(struct thermal_zone_device *tz,
62 struct thermal_cooling_device *cdev, int weight, int level) 62 struct thermal_cooling_device *cdev, int percentage, int level)
63{ 63{
64 unsigned long max_state; 64 unsigned long max_state;
65 65
66 cdev->ops->get_max_state(cdev, &max_state); 66 cdev->ops->get_max_state(cdev, &max_state);
67 67
68 return (long)(weight * level * max_state) / (100 * tz->trips); 68 return (long)(percentage * level * max_state) / (100 * tz->trips);
69} 69}
70 70
71/** 71/**
72 * fair_share_throttle - throttles devices asscciated with the given zone 72 * fair_share_throttle - throttles devices associated with the given zone
73 * @tz - thermal_zone_device 73 * @tz - thermal_zone_device
74 * 74 *
75 * Throttling Logic: This uses three parameters to calculate the new 75 * Throttling Logic: This uses three parameters to calculate the new
@@ -77,7 +77,7 @@ static long get_target_state(struct thermal_zone_device *tz,
77 * 77 *
78 * Parameters used for Throttling: 78 * Parameters used for Throttling:
79 * P1. max_state: Maximum throttle state exposed by the cooling device. 79 * P1. max_state: Maximum throttle state exposed by the cooling device.
80 * P2. weight[i]/100: 80 * P2. percentage[i]/100:
81 * How 'effective' the 'i'th device is, in cooling the given zone. 81 * How 'effective' the 'i'th device is, in cooling the given zone.
82 * P3. cur_trip_level/max_no_of_trips: 82 * P3. cur_trip_level/max_no_of_trips:
83 * This describes the extent to which the devices should be throttled. 83 * This describes the extent to which the devices should be throttled.
@@ -88,28 +88,33 @@ static long get_target_state(struct thermal_zone_device *tz,
88 */ 88 */
89static int fair_share_throttle(struct thermal_zone_device *tz, int trip) 89static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
90{ 90{
91 const struct thermal_zone_params *tzp;
92 struct thermal_cooling_device *cdev;
93 struct thermal_instance *instance; 91 struct thermal_instance *instance;
94 int i; 92 int total_weight = 0;
93 int total_instance = 0;
95 int cur_trip_level = get_trip_level(tz); 94 int cur_trip_level = get_trip_level(tz);
96 95
97 if (!tz->tzp || !tz->tzp->tbp) 96 list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
98 return -EINVAL; 97 if (instance->trip != trip)
98 continue;
99
100 total_weight += instance->weight;
101 total_instance++;
102 }
99 103
100 tzp = tz->tzp; 104 list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
105 int percentage;
106 struct thermal_cooling_device *cdev = instance->cdev;
101 107
102 for (i = 0; i < tzp->num_tbps; i++) { 108 if (instance->trip != trip)
103 if (!tzp->tbp[i].cdev)
104 continue; 109 continue;
105 110
106 cdev = tzp->tbp[i].cdev; 111 if (!total_weight)
107 instance = get_thermal_instance(tz, cdev, trip); 112 percentage = 100 / total_instance;
108 if (!instance) 113 else
109 continue; 114 percentage = (instance->weight * 100) / total_weight;
110 115
111 instance->target = get_target_state(tz, cdev, 116 instance->target = get_target_state(tz, cdev, percentage,
112 tzp->tbp[i].weight, cur_trip_level); 117 cur_trip_level);
113 118
114 instance->cdev->updated = false; 119 instance->cdev->updated = false;
115 thermal_cdev_update(cdev); 120 thermal_cdev_update(cdev);
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
new file mode 100644
index 000000000000..d5dd357ba57c
--- /dev/null
+++ b/drivers/thermal/hisi_thermal.c
@@ -0,0 +1,421 @@
1/*
2 * Hisilicon thermal sensor driver
3 *
4 * Copyright (c) 2014-2015 Hisilicon Limited.
5 * Copyright (c) 2014-2015 Linaro Limited.
6 *
7 * Xinwei Kong <kong.kongxinwei@hisilicon.com>
8 * Leo Yan <leo.yan@linaro.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
15 * kind, whether express or implied; without even the implied warranty
16 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20#include <linux/cpufreq.h>
21#include <linux/delay.h>
22#include <linux/interrupt.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/io.h>
26
27#include "thermal_core.h"
28
29#define TEMP0_TH (0x4)
30#define TEMP0_RST_TH (0x8)
31#define TEMP0_CFG (0xC)
32#define TEMP0_EN (0x10)
33#define TEMP0_INT_EN (0x14)
34#define TEMP0_INT_CLR (0x18)
35#define TEMP0_RST_MSK (0x1C)
36#define TEMP0_VALUE (0x28)
37
38#define HISI_TEMP_BASE (-60)
39#define HISI_TEMP_RESET (100000)
40
41#define HISI_MAX_SENSORS 4
42
/* Per-sensor state; one instance per hardware sensor channel. */
struct hisi_thermal_sensor {
	struct hisi_thermal_data *thermal;	/* back-pointer to the owning controller */
	struct thermal_zone_device *tzd;	/* OF thermal zone bound to this sensor */

	long sensor_temp;	/* last temperature read, in millicelsius */
	uint32_t id;		/* hardware sensor index (0 .. HISI_MAX_SENSORS-1) */
	uint32_t thres_temp;	/* passive-trip alarm threshold, millicelsius */
};
51
/* Driver-private state for the whole tsensor block. */
struct hisi_thermal_data {
	struct mutex thermal_lock;    /* protects register data */
	struct platform_device *pdev;
	struct clk *clk;		/* "thermal_clk" gating the sensor block */
	struct hisi_thermal_sensor sensors[HISI_MAX_SENSORS];

	/* irq_bind_sensor: index of the sensor currently wired to the alarm */
	int irq, irq_bind_sensor;
	bool irq_enabled;	/* true while the alarm irq is armed */

	void __iomem *regs;	/* mapped tsensor register window */
};
63
64/* in millicelsius */
65static inline int _step_to_temp(int step)
66{
67 /*
68 * Every step equals (1 * 200) / 255 celsius, and finally
69 * need convert to millicelsius.
70 */
71 return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000;
72}
73
74static inline long _temp_to_step(long temp)
75{
76 return ((temp / 1000 - HISI_TEMP_BASE) * 255 / 200);
77}
78
/*
 * Read one sensor's current temperature, in millicelsius.
 *
 * The block exposes a single measurement channel, so reading a specific
 * sensor requires muxing it in via TEMP0_CFG while the module is disabled,
 * then re-enabling and waiting for a fresh conversion.  The register
 * sequence below is order-dependent; thermal_lock serialises it against
 * the alarm (re)configuration path.
 */
static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
					 struct hisi_thermal_sensor *sensor)
{
	long val;

	mutex_lock(&data->thermal_lock);

	/* disable interrupt */
	writel(0x0, data->regs + TEMP0_INT_EN);
	writel(0x1, data->regs + TEMP0_INT_CLR);

	/* disable module firstly */
	writel(0x0, data->regs + TEMP0_EN);

	/* select sensor id */
	writel((sensor->id << 12), data->regs + TEMP0_CFG);

	/* enable module */
	writel(0x1, data->regs + TEMP0_EN);

	/* give the hardware time to produce a valid sample */
	usleep_range(3000, 5000);

	val = readl(data->regs + TEMP0_VALUE);
	val = _step_to_temp(val);

	mutex_unlock(&data->thermal_lock);

	return val;
}
108
/*
 * Arm the alarm interrupt on the sensor selected by irq_bind_sensor:
 * mux that sensor into the measurement channel, program its alarm
 * threshold and the hardware-reset threshold, then enable the module
 * and its interrupt.  Register write order matters here.
 */
static void hisi_thermal_enable_bind_irq_sensor
			(struct hisi_thermal_data *data)
{
	struct hisi_thermal_sensor *sensor;

	mutex_lock(&data->thermal_lock);

	sensor = &data->sensors[data->irq_bind_sensor];

	/* setting the hdak time */
	writel(0x0, data->regs + TEMP0_CFG);

	/* disable module firstly */
	writel(0x0, data->regs + TEMP0_RST_MSK);
	writel(0x0, data->regs + TEMP0_EN);

	/* select sensor id */
	writel((sensor->id << 12), data->regs + TEMP0_CFG);

	/*
	 * enable for interrupt: low byte is the alarm step; the OR'd upper
	 * bits presumably set the remaining threshold fields to "disabled"
	 * — NOTE(review): confirm against the tsensor datasheet.
	 */
	writel(_temp_to_step(sensor->thres_temp) | 0x0FFFFFF00,
	       data->regs + TEMP0_TH);

	/* hardware self-reset threshold (100 degC) as a last-ditch safety */
	writel(_temp_to_step(HISI_TEMP_RESET), data->regs + TEMP0_RST_TH);

	/* enable module */
	writel(0x1, data->regs + TEMP0_RST_MSK);
	writel(0x1, data->regs + TEMP0_EN);

	writel(0x0, data->regs + TEMP0_INT_CLR);
	writel(0x1, data->regs + TEMP0_INT_EN);

	/* let the first conversion complete before callers rely on it */
	usleep_range(3000, 5000);

	mutex_unlock(&data->thermal_lock);
}
145
/* Quiesce the block: mask the interrupt and reset, then disable the module. */
static void hisi_thermal_disable_sensor(struct hisi_thermal_data *data)
{
	mutex_lock(&data->thermal_lock);

	/* disable sensor module */
	writel(0x0, data->regs + TEMP0_INT_EN);
	writel(0x0, data->regs + TEMP0_RST_MSK);
	writel(0x0, data->regs + TEMP0_EN);

	mutex_unlock(&data->thermal_lock);
}
157
/*
 * .get_temp callback for the OF thermal core.
 *
 * Besides returning this sensor's temperature, it re-binds the single
 * alarm interrupt to whichever sensor is currently hottest (based on the
 * cached sensor_temp of all sensors), and re-arms the alarm once the
 * temperature has dropped back below this sensor's threshold.
 * Always returns 0.
 */
static int hisi_thermal_get_temp(void *_sensor, long *temp)
{
	struct hisi_thermal_sensor *sensor = _sensor;
	struct hisi_thermal_data *data = sensor->thermal;

	int sensor_id = 0, i;
	long max_temp = 0;

	*temp = hisi_thermal_get_sensor_temp(data, sensor);

	/* cache the reading so the hottest-sensor scan below stays current */
	sensor->sensor_temp = *temp;

	/* pick the hottest sensor to own the alarm interrupt */
	for (i = 0; i < HISI_MAX_SENSORS; i++) {
		if (data->sensors[i].sensor_temp >= max_temp) {
			max_temp = data->sensors[i].sensor_temp;
			sensor_id = i;
		}
	}

	mutex_lock(&data->thermal_lock);
	data->irq_bind_sensor = sensor_id;
	mutex_unlock(&data->thermal_lock);

	dev_dbg(&data->pdev->dev, "id=%d, irq=%d, temp=%ld, thres=%d\n",
		sensor->id, data->irq_enabled, *temp, sensor->thres_temp);
	/*
	 * Bind irq to sensor for two cases:
	 * Reenable alarm IRQ if temperature below threshold;
	 * if irq has been enabled, always set it;
	 */
	if (data->irq_enabled) {
		hisi_thermal_enable_bind_irq_sensor(data);
		return 0;
	}

	/* alarm was tripped earlier; re-arm only once we've cooled down */
	if (max_temp < sensor->thres_temp) {
		data->irq_enabled = true;
		hisi_thermal_enable_bind_irq_sensor(data);
		enable_irq(data->irq);
	}

	return 0;
}
201
/* Callbacks handed to the OF thermal core for each registered sensor. */
static struct thermal_zone_of_device_ops hisi_of_thermal_ops = {
	.get_temp = hisi_thermal_get_temp,
};
205
/*
 * Hard-irq half of the alarm handler: silence the (level-style) alarm by
 * disabling the irq line, mark it disabled so hisi_thermal_get_temp() can
 * re-arm it later, and defer the real work to the threaded handler.
 */
static irqreturn_t hisi_thermal_alarm_irq(int irq, void *dev)
{
	struct hisi_thermal_data *data = dev;

	disable_irq_nosync(irq);
	data->irq_enabled = false;

	return IRQ_WAKE_THREAD;
}
215
216static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)
217{
218 struct hisi_thermal_data *data = dev;
219 struct hisi_thermal_sensor *sensor;
220 int i;
221
222 mutex_lock(&data->thermal_lock);
223 sensor = &data->sensors[data->irq_bind_sensor];
224
225 dev_crit(&data->pdev->dev, "THERMAL ALARM: T > %d\n",
226 sensor->thres_temp / 1000);
227 mutex_unlock(&data->thermal_lock);
228
229 for (i = 0; i < HISI_MAX_SENSORS; i++)
230 thermal_zone_device_update(data->sensors[i].tzd);
231
232 return IRQ_HANDLED;
233}
234
235static int hisi_thermal_register_sensor(struct platform_device *pdev,
236 struct hisi_thermal_data *data,
237 struct hisi_thermal_sensor *sensor,
238 int index)
239{
240 int ret, i;
241 const struct thermal_trip *trip;
242
243 sensor->id = index;
244 sensor->thermal = data;
245
246 sensor->tzd = thermal_zone_of_sensor_register(&pdev->dev, sensor->id,
247 sensor, &hisi_of_thermal_ops);
248 if (IS_ERR(sensor->tzd)) {
249 ret = PTR_ERR(sensor->tzd);
250 dev_err(&pdev->dev, "failed to register sensor id %d: %d\n",
251 sensor->id, ret);
252 return ret;
253 }
254
255 trip = of_thermal_get_trip_points(sensor->tzd);
256
257 for (i = 0; i < of_thermal_get_ntrips(sensor->tzd); i++) {
258 if (trip[i].type == THERMAL_TRIP_PASSIVE) {
259 sensor->thres_temp = trip[i].temperature;
260 break;
261 }
262 }
263
264 return 0;
265}
266
/* Device-tree match table; exported for module autoloading. */
static const struct of_device_id of_hisi_thermal_match[] = {
	{ .compatible = "hisilicon,tsensor" },
	{ /* end */ }
};
MODULE_DEVICE_TABLE(of, of_hisi_thermal_match);
272
273static void hisi_thermal_toggle_sensor(struct hisi_thermal_sensor *sensor,
274 bool on)
275{
276 struct thermal_zone_device *tzd = sensor->tzd;
277
278 tzd->ops->set_mode(tzd,
279 on ? THERMAL_DEVICE_ENABLED : THERMAL_DEVICE_DISABLED);
280}
281
282static int hisi_thermal_probe(struct platform_device *pdev)
283{
284 struct hisi_thermal_data *data;
285 struct resource *res;
286 int i;
287 int ret;
288
289 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
290 if (!data)
291 return -ENOMEM;
292
293 mutex_init(&data->thermal_lock);
294 data->pdev = pdev;
295
296 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
297 data->regs = devm_ioremap_resource(&pdev->dev, res);
298 if (IS_ERR(data->regs)) {
299 dev_err(&pdev->dev, "failed to get io address\n");
300 return PTR_ERR(data->regs);
301 }
302
303 data->irq = platform_get_irq(pdev, 0);
304 if (data->irq < 0)
305 return data->irq;
306
307 ret = devm_request_threaded_irq(&pdev->dev, data->irq,
308 hisi_thermal_alarm_irq,
309 hisi_thermal_alarm_irq_thread,
310 0, "hisi_thermal", data);
311 if (ret < 0) {
312 dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret);
313 return ret;
314 }
315
316 platform_set_drvdata(pdev, data);
317
318 data->clk = devm_clk_get(&pdev->dev, "thermal_clk");
319 if (IS_ERR(data->clk)) {
320 ret = PTR_ERR(data->clk);
321 if (ret != -EPROBE_DEFER)
322 dev_err(&pdev->dev,
323 "failed to get thermal clk: %d\n", ret);
324 return ret;
325 }
326
327 /* enable clock for thermal */
328 ret = clk_prepare_enable(data->clk);
329 if (ret) {
330 dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
331 return ret;
332 }
333
334 for (i = 0; i < HISI_MAX_SENSORS; ++i) {
335 ret = hisi_thermal_register_sensor(pdev, data,
336 &data->sensors[i], i);
337 if (ret) {
338 dev_err(&pdev->dev,
339 "failed to register thermal sensor: %d\n", ret);
340 goto err_get_sensor_data;
341 }
342 }
343
344 hisi_thermal_enable_bind_irq_sensor(data);
345 data->irq_enabled = true;
346
347 for (i = 0; i < HISI_MAX_SENSORS; i++)
348 hisi_thermal_toggle_sensor(&data->sensors[i], true);
349
350 return 0;
351
352err_get_sensor_data:
353 clk_disable_unprepare(data->clk);
354
355 return ret;
356}
357
358static int hisi_thermal_remove(struct platform_device *pdev)
359{
360 struct hisi_thermal_data *data = platform_get_drvdata(pdev);
361 int i;
362
363 for (i = 0; i < HISI_MAX_SENSORS; i++) {
364 struct hisi_thermal_sensor *sensor = &data->sensors[i];
365
366 hisi_thermal_toggle_sensor(sensor, false);
367 thermal_zone_of_sensor_unregister(&pdev->dev, sensor->tzd);
368 }
369
370 hisi_thermal_disable_sensor(data);
371 clk_disable_unprepare(data->clk);
372
373 return 0;
374}
375
376#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: quiesce the sensor block (which also masks its irq at
 * the device) before gating the clock, and note the irq as disarmed so
 * resume/get_temp re-arm it cleanly.
 */
static int hisi_thermal_suspend(struct device *dev)
{
	struct hisi_thermal_data *data = dev_get_drvdata(dev);

	hisi_thermal_disable_sensor(data);
	data->irq_enabled = false;

	clk_disable_unprepare(data->clk);

	return 0;
}
388
389static int hisi_thermal_resume(struct device *dev)
390{
391 struct hisi_thermal_data *data = dev_get_drvdata(dev);
392
393 clk_prepare_enable(data->clk);
394
395 data->irq_enabled = true;
396 hisi_thermal_enable_bind_irq_sensor(data);
397
398 return 0;
399}
400#endif
401
402static SIMPLE_DEV_PM_OPS(hisi_thermal_pm_ops,
403 hisi_thermal_suspend, hisi_thermal_resume);
404
405static struct platform_driver hisi_thermal_driver = {
406 .driver = {
407 .name = "hisi_thermal",
408 .owner = THIS_MODULE,
409 .pm = &hisi_thermal_pm_ops,
410 .of_match_table = of_hisi_thermal_match,
411 },
412 .probe = hisi_thermal_probe,
413 .remove = hisi_thermal_remove,
414};
415
416module_platform_driver(hisi_thermal_driver);
417
418MODULE_AUTHOR("Xinwei Kong <kong.kongxinwei@hisilicon.com>");
419MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
420MODULE_DESCRIPTION("Hisilicon thermal driver");
421MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 2ccbc0788353..fde4c2876d14 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -306,7 +306,8 @@ static int imx_bind(struct thermal_zone_device *tz,
306 306
307 ret = thermal_zone_bind_cooling_device(tz, IMX_TRIP_PASSIVE, cdev, 307 ret = thermal_zone_bind_cooling_device(tz, IMX_TRIP_PASSIVE, cdev,
308 THERMAL_NO_LIMIT, 308 THERMAL_NO_LIMIT,
309 THERMAL_NO_LIMIT); 309 THERMAL_NO_LIMIT,
310 THERMAL_WEIGHT_DEFAULT);
310 if (ret) { 311 if (ret) {
311 dev_err(&tz->device, 312 dev_err(&tz->device,
312 "binding zone %s with cdev %s failed:%d\n", 313 "binding zone %s with cdev %s failed:%d\n",
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index 5e8d8e91ea6d..3df3dc34b124 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -16,15 +16,20 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/interrupt.h>
19#include <linux/platform_device.h> 20#include <linux/platform_device.h>
20#include <linux/acpi.h> 21#include <linux/acpi.h>
21#include <linux/thermal.h> 22#include <linux/thermal.h>
22#include "int340x_thermal_zone.h" 23#include "int340x_thermal_zone.h"
24#include "../intel_soc_dts_iosf.h"
23 25
24/* Broadwell-U/HSB thermal reporting device */ 26/* Broadwell-U/HSB thermal reporting device */
25#define PCI_DEVICE_ID_PROC_BDW_THERMAL 0x1603 27#define PCI_DEVICE_ID_PROC_BDW_THERMAL 0x1603
26#define PCI_DEVICE_ID_PROC_HSB_THERMAL 0x0A03 28#define PCI_DEVICE_ID_PROC_HSB_THERMAL 0x0A03
27 29
30/* Skylake thermal reporting device */
31#define PCI_DEVICE_ID_PROC_SKL_THERMAL 0x1903
32
28/* Braswell thermal reporting device */ 33/* Braswell thermal reporting device */
29#define PCI_DEVICE_ID_PROC_BSW_THERMAL 0x22DC 34#define PCI_DEVICE_ID_PROC_BSW_THERMAL 0x22DC
30 35
@@ -42,6 +47,7 @@ struct proc_thermal_device {
42 struct acpi_device *adev; 47 struct acpi_device *adev;
43 struct power_config power_limits[2]; 48 struct power_config power_limits[2];
44 struct int34x_thermal_zone *int340x_zone; 49 struct int34x_thermal_zone *int340x_zone;
50 struct intel_soc_dts_sensors *soc_dts;
45}; 51};
46 52
47enum proc_thermal_emum_mode_type { 53enum proc_thermal_emum_mode_type {
@@ -308,6 +314,18 @@ static int int3401_remove(struct platform_device *pdev)
308 return 0; 314 return 0;
309} 315}
310 316
317static irqreturn_t proc_thermal_pci_msi_irq(int irq, void *devid)
318{
319 struct proc_thermal_device *proc_priv;
320 struct pci_dev *pdev = devid;
321
322 proc_priv = pci_get_drvdata(pdev);
323
324 intel_soc_dts_iosf_interrupt_handler(proc_priv->soc_dts);
325
326 return IRQ_HANDLED;
327}
328
311static int proc_thermal_pci_probe(struct pci_dev *pdev, 329static int proc_thermal_pci_probe(struct pci_dev *pdev,
312 const struct pci_device_id *unused) 330 const struct pci_device_id *unused)
313{ 331{
@@ -334,18 +352,57 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev,
334 pci_set_drvdata(pdev, proc_priv); 352 pci_set_drvdata(pdev, proc_priv);
335 proc_thermal_emum_mode = PROC_THERMAL_PCI; 353 proc_thermal_emum_mode = PROC_THERMAL_PCI;
336 354
355 if (pdev->device == PCI_DEVICE_ID_PROC_BSW_THERMAL) {
356 /*
357 * Enumerate additional DTS sensors available via IOSF.
358 * But we are not treating as a failure condition, if
359 * there are no aux DTSs enabled or fails. This driver
360 * already exposes sensors, which can be accessed via
361 * ACPI/MSR. So we don't want to fail for auxiliary DTSs.
362 */
363 proc_priv->soc_dts = intel_soc_dts_iosf_init(
364 INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0);
365
366 if (proc_priv->soc_dts && pdev->irq) {
367 ret = pci_enable_msi(pdev);
368 if (!ret) {
369 ret = request_threaded_irq(pdev->irq, NULL,
370 proc_thermal_pci_msi_irq,
371 IRQF_ONESHOT, "proc_thermal",
372 pdev);
373 if (ret) {
374 intel_soc_dts_iosf_exit(
375 proc_priv->soc_dts);
376 pci_disable_msi(pdev);
377 proc_priv->soc_dts = NULL;
378 }
379 }
380 } else
381 dev_err(&pdev->dev, "No auxiliary DTSs enabled\n");
382 }
383
337 return 0; 384 return 0;
338} 385}
339 386
340static void proc_thermal_pci_remove(struct pci_dev *pdev) 387static void proc_thermal_pci_remove(struct pci_dev *pdev)
341{ 388{
342 proc_thermal_remove(pci_get_drvdata(pdev)); 389 struct proc_thermal_device *proc_priv = pci_get_drvdata(pdev);
390
391 if (proc_priv->soc_dts) {
392 intel_soc_dts_iosf_exit(proc_priv->soc_dts);
393 if (pdev->irq) {
394 free_irq(pdev->irq, pdev);
395 pci_disable_msi(pdev);
396 }
397 }
398 proc_thermal_remove(proc_priv);
343 pci_disable_device(pdev); 399 pci_disable_device(pdev);
344} 400}
345 401
346static const struct pci_device_id proc_thermal_pci_ids[] = { 402static const struct pci_device_id proc_thermal_pci_ids[] = {
347 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BDW_THERMAL)}, 403 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BDW_THERMAL)},
348 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_HSB_THERMAL)}, 404 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_HSB_THERMAL)},
405 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_SKL_THERMAL)},
349 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BSW_THERMAL)}, 406 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BSW_THERMAL)},
350 { 0, }, 407 { 0, },
351}; 408};
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 725718e97a0b..2e6716104d3f 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -697,6 +697,7 @@ static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = {
697 { X86_VENDOR_INTEL, 6, 0x4d}, 697 { X86_VENDOR_INTEL, 6, 0x4d},
698 { X86_VENDOR_INTEL, 6, 0x4f}, 698 { X86_VENDOR_INTEL, 6, 0x4f},
699 { X86_VENDOR_INTEL, 6, 0x56}, 699 { X86_VENDOR_INTEL, 6, 0x56},
700 { X86_VENDOR_INTEL, 6, 0x57},
700 {} 701 {}
701}; 702};
702MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids); 703MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
diff --git a/drivers/thermal/intel_quark_dts_thermal.c b/drivers/thermal/intel_quark_dts_thermal.c
new file mode 100644
index 000000000000..4434ec812cb7
--- /dev/null
+++ b/drivers/thermal/intel_quark_dts_thermal.c
@@ -0,0 +1,473 @@
1/*
2 * intel_quark_dts_thermal.c
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2015 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Ong Boon Leong <boon.leong.ong@intel.com>
22 * Intel Malaysia, Penang
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2015 Intel Corporation.
27 *
28 * Redistribution and use in source and binary forms, with or without
29 * modification, are permitted provided that the following conditions
30 * are met:
31 *
32 * * Redistributions of source code must retain the above copyright
33 * notice, this list of conditions and the following disclaimer.
34 * * Redistributions in binary form must reproduce the above copyright
35 * notice, this list of conditions and the following disclaimer in
36 * the documentation and/or other materials provided with the
37 * distribution.
38 * * Neither the name of Intel Corporation nor the names of its
39 * contributors may be used to endorse or promote products derived
40 * from this software without specific prior written permission.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
43 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
44 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
45 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
46 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
47 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
48 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
49 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
50 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
51 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
52 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53 *
54 * Quark DTS thermal driver is implemented by referencing
55 * intel_soc_dts_thermal.c.
56 */
57
58#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
59
60#include <linux/module.h>
61#include <linux/slab.h>
62#include <linux/interrupt.h>
63#include <linux/thermal.h>
64#include <asm/cpu_device_id.h>
65#include <asm/iosf_mbi.h>
66
67#define X86_FAMILY_QUARK 0x5
68#define X86_MODEL_QUARK_X1000 0x9
69
70/* DTS reset is programmed via QRK_MBI_UNIT_SOC */
71#define QRK_DTS_REG_OFFSET_RESET 0x34
72#define QRK_DTS_RESET_BIT BIT(0)
73
74/* DTS enable is programmed via QRK_MBI_UNIT_RMU */
75#define QRK_DTS_REG_OFFSET_ENABLE 0xB0
76#define QRK_DTS_ENABLE_BIT BIT(15)
77
78/* Temperature Register is read via QRK_MBI_UNIT_RMU */
79#define QRK_DTS_REG_OFFSET_TEMP 0xB1
80#define QRK_DTS_MASK_TEMP 0xFF
81#define QRK_DTS_OFFSET_TEMP 0
82#define QRK_DTS_OFFSET_REL_TEMP 16
83#define QRK_DTS_TEMP_BASE 50
84
85/* Programmable Trip Point Register is configured via QRK_MBI_UNIT_RMU */
86#define QRK_DTS_REG_OFFSET_PTPS 0xB2
87#define QRK_DTS_MASK_TP_THRES 0xFF
88#define QRK_DTS_SHIFT_TP 8
89#define QRK_DTS_ID_TP_CRITICAL 0
90#define QRK_DTS_SAFE_TP_THRES 105
91
92/* Thermal Sensor Register Lock */
93#define QRK_DTS_REG_OFFSET_LOCK 0x71
94#define QRK_DTS_LOCK_BIT BIT(5)
95
96/* Quark DTS has 2 trip points: hot & catastrophic */
97#define QRK_MAX_DTS_TRIPS 2
98/* If DTS not locked, all trip points are configurable */
99#define QRK_DTS_WR_MASK_SET 0x3
100/* If DTS locked, all trip points are not configurable */
101#define QRK_DTS_WR_MASK_CLR 0
102
103#define DEFAULT_POLL_DELAY 2000
104
105struct soc_sensor_entry {
106 bool locked;
107 u32 store_ptps;
108 u32 store_dts_enable;
109 enum thermal_device_mode mode;
110 struct thermal_zone_device *tzone;
111};
112
113static struct soc_sensor_entry *soc_dts;
114
115static int polling_delay = DEFAULT_POLL_DELAY;
116module_param(polling_delay, int, 0644);
117MODULE_PARM_DESC(polling_delay,
118 "Polling interval for checking trip points (in milliseconds)");
119
120static DEFINE_MUTEX(dts_update_mutex);
121
122static int soc_dts_enable(struct thermal_zone_device *tzd)
123{
124 u32 out;
125 struct soc_sensor_entry *aux_entry = tzd->devdata;
126 int ret;
127
128 ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ,
129 QRK_DTS_REG_OFFSET_ENABLE, &out);
130 if (ret)
131 return ret;
132
133 if (out & QRK_DTS_ENABLE_BIT) {
134 aux_entry->mode = THERMAL_DEVICE_ENABLED;
135 return 0;
136 }
137
138 if (!aux_entry->locked) {
139 out |= QRK_DTS_ENABLE_BIT;
140 ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_WRITE,
141 QRK_DTS_REG_OFFSET_ENABLE, out);
142 if (ret)
143 return ret;
144
145 aux_entry->mode = THERMAL_DEVICE_ENABLED;
146 } else {
147 aux_entry->mode = THERMAL_DEVICE_DISABLED;
148 pr_info("DTS is locked. Cannot enable DTS\n");
149 ret = -EPERM;
150 }
151
152 return ret;
153}
154
155static int soc_dts_disable(struct thermal_zone_device *tzd)
156{
157 u32 out;
158 struct soc_sensor_entry *aux_entry = tzd->devdata;
159 int ret;
160
161 ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ,
162 QRK_DTS_REG_OFFSET_ENABLE, &out);
163 if (ret)
164 return ret;
165
166 if (!(out & QRK_DTS_ENABLE_BIT)) {
167 aux_entry->mode = THERMAL_DEVICE_DISABLED;
168 return 0;
169 }
170
171 if (!aux_entry->locked) {
172 out &= ~QRK_DTS_ENABLE_BIT;
173 ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_WRITE,
174 QRK_DTS_REG_OFFSET_ENABLE, out);
175
176 if (ret)
177 return ret;
178
179 aux_entry->mode = THERMAL_DEVICE_DISABLED;
180 } else {
181 aux_entry->mode = THERMAL_DEVICE_ENABLED;
182 pr_info("DTS is locked. Cannot disable DTS\n");
183 ret = -EPERM;
184 }
185
186 return ret;
187}
188
189static int _get_trip_temp(int trip, unsigned long *temp)
190{
191 int status;
192 u32 out;
193
194 mutex_lock(&dts_update_mutex);
195 status = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ,
196 QRK_DTS_REG_OFFSET_PTPS, &out);
197 mutex_unlock(&dts_update_mutex);
198
199 if (status)
200 return status;
201
202 /*
203 * Thermal Sensor Programmable Trip Point Register has 8-bit
204 * fields for critical (catastrophic) and hot set trip point
205 * thresholds. The threshold value is always offset by its
206 * temperature base (50 degree Celsius).
207 */
208 *temp = (out >> (trip * QRK_DTS_SHIFT_TP)) & QRK_DTS_MASK_TP_THRES;
209 *temp -= QRK_DTS_TEMP_BASE;
210
211 return 0;
212}
213
214static inline int sys_get_trip_temp(struct thermal_zone_device *tzd,
215 int trip, unsigned long *temp)
216{
217 return _get_trip_temp(trip, temp);
218}
219
220static inline int sys_get_crit_temp(struct thermal_zone_device *tzd,
221 unsigned long *temp)
222{
223 return _get_trip_temp(QRK_DTS_ID_TP_CRITICAL, temp);
224}
225
226static int update_trip_temp(struct soc_sensor_entry *aux_entry,
227 int trip, unsigned long temp)
228{
229 u32 out;
230 u32 temp_out;
231 u32 store_ptps;
232 int ret;
233
234 mutex_lock(&dts_update_mutex);
235 if (aux_entry->locked) {
236 ret = -EPERM;
237 goto failed;
238 }
239
240 ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ,
241 QRK_DTS_REG_OFFSET_PTPS, &store_ptps);
242 if (ret)
243 goto failed;
244
245 /*
246 * Protection against unsafe trip point thresdhold value.
247 * As Quark X1000 data-sheet does not provide any recommendation
248 * regarding the safe trip point threshold value to use, we choose
249 * the safe value according to the threshold value set by UEFI BIOS.
250 */
251 if (temp > QRK_DTS_SAFE_TP_THRES)
252 temp = QRK_DTS_SAFE_TP_THRES;
253
254 /*
255 * Thermal Sensor Programmable Trip Point Register has 8-bit
256 * fields for critical (catastrophic) and hot set trip point
257 * thresholds. The threshold value is always offset by its
258 * temperature base (50 degree Celsius).
259 */
260 temp_out = temp + QRK_DTS_TEMP_BASE;
261 out = (store_ptps & ~(QRK_DTS_MASK_TP_THRES <<
262 (trip * QRK_DTS_SHIFT_TP)));
263 out |= (temp_out & QRK_DTS_MASK_TP_THRES) <<
264 (trip * QRK_DTS_SHIFT_TP);
265
266 ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_WRITE,
267 QRK_DTS_REG_OFFSET_PTPS, out);
268
269failed:
270 mutex_unlock(&dts_update_mutex);
271 return ret;
272}
273
274static inline int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
275 unsigned long temp)
276{
277 return update_trip_temp(tzd->devdata, trip, temp);
278}
279
280static int sys_get_trip_type(struct thermal_zone_device *thermal,
281 int trip, enum thermal_trip_type *type)
282{
283 if (trip)
284 *type = THERMAL_TRIP_HOT;
285 else
286 *type = THERMAL_TRIP_CRITICAL;
287
288 return 0;
289}
290
291static int sys_get_curr_temp(struct thermal_zone_device *tzd,
292 unsigned long *temp)
293{
294 u32 out;
295 int ret;
296
297 mutex_lock(&dts_update_mutex);
298 ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ,
299 QRK_DTS_REG_OFFSET_TEMP, &out);
300 mutex_unlock(&dts_update_mutex);
301
302 if (ret)
303 return ret;
304
305 /*
306 * Thermal Sensor Temperature Register has 8-bit field
307 * for temperature value (offset by temperature base
308 * 50 degree Celsius).
309 */
310 out = (out >> QRK_DTS_OFFSET_TEMP) & QRK_DTS_MASK_TEMP;
311 *temp = out - QRK_DTS_TEMP_BASE;
312
313 return 0;
314}
315
316static int sys_get_mode(struct thermal_zone_device *tzd,
317 enum thermal_device_mode *mode)
318{
319 struct soc_sensor_entry *aux_entry = tzd->devdata;
320 *mode = aux_entry->mode;
321 return 0;
322}
323
324static int sys_set_mode(struct thermal_zone_device *tzd,
325 enum thermal_device_mode mode)
326{
327 int ret;
328
329 mutex_lock(&dts_update_mutex);
330 if (mode == THERMAL_DEVICE_ENABLED)
331 ret = soc_dts_enable(tzd);
332 else
333 ret = soc_dts_disable(tzd);
334 mutex_unlock(&dts_update_mutex);
335
336 return ret;
337}
338
339static struct thermal_zone_device_ops tzone_ops = {
340 .get_temp = sys_get_curr_temp,
341 .get_trip_temp = sys_get_trip_temp,
342 .get_trip_type = sys_get_trip_type,
343 .set_trip_temp = sys_set_trip_temp,
344 .get_crit_temp = sys_get_crit_temp,
345 .get_mode = sys_get_mode,
346 .set_mode = sys_set_mode,
347};
348
349static void free_soc_dts(struct soc_sensor_entry *aux_entry)
350{
351 if (aux_entry) {
352 if (!aux_entry->locked) {
353 mutex_lock(&dts_update_mutex);
354 iosf_mbi_write(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_WRITE,
355 QRK_DTS_REG_OFFSET_ENABLE,
356 aux_entry->store_dts_enable);
357
358 iosf_mbi_write(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_WRITE,
359 QRK_DTS_REG_OFFSET_PTPS,
360 aux_entry->store_ptps);
361 mutex_unlock(&dts_update_mutex);
362 }
363 thermal_zone_device_unregister(aux_entry->tzone);
364 kfree(aux_entry);
365 }
366}
367
368static struct soc_sensor_entry *alloc_soc_dts(void)
369{
370 struct soc_sensor_entry *aux_entry;
371 int err;
372 u32 out;
373 int wr_mask;
374
375 aux_entry = kzalloc(sizeof(*aux_entry), GFP_KERNEL);
376 if (!aux_entry) {
377 err = -ENOMEM;
378 return ERR_PTR(-ENOMEM);
379 }
380
381 /* Check if DTS register is locked */
382 err = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ,
383 QRK_DTS_REG_OFFSET_LOCK,
384 &out);
385 if (err)
386 goto err_ret;
387
388 if (out & QRK_DTS_LOCK_BIT) {
389 aux_entry->locked = true;
390 wr_mask = QRK_DTS_WR_MASK_CLR;
391 } else {
392 aux_entry->locked = false;
393 wr_mask = QRK_DTS_WR_MASK_SET;
394 }
395
396 /* Store DTS default state if DTS registers are not locked */
397 if (!aux_entry->locked) {
398 /* Store DTS default enable for restore on exit */
399 err = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ,
400 QRK_DTS_REG_OFFSET_ENABLE,
401 &aux_entry->store_dts_enable);
402 if (err)
403 goto err_ret;
404
405 /* Store DTS default PTPS register for restore on exit */
406 err = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ,
407 QRK_DTS_REG_OFFSET_PTPS,
408 &aux_entry->store_ptps);
409 if (err)
410 goto err_ret;
411 }
412
413 aux_entry->tzone = thermal_zone_device_register("quark_dts",
414 QRK_MAX_DTS_TRIPS,
415 wr_mask,
416 aux_entry, &tzone_ops, NULL, 0, polling_delay);
417 if (IS_ERR(aux_entry->tzone)) {
418 err = PTR_ERR(aux_entry->tzone);
419 goto err_ret;
420 }
421
422 mutex_lock(&dts_update_mutex);
423 err = soc_dts_enable(aux_entry->tzone);
424 mutex_unlock(&dts_update_mutex);
425 if (err)
426 goto err_aux_status;
427
428 return aux_entry;
429
430err_aux_status:
431 thermal_zone_device_unregister(aux_entry->tzone);
432err_ret:
433 kfree(aux_entry);
434 return ERR_PTR(err);
435}
436
437static const struct x86_cpu_id qrk_thermal_ids[] __initconst = {
438 { X86_VENDOR_INTEL, X86_FAMILY_QUARK, X86_MODEL_QUARK_X1000 },
439 {}
440};
441MODULE_DEVICE_TABLE(x86cpu, qrk_thermal_ids);
442
443static int __init intel_quark_thermal_init(void)
444{
445 int err = 0;
446
447 if (!x86_match_cpu(qrk_thermal_ids) || !iosf_mbi_available())
448 return -ENODEV;
449
450 soc_dts = alloc_soc_dts();
451 if (IS_ERR(soc_dts)) {
452 err = PTR_ERR(soc_dts);
453 goto err_free;
454 }
455
456 return 0;
457
458err_free:
459 free_soc_dts(soc_dts);
460 return err;
461}
462
463static void __exit intel_quark_thermal_exit(void)
464{
465 free_soc_dts(soc_dts);
466}
467
468module_init(intel_quark_thermal_init)
469module_exit(intel_quark_thermal_exit)
470
471MODULE_DESCRIPTION("Intel Quark DTS Thermal Driver");
472MODULE_AUTHOR("Ong Boon Leong <boon.leong.ong@intel.com>");
473MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/thermal/intel_soc_dts_iosf.c b/drivers/thermal/intel_soc_dts_iosf.c
new file mode 100644
index 000000000000..42e4b6ac3875
--- /dev/null
+++ b/drivers/thermal/intel_soc_dts_iosf.c
@@ -0,0 +1,478 @@
1/*
2 * intel_soc_dts_iosf.c
3 * Copyright (c) 2015, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/interrupt.h>
21#include <asm/iosf_mbi.h>
22#include "intel_soc_dts_iosf.h"
23
24#define SOC_DTS_OFFSET_ENABLE 0xB0
25#define SOC_DTS_OFFSET_TEMP 0xB1
26
27#define SOC_DTS_OFFSET_PTPS 0xB2
28#define SOC_DTS_OFFSET_PTTS 0xB3
29#define SOC_DTS_OFFSET_PTTSS 0xB4
30#define SOC_DTS_OFFSET_PTMC 0x80
31#define SOC_DTS_TE_AUX0 0xB5
32#define SOC_DTS_TE_AUX1 0xB6
33
34#define SOC_DTS_AUX0_ENABLE_BIT BIT(0)
35#define SOC_DTS_AUX1_ENABLE_BIT BIT(1)
36#define SOC_DTS_CPU_MODULE0_ENABLE_BIT BIT(16)
37#define SOC_DTS_CPU_MODULE1_ENABLE_BIT BIT(17)
38#define SOC_DTS_TE_SCI_ENABLE BIT(9)
39#define SOC_DTS_TE_SMI_ENABLE BIT(10)
40#define SOC_DTS_TE_MSI_ENABLE BIT(11)
41#define SOC_DTS_TE_APICA_ENABLE BIT(14)
42#define SOC_DTS_PTMC_APIC_DEASSERT_BIT BIT(4)
43
44/* DTS encoding for TJ MAX temperature */
45#define SOC_DTS_TJMAX_ENCODING 0x7F
46
47/* Only 2 out of 4 is allowed for OSPM */
48#define SOC_MAX_DTS_TRIPS 2
49
50/* Mask for two trips in status bits */
51#define SOC_DTS_TRIP_MASK 0x03
52
53/* DTS0 and DTS 1 */
54#define SOC_MAX_DTS_SENSORS 2
55
56static int get_tj_max(u32 *tj_max)
57{
58 u32 eax, edx;
59 u32 val;
60 int err;
61
62 err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
63 if (err)
64 goto err_ret;
65 else {
66 val = (eax >> 16) & 0xff;
67 if (val)
68 *tj_max = val * 1000;
69 else {
70 err = -EINVAL;
71 goto err_ret;
72 }
73 }
74
75 return 0;
76err_ret:
77 *tj_max = 0;
78
79 return err;
80}
81
82static int sys_get_trip_temp(struct thermal_zone_device *tzd, int trip,
83 unsigned long *temp)
84{
85 int status;
86 u32 out;
87 struct intel_soc_dts_sensor_entry *dts;
88 struct intel_soc_dts_sensors *sensors;
89
90 dts = tzd->devdata;
91 sensors = dts->sensors;
92 mutex_lock(&sensors->dts_update_lock);
93 status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
94 SOC_DTS_OFFSET_PTPS, &out);
95 mutex_unlock(&sensors->dts_update_lock);
96 if (status)
97 return status;
98
99 out = (out >> (trip * 8)) & SOC_DTS_TJMAX_ENCODING;
100 if (!out)
101 *temp = 0;
102 else
103 *temp = sensors->tj_max - out * 1000;
104
105 return 0;
106}
107
108static int update_trip_temp(struct intel_soc_dts_sensor_entry *dts,
109 int thres_index, unsigned long temp,
110 enum thermal_trip_type trip_type)
111{
112 int status;
113 u32 temp_out;
114 u32 out;
115 u32 store_ptps;
116 u32 store_ptmc;
117 u32 store_te_out;
118 u32 te_out;
119 u32 int_enable_bit = SOC_DTS_TE_APICA_ENABLE;
120 struct intel_soc_dts_sensors *sensors = dts->sensors;
121
122 if (sensors->intr_type == INTEL_SOC_DTS_INTERRUPT_MSI)
123 int_enable_bit |= SOC_DTS_TE_MSI_ENABLE;
124
125 temp_out = (sensors->tj_max - temp) / 1000;
126
127 status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
128 SOC_DTS_OFFSET_PTPS, &store_ptps);
129 if (status)
130 return status;
131
132 out = (store_ptps & ~(0xFF << (thres_index * 8)));
133 out |= (temp_out & 0xFF) << (thres_index * 8);
134 status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
135 SOC_DTS_OFFSET_PTPS, out);
136 if (status)
137 return status;
138
139 pr_debug("update_trip_temp PTPS = %x\n", out);
140 status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
141 SOC_DTS_OFFSET_PTMC, &out);
142 if (status)
143 goto err_restore_ptps;
144
145 store_ptmc = out;
146
147 status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
148 SOC_DTS_TE_AUX0 + thres_index,
149 &te_out);
150 if (status)
151 goto err_restore_ptmc;
152
153 store_te_out = te_out;
154 /* Enable for CPU module 0 and module 1 */
155 out |= (SOC_DTS_CPU_MODULE0_ENABLE_BIT |
156 SOC_DTS_CPU_MODULE1_ENABLE_BIT);
157 if (temp) {
158 if (thres_index)
159 out |= SOC_DTS_AUX1_ENABLE_BIT;
160 else
161 out |= SOC_DTS_AUX0_ENABLE_BIT;
162 te_out |= int_enable_bit;
163 } else {
164 if (thres_index)
165 out &= ~SOC_DTS_AUX1_ENABLE_BIT;
166 else
167 out &= ~SOC_DTS_AUX0_ENABLE_BIT;
168 te_out &= ~int_enable_bit;
169 }
170 status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
171 SOC_DTS_OFFSET_PTMC, out);
172 if (status)
173 goto err_restore_te_out;
174
175 status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
176 SOC_DTS_TE_AUX0 + thres_index,
177 te_out);
178 if (status)
179 goto err_restore_te_out;
180
181 dts->trip_types[thres_index] = trip_type;
182
183 return 0;
184err_restore_te_out:
185 iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
186 SOC_DTS_OFFSET_PTMC, store_te_out);
187err_restore_ptmc:
188 iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
189 SOC_DTS_OFFSET_PTMC, store_ptmc);
190err_restore_ptps:
191 iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
192 SOC_DTS_OFFSET_PTPS, store_ptps);
193 /* Nothing we can do if restore fails */
194
195 return status;
196}
197
198static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
199 unsigned long temp)
200{
201 struct intel_soc_dts_sensor_entry *dts = tzd->devdata;
202 struct intel_soc_dts_sensors *sensors = dts->sensors;
203 int status;
204
205 if (temp > sensors->tj_max)
206 return -EINVAL;
207
208 mutex_lock(&sensors->dts_update_lock);
209 status = update_trip_temp(tzd->devdata, trip, temp,
210 dts->trip_types[trip]);
211 mutex_unlock(&sensors->dts_update_lock);
212
213 return status;
214}
215
216static int sys_get_trip_type(struct thermal_zone_device *tzd,
217 int trip, enum thermal_trip_type *type)
218{
219 struct intel_soc_dts_sensor_entry *dts;
220
221 dts = tzd->devdata;
222
223 *type = dts->trip_types[trip];
224
225 return 0;
226}
227
228static int sys_get_curr_temp(struct thermal_zone_device *tzd,
229 unsigned long *temp)
230{
231 int status;
232 u32 out;
233 struct intel_soc_dts_sensor_entry *dts;
234 struct intel_soc_dts_sensors *sensors;
235
236 dts = tzd->devdata;
237 sensors = dts->sensors;
238 status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
239 SOC_DTS_OFFSET_TEMP, &out);
240 if (status)
241 return status;
242
243 out = (out & dts->temp_mask) >> dts->temp_shift;
244 out -= SOC_DTS_TJMAX_ENCODING;
245 *temp = sensors->tj_max - out * 1000;
246
247 return 0;
248}
249
250static struct thermal_zone_device_ops tzone_ops = {
251 .get_temp = sys_get_curr_temp,
252 .get_trip_temp = sys_get_trip_temp,
253 .get_trip_type = sys_get_trip_type,
254 .set_trip_temp = sys_set_trip_temp,
255};
256
257static int soc_dts_enable(int id)
258{
259 u32 out;
260 int ret;
261
262 ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
263 SOC_DTS_OFFSET_ENABLE, &out);
264 if (ret)
265 return ret;
266
267 if (!(out & BIT(id))) {
268 out |= BIT(id);
269 ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
270 SOC_DTS_OFFSET_ENABLE, out);
271 if (ret)
272 return ret;
273 }
274
275 return ret;
276}
277
278static void remove_dts_thermal_zone(struct intel_soc_dts_sensor_entry *dts)
279{
280 if (dts) {
281 iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
282 SOC_DTS_OFFSET_ENABLE, dts->store_status);
283 thermal_zone_device_unregister(dts->tzone);
284 }
285}
286
287static int add_dts_thermal_zone(int id, struct intel_soc_dts_sensor_entry *dts,
288 bool notification_support, int trip_cnt,
289 int read_only_trip_cnt)
290{
291 char name[10];
292 int trip_count = 0;
293 int trip_mask = 0;
294 u32 store_ptps;
295 int ret;
296 int i;
297
298 /* Store status to restor on exit */
299 ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
300 SOC_DTS_OFFSET_ENABLE,
301 &dts->store_status);
302 if (ret)
303 goto err_ret;
304
305 dts->id = id;
306 dts->temp_mask = 0x00FF << (id * 8);
307 dts->temp_shift = id * 8;
308 if (notification_support) {
309 trip_count = min(SOC_MAX_DTS_TRIPS, trip_cnt);
310 trip_mask = BIT(trip_count - read_only_trip_cnt) - 1;
311 }
312
313 /* Check if the writable trip we provide is not used by BIOS */
314 ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
315 SOC_DTS_OFFSET_PTPS, &store_ptps);
316 if (ret)
317 trip_mask = 0;
318 else {
319 for (i = 0; i < trip_count; ++i) {
320 if (trip_mask & BIT(i))
321 if (store_ptps & (0xff << (i * 8)))
322 trip_mask &= ~BIT(i);
323 }
324 }
325 dts->trip_mask = trip_mask;
326 dts->trip_count = trip_count;
327 snprintf(name, sizeof(name), "soc_dts%d", id);
328 dts->tzone = thermal_zone_device_register(name,
329 trip_count,
330 trip_mask,
331 dts, &tzone_ops,
332 NULL, 0, 0);
333 if (IS_ERR(dts->tzone)) {
334 ret = PTR_ERR(dts->tzone);
335 goto err_ret;
336 }
337
338 ret = soc_dts_enable(id);
339 if (ret)
340 goto err_enable;
341
342 return 0;
343err_enable:
344 thermal_zone_device_unregister(dts->tzone);
345err_ret:
346 return ret;
347}
348
349int intel_soc_dts_iosf_add_read_only_critical_trip(
350 struct intel_soc_dts_sensors *sensors, int critical_offset)
351{
352 int i, j;
353
354 for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
355 for (j = 0; j < sensors->soc_dts[i].trip_count; ++j) {
356 if (!(sensors->soc_dts[i].trip_mask & BIT(j))) {
357 return update_trip_temp(&sensors->soc_dts[i], j,
358 sensors->tj_max - critical_offset,
359 THERMAL_TRIP_CRITICAL);
360 }
361 }
362 }
363
364 return -EINVAL;
365}
366EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_add_read_only_critical_trip);
367
368void intel_soc_dts_iosf_interrupt_handler(struct intel_soc_dts_sensors *sensors)
369{
370 u32 sticky_out;
371 int status;
372 u32 ptmc_out;
373 unsigned long flags;
374
375 spin_lock_irqsave(&sensors->intr_notify_lock, flags);
376
377 status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
378 SOC_DTS_OFFSET_PTMC, &ptmc_out);
379 ptmc_out |= SOC_DTS_PTMC_APIC_DEASSERT_BIT;
380 status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
381 SOC_DTS_OFFSET_PTMC, ptmc_out);
382
383 status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
384 SOC_DTS_OFFSET_PTTSS, &sticky_out);
385 pr_debug("status %d PTTSS %x\n", status, sticky_out);
386 if (sticky_out & SOC_DTS_TRIP_MASK) {
387 int i;
388 /* reset sticky bit */
389 status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
390 SOC_DTS_OFFSET_PTTSS, sticky_out);
391 spin_unlock_irqrestore(&sensors->intr_notify_lock, flags);
392
393 for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
394 pr_debug("TZD update for zone %d\n", i);
395 thermal_zone_device_update(sensors->soc_dts[i].tzone);
396 }
397 } else
398 spin_unlock_irqrestore(&sensors->intr_notify_lock, flags);
399}
400EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_interrupt_handler);
401
402struct intel_soc_dts_sensors *intel_soc_dts_iosf_init(
403 enum intel_soc_dts_interrupt_type intr_type, int trip_count,
404 int read_only_trip_count)
405{
406 struct intel_soc_dts_sensors *sensors;
407 bool notification;
408 u32 tj_max;
409 int ret;
410 int i;
411
412 if (!iosf_mbi_available())
413 return ERR_PTR(-ENODEV);
414
415 if (!trip_count || read_only_trip_count > trip_count)
416 return ERR_PTR(-EINVAL);
417
418 if (get_tj_max(&tj_max))
419 return ERR_PTR(-EINVAL);
420
421 sensors = kzalloc(sizeof(*sensors), GFP_KERNEL);
422 if (!sensors)
423 return ERR_PTR(-ENOMEM);
424
425 spin_lock_init(&sensors->intr_notify_lock);
426 mutex_init(&sensors->dts_update_lock);
427 sensors->intr_type = intr_type;
428 sensors->tj_max = tj_max;
429 if (intr_type == INTEL_SOC_DTS_INTERRUPT_NONE)
430 notification = false;
431 else
432 notification = true;
433 for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
434 sensors->soc_dts[i].sensors = sensors;
435 ret = add_dts_thermal_zone(i, &sensors->soc_dts[i],
436 notification, trip_count,
437 read_only_trip_count);
438 if (ret)
439 goto err_free;
440 }
441
442 for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
443 ret = update_trip_temp(&sensors->soc_dts[i], 0, 0,
444 THERMAL_TRIP_PASSIVE);
445 if (ret)
446 goto err_remove_zone;
447
448 ret = update_trip_temp(&sensors->soc_dts[i], 1, 0,
449 THERMAL_TRIP_PASSIVE);
450 if (ret)
451 goto err_remove_zone;
452 }
453
454 return sensors;
455err_remove_zone:
456 for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i)
457 remove_dts_thermal_zone(&sensors->soc_dts[i]);
458
459err_free:
460 kfree(sensors);
461 return ERR_PTR(ret);
462}
463EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_init);
464
465void intel_soc_dts_iosf_exit(struct intel_soc_dts_sensors *sensors)
466{
467 int i;
468
469 for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
470 update_trip_temp(&sensors->soc_dts[i], 0, 0, 0);
471 update_trip_temp(&sensors->soc_dts[i], 1, 0, 0);
472 remove_dts_thermal_zone(&sensors->soc_dts[i]);
473 }
474 kfree(sensors);
475}
476EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_exit);
477
478MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel_soc_dts_iosf.h b/drivers/thermal/intel_soc_dts_iosf.h
new file mode 100644
index 000000000000..625e37bf93dc
--- /dev/null
+++ b/drivers/thermal/intel_soc_dts_iosf.h
@@ -0,0 +1,62 @@
1/*
2 * intel_soc_dts_iosf.h
3 * Copyright (c) 2015, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15
16#ifndef _INTEL_SOC_DTS_IOSF_CORE_H
17#define _INTEL_SOC_DTS_IOSF_CORE_H
18
19#include <linux/thermal.h>
20
21/* DTS0 and DTS 1 */
22#define SOC_MAX_DTS_SENSORS 2
23
24enum intel_soc_dts_interrupt_type {
25 INTEL_SOC_DTS_INTERRUPT_NONE,
26 INTEL_SOC_DTS_INTERRUPT_APIC,
27 INTEL_SOC_DTS_INTERRUPT_MSI,
28 INTEL_SOC_DTS_INTERRUPT_SCI,
29 INTEL_SOC_DTS_INTERRUPT_SMI,
30};
31
32struct intel_soc_dts_sensors;
33
34struct intel_soc_dts_sensor_entry {
35 int id;
36 u32 temp_mask;
37 u32 temp_shift;
38 u32 store_status;
39 u32 trip_mask;
40 u32 trip_count;
41 enum thermal_trip_type trip_types[2];
42 struct thermal_zone_device *tzone;
43 struct intel_soc_dts_sensors *sensors;
44};
45
46struct intel_soc_dts_sensors {
47 u32 tj_max;
48 spinlock_t intr_notify_lock;
49 struct mutex dts_update_lock;
50 enum intel_soc_dts_interrupt_type intr_type;
51 struct intel_soc_dts_sensor_entry soc_dts[SOC_MAX_DTS_SENSORS];
52};
53
54struct intel_soc_dts_sensors *intel_soc_dts_iosf_init(
55 enum intel_soc_dts_interrupt_type intr_type, int trip_count,
56 int read_only_trip_count);
57void intel_soc_dts_iosf_exit(struct intel_soc_dts_sensors *sensors);
58void intel_soc_dts_iosf_interrupt_handler(
59 struct intel_soc_dts_sensors *sensors);
60int intel_soc_dts_iosf_add_read_only_critical_trip(
61 struct intel_soc_dts_sensors *sensors, int critical_offset);
62#endif
diff --git a/drivers/thermal/intel_soc_dts_thermal.c b/drivers/thermal/intel_soc_dts_thermal.c
index 9013505e43b7..4ebb31a35a64 100644
--- a/drivers/thermal/intel_soc_dts_thermal.c
+++ b/drivers/thermal/intel_soc_dts_thermal.c
@@ -16,431 +16,54 @@
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/interrupt.h> 19#include <linux/interrupt.h>
21#include <linux/thermal.h>
22#include <asm/cpu_device_id.h> 20#include <asm/cpu_device_id.h>
23#include <asm/iosf_mbi.h> 21#include "intel_soc_dts_iosf.h"
24
25#define SOC_DTS_OFFSET_ENABLE 0xB0
26#define SOC_DTS_OFFSET_TEMP 0xB1
27
28#define SOC_DTS_OFFSET_PTPS 0xB2
29#define SOC_DTS_OFFSET_PTTS 0xB3
30#define SOC_DTS_OFFSET_PTTSS 0xB4
31#define SOC_DTS_OFFSET_PTMC 0x80
32#define SOC_DTS_TE_AUX0 0xB5
33#define SOC_DTS_TE_AUX1 0xB6
34
35#define SOC_DTS_AUX0_ENABLE_BIT BIT(0)
36#define SOC_DTS_AUX1_ENABLE_BIT BIT(1)
37#define SOC_DTS_CPU_MODULE0_ENABLE_BIT BIT(16)
38#define SOC_DTS_CPU_MODULE1_ENABLE_BIT BIT(17)
39#define SOC_DTS_TE_SCI_ENABLE BIT(9)
40#define SOC_DTS_TE_SMI_ENABLE BIT(10)
41#define SOC_DTS_TE_MSI_ENABLE BIT(11)
42#define SOC_DTS_TE_APICA_ENABLE BIT(14)
43#define SOC_DTS_PTMC_APIC_DEASSERT_BIT BIT(4)
44
45/* DTS encoding for TJ MAX temperature */
46#define SOC_DTS_TJMAX_ENCODING 0x7F
47
48/* IRQ 86 is a fixed APIC interrupt for BYT DTS Aux threshold notifications */
49#define BYT_SOC_DTS_APIC_IRQ 86
50
51/* Only 2 out of 4 is allowed for OSPM */
52#define SOC_MAX_DTS_TRIPS 2
53
54/* Mask for two trips in status bits */
55#define SOC_DTS_TRIP_MASK 0x03
56
57/* DTS0 and DTS 1 */
58#define SOC_MAX_DTS_SENSORS 2
59 22
60#define CRITICAL_OFFSET_FROM_TJ_MAX 5000 23#define CRITICAL_OFFSET_FROM_TJ_MAX 5000
61 24
62struct soc_sensor_entry {
63 int id;
64 u32 tj_max;
65 u32 temp_mask;
66 u32 temp_shift;
67 u32 store_status;
68 struct thermal_zone_device *tzone;
69};
70
71static struct soc_sensor_entry *soc_dts[SOC_MAX_DTS_SENSORS];
72
73static int crit_offset = CRITICAL_OFFSET_FROM_TJ_MAX; 25static int crit_offset = CRITICAL_OFFSET_FROM_TJ_MAX;
74module_param(crit_offset, int, 0644); 26module_param(crit_offset, int, 0644);
75MODULE_PARM_DESC(crit_offset, 27MODULE_PARM_DESC(crit_offset,
76 "Critical Temperature offset from tj max in millidegree Celsius."); 28 "Critical Temperature offset from tj max in millidegree Celsius.");
77 29
78static DEFINE_MUTEX(aux_update_mutex); 30/* IRQ 86 is a fixed APIC interrupt for BYT DTS Aux threshold notifications */
79static spinlock_t intr_notify_lock; 31#define BYT_SOC_DTS_APIC_IRQ 86
80static int soc_dts_thres_irq;
81
82static int get_tj_max(u32 *tj_max)
83{
84 u32 eax, edx;
85 u32 val;
86 int err;
87
88 err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
89 if (err)
90 goto err_ret;
91 else {
92 val = (eax >> 16) & 0xff;
93 if (val)
94 *tj_max = val * 1000;
95 else {
96 err = -EINVAL;
97 goto err_ret;
98 }
99 }
100
101 return 0;
102err_ret:
103 *tj_max = 0;
104
105 return err;
106}
107
108static int sys_get_trip_temp(struct thermal_zone_device *tzd,
109 int trip, unsigned long *temp)
110{
111 int status;
112 u32 out;
113 struct soc_sensor_entry *aux_entry;
114
115 aux_entry = tzd->devdata;
116
117 if (!trip) {
118 /* Just return the critical temp */
119 *temp = aux_entry->tj_max - crit_offset;
120 return 0;
121 }
122
123 mutex_lock(&aux_update_mutex);
124 status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
125 SOC_DTS_OFFSET_PTPS, &out);
126 mutex_unlock(&aux_update_mutex);
127 if (status)
128 return status;
129
130 out = (out >> (trip * 8)) & SOC_DTS_TJMAX_ENCODING;
131
132 if (!out)
133 *temp = 0;
134 else
135 *temp = aux_entry->tj_max - out * 1000;
136
137 return 0;
138}
139
140static int update_trip_temp(struct soc_sensor_entry *aux_entry,
141 int thres_index, unsigned long temp)
142{
143 int status;
144 u32 temp_out;
145 u32 out;
146 u32 store_ptps;
147 u32 store_ptmc;
148 u32 store_te_out;
149 u32 te_out;
150
151 u32 int_enable_bit = SOC_DTS_TE_APICA_ENABLE |
152 SOC_DTS_TE_MSI_ENABLE;
153
154 temp_out = (aux_entry->tj_max - temp) / 1000;
155
156 status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
157 SOC_DTS_OFFSET_PTPS, &store_ptps);
158 if (status)
159 return status;
160
161 out = (store_ptps & ~(0xFF << (thres_index * 8)));
162 out |= (temp_out & 0xFF) << (thres_index * 8);
163 status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
164 SOC_DTS_OFFSET_PTPS, out);
165 if (status)
166 return status;
167 pr_debug("update_trip_temp PTPS = %x\n", out);
168 status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
169 SOC_DTS_OFFSET_PTMC, &out);
170 if (status)
171 goto err_restore_ptps;
172
173 store_ptmc = out;
174
175 status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
176 SOC_DTS_TE_AUX0 + thres_index,
177 &te_out);
178 if (status)
179 goto err_restore_ptmc;
180
181 store_te_out = te_out;
182
183 /* Enable for CPU module 0 and module 1 */
184 out |= (SOC_DTS_CPU_MODULE0_ENABLE_BIT |
185 SOC_DTS_CPU_MODULE1_ENABLE_BIT);
186 if (temp) {
187 if (thres_index)
188 out |= SOC_DTS_AUX1_ENABLE_BIT;
189 else
190 out |= SOC_DTS_AUX0_ENABLE_BIT;
191 te_out |= int_enable_bit;
192 } else {
193 if (thres_index)
194 out &= ~SOC_DTS_AUX1_ENABLE_BIT;
195 else
196 out &= ~SOC_DTS_AUX0_ENABLE_BIT;
197 te_out &= ~int_enable_bit;
198 }
199 status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
200 SOC_DTS_OFFSET_PTMC, out);
201 if (status)
202 goto err_restore_te_out;
203
204 status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
205 SOC_DTS_TE_AUX0 + thres_index,
206 te_out);
207 if (status)
208 goto err_restore_te_out;
209
210 return 0;
211
212err_restore_te_out:
213 iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
214 SOC_DTS_OFFSET_PTMC, store_te_out);
215err_restore_ptmc:
216 iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
217 SOC_DTS_OFFSET_PTMC, store_ptmc);
218err_restore_ptps:
219 iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
220 SOC_DTS_OFFSET_PTPS, store_ptps);
221 /* Nothing we can do if restore fails */
222
223 return status;
224}
225
226static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
227 unsigned long temp)
228{
229 struct soc_sensor_entry *aux_entry = tzd->devdata;
230 int status;
231
232 if (temp > (aux_entry->tj_max - crit_offset))
233 return -EINVAL;
234
235 mutex_lock(&aux_update_mutex);
236 status = update_trip_temp(tzd->devdata, trip, temp);
237 mutex_unlock(&aux_update_mutex);
238
239 return status;
240}
241
242static int sys_get_trip_type(struct thermal_zone_device *thermal,
243 int trip, enum thermal_trip_type *type)
244{
245 if (trip)
246 *type = THERMAL_TRIP_PASSIVE;
247 else
248 *type = THERMAL_TRIP_CRITICAL;
249
250 return 0;
251}
252
253static int sys_get_curr_temp(struct thermal_zone_device *tzd,
254 unsigned long *temp)
255{
256 int status;
257 u32 out;
258 struct soc_sensor_entry *aux_entry;
259
260 aux_entry = tzd->devdata;
261
262 status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
263 SOC_DTS_OFFSET_TEMP, &out);
264 if (status)
265 return status;
266
267 out = (out & aux_entry->temp_mask) >> aux_entry->temp_shift;
268 out -= SOC_DTS_TJMAX_ENCODING;
269 *temp = aux_entry->tj_max - out * 1000;
270
271 return 0;
272}
273
274static struct thermal_zone_device_ops tzone_ops = {
275 .get_temp = sys_get_curr_temp,
276 .get_trip_temp = sys_get_trip_temp,
277 .get_trip_type = sys_get_trip_type,
278 .set_trip_temp = sys_set_trip_temp,
279};
280
281static void free_soc_dts(struct soc_sensor_entry *aux_entry)
282{
283 if (aux_entry) {
284 iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
285 SOC_DTS_OFFSET_ENABLE, aux_entry->store_status);
286 thermal_zone_device_unregister(aux_entry->tzone);
287 kfree(aux_entry);
288 }
289}
290
291static int soc_dts_enable(int id)
292{
293 u32 out;
294 int ret;
295
296 ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
297 SOC_DTS_OFFSET_ENABLE, &out);
298 if (ret)
299 return ret;
300
301 if (!(out & BIT(id))) {
302 out |= BIT(id);
303 ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
304 SOC_DTS_OFFSET_ENABLE, out);
305 if (ret)
306 return ret;
307 }
308
309 return ret;
310}
311
312static struct soc_sensor_entry *alloc_soc_dts(int id, u32 tj_max,
313 bool notification_support)
314{
315 struct soc_sensor_entry *aux_entry;
316 char name[10];
317 int trip_count = 0;
318 int trip_mask = 0;
319 int err;
320
321 aux_entry = kzalloc(sizeof(*aux_entry), GFP_KERNEL);
322 if (!aux_entry) {
323 err = -ENOMEM;
324 return ERR_PTR(-ENOMEM);
325 }
326
 327 /* Store status to restore on exit */
328 err = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
329 SOC_DTS_OFFSET_ENABLE,
330 &aux_entry->store_status);
331 if (err)
332 goto err_ret;
333
334 aux_entry->id = id;
335 aux_entry->tj_max = tj_max;
336 aux_entry->temp_mask = 0x00FF << (id * 8);
337 aux_entry->temp_shift = id * 8;
338 if (notification_support) {
339 trip_count = SOC_MAX_DTS_TRIPS;
340 trip_mask = 0x02;
341 }
342 snprintf(name, sizeof(name), "soc_dts%d", id);
343 aux_entry->tzone = thermal_zone_device_register(name,
344 trip_count,
345 trip_mask,
346 aux_entry, &tzone_ops,
347 NULL, 0, 0);
348 if (IS_ERR(aux_entry->tzone)) {
349 err = PTR_ERR(aux_entry->tzone);
350 goto err_ret;
351 }
352
353 err = soc_dts_enable(id);
354 if (err)
355 goto err_aux_status;
356
357 return aux_entry;
358
359err_aux_status:
360 thermal_zone_device_unregister(aux_entry->tzone);
361err_ret:
362 kfree(aux_entry);
363 return ERR_PTR(err);
364}
365
366static void proc_thermal_interrupt(void)
367{
368 u32 sticky_out;
369 int status;
370 u32 ptmc_out;
371 unsigned long flags;
372
373 spin_lock_irqsave(&intr_notify_lock, flags);
374
375 /* Clear APIC interrupt */
376 status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
377 SOC_DTS_OFFSET_PTMC, &ptmc_out);
378
379 ptmc_out |= SOC_DTS_PTMC_APIC_DEASSERT_BIT;
380 status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
381 SOC_DTS_OFFSET_PTMC, ptmc_out);
382
383 /* Read status here */
384 status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
385 SOC_DTS_OFFSET_PTTSS, &sticky_out);
386 pr_debug("status %d PTTSS %x\n", status, sticky_out);
387 if (sticky_out & SOC_DTS_TRIP_MASK) {
388 int i;
389 /* reset sticky bit */
390 status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
391 SOC_DTS_OFFSET_PTTSS, sticky_out);
392 spin_unlock_irqrestore(&intr_notify_lock, flags);
393
394 for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
395 pr_debug("TZD update for zone %d\n", i);
396 thermal_zone_device_update(soc_dts[i]->tzone);
397 }
398 } else
399 spin_unlock_irqrestore(&intr_notify_lock, flags);
400 32
401} 33static int soc_dts_thres_irq;
34static struct intel_soc_dts_sensors *soc_dts;
402 35
403static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data) 36static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
404{ 37{
405 proc_thermal_interrupt();
406 pr_debug("proc_thermal_interrupt\n"); 38 pr_debug("proc_thermal_interrupt\n");
39 intel_soc_dts_iosf_interrupt_handler(soc_dts);
407 40
408 return IRQ_HANDLED; 41 return IRQ_HANDLED;
409} 42}
410 43
411static const struct x86_cpu_id soc_thermal_ids[] = { 44static const struct x86_cpu_id soc_thermal_ids[] = {
412 { X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x37, 0, BYT_SOC_DTS_APIC_IRQ}, 45 { X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x37, 0, BYT_SOC_DTS_APIC_IRQ},
413 { X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x4c, 0, 0},
414 {} 46 {}
415}; 47};
416MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids); 48MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids);
417 49
418static int __init intel_soc_thermal_init(void) 50static int __init intel_soc_thermal_init(void)
419{ 51{
420 u32 tj_max;
421 int err = 0; 52 int err = 0;
422 int i;
423 const struct x86_cpu_id *match_cpu; 53 const struct x86_cpu_id *match_cpu;
424 54
425 match_cpu = x86_match_cpu(soc_thermal_ids); 55 match_cpu = x86_match_cpu(soc_thermal_ids);
426 if (!match_cpu) 56 if (!match_cpu)
427 return -ENODEV; 57 return -ENODEV;
428 58
429 if (get_tj_max(&tj_max)) 59 /* Create a zone with 2 trips with marked as read only */
430 return -EINVAL; 60 soc_dts = intel_soc_dts_iosf_init(INTEL_SOC_DTS_INTERRUPT_APIC, 2, 1);
431 61 if (IS_ERR(soc_dts)) {
432 soc_dts_thres_irq = (int)match_cpu->driver_data; 62 err = PTR_ERR(soc_dts);
433 63 return err;
434 for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
435 soc_dts[i] = alloc_soc_dts(i, tj_max,
436 soc_dts_thres_irq ? true : false);
437 if (IS_ERR(soc_dts[i])) {
438 err = PTR_ERR(soc_dts[i]);
439 goto err_free;
440 }
441 } 64 }
442 65
443 spin_lock_init(&intr_notify_lock); 66 soc_dts_thres_irq = (int)match_cpu->driver_data;
444 67
445 if (soc_dts_thres_irq) { 68 if (soc_dts_thres_irq) {
446 err = request_threaded_irq(soc_dts_thres_irq, NULL, 69 err = request_threaded_irq(soc_dts_thres_irq, NULL,
@@ -449,42 +72,31 @@ static int __init intel_soc_thermal_init(void)
449 "soc_dts", soc_dts); 72 "soc_dts", soc_dts);
450 if (err) { 73 if (err) {
451 pr_err("request_threaded_irq ret %d\n", err); 74 pr_err("request_threaded_irq ret %d\n", err);
452 goto err_free; 75 goto error_irq;
453 } 76 }
454 } 77 }
455 78
456 for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) { 79 err = intel_soc_dts_iosf_add_read_only_critical_trip(soc_dts,
457 err = update_trip_temp(soc_dts[i], 0, tj_max - crit_offset); 80 crit_offset);
458 if (err) 81 if (err)
459 goto err_trip_temp; 82 goto error_trips;
460 }
461 83
462 return 0; 84 return 0;
463 85
464err_trip_temp: 86error_trips:
465 i = SOC_MAX_DTS_SENSORS;
466 if (soc_dts_thres_irq) 87 if (soc_dts_thres_irq)
467 free_irq(soc_dts_thres_irq, soc_dts); 88 free_irq(soc_dts_thres_irq, soc_dts);
468err_free: 89error_irq:
469 while (--i >= 0) 90 intel_soc_dts_iosf_exit(soc_dts);
470 free_soc_dts(soc_dts[i]);
471 91
472 return err; 92 return err;
473} 93}
474 94
475static void __exit intel_soc_thermal_exit(void) 95static void __exit intel_soc_thermal_exit(void)
476{ 96{
477 int i;
478
479 for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i)
480 update_trip_temp(soc_dts[i], 0, 0);
481
482 if (soc_dts_thres_irq) 97 if (soc_dts_thres_irq)
483 free_irq(soc_dts_thres_irq, soc_dts); 98 free_irq(soc_dts_thres_irq, soc_dts);
484 99 intel_soc_dts_iosf_exit(soc_dts);
485 for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i)
486 free_soc_dts(soc_dts[i]);
487
488} 100}
489 101
490module_init(intel_soc_thermal_init) 102module_init(intel_soc_thermal_init)
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 668fb1bdea9e..b295b2b6c191 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -58,6 +58,8 @@ struct __thermal_bind_params {
58 * @mode: current thermal zone device mode (enabled/disabled) 58 * @mode: current thermal zone device mode (enabled/disabled)
59 * @passive_delay: polling interval while passive cooling is activated 59 * @passive_delay: polling interval while passive cooling is activated
60 * @polling_delay: zone polling interval 60 * @polling_delay: zone polling interval
61 * @slope: slope of the temperature adjustment curve
62 * @offset: offset of the temperature adjustment curve
61 * @ntrips: number of trip points 63 * @ntrips: number of trip points
62 * @trips: an array of trip points (0..ntrips - 1) 64 * @trips: an array of trip points (0..ntrips - 1)
63 * @num_tbps: number of thermal bind params 65 * @num_tbps: number of thermal bind params
@@ -70,6 +72,8 @@ struct __thermal_zone {
70 enum thermal_device_mode mode; 72 enum thermal_device_mode mode;
71 int passive_delay; 73 int passive_delay;
72 int polling_delay; 74 int polling_delay;
75 int slope;
76 int offset;
73 77
74 /* trip data */ 78 /* trip data */
75 int ntrips; 79 int ntrips;
@@ -227,7 +231,8 @@ static int of_thermal_bind(struct thermal_zone_device *thermal,
227 ret = thermal_zone_bind_cooling_device(thermal, 231 ret = thermal_zone_bind_cooling_device(thermal,
228 tbp->trip_id, cdev, 232 tbp->trip_id, cdev,
229 tbp->max, 233 tbp->max,
230 tbp->min); 234 tbp->min,
235 tbp->usage);
231 if (ret) 236 if (ret)
232 return ret; 237 return ret;
233 } 238 }
@@ -581,7 +586,7 @@ static int thermal_of_populate_bind_params(struct device_node *np,
581 u32 prop; 586 u32 prop;
582 587
583 /* Default weight. Usage is optional */ 588 /* Default weight. Usage is optional */
584 __tbp->usage = 0; 589 __tbp->usage = THERMAL_WEIGHT_DEFAULT;
585 ret = of_property_read_u32(np, "contribution", &prop); 590 ret = of_property_read_u32(np, "contribution", &prop);
586 if (ret == 0) 591 if (ret == 0)
587 __tbp->usage = prop; 592 __tbp->usage = prop;
@@ -715,7 +720,7 @@ static int thermal_of_populate_trip(struct device_node *np,
715 * @np parameter and fills the read data into a __thermal_zone data structure 720 * @np parameter and fills the read data into a __thermal_zone data structure
716 * and return this pointer. 721 * and return this pointer.
717 * 722 *
718 * TODO: Missing properties to parse: thermal-sensor-names and coefficients 723 * TODO: Missing properties to parse: thermal-sensor-names
719 * 724 *
720 * Return: On success returns a valid struct __thermal_zone, 725 * Return: On success returns a valid struct __thermal_zone,
721 * otherwise, it returns a corresponding ERR_PTR(). Caller must 726 * otherwise, it returns a corresponding ERR_PTR(). Caller must
@@ -727,7 +732,7 @@ thermal_of_build_thermal_zone(struct device_node *np)
727 struct device_node *child = NULL, *gchild; 732 struct device_node *child = NULL, *gchild;
728 struct __thermal_zone *tz; 733 struct __thermal_zone *tz;
729 int ret, i; 734 int ret, i;
730 u32 prop; 735 u32 prop, coef[2];
731 736
732 if (!np) { 737 if (!np) {
733 pr_err("no thermal zone np\n"); 738 pr_err("no thermal zone np\n");
@@ -752,6 +757,20 @@ thermal_of_build_thermal_zone(struct device_node *np)
752 } 757 }
753 tz->polling_delay = prop; 758 tz->polling_delay = prop;
754 759
760 /*
 761 * REVISIT: for now, the thermal framework supports only
762 * one sensor per thermal zone. Thus, we are considering
763 * only the first two values as slope and offset.
764 */
765 ret = of_property_read_u32_array(np, "coefficients", coef, 2);
766 if (ret == 0) {
767 tz->slope = coef[0];
768 tz->offset = coef[1];
769 } else {
770 tz->slope = 1;
771 tz->offset = 0;
772 }
773
755 /* trips */ 774 /* trips */
756 child = of_get_child_by_name(np, "trips"); 775 child = of_get_child_by_name(np, "trips");
757 776
@@ -865,6 +884,8 @@ int __init of_parse_thermal_zones(void)
865 for_each_child_of_node(np, child) { 884 for_each_child_of_node(np, child) {
866 struct thermal_zone_device *zone; 885 struct thermal_zone_device *zone;
867 struct thermal_zone_params *tzp; 886 struct thermal_zone_params *tzp;
887 int i, mask = 0;
888 u32 prop;
868 889
869 /* Check whether child is enabled or not */ 890 /* Check whether child is enabled or not */
870 if (!of_device_is_available(child)) 891 if (!of_device_is_available(child))
@@ -891,8 +912,18 @@ int __init of_parse_thermal_zones(void)
891 /* No hwmon because there might be hwmon drivers registering */ 912 /* No hwmon because there might be hwmon drivers registering */
892 tzp->no_hwmon = true; 913 tzp->no_hwmon = true;
893 914
915 if (!of_property_read_u32(child, "sustainable-power", &prop))
916 tzp->sustainable_power = prop;
917
918 for (i = 0; i < tz->ntrips; i++)
919 mask |= 1 << i;
920
921 /* these two are left for temperature drivers to use */
922 tzp->slope = tz->slope;
923 tzp->offset = tz->offset;
924
894 zone = thermal_zone_device_register(child->name, tz->ntrips, 925 zone = thermal_zone_device_register(child->name, tz->ntrips,
895 0, tz, 926 mask, tz,
896 ops, tzp, 927 ops, tzp,
897 tz->passive_delay, 928 tz->passive_delay,
898 tz->polling_delay); 929 tz->polling_delay);
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
new file mode 100644
index 000000000000..4672250b329f
--- /dev/null
+++ b/drivers/thermal/power_allocator.c
@@ -0,0 +1,539 @@
1/*
2 * A power allocator to manage temperature
3 *
4 * Copyright (C) 2014 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "Power allocator: " fmt
17
18#include <linux/rculist.h>
19#include <linux/slab.h>
20#include <linux/thermal.h>
21
22#define CREATE_TRACE_POINTS
23#include <trace/events/thermal_power_allocator.h>
24
25#include "thermal_core.h"
26
27#define FRAC_BITS 10
28#define int_to_frac(x) ((x) << FRAC_BITS)
29#define frac_to_int(x) ((x) >> FRAC_BITS)
30
31/**
32 * mul_frac() - multiply two fixed-point numbers
33 * @x: first multiplicand
34 * @y: second multiplicand
35 *
36 * Return: the result of multiplying two fixed-point numbers. The
37 * result is also a fixed-point number.
38 */
39static inline s64 mul_frac(s64 x, s64 y)
40{
41 return (x * y) >> FRAC_BITS;
42}
43
44/**
45 * div_frac() - divide two fixed-point numbers
46 * @x: the dividend
47 * @y: the divisor
48 *
49 * Return: the result of dividing two fixed-point numbers. The
50 * result is also a fixed-point number.
51 */
52static inline s64 div_frac(s64 x, s64 y)
53{
54 return div_s64(x << FRAC_BITS, y);
55}
56
/**
 * struct power_allocator_params - parameters for the power allocator governor
 * @err_integral: accumulated error in the PID controller.
 * @prev_err: error in the previous iteration of the PID controller.
 * Used to calculate the derivative term.
 * @trip_switch_on: first passive trip point of the thermal zone. The
 * governor switches on when this trip point is crossed.
 * @trip_max_desired_temperature: last passive trip point of the thermal
 * zone. The temperature we are
 * controlling for.
 *
 * One instance is allocated per thermal zone in power_allocator_bind()
 * and stored in tz->governor_data.  @err_integral and @prev_err are
 * fixed-point values (FRAC_BITS fractional bits).
 */
struct power_allocator_params {
	s64 err_integral;
	s32 prev_err;
	int trip_switch_on;
	int trip_max_desired_temperature;
};
74
/**
 * pid_controller() - PID controller
 * @tz: thermal zone we are operating in
 * @current_temp: the current temperature in millicelsius
 * @control_temp: the target temperature in millicelsius
 * @max_allocatable_power: maximum allocatable power for this thermal zone
 *
 * This PID controller increases the available power budget so that the
 * temperature of the thermal zone gets as close as possible to
 * @control_temp and limits the power if it exceeds it. k_po is the
 * proportional term when we are overshooting, k_pu is the
 * proportional term when we are undershooting. integral_cutoff is a
 * threshold below which we stop accumulating the error. The
 * accumulated error is only valid if the requested power will make
 * the system warmer. If the system is mostly idle, there's no point
 * in accumulating positive error.
 *
 * Return: The power budget for the next period.
 */
static u32 pid_controller(struct thermal_zone_device *tz,
			  unsigned long current_temp,
			  unsigned long control_temp,
			  u32 max_allocatable_power)
{
	s64 p, i, d, power_range;
	s32 err, max_power_frac;
	struct power_allocator_params *params = tz->governor_data;

	max_power_frac = int_to_frac(max_allocatable_power);

	/* err > 0 means we are below the target temperature (undershoot) */
	err = ((s32)control_temp - (s32)current_temp);
	err = int_to_frac(err);

	/* Calculate the proportional term */
	p = mul_frac(err < 0 ? tz->tzp->k_po : tz->tzp->k_pu, err);

	/*
	 * Calculate the integral term
	 *
	 * if the error is less than cut off allow integration (but
	 * the integral is limited to max power)
	 */
	i = mul_frac(tz->tzp->k_i, params->err_integral);

	if (err < int_to_frac(tz->tzp->integral_cutoff)) {
		s64 i_next = i + mul_frac(tz->tzp->k_i, err);

		/* only commit the new integral if it stays within budget */
		if (abs64(i_next) < max_power_frac) {
			i = i_next;
			params->err_integral += err;
		}
	}

	/*
	 * Calculate the derivative term
	 *
	 * We do err - prev_err, so with a positive k_d, a decreasing
	 * error (i.e. driving closer to the line) results in less
	 * power being applied, slowing down the controller)
	 */
	d = mul_frac(tz->tzp->k_d, err - params->prev_err);
	d = div_frac(d, tz->passive_delay);
	params->prev_err = err;

	power_range = p + i + d;

	/* feed-forward the known sustainable dissipatable power */
	power_range = tz->tzp->sustainable_power + frac_to_int(power_range);

	power_range = clamp(power_range, (s64)0, (s64)max_allocatable_power);

	trace_thermal_power_allocator_pid(tz, frac_to_int(err),
					  frac_to_int(params->err_integral),
					  frac_to_int(p), frac_to_int(i),
					  frac_to_int(d), power_range);

	return power_range;
}
153
154/**
155 * divvy_up_power() - divvy the allocated power between the actors
156 * @req_power: each actor's requested power
157 * @max_power: each actor's maximum available power
158 * @num_actors: size of the @req_power, @max_power and @granted_power's array
159 * @total_req_power: sum of @req_power
160 * @power_range: total allocated power
161 * @granted_power: output array: each actor's granted power
162 * @extra_actor_power: an appropriately sized array to be used in the
163 * function as temporary storage of the extra power given
164 * to the actors
165 *
166 * This function divides the total allocated power (@power_range)
167 * fairly between the actors. It first tries to give each actor a
168 * share of the @power_range according to how much power it requested
169 * compared to the rest of the actors. For example, if only one actor
170 * requests power, then it receives all the @power_range. If
171 * three actors each requests 1mW, each receives a third of the
172 * @power_range.
173 *
174 * If any actor received more than their maximum power, then that
175 * surplus is re-divvied among the actors based on how far they are
176 * from their respective maximums.
177 *
178 * Granted power for each actor is written to @granted_power, which
179 * should've been allocated by the calling function.
180 */
181static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
182 u32 total_req_power, u32 power_range,
183 u32 *granted_power, u32 *extra_actor_power)
184{
185 u32 extra_power, capped_extra_power;
186 int i;
187
188 /*
189 * Prevent division by 0 if none of the actors request power.
190 */
191 if (!total_req_power)
192 total_req_power = 1;
193
194 capped_extra_power = 0;
195 extra_power = 0;
196 for (i = 0; i < num_actors; i++) {
197 u64 req_range = req_power[i] * power_range;
198
199 granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range,
200 total_req_power);
201
202 if (granted_power[i] > max_power[i]) {
203 extra_power += granted_power[i] - max_power[i];
204 granted_power[i] = max_power[i];
205 }
206
207 extra_actor_power[i] = max_power[i] - granted_power[i];
208 capped_extra_power += extra_actor_power[i];
209 }
210
211 if (!extra_power)
212 return;
213
214 /*
215 * Re-divvy the reclaimed extra among actors based on
216 * how far they are from the max
217 */
218 extra_power = min(extra_power, capped_extra_power);
219 if (capped_extra_power > 0)
220 for (i = 0; i < num_actors; i++)
221 granted_power[i] += (extra_actor_power[i] *
222 extra_power) / capped_extra_power;
223}
224
/*
 * allocate_power() - one control-loop iteration for a thermal zone
 * @tz: thermal zone we are operating in
 * @current_temp: current temperature in millicelsius
 * @control_temp: target temperature in millicelsius
 *
 * Collects requested and maximum power from every power actor bound to
 * the zone's "max desired temperature" trip, asks the PID controller
 * for the total power budget, divides it among the actors with
 * divvy_up_power() and applies each actor's grant.
 *
 * Return: 0 on success, -ENOMEM if the scratch arrays can't be allocated.
 */
static int allocate_power(struct thermal_zone_device *tz,
			  unsigned long current_temp,
			  unsigned long control_temp)
{
	struct thermal_instance *instance;
	struct power_allocator_params *params = tz->governor_data;
	u32 *req_power, *max_power, *granted_power, *extra_actor_power;
	u32 total_req_power, max_allocatable_power;
	u32 total_granted_power, power_range;
	int i, num_actors, total_weight, ret = 0;
	int trip_max_desired_temperature = params->trip_max_desired_temperature;

	/* hold tz->lock across the whole pass so the instance list is stable */
	mutex_lock(&tz->lock);

	/* First pass: count the power actors bound to the controlled trip */
	num_actors = 0;
	total_weight = 0;
	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		if ((instance->trip == trip_max_desired_temperature) &&
		    cdev_is_power_actor(instance->cdev)) {
			num_actors++;
			total_weight += instance->weight;
		}
	}

	/*
	 * We need to allocate three arrays of the same size:
	 * req_power, max_power and granted_power. They are going to
	 * be needed until this function returns. Allocate them all
	 * in one go to simplify the allocation and deallocation
	 * logic.
	 *
	 * NOTE(review): if num_actors is 0 this requests a 0-element
	 * allocation and the loops below do nothing — confirm that is
	 * the intended behavior rather than an early return.
	 */
	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power));
	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
	req_power = devm_kcalloc(&tz->device, num_actors * 4,
				 sizeof(*req_power), GFP_KERNEL);
	if (!req_power) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* the single allocation is carved into four equal sub-arrays */
	max_power = &req_power[num_actors];
	granted_power = &req_power[2 * num_actors];
	extra_actor_power = &req_power[3 * num_actors];

	i = 0;
	total_req_power = 0;
	max_allocatable_power = 0;

	/* Second pass: gather each actor's requested and maximum power */
	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		int weight;
		struct thermal_cooling_device *cdev = instance->cdev;

		if (instance->trip != trip_max_desired_temperature)
			continue;

		if (!cdev_is_power_actor(cdev))
			continue;

		if (cdev->ops->get_requested_power(cdev, tz, &req_power[i]))
			continue;

		/* with no configured weights, treat every actor equally */
		if (!total_weight)
			weight = 1 << FRAC_BITS;
		else
			weight = instance->weight;

		/* weight is fixed-point; scale the request accordingly */
		req_power[i] = frac_to_int(weight * req_power[i]);

		if (power_actor_get_max_power(cdev, tz, &max_power[i]))
			continue;

		total_req_power += req_power[i];
		max_allocatable_power += max_power[i];

		i++;
	}

	power_range = pid_controller(tz, current_temp, control_temp,
				     max_allocatable_power);

	divvy_up_power(req_power, max_power, num_actors, total_req_power,
		       power_range, granted_power, extra_actor_power);

	/* Third pass: apply each actor's granted power */
	total_granted_power = 0;
	i = 0;
	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		if (instance->trip != trip_max_desired_temperature)
			continue;

		if (!cdev_is_power_actor(instance->cdev))
			continue;

		power_actor_set_power(instance->cdev, instance,
				      granted_power[i]);
		total_granted_power += granted_power[i];

		i++;
	}

	trace_thermal_power_allocator(tz, req_power, total_req_power,
				      granted_power, total_granted_power,
				      num_actors, power_range,
				      max_allocatable_power, current_temp,
				      (s32)control_temp - (s32)current_temp);

	devm_kfree(&tz->device, req_power);
unlock:
	mutex_unlock(&tz->lock);

	return ret;
}
337
/*
 * get_governor_trips() - locate the trip points the governor operates on
 * @tz: thermal zone to inspect
 * @params: governor parameters to fill in
 *
 * The first passive trip becomes @params->trip_switch_on (the
 * temperature at which the governor engages) and the last passive trip
 * in the initial run of passive trips becomes
 * @params->trip_max_desired_temperature (the temperature controlled for).
 *
 * Note: at least two passive trips are required — last_passive is only
 * assigned for a passive trip found *after* the first one, so a zone
 * with a single passive trip returns -EINVAL.
 *
 * Return: 0 on success, -EINVAL if the trip layout is unsuitable, or a
 * negative error from get_trip_type().
 */
static int get_governor_trips(struct thermal_zone_device *tz,
			      struct power_allocator_params *params)
{
	int i, ret, last_passive;
	bool found_first_passive;

	found_first_passive = false;
	last_passive = -1;
	ret = -EINVAL;

	for (i = 0; i < tz->trips; i++) {
		enum thermal_trip_type type;

		ret = tz->ops->get_trip_type(tz, i, &type);
		if (ret)
			return ret;

		if (!found_first_passive) {
			if (type == THERMAL_TRIP_PASSIVE) {
				params->trip_switch_on = i;
				found_first_passive = true;
			}
		} else if (type == THERMAL_TRIP_PASSIVE) {
			last_passive = i;
		} else {
			/* a non-passive trip ends the passive run */
			break;
		}
	}

	if (last_passive != -1) {
		params->trip_max_desired_temperature = last_passive;
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}
376
377static void reset_pid_controller(struct power_allocator_params *params)
378{
379 params->err_integral = 0;
380 params->prev_err = 0;
381}
382
/*
 * allow_maximum_power() - remove all throttling from this zone's actors
 * @tz: thermal zone whose power actors should be uncapped
 *
 * Sets every power actor bound to the controlled trip to cooling state
 * 0 (presumably the least aggressive throttling level — confirm against
 * the cooling device convention) and pushes the update out.
 */
static void allow_maximum_power(struct thermal_zone_device *tz)
{
	struct thermal_instance *instance;
	struct power_allocator_params *params = tz->governor_data;

	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		if ((instance->trip != params->trip_max_desired_temperature) ||
		    (!cdev_is_power_actor(instance->cdev)))
			continue;

		instance->target = 0;
		/* force thermal_cdev_update() to re-evaluate this cdev */
		instance->cdev->updated = false;
		thermal_cdev_update(instance->cdev);
	}
}
398
399/**
400 * power_allocator_bind() - bind the power_allocator governor to a thermal zone
401 * @tz: thermal zone to bind it to
402 *
403 * Check that the thermal zone is valid for this governor, that is, it
404 * has two thermal trips. If so, initialize the PID controller
405 * parameters and bind it to the thermal zone.
406 *
407 * Return: 0 on success, -EINVAL if the trips were invalid or -ENOMEM
408 * if we ran out of memory.
409 */
410static int power_allocator_bind(struct thermal_zone_device *tz)
411{
412 int ret;
413 struct power_allocator_params *params;
414 unsigned long switch_on_temp, control_temp;
415 u32 temperature_threshold;
416
417 if (!tz->tzp || !tz->tzp->sustainable_power) {
418 dev_err(&tz->device,
419 "power_allocator: missing sustainable_power\n");
420 return -EINVAL;
421 }
422
423 params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL);
424 if (!params)
425 return -ENOMEM;
426
427 ret = get_governor_trips(tz, params);
428 if (ret) {
429 dev_err(&tz->device,
430 "thermal zone %s has wrong trip setup for power allocator\n",
431 tz->type);
432 goto free;
433 }
434
435 ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
436 &switch_on_temp);
437 if (ret)
438 goto free;
439
440 ret = tz->ops->get_trip_temp(tz, params->trip_max_desired_temperature,
441 &control_temp);
442 if (ret)
443 goto free;
444
445 temperature_threshold = control_temp - switch_on_temp;
446
447 tz->tzp->k_po = tz->tzp->k_po ?:
448 int_to_frac(tz->tzp->sustainable_power) / temperature_threshold;
449 tz->tzp->k_pu = tz->tzp->k_pu ?:
450 int_to_frac(2 * tz->tzp->sustainable_power) /
451 temperature_threshold;
452 tz->tzp->k_i = tz->tzp->k_i ?: int_to_frac(10) / 1000;
453 /*
454 * The default for k_d and integral_cutoff is 0, so we can
455 * leave them as they are.
456 */
457
458 reset_pid_controller(params);
459
460 tz->governor_data = params;
461
462 return 0;
463
464free:
465 devm_kfree(&tz->device, params);
466 return ret;
467}
468
469static void power_allocator_unbind(struct thermal_zone_device *tz)
470{
471 dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
472 devm_kfree(&tz->device, tz->governor_data);
473 tz->governor_data = NULL;
474}
475
/*
 * power_allocator_throttle() - run the power allocation for a zone update
 * @tz:		thermal zone that was updated
 * @trip:	trip point that triggered this call
 *
 * The thermal core invokes this for every trip point on each zone
 * update; only the "max desired temperature" trip does real work so
 * the calculation runs exactly once per update.  Below the switch-on
 * temperature the PID history is cleared and all power actors are
 * unthrottled; at or above it the zone is marked passive and
 * allocate_power() divides the power budget among the actors.
 *
 * Return: 0 on success or when nothing needed doing, otherwise an
 * error from reading temperatures or from allocate_power().
 */
static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
{
	int ret;
	unsigned long switch_on_temp, control_temp, current_temp;
	struct power_allocator_params *params = tz->governor_data;

	/*
	 * We get called for every trip point but we only need to do
	 * our calculations once
	 */
	if (trip != params->trip_max_desired_temperature)
		return 0;

	ret = thermal_zone_get_temp(tz, &current_temp);
	if (ret) {
		dev_warn(&tz->device, "Failed to get temperature: %d\n", ret);
		return ret;
	}

	ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
				     &switch_on_temp);
	if (ret) {
		dev_warn(&tz->device,
			 "Failed to get switch on temperature: %d\n", ret);
		return ret;
	}

	if (current_temp < switch_on_temp) {
		/* cool enough: stop throttling and forget PID history */
		tz->passive = 0;
		reset_pid_controller(params);
		allow_maximum_power(tz);
		return 0;
	}

	tz->passive = 1;

	ret = tz->ops->get_trip_temp(tz, params->trip_max_desired_temperature,
				     &control_temp);
	if (ret) {
		dev_warn(&tz->device,
			 "Failed to get the maximum desired temperature: %d\n",
			 ret);
		return ret;
	}

	return allocate_power(tz, current_temp, control_temp);
}
523
/*
 * Governor descriptor for the "power_allocator" policy.  bind_to_tz /
 * unbind_from_tz manage the per-zone PID state; throttle performs the
 * power-budget division on every zone update.
 */
static struct thermal_governor thermal_gov_power_allocator = {
	.name = "power_allocator",
	.bind_to_tz = power_allocator_bind,
	.unbind_from_tz = power_allocator_unbind,
	.throttle = power_allocator_throttle,
};

/* Make the governor available to the thermal core. */
int thermal_gov_power_allocator_register(void)
{
	return thermal_register_governor(&thermal_gov_power_allocator);
}

/* Remove the governor from the thermal core's list. */
void thermal_gov_power_allocator_unregister(void)
{
	thermal_unregister_governor(&thermal_gov_power_allocator);
}
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
new file mode 100644
index 000000000000..c8d27b8fb9ec
--- /dev/null
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -0,0 +1,309 @@
1/*
2 * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/delay.h>
15#include <linux/err.h>
16#include <linux/iio/consumer.h>
17#include <linux/interrupt.h>
18#include <linux/module.h>
19#include <linux/of.h>
20#include <linux/of_device.h>
21#include <linux/platform_device.h>
22#include <linux/regmap.h>
23#include <linux/thermal.h>
24
25#define QPNP_TM_REG_TYPE 0x04
26#define QPNP_TM_REG_SUBTYPE 0x05
27#define QPNP_TM_REG_STATUS 0x08
28#define QPNP_TM_REG_SHUTDOWN_CTRL1 0x40
29#define QPNP_TM_REG_ALARM_CTRL 0x46
30
31#define QPNP_TM_TYPE 0x09
32#define QPNP_TM_SUBTYPE 0x08
33
34#define STATUS_STAGE_MASK 0x03
35
36#define SHUTDOWN_CTRL1_THRESHOLD_MASK 0x03
37
38#define ALARM_CTRL_FORCE_ENABLE 0x80
39
40/*
41 * Trip point values based on threshold control
42 * 0 = {105 C, 125 C, 145 C}
43 * 1 = {110 C, 130 C, 150 C}
44 * 2 = {115 C, 135 C, 155 C}
45 * 3 = {120 C, 140 C, 160 C}
46*/
47#define TEMP_STAGE_STEP 20000 /* Stage step: 20.000 C */
48#define TEMP_STAGE_HYSTERESIS 2000
49
50#define TEMP_THRESH_MIN 105000 /* Threshold Min: 105 C */
51#define TEMP_THRESH_STEP 5000 /* Threshold step: 5 C */
52
53#define THRESH_MIN 0
54
55/* Temperature in Milli Celsius reported during stage 0 if no ADC is present */
56#define DEFAULT_TEMP 37000
57
/* Per-instance state for one QPNP PMIC temperature-alarm peripheral. */
struct qpnp_tm_chip {
	struct regmap *map;		/* parent SPMI device's regmap */
	struct thermal_zone_device *tz_dev;	/* registered DT thermal sensor */
	long temp;			/* last temperature, millicelsius */
	unsigned int thresh;		/* threshold control setting (0-3) */
	unsigned int stage;		/* current alarm stage (0-3) */
	unsigned int prev_stage;	/* NOTE(review): not written anywhere in this file — confirm if needed */
	unsigned int base;		/* peripheral base address (from "reg") */
	struct iio_channel *adc;	/* optional ADC channel, or ERR_PTR */
};
68
69static int qpnp_tm_read(struct qpnp_tm_chip *chip, u16 addr, u8 *data)
70{
71 unsigned int val;
72 int ret;
73
74 ret = regmap_read(chip->map, chip->base + addr, &val);
75 if (ret < 0)
76 return ret;
77
78 *data = val;
79 return 0;
80}
81
82static int qpnp_tm_write(struct qpnp_tm_chip *chip, u16 addr, u8 data)
83{
84 return regmap_write(chip->map, chip->base + addr, data);
85}
86
/*
 * This function updates the internal temp value based on the
 * current thermal stage and threshold as well as the previous stage.
 *
 * Without an ADC the exact die temperature is unknown; only the alarm
 * stage (0-3) is.  When the stage rises, the temperature must be at
 * least the new stage's trip point, so the estimate is set to that
 * lower bound plus a small hysteresis; when it falls, the estimate is
 * set to the corresponding upper bound minus the hysteresis.  An
 * unchanged stage keeps the previous estimate.
 */
static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
{
	unsigned int stage;
	int ret;
	u8 reg = 0;

	ret = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg);
	if (ret < 0)
		return ret;

	stage = reg & STATUS_STAGE_MASK;

	if (stage > chip->stage) {
		/* increasing stage, use lower bound */
		chip->temp = (stage - 1) * TEMP_STAGE_STEP +
			     chip->thresh * TEMP_THRESH_STEP +
			     TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
	} else if (stage < chip->stage) {
		/* decreasing stage, use upper bound */
		chip->temp = stage * TEMP_STAGE_STEP +
			     chip->thresh * TEMP_THRESH_STEP -
			     TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
	}

	chip->stage = stage;

	return 0;
}
119
120static int qpnp_tm_get_temp(void *data, long *temp)
121{
122 struct qpnp_tm_chip *chip = data;
123 int ret, mili_celsius;
124
125 if (!temp)
126 return -EINVAL;
127
128 if (IS_ERR(chip->adc)) {
129 ret = qpnp_tm_update_temp_no_adc(chip);
130 if (ret < 0)
131 return ret;
132 } else {
133 ret = iio_read_channel_processed(chip->adc, &mili_celsius);
134 if (ret < 0)
135 return ret;
136
137 chip->temp = mili_celsius;
138 }
139
140 *temp = chip->temp < 0 ? 0 : chip->temp;
141
142 return 0;
143}
144
/* Only .get_temp is needed; trip points come from DT via of-thermal. */
static const struct thermal_zone_of_device_ops qpnp_tm_sensor_ops = {
	.get_temp = qpnp_tm_get_temp,
};
148
/*
 * Threaded alarm IRQ handler: a stage change fired the alarm, so ask
 * the thermal core to re-read the temperature and re-evaluate trips.
 */
static irqreturn_t qpnp_tm_isr(int irq, void *data)
{
	struct qpnp_tm_chip *chip = data;

	thermal_zone_device_update(chip->tz_dev);

	return IRQ_HANDLED;
}
157
/*
 * This function initializes the internal temp value based on only the
 * current thermal stage and threshold. Setup threshold control and
 * disable shutdown override.
 */
static int qpnp_tm_init(struct qpnp_tm_chip *chip)
{
	int ret;
	u8 reg;

	/* Lowest threshold setting: stages trip at 105/125/145 C */
	chip->thresh = THRESH_MIN;
	chip->temp = DEFAULT_TEMP;

	ret = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg);
	if (ret < 0)
		return ret;

	chip->stage = reg & STATUS_STAGE_MASK;

	/* Already in an alarm stage: seed temp with that stage's lower bound */
	if (chip->stage)
		chip->temp = chip->thresh * TEMP_THRESH_STEP +
			     (chip->stage - 1) * TEMP_STAGE_STEP +
			     TEMP_THRESH_MIN;

	/*
	 * Set threshold and disable software override of stage 2 and 3
	 * shutdowns.
	 */
	reg = chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK;
	ret = qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg);
	if (ret < 0)
		return ret;

	/* Enable the thermal alarm PMIC module in always-on mode. */
	reg = ALARM_CTRL_FORCE_ENABLE;
	ret = qpnp_tm_write(chip, QPNP_TM_REG_ALARM_CTRL, reg);

	return ret;
}
197
/*
 * qpnp_tm_probe() - set up one QPNP temperature-alarm peripheral
 *
 * Looks up the parent SPMI device's regmap, reads the peripheral base
 * address from the "reg" property, validates the TYPE/SUBTYPE
 * registers, programs the alarm block, requests the alarm IRQ and
 * registers the DT thermal sensor.  The ADC channel is optional: any
 * failure to get it other than -EPROBE_DEFER leaves chip->adc as an
 * ERR_PTR and the driver falls back to stage-based estimates.
 */
static int qpnp_tm_probe(struct platform_device *pdev)
{
	struct qpnp_tm_chip *chip;
	struct device_node *node;
	u8 type, subtype;
	u32 res[2];
	int ret, irq;

	node = pdev->dev.of_node;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, chip);

	/* registers are reached through the parent SPMI device's regmap */
	chip->map = dev_get_regmap(pdev->dev.parent, NULL);
	if (!chip->map)
		return -ENXIO;

	/* res[0] is the peripheral base address; res[1] is unused here */
	ret = of_property_read_u32_array(node, "reg", res, 2);
	if (ret < 0)
		return ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* ADC based measurements are optional */
	chip->adc = iio_channel_get(&pdev->dev, "thermal");
	if (PTR_ERR(chip->adc) == -EPROBE_DEFER)
		return PTR_ERR(chip->adc);

	chip->base = res[0];

	ret = qpnp_tm_read(chip, QPNP_TM_REG_TYPE, &type);
	if (ret < 0) {
		dev_err(&pdev->dev, "could not read type\n");
		goto fail;
	}

	ret = qpnp_tm_read(chip, QPNP_TM_REG_SUBTYPE, &subtype);
	if (ret < 0) {
		dev_err(&pdev->dev, "could not read subtype\n");
		goto fail;
	}

	/* sanity-check we really are talking to a temp-alarm peripheral */
	if (type != QPNP_TM_TYPE || subtype != QPNP_TM_SUBTYPE) {
		dev_err(&pdev->dev, "invalid type 0x%02x or subtype 0x%02x\n",
			type, subtype);
		ret = -ENODEV;
		goto fail;
	}

	ret = qpnp_tm_init(chip);
	if (ret < 0) {
		dev_err(&pdev->dev, "init failed\n");
		goto fail;
	}

	/* IRQ is devm-managed; only the ADC channel needs manual cleanup */
	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, qpnp_tm_isr,
					IRQF_ONESHOT, node->name, chip);
	if (ret < 0)
		goto fail;

	chip->tz_dev = thermal_zone_of_sensor_register(&pdev->dev, 0, chip,
						       &qpnp_tm_sensor_ops);
	if (IS_ERR(chip->tz_dev)) {
		dev_err(&pdev->dev, "failed to register sensor\n");
		ret = PTR_ERR(chip->tz_dev);
		goto fail;
	}

	return 0;

fail:
	if (!IS_ERR(chip->adc))
		iio_channel_release(chip->adc);

	return ret;
}
279
280static int qpnp_tm_remove(struct platform_device *pdev)
281{
282 struct qpnp_tm_chip *chip = dev_get_drvdata(&pdev->dev);
283
284 thermal_zone_of_sensor_unregister(&pdev->dev, chip->tz_dev);
285 if (!IS_ERR(chip->adc))
286 iio_channel_release(chip->adc);
287
288 return 0;
289}
290
/* DT match table: binds to the generic SPMI temp-alarm compatible. */
static const struct of_device_id qpnp_tm_match_table[] = {
	{ .compatible = "qcom,spmi-temp-alarm" },
	{ } /* sentinel */
};
MODULE_DEVICE_TABLE(of, qpnp_tm_match_table);

static struct platform_driver qpnp_tm_driver = {
	.driver = {
		.name = "spmi-temp-alarm",
		.of_match_table = qpnp_tm_match_table,
	},
	.probe = qpnp_tm_probe,
	.remove = qpnp_tm_remove,
};
module_platform_driver(qpnp_tm_driver);

MODULE_ALIAS("platform:spmi-temp-alarm");
MODULE_DESCRIPTION("QPNP PMIC Temperature Alarm driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 1d30b0975651..531f4b179871 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -97,6 +97,32 @@
97#define EXYNOS4412_MUX_ADDR_VALUE 6 97#define EXYNOS4412_MUX_ADDR_VALUE 6
98#define EXYNOS4412_MUX_ADDR_SHIFT 20 98#define EXYNOS4412_MUX_ADDR_SHIFT 20
99 99
100/* Exynos5433 specific registers */
101#define EXYNOS5433_TMU_REG_CONTROL1 0x024
102#define EXYNOS5433_TMU_SAMPLING_INTERVAL 0x02c
103#define EXYNOS5433_TMU_COUNTER_VALUE0 0x030
104#define EXYNOS5433_TMU_COUNTER_VALUE1 0x034
105#define EXYNOS5433_TMU_REG_CURRENT_TEMP1 0x044
106#define EXYNOS5433_THD_TEMP_RISE3_0 0x050
107#define EXYNOS5433_THD_TEMP_RISE7_4 0x054
108#define EXYNOS5433_THD_TEMP_FALL3_0 0x060
109#define EXYNOS5433_THD_TEMP_FALL7_4 0x064
110#define EXYNOS5433_TMU_REG_INTEN 0x0c0
111#define EXYNOS5433_TMU_REG_INTPEND 0x0c8
112#define EXYNOS5433_TMU_EMUL_CON 0x110
113#define EXYNOS5433_TMU_PD_DET_EN 0x130
114
115#define EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT 16
116#define EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT 23
117#define EXYNOS5433_TRIMINFO_SENSOR_ID_MASK \
118 (0xf << EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT)
119#define EXYNOS5433_TRIMINFO_CALIB_SEL_MASK BIT(23)
120
121#define EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING 0
122#define EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING 1
123
124#define EXYNOS5433_PD_DET_EN 1
125
100/*exynos5440 specific registers*/ 126/*exynos5440 specific registers*/
101#define EXYNOS5440_TMU_S0_7_TRIM 0x000 127#define EXYNOS5440_TMU_S0_7_TRIM 0x000
102#define EXYNOS5440_TMU_S0_7_CTRL 0x020 128#define EXYNOS5440_TMU_S0_7_CTRL 0x020
@@ -484,6 +510,101 @@ out:
484 return ret; 510 return ret;
485} 511}
486 512
513static int exynos5433_tmu_initialize(struct platform_device *pdev)
514{
515 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
516 struct exynos_tmu_platform_data *pdata = data->pdata;
517 struct thermal_zone_device *tz = data->tzd;
518 unsigned int status, trim_info;
519 unsigned int rising_threshold = 0, falling_threshold = 0;
520 unsigned long temp, temp_hist;
521 int ret = 0, threshold_code, i, sensor_id, cal_type;
522
523 status = readb(data->base + EXYNOS_TMU_REG_STATUS);
524 if (!status) {
525 ret = -EBUSY;
526 goto out;
527 }
528
529 trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
530 sanitize_temp_error(data, trim_info);
531
532 /* Read the temperature sensor id */
533 sensor_id = (trim_info & EXYNOS5433_TRIMINFO_SENSOR_ID_MASK)
534 >> EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT;
535 dev_info(&pdev->dev, "Temperature sensor ID: 0x%x\n", sensor_id);
536
537 /* Read the calibration mode */
538 writel(trim_info, data->base + EXYNOS_TMU_REG_TRIMINFO);
539 cal_type = (trim_info & EXYNOS5433_TRIMINFO_CALIB_SEL_MASK)
540 >> EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT;
541
542 switch (cal_type) {
543 case EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING:
544 pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
545 break;
546 case EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING:
547 pdata->cal_type = TYPE_TWO_POINT_TRIMMING;
548 break;
549 default:
550 pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
551 break;
552 };
553
554 dev_info(&pdev->dev, "Calibration type is %d-point calibration\n",
555 cal_type ? 2 : 1);
556
557 /* Write temperature code for rising and falling threshold */
558 for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
559 int rising_reg_offset, falling_reg_offset;
560 int j = 0;
561
562 switch (i) {
563 case 0:
564 case 1:
565 case 2:
566 case 3:
567 rising_reg_offset = EXYNOS5433_THD_TEMP_RISE3_0;
568 falling_reg_offset = EXYNOS5433_THD_TEMP_FALL3_0;
569 j = i;
570 break;
571 case 4:
572 case 5:
573 case 6:
574 case 7:
575 rising_reg_offset = EXYNOS5433_THD_TEMP_RISE7_4;
576 falling_reg_offset = EXYNOS5433_THD_TEMP_FALL7_4;
577 j = i - 4;
578 break;
579 default:
580 continue;
581 }
582
583 /* Write temperature code for rising threshold */
584 tz->ops->get_trip_temp(tz, i, &temp);
585 temp /= MCELSIUS;
586 threshold_code = temp_to_code(data, temp);
587
588 rising_threshold = readl(data->base + rising_reg_offset);
589 rising_threshold |= (threshold_code << j * 8);
590 writel(rising_threshold, data->base + rising_reg_offset);
591
592 /* Write temperature code for falling threshold */
593 tz->ops->get_trip_hyst(tz, i, &temp_hist);
594 temp_hist = temp - (temp_hist / MCELSIUS);
595 threshold_code = temp_to_code(data, temp_hist);
596
597 falling_threshold = readl(data->base + falling_reg_offset);
598 falling_threshold &= ~(0xff << j * 8);
599 falling_threshold |= (threshold_code << j * 8);
600 writel(falling_threshold, data->base + falling_reg_offset);
601 }
602
603 data->tmu_clear_irqs(data);
604out:
605 return ret;
606}
607
487static int exynos5440_tmu_initialize(struct platform_device *pdev) 608static int exynos5440_tmu_initialize(struct platform_device *pdev)
488{ 609{
489 struct exynos_tmu_data *data = platform_get_drvdata(pdev); 610 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
@@ -643,6 +764,48 @@ static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
643 writel(con, data->base + EXYNOS_TMU_REG_CONTROL); 764 writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
644} 765}
645 766
767static void exynos5433_tmu_control(struct platform_device *pdev, bool on)
768{
769 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
770 struct thermal_zone_device *tz = data->tzd;
771 unsigned int con, interrupt_en, pd_det_en;
772
773 con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
774
775 if (on) {
776 con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
777 interrupt_en =
778 (of_thermal_is_trip_valid(tz, 7)
779 << EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
780 (of_thermal_is_trip_valid(tz, 6)
781 << EXYNOS7_TMU_INTEN_RISE6_SHIFT) |
782 (of_thermal_is_trip_valid(tz, 5)
783 << EXYNOS7_TMU_INTEN_RISE5_SHIFT) |
784 (of_thermal_is_trip_valid(tz, 4)
785 << EXYNOS7_TMU_INTEN_RISE4_SHIFT) |
786 (of_thermal_is_trip_valid(tz, 3)
787 << EXYNOS7_TMU_INTEN_RISE3_SHIFT) |
788 (of_thermal_is_trip_valid(tz, 2)
789 << EXYNOS7_TMU_INTEN_RISE2_SHIFT) |
790 (of_thermal_is_trip_valid(tz, 1)
791 << EXYNOS7_TMU_INTEN_RISE1_SHIFT) |
792 (of_thermal_is_trip_valid(tz, 0)
793 << EXYNOS7_TMU_INTEN_RISE0_SHIFT);
794
795 interrupt_en |=
796 interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
797 } else {
798 con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
799 interrupt_en = 0; /* Disable all interrupts */
800 }
801
802 pd_det_en = on ? EXYNOS5433_PD_DET_EN : 0;
803
804 writel(pd_det_en, data->base + EXYNOS5433_TMU_PD_DET_EN);
805 writel(interrupt_en, data->base + EXYNOS5433_TMU_REG_INTEN);
806 writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
807}
808
646static void exynos5440_tmu_control(struct platform_device *pdev, bool on) 809static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
647{ 810{
648 struct exynos_tmu_data *data = platform_get_drvdata(pdev); 811 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
@@ -770,6 +933,8 @@ static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
770 933
771 if (data->soc == SOC_ARCH_EXYNOS5260) 934 if (data->soc == SOC_ARCH_EXYNOS5260)
772 emul_con = EXYNOS5260_EMUL_CON; 935 emul_con = EXYNOS5260_EMUL_CON;
936 if (data->soc == SOC_ARCH_EXYNOS5433)
937 emul_con = EXYNOS5433_TMU_EMUL_CON;
773 else if (data->soc == SOC_ARCH_EXYNOS7) 938 else if (data->soc == SOC_ARCH_EXYNOS7)
774 emul_con = EXYNOS7_TMU_REG_EMUL_CON; 939 emul_con = EXYNOS7_TMU_REG_EMUL_CON;
775 else 940 else
@@ -882,6 +1047,9 @@ static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
882 } else if (data->soc == SOC_ARCH_EXYNOS7) { 1047 } else if (data->soc == SOC_ARCH_EXYNOS7) {
883 tmu_intstat = EXYNOS7_TMU_REG_INTPEND; 1048 tmu_intstat = EXYNOS7_TMU_REG_INTPEND;
884 tmu_intclear = EXYNOS7_TMU_REG_INTPEND; 1049 tmu_intclear = EXYNOS7_TMU_REG_INTPEND;
1050 } else if (data->soc == SOC_ARCH_EXYNOS5433) {
1051 tmu_intstat = EXYNOS5433_TMU_REG_INTPEND;
1052 tmu_intclear = EXYNOS5433_TMU_REG_INTPEND;
885 } else { 1053 } else {
886 tmu_intstat = EXYNOS_TMU_REG_INTSTAT; 1054 tmu_intstat = EXYNOS_TMU_REG_INTSTAT;
887 tmu_intclear = EXYNOS_TMU_REG_INTCLEAR; 1055 tmu_intclear = EXYNOS_TMU_REG_INTCLEAR;
@@ -926,6 +1094,7 @@ static const struct of_device_id exynos_tmu_match[] = {
926 { .compatible = "samsung,exynos5260-tmu", }, 1094 { .compatible = "samsung,exynos5260-tmu", },
927 { .compatible = "samsung,exynos5420-tmu", }, 1095 { .compatible = "samsung,exynos5420-tmu", },
928 { .compatible = "samsung,exynos5420-tmu-ext-triminfo", }, 1096 { .compatible = "samsung,exynos5420-tmu-ext-triminfo", },
1097 { .compatible = "samsung,exynos5433-tmu", },
929 { .compatible = "samsung,exynos5440-tmu", }, 1098 { .compatible = "samsung,exynos5440-tmu", },
930 { .compatible = "samsung,exynos7-tmu", }, 1099 { .compatible = "samsung,exynos7-tmu", },
931 { /* sentinel */ }, 1100 { /* sentinel */ },
@@ -949,6 +1118,8 @@ static int exynos_of_get_soc_type(struct device_node *np)
949 else if (of_device_is_compatible(np, 1118 else if (of_device_is_compatible(np,
950 "samsung,exynos5420-tmu-ext-triminfo")) 1119 "samsung,exynos5420-tmu-ext-triminfo"))
951 return SOC_ARCH_EXYNOS5420_TRIMINFO; 1120 return SOC_ARCH_EXYNOS5420_TRIMINFO;
1121 else if (of_device_is_compatible(np, "samsung,exynos5433-tmu"))
1122 return SOC_ARCH_EXYNOS5433;
952 else if (of_device_is_compatible(np, "samsung,exynos5440-tmu")) 1123 else if (of_device_is_compatible(np, "samsung,exynos5440-tmu"))
953 return SOC_ARCH_EXYNOS5440; 1124 return SOC_ARCH_EXYNOS5440;
954 else if (of_device_is_compatible(np, "samsung,exynos7-tmu")) 1125 else if (of_device_is_compatible(np, "samsung,exynos7-tmu"))
@@ -1069,6 +1240,13 @@ static int exynos_map_dt_data(struct platform_device *pdev)
1069 data->tmu_set_emulation = exynos4412_tmu_set_emulation; 1240 data->tmu_set_emulation = exynos4412_tmu_set_emulation;
1070 data->tmu_clear_irqs = exynos4210_tmu_clear_irqs; 1241 data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
1071 break; 1242 break;
1243 case SOC_ARCH_EXYNOS5433:
1244 data->tmu_initialize = exynos5433_tmu_initialize;
1245 data->tmu_control = exynos5433_tmu_control;
1246 data->tmu_read = exynos4412_tmu_read;
1247 data->tmu_set_emulation = exynos4412_tmu_set_emulation;
1248 data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
1249 break;
1072 case SOC_ARCH_EXYNOS5440: 1250 case SOC_ARCH_EXYNOS5440:
1073 data->tmu_initialize = exynos5440_tmu_initialize; 1251 data->tmu_initialize = exynos5440_tmu_initialize;
1074 data->tmu_control = exynos5440_tmu_control; 1252 data->tmu_control = exynos5440_tmu_control;
@@ -1172,7 +1350,9 @@ static int exynos_tmu_probe(struct platform_device *pdev)
1172 goto err_clk_sec; 1350 goto err_clk_sec;
1173 } 1351 }
1174 1352
1175 if (data->soc == SOC_ARCH_EXYNOS7) { 1353 switch (data->soc) {
1354 case SOC_ARCH_EXYNOS5433:
1355 case SOC_ARCH_EXYNOS7:
1176 data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk"); 1356 data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk");
1177 if (IS_ERR(data->sclk)) { 1357 if (IS_ERR(data->sclk)) {
1178 dev_err(&pdev->dev, "Failed to get sclk\n"); 1358 dev_err(&pdev->dev, "Failed to get sclk\n");
@@ -1184,7 +1364,10 @@ static int exynos_tmu_probe(struct platform_device *pdev)
1184 goto err_clk; 1364 goto err_clk;
1185 } 1365 }
1186 } 1366 }
1187 } 1367 break;
1368 default:
1369 break;
1370 };
1188 1371
1189 ret = exynos_tmu_initialize(pdev); 1372 ret = exynos_tmu_initialize(pdev);
1190 if (ret) { 1373 if (ret) {
diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h
index 4d71ec6c9aa0..440c7140b660 100644
--- a/drivers/thermal/samsung/exynos_tmu.h
+++ b/drivers/thermal/samsung/exynos_tmu.h
@@ -33,6 +33,7 @@ enum soc_type {
33 SOC_ARCH_EXYNOS5260, 33 SOC_ARCH_EXYNOS5260,
34 SOC_ARCH_EXYNOS5420, 34 SOC_ARCH_EXYNOS5420,
35 SOC_ARCH_EXYNOS5420_TRIMINFO, 35 SOC_ARCH_EXYNOS5420_TRIMINFO,
36 SOC_ARCH_EXYNOS5433,
36 SOC_ARCH_EXYNOS5440, 37 SOC_ARCH_EXYNOS5440,
37 SOC_ARCH_EXYNOS7, 38 SOC_ARCH_EXYNOS7,
38}; 39};
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 4108db7e10c1..04659bfb888b 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -75,6 +75,58 @@ static struct thermal_governor *__find_governor(const char *name)
75 return NULL; 75 return NULL;
76} 76}
77 77
78/**
79 * bind_previous_governor() - bind the previous governor of the thermal zone
80 * @tz: a valid pointer to a struct thermal_zone_device
81 * @failed_gov_name: the name of the governor that failed to register
82 *
83 * Register the previous governor of the thermal zone after a new
84 * governor has failed to be bound.
85 */
86static void bind_previous_governor(struct thermal_zone_device *tz,
87 const char *failed_gov_name)
88{
89 if (tz->governor && tz->governor->bind_to_tz) {
90 if (tz->governor->bind_to_tz(tz)) {
91 dev_err(&tz->device,
92 "governor %s failed to bind and the previous one (%s) failed to bind again, thermal zone %s has no governor\n",
93 failed_gov_name, tz->governor->name, tz->type);
94 tz->governor = NULL;
95 }
96 }
97}
98
99/**
100 * thermal_set_governor() - Switch to another governor
101 * @tz: a valid pointer to a struct thermal_zone_device
102 * @new_gov: pointer to the new governor
103 *
104 * Change the governor of thermal zone @tz.
105 *
106 * Return: 0 on success, an error if the new governor's bind_to_tz() failed.
107 */
108static int thermal_set_governor(struct thermal_zone_device *tz,
109 struct thermal_governor *new_gov)
110{
111 int ret = 0;
112
113 if (tz->governor && tz->governor->unbind_from_tz)
114 tz->governor->unbind_from_tz(tz);
115
116 if (new_gov && new_gov->bind_to_tz) {
117 ret = new_gov->bind_to_tz(tz);
118 if (ret) {
119 bind_previous_governor(tz, new_gov->name);
120
121 return ret;
122 }
123 }
124
125 tz->governor = new_gov;
126
127 return ret;
128}
129
78int thermal_register_governor(struct thermal_governor *governor) 130int thermal_register_governor(struct thermal_governor *governor)
79{ 131{
80 int err; 132 int err;
@@ -107,8 +159,15 @@ int thermal_register_governor(struct thermal_governor *governor)
107 159
108 name = pos->tzp->governor_name; 160 name = pos->tzp->governor_name;
109 161
110 if (!strncasecmp(name, governor->name, THERMAL_NAME_LENGTH)) 162 if (!strncasecmp(name, governor->name, THERMAL_NAME_LENGTH)) {
111 pos->governor = governor; 163 int ret;
164
165 ret = thermal_set_governor(pos, governor);
166 if (ret)
167 dev_err(&pos->device,
168 "Failed to set governor %s for thermal zone %s: %d\n",
169 governor->name, pos->type, ret);
170 }
112 } 171 }
113 172
114 mutex_unlock(&thermal_list_lock); 173 mutex_unlock(&thermal_list_lock);
@@ -134,7 +193,7 @@ void thermal_unregister_governor(struct thermal_governor *governor)
134 list_for_each_entry(pos, &thermal_tz_list, node) { 193 list_for_each_entry(pos, &thermal_tz_list, node) {
135 if (!strncasecmp(pos->governor->name, governor->name, 194 if (!strncasecmp(pos->governor->name, governor->name,
136 THERMAL_NAME_LENGTH)) 195 THERMAL_NAME_LENGTH))
137 pos->governor = NULL; 196 thermal_set_governor(pos, NULL);
138 } 197 }
139 198
140 mutex_unlock(&thermal_list_lock); 199 mutex_unlock(&thermal_list_lock);
@@ -218,7 +277,8 @@ static void print_bind_err_msg(struct thermal_zone_device *tz,
218 277
219static void __bind(struct thermal_zone_device *tz, int mask, 278static void __bind(struct thermal_zone_device *tz, int mask,
220 struct thermal_cooling_device *cdev, 279 struct thermal_cooling_device *cdev,
221 unsigned long *limits) 280 unsigned long *limits,
281 unsigned int weight)
222{ 282{
223 int i, ret; 283 int i, ret;
224 284
@@ -233,7 +293,8 @@ static void __bind(struct thermal_zone_device *tz, int mask,
233 upper = limits[i * 2 + 1]; 293 upper = limits[i * 2 + 1];
234 } 294 }
235 ret = thermal_zone_bind_cooling_device(tz, i, cdev, 295 ret = thermal_zone_bind_cooling_device(tz, i, cdev,
236 upper, lower); 296 upper, lower,
297 weight);
237 if (ret) 298 if (ret)
238 print_bind_err_msg(tz, cdev, ret); 299 print_bind_err_msg(tz, cdev, ret);
239 } 300 }
@@ -280,7 +341,8 @@ static void bind_cdev(struct thermal_cooling_device *cdev)
280 continue; 341 continue;
281 tzp->tbp[i].cdev = cdev; 342 tzp->tbp[i].cdev = cdev;
282 __bind(pos, tzp->tbp[i].trip_mask, cdev, 343 __bind(pos, tzp->tbp[i].trip_mask, cdev,
283 tzp->tbp[i].binding_limits); 344 tzp->tbp[i].binding_limits,
345 tzp->tbp[i].weight);
284 } 346 }
285 } 347 }
286 348
@@ -319,7 +381,8 @@ static void bind_tz(struct thermal_zone_device *tz)
319 continue; 381 continue;
320 tzp->tbp[i].cdev = pos; 382 tzp->tbp[i].cdev = pos;
321 __bind(tz, tzp->tbp[i].trip_mask, pos, 383 __bind(tz, tzp->tbp[i].trip_mask, pos,
322 tzp->tbp[i].binding_limits); 384 tzp->tbp[i].binding_limits,
385 tzp->tbp[i].weight);
323 } 386 }
324 } 387 }
325exit: 388exit:
@@ -713,7 +776,8 @@ passive_store(struct device *dev, struct device_attribute *attr,
713 thermal_zone_bind_cooling_device(tz, 776 thermal_zone_bind_cooling_device(tz,
714 THERMAL_TRIPS_NONE, cdev, 777 THERMAL_TRIPS_NONE, cdev,
715 THERMAL_NO_LIMIT, 778 THERMAL_NO_LIMIT,
716 THERMAL_NO_LIMIT); 779 THERMAL_NO_LIMIT,
780 THERMAL_WEIGHT_DEFAULT);
717 } 781 }
718 mutex_unlock(&thermal_list_lock); 782 mutex_unlock(&thermal_list_lock);
719 if (!tz->passive_delay) 783 if (!tz->passive_delay)
@@ -765,8 +829,9 @@ policy_store(struct device *dev, struct device_attribute *attr,
765 if (!gov) 829 if (!gov)
766 goto exit; 830 goto exit;
767 831
768 tz->governor = gov; 832 ret = thermal_set_governor(tz, gov);
769 ret = count; 833 if (!ret)
834 ret = count;
770 835
771exit: 836exit:
772 mutex_unlock(&tz->lock); 837 mutex_unlock(&tz->lock);
@@ -810,6 +875,158 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
810static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store); 875static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
811#endif/*CONFIG_THERMAL_EMULATION*/ 876#endif/*CONFIG_THERMAL_EMULATION*/
812 877
878static ssize_t
879sustainable_power_show(struct device *dev, struct device_attribute *devattr,
880 char *buf)
881{
882 struct thermal_zone_device *tz = to_thermal_zone(dev);
883
884 if (tz->tzp)
885 return sprintf(buf, "%u\n", tz->tzp->sustainable_power);
886 else
887 return -EIO;
888}
889
890static ssize_t
891sustainable_power_store(struct device *dev, struct device_attribute *devattr,
892 const char *buf, size_t count)
893{
894 struct thermal_zone_device *tz = to_thermal_zone(dev);
895 u32 sustainable_power;
896
897 if (!tz->tzp)
898 return -EIO;
899
900 if (kstrtou32(buf, 10, &sustainable_power))
901 return -EINVAL;
902
903 tz->tzp->sustainable_power = sustainable_power;
904
905 return count;
906}
907static DEVICE_ATTR(sustainable_power, S_IWUSR | S_IRUGO, sustainable_power_show,
908 sustainable_power_store);
909
910#define create_s32_tzp_attr(name) \
911 static ssize_t \
912 name##_show(struct device *dev, struct device_attribute *devattr, \
913 char *buf) \
914 { \
915 struct thermal_zone_device *tz = to_thermal_zone(dev); \
916 \
917 if (tz->tzp) \
918 return sprintf(buf, "%u\n", tz->tzp->name); \
919 else \
920 return -EIO; \
921 } \
922 \
923 static ssize_t \
924 name##_store(struct device *dev, struct device_attribute *devattr, \
925 const char *buf, size_t count) \
926 { \
927 struct thermal_zone_device *tz = to_thermal_zone(dev); \
928 s32 value; \
929 \
930 if (!tz->tzp) \
931 return -EIO; \
932 \
933 if (kstrtos32(buf, 10, &value)) \
934 return -EINVAL; \
935 \
936 tz->tzp->name = value; \
937 \
938 return count; \
939 } \
940 static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, name##_show, name##_store)
941
942create_s32_tzp_attr(k_po);
943create_s32_tzp_attr(k_pu);
944create_s32_tzp_attr(k_i);
945create_s32_tzp_attr(k_d);
946create_s32_tzp_attr(integral_cutoff);
947create_s32_tzp_attr(slope);
948create_s32_tzp_attr(offset);
949#undef create_s32_tzp_attr
950
951static struct device_attribute *dev_tzp_attrs[] = {
952 &dev_attr_sustainable_power,
953 &dev_attr_k_po,
954 &dev_attr_k_pu,
955 &dev_attr_k_i,
956 &dev_attr_k_d,
957 &dev_attr_integral_cutoff,
958 &dev_attr_slope,
959 &dev_attr_offset,
960};
961
962static int create_tzp_attrs(struct device *dev)
963{
964 int i;
965
966 for (i = 0; i < ARRAY_SIZE(dev_tzp_attrs); i++) {
967 int ret;
968 struct device_attribute *dev_attr = dev_tzp_attrs[i];
969
970 ret = device_create_file(dev, dev_attr);
971 if (ret)
972 return ret;
973 }
974
975 return 0;
976}
977
978/**
979 * power_actor_get_max_power() - get the maximum power that a cdev can consume
980 * @cdev: pointer to &thermal_cooling_device
981 * @tz: a valid thermal zone device pointer
982 * @max_power: pointer in which to store the maximum power
983 *
984 * Calculate the maximum power consumption in milliwats that the
985 * cooling device can currently consume and store it in @max_power.
986 *
987 * Return: 0 on success, -EINVAL if @cdev doesn't support the
988 * power_actor API or -E* on other error.
989 */
990int power_actor_get_max_power(struct thermal_cooling_device *cdev,
991 struct thermal_zone_device *tz, u32 *max_power)
992{
993 if (!cdev_is_power_actor(cdev))
994 return -EINVAL;
995
996 return cdev->ops->state2power(cdev, tz, 0, max_power);
997}
998
999/**
1000 * power_actor_set_power() - limit the maximum power that a cooling device can consume
1001 * @cdev: pointer to &thermal_cooling_device
1002 * @instance: thermal instance to update
1003 * @power: the power in milliwatts
1004 *
1005 * Set the cooling device to consume at most @power milliwatts.
1006 *
1007 * Return: 0 on success, -EINVAL if the cooling device does not
1008 * implement the power actor API or -E* for other failures.
1009 */
1010int power_actor_set_power(struct thermal_cooling_device *cdev,
1011 struct thermal_instance *instance, u32 power)
1012{
1013 unsigned long state;
1014 int ret;
1015
1016 if (!cdev_is_power_actor(cdev))
1017 return -EINVAL;
1018
1019 ret = cdev->ops->power2state(cdev, instance->tz, power, &state);
1020 if (ret)
1021 return ret;
1022
1023 instance->target = state;
1024 cdev->updated = false;
1025 thermal_cdev_update(cdev);
1026
1027 return 0;
1028}
1029
813static DEVICE_ATTR(type, 0444, type_show, NULL); 1030static DEVICE_ATTR(type, 0444, type_show, NULL);
814static DEVICE_ATTR(temp, 0444, temp_show, NULL); 1031static DEVICE_ATTR(temp, 0444, temp_show, NULL);
815static DEVICE_ATTR(mode, 0644, mode_show, mode_store); 1032static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
@@ -917,6 +1134,34 @@ static const struct attribute_group *cooling_device_attr_groups[] = {
917 NULL, 1134 NULL,
918}; 1135};
919 1136
1137static ssize_t
1138thermal_cooling_device_weight_show(struct device *dev,
1139 struct device_attribute *attr, char *buf)
1140{
1141 struct thermal_instance *instance;
1142
1143 instance = container_of(attr, struct thermal_instance, weight_attr);
1144
1145 return sprintf(buf, "%d\n", instance->weight);
1146}
1147
1148static ssize_t
1149thermal_cooling_device_weight_store(struct device *dev,
1150 struct device_attribute *attr,
1151 const char *buf, size_t count)
1152{
1153 struct thermal_instance *instance;
1154 int ret, weight;
1155
1156 ret = kstrtoint(buf, 0, &weight);
1157 if (ret)
1158 return ret;
1159
1160 instance = container_of(attr, struct thermal_instance, weight_attr);
1161 instance->weight = weight;
1162
1163 return count;
1164}
920/* Device management */ 1165/* Device management */
921 1166
922/** 1167/**
@@ -931,6 +1176,9 @@ static const struct attribute_group *cooling_device_attr_groups[] = {
931 * @lower: the Minimum cooling state can be used for this trip point. 1176 * @lower: the Minimum cooling state can be used for this trip point.
932 * THERMAL_NO_LIMIT means no lower limit, 1177 * THERMAL_NO_LIMIT means no lower limit,
933 * and the cooling device can be in cooling state 0. 1178 * and the cooling device can be in cooling state 0.
1179 * @weight: The weight of the cooling device to be bound to the
1180 * thermal zone. Use THERMAL_WEIGHT_DEFAULT for the
1181 * default value
934 * 1182 *
935 * This interface function bind a thermal cooling device to the certain trip 1183 * This interface function bind a thermal cooling device to the certain trip
936 * point of a thermal zone device. 1184 * point of a thermal zone device.
@@ -941,7 +1189,8 @@ static const struct attribute_group *cooling_device_attr_groups[] = {
941int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, 1189int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
942 int trip, 1190 int trip,
943 struct thermal_cooling_device *cdev, 1191 struct thermal_cooling_device *cdev,
944 unsigned long upper, unsigned long lower) 1192 unsigned long upper, unsigned long lower,
1193 unsigned int weight)
945{ 1194{
946 struct thermal_instance *dev; 1195 struct thermal_instance *dev;
947 struct thermal_instance *pos; 1196 struct thermal_instance *pos;
@@ -986,6 +1235,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
986 dev->upper = upper; 1235 dev->upper = upper;
987 dev->lower = lower; 1236 dev->lower = lower;
988 dev->target = THERMAL_NO_TARGET; 1237 dev->target = THERMAL_NO_TARGET;
1238 dev->weight = weight;
989 1239
990 result = get_idr(&tz->idr, &tz->lock, &dev->id); 1240 result = get_idr(&tz->idr, &tz->lock, &dev->id);
991 if (result) 1241 if (result)
@@ -1006,6 +1256,16 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
1006 if (result) 1256 if (result)
1007 goto remove_symbol_link; 1257 goto remove_symbol_link;
1008 1258
1259 sprintf(dev->weight_attr_name, "cdev%d_weight", dev->id);
1260 sysfs_attr_init(&dev->weight_attr.attr);
1261 dev->weight_attr.attr.name = dev->weight_attr_name;
1262 dev->weight_attr.attr.mode = S_IWUSR | S_IRUGO;
1263 dev->weight_attr.show = thermal_cooling_device_weight_show;
1264 dev->weight_attr.store = thermal_cooling_device_weight_store;
1265 result = device_create_file(&tz->device, &dev->weight_attr);
1266 if (result)
1267 goto remove_trip_file;
1268
1009 mutex_lock(&tz->lock); 1269 mutex_lock(&tz->lock);
1010 mutex_lock(&cdev->lock); 1270 mutex_lock(&cdev->lock);
1011 list_for_each_entry(pos, &tz->thermal_instances, tz_node) 1271 list_for_each_entry(pos, &tz->thermal_instances, tz_node)
@@ -1023,6 +1283,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
1023 if (!result) 1283 if (!result)
1024 return 0; 1284 return 0;
1025 1285
1286 device_remove_file(&tz->device, &dev->weight_attr);
1287remove_trip_file:
1026 device_remove_file(&tz->device, &dev->attr); 1288 device_remove_file(&tz->device, &dev->attr);
1027remove_symbol_link: 1289remove_symbol_link:
1028 sysfs_remove_link(&tz->device.kobj, dev->name); 1290 sysfs_remove_link(&tz->device.kobj, dev->name);
@@ -1377,7 +1639,8 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
1377 tz->trip_temp_attrs[indx].name; 1639 tz->trip_temp_attrs[indx].name;
1378 tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO; 1640 tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO;
1379 tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show; 1641 tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show;
1380 if (mask & (1 << indx)) { 1642 if (IS_ENABLED(CONFIG_THERMAL_WRITABLE_TRIPS) &&
1643 mask & (1 << indx)) {
1381 tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR; 1644 tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR;
1382 tz->trip_temp_attrs[indx].attr.store = 1645 tz->trip_temp_attrs[indx].attr.store =
1383 trip_point_temp_store; 1646 trip_point_temp_store;
@@ -1454,7 +1717,7 @@ static void remove_trip_attrs(struct thermal_zone_device *tz)
1454struct thermal_zone_device *thermal_zone_device_register(const char *type, 1717struct thermal_zone_device *thermal_zone_device_register(const char *type,
1455 int trips, int mask, void *devdata, 1718 int trips, int mask, void *devdata,
1456 struct thermal_zone_device_ops *ops, 1719 struct thermal_zone_device_ops *ops,
1457 const struct thermal_zone_params *tzp, 1720 struct thermal_zone_params *tzp,
1458 int passive_delay, int polling_delay) 1721 int passive_delay, int polling_delay)
1459{ 1722{
1460 struct thermal_zone_device *tz; 1723 struct thermal_zone_device *tz;
@@ -1462,6 +1725,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
1462 int result; 1725 int result;
1463 int count; 1726 int count;
1464 int passive = 0; 1727 int passive = 0;
1728 struct thermal_governor *governor;
1465 1729
1466 if (type && strlen(type) >= THERMAL_NAME_LENGTH) 1730 if (type && strlen(type) >= THERMAL_NAME_LENGTH)
1467 return ERR_PTR(-EINVAL); 1731 return ERR_PTR(-EINVAL);
@@ -1548,13 +1812,24 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
1548 if (result) 1812 if (result)
1549 goto unregister; 1813 goto unregister;
1550 1814
1815 /* Add thermal zone params */
1816 result = create_tzp_attrs(&tz->device);
1817 if (result)
1818 goto unregister;
1819
1551 /* Update 'this' zone's governor information */ 1820 /* Update 'this' zone's governor information */
1552 mutex_lock(&thermal_governor_lock); 1821 mutex_lock(&thermal_governor_lock);
1553 1822
1554 if (tz->tzp) 1823 if (tz->tzp)
1555 tz->governor = __find_governor(tz->tzp->governor_name); 1824 governor = __find_governor(tz->tzp->governor_name);
1556 else 1825 else
1557 tz->governor = def_governor; 1826 governor = def_governor;
1827
1828 result = thermal_set_governor(tz, governor);
1829 if (result) {
1830 mutex_unlock(&thermal_governor_lock);
1831 goto unregister;
1832 }
1558 1833
1559 mutex_unlock(&thermal_governor_lock); 1834 mutex_unlock(&thermal_governor_lock);
1560 1835
@@ -1643,7 +1918,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
1643 device_remove_file(&tz->device, &dev_attr_mode); 1918 device_remove_file(&tz->device, &dev_attr_mode);
1644 device_remove_file(&tz->device, &dev_attr_policy); 1919 device_remove_file(&tz->device, &dev_attr_policy);
1645 remove_trip_attrs(tz); 1920 remove_trip_attrs(tz);
1646 tz->governor = NULL; 1921 thermal_set_governor(tz, NULL);
1647 1922
1648 thermal_remove_hwmon_sysfs(tz); 1923 thermal_remove_hwmon_sysfs(tz);
1649 release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); 1924 release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
@@ -1799,7 +2074,11 @@ static int __init thermal_register_governors(void)
1799 if (result) 2074 if (result)
1800 return result; 2075 return result;
1801 2076
1802 return thermal_gov_user_space_register(); 2077 result = thermal_gov_user_space_register();
2078 if (result)
2079 return result;
2080
2081 return thermal_gov_power_allocator_register();
1803} 2082}
1804 2083
1805static void thermal_unregister_governors(void) 2084static void thermal_unregister_governors(void)
@@ -1808,6 +2087,7 @@ static void thermal_unregister_governors(void)
1808 thermal_gov_fair_share_unregister(); 2087 thermal_gov_fair_share_unregister();
1809 thermal_gov_bang_bang_unregister(); 2088 thermal_gov_bang_bang_unregister();
1810 thermal_gov_user_space_unregister(); 2089 thermal_gov_user_space_unregister();
2090 thermal_gov_power_allocator_unregister();
1811} 2091}
1812 2092
1813static int __init thermal_init(void) 2093static int __init thermal_init(void)
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 8e391812e503..d7ac1fccd659 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -46,8 +46,11 @@ struct thermal_instance {
46 unsigned long target; /* expected cooling state */ 46 unsigned long target; /* expected cooling state */
47 char attr_name[THERMAL_NAME_LENGTH]; 47 char attr_name[THERMAL_NAME_LENGTH];
48 struct device_attribute attr; 48 struct device_attribute attr;
49 char weight_attr_name[THERMAL_NAME_LENGTH];
50 struct device_attribute weight_attr;
49 struct list_head tz_node; /* node in tz->thermal_instances */ 51 struct list_head tz_node; /* node in tz->thermal_instances */
50 struct list_head cdev_node; /* node in cdev->thermal_instances */ 52 struct list_head cdev_node; /* node in cdev->thermal_instances */
53 unsigned int weight; /* The weight of the cooling device */
51}; 54};
52 55
53int thermal_register_governor(struct thermal_governor *); 56int thermal_register_governor(struct thermal_governor *);
@@ -85,6 +88,14 @@ static inline int thermal_gov_user_space_register(void) { return 0; }
85static inline void thermal_gov_user_space_unregister(void) {} 88static inline void thermal_gov_user_space_unregister(void) {}
86#endif /* CONFIG_THERMAL_GOV_USER_SPACE */ 89#endif /* CONFIG_THERMAL_GOV_USER_SPACE */
87 90
91#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
92int thermal_gov_power_allocator_register(void);
93void thermal_gov_power_allocator_unregister(void);
94#else
95static inline int thermal_gov_power_allocator_register(void) { return 0; }
96static inline void thermal_gov_power_allocator_unregister(void) {}
97#endif /* CONFIG_THERMAL_GOV_POWER_ALLOCATOR */
98
88/* device tree support */ 99/* device tree support */
89#ifdef CONFIG_THERMAL_OF 100#ifdef CONFIG_THERMAL_OF
90int of_parse_thermal_zones(void); 101int of_parse_thermal_zones(void);
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index bc14dc874594..10c47c048f7a 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -43,6 +43,8 @@
43 43
44#include "ti-bandgap.h" 44#include "ti-bandgap.h"
45 45
46static int ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id);
47
46/*** Helper functions to access registers and their bitfields ***/ 48/*** Helper functions to access registers and their bitfields ***/
47 49
48/** 50/**
@@ -103,19 +105,15 @@ do { \
103 */ 105 */
104static int ti_bandgap_power(struct ti_bandgap *bgp, bool on) 106static int ti_bandgap_power(struct ti_bandgap *bgp, bool on)
105{ 107{
106 int i, ret = 0; 108 int i;
107 109
108 if (!TI_BANDGAP_HAS(bgp, POWER_SWITCH)) { 110 if (!TI_BANDGAP_HAS(bgp, POWER_SWITCH))
109 ret = -ENOTSUPP; 111 return -ENOTSUPP;
110 goto exit;
111 }
112 112
113 for (i = 0; i < bgp->conf->sensor_count; i++) 113 for (i = 0; i < bgp->conf->sensor_count; i++)
114 /* active on 0 */ 114 /* active on 0 */
115 RMW_BITS(bgp, i, temp_sensor_ctrl, bgap_tempsoff_mask, !on); 115 RMW_BITS(bgp, i, temp_sensor_ctrl, bgap_tempsoff_mask, !on);
116 116 return 0;
117exit:
118 return ret;
119} 117}
120 118
121/** 119/**
@@ -298,18 +296,13 @@ static
298int ti_bandgap_adc_to_mcelsius(struct ti_bandgap *bgp, int adc_val, int *t) 296int ti_bandgap_adc_to_mcelsius(struct ti_bandgap *bgp, int adc_val, int *t)
299{ 297{
300 const struct ti_bandgap_data *conf = bgp->conf; 298 const struct ti_bandgap_data *conf = bgp->conf;
301 int ret = 0;
302 299
303 /* look up for temperature in the table and return the temperature */ 300 /* look up for temperature in the table and return the temperature */
304 if (adc_val < conf->adc_start_val || adc_val > conf->adc_end_val) { 301 if (adc_val < conf->adc_start_val || adc_val > conf->adc_end_val)
305 ret = -ERANGE; 302 return -ERANGE;
306 goto exit;
307 }
308 303
309 *t = bgp->conf->conv_table[adc_val - conf->adc_start_val]; 304 *t = bgp->conf->conv_table[adc_val - conf->adc_start_val];
310 305 return 0;
311exit:
312 return ret;
313} 306}
314 307
315/** 308/**
@@ -330,16 +323,14 @@ int ti_bandgap_mcelsius_to_adc(struct ti_bandgap *bgp, long temp, int *adc)
330{ 323{
331 const struct ti_bandgap_data *conf = bgp->conf; 324 const struct ti_bandgap_data *conf = bgp->conf;
332 const int *conv_table = bgp->conf->conv_table; 325 const int *conv_table = bgp->conf->conv_table;
333 int high, low, mid, ret = 0; 326 int high, low, mid;
334 327
335 low = 0; 328 low = 0;
336 high = conf->adc_end_val - conf->adc_start_val; 329 high = conf->adc_end_val - conf->adc_start_val;
337 mid = (high + low) / 2; 330 mid = (high + low) / 2;
338 331
339 if (temp < conv_table[low] || temp > conv_table[high]) { 332 if (temp < conv_table[low] || temp > conv_table[high])
340 ret = -ERANGE; 333 return -ERANGE;
341 goto exit;
342 }
343 334
344 while (low < high) { 335 while (low < high) {
345 if (temp < conv_table[mid]) 336 if (temp < conv_table[mid])
@@ -350,9 +341,7 @@ int ti_bandgap_mcelsius_to_adc(struct ti_bandgap *bgp, long temp, int *adc)
350 } 341 }
351 342
352 *adc = conf->adc_start_val + low; 343 *adc = conf->adc_start_val + low;
353 344 return 0;
354exit:
355 return ret;
356} 345}
357 346
358/** 347/**
@@ -378,13 +367,11 @@ int ti_bandgap_add_hyst(struct ti_bandgap *bgp, int adc_val, int hyst_val,
378 */ 367 */
379 ret = ti_bandgap_adc_to_mcelsius(bgp, adc_val, &temp); 368 ret = ti_bandgap_adc_to_mcelsius(bgp, adc_val, &temp);
380 if (ret < 0) 369 if (ret < 0)
381 goto exit; 370 return ret;
382 371
383 temp += hyst_val; 372 temp += hyst_val;
384 373
385 ret = ti_bandgap_mcelsius_to_adc(bgp, temp, sum); 374 ret = ti_bandgap_mcelsius_to_adc(bgp, temp, sum);
386
387exit:
388 return ret; 375 return ret;
389} 376}
390 377
@@ -542,22 +529,18 @@ exit:
542 */ 529 */
543static inline int ti_bandgap_validate(struct ti_bandgap *bgp, int id) 530static inline int ti_bandgap_validate(struct ti_bandgap *bgp, int id)
544{ 531{
545 int ret = 0;
546
547 if (!bgp || IS_ERR(bgp)) { 532 if (!bgp || IS_ERR(bgp)) {
548 pr_err("%s: invalid bandgap pointer\n", __func__); 533 pr_err("%s: invalid bandgap pointer\n", __func__);
549 ret = -EINVAL; 534 return -EINVAL;
550 goto exit;
551 } 535 }
552 536
553 if ((id < 0) || (id >= bgp->conf->sensor_count)) { 537 if ((id < 0) || (id >= bgp->conf->sensor_count)) {
554 dev_err(bgp->dev, "%s: sensor id out of range (%d)\n", 538 dev_err(bgp->dev, "%s: sensor id out of range (%d)\n",
555 __func__, id); 539 __func__, id);
556 ret = -ERANGE; 540 return -ERANGE;
557 } 541 }
558 542
559exit: 543 return 0;
560 return ret;
561} 544}
562 545
563/** 546/**
@@ -585,12 +568,10 @@ static int _ti_bandgap_write_threshold(struct ti_bandgap *bgp, int id, int val,
585 568
586 ret = ti_bandgap_validate(bgp, id); 569 ret = ti_bandgap_validate(bgp, id);
587 if (ret) 570 if (ret)
588 goto exit; 571 return ret;
589 572
590 if (!TI_BANDGAP_HAS(bgp, TALERT)) { 573 if (!TI_BANDGAP_HAS(bgp, TALERT))
591 ret = -ENOTSUPP; 574 return -ENOTSUPP;
592 goto exit;
593 }
594 575
595 ts_data = bgp->conf->sensors[id].ts_data; 576 ts_data = bgp->conf->sensors[id].ts_data;
596 tsr = bgp->conf->sensors[id].registers; 577 tsr = bgp->conf->sensors[id].registers;
@@ -603,17 +584,15 @@ static int _ti_bandgap_write_threshold(struct ti_bandgap *bgp, int id, int val,
603 } 584 }
604 585
605 if (ret) 586 if (ret)
606 goto exit; 587 return ret;
607 588
608 ret = ti_bandgap_mcelsius_to_adc(bgp, val, &adc_val); 589 ret = ti_bandgap_mcelsius_to_adc(bgp, val, &adc_val);
609 if (ret < 0) 590 if (ret < 0)
610 goto exit; 591 return ret;
611 592
612 spin_lock(&bgp->lock); 593 spin_lock(&bgp->lock);
613 ret = ti_bandgap_update_alert_threshold(bgp, id, adc_val, hot); 594 ret = ti_bandgap_update_alert_threshold(bgp, id, adc_val, hot);
614 spin_unlock(&bgp->lock); 595 spin_unlock(&bgp->lock);
615
616exit:
617 return ret; 596 return ret;
618} 597}
619 598
@@ -656,7 +635,7 @@ static int _ti_bandgap_read_threshold(struct ti_bandgap *bgp, int id,
656 635
657 temp = ti_bandgap_readl(bgp, tsr->bgap_threshold); 636 temp = ti_bandgap_readl(bgp, tsr->bgap_threshold);
658 temp = (temp & mask) >> __ffs(mask); 637 temp = (temp & mask) >> __ffs(mask);
659 ret |= ti_bandgap_adc_to_mcelsius(bgp, temp, &temp); 638 ret = ti_bandgap_adc_to_mcelsius(bgp, temp, &temp);
660 if (ret) { 639 if (ret) {
661 dev_err(bgp->dev, "failed to read thot\n"); 640 dev_err(bgp->dev, "failed to read thot\n");
662 ret = -EIO; 641 ret = -EIO;
@@ -926,11 +905,17 @@ int ti_bandgap_read_temperature(struct ti_bandgap *bgp, int id,
926 if (ret) 905 if (ret)
927 return ret; 906 return ret;
928 907
908 if (!TI_BANDGAP_HAS(bgp, MODE_CONFIG)) {
909 ret = ti_bandgap_force_single_read(bgp, id);
910 if (ret)
911 return ret;
912 }
913
929 spin_lock(&bgp->lock); 914 spin_lock(&bgp->lock);
930 temp = ti_bandgap_read_temp(bgp, id); 915 temp = ti_bandgap_read_temp(bgp, id);
931 spin_unlock(&bgp->lock); 916 spin_unlock(&bgp->lock);
932 917
933 ret |= ti_bandgap_adc_to_mcelsius(bgp, temp, &temp); 918 ret = ti_bandgap_adc_to_mcelsius(bgp, temp, &temp);
934 if (ret) 919 if (ret)
935 return -EIO; 920 return -EIO;
936 921
@@ -991,7 +976,8 @@ void *ti_bandgap_get_sensor_data(struct ti_bandgap *bgp, int id)
991static int 976static int
992ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id) 977ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id)
993{ 978{
994 u32 temp = 0, counter = 1000; 979 u32 counter = 1000;
980 struct temp_sensor_registers *tsr;
995 981
996 /* Select single conversion mode */ 982 /* Select single conversion mode */
997 if (TI_BANDGAP_HAS(bgp, MODE_CONFIG)) 983 if (TI_BANDGAP_HAS(bgp, MODE_CONFIG))
@@ -999,16 +985,27 @@ ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id)
999 985
1000 /* Start of Conversion = 1 */ 986 /* Start of Conversion = 1 */
1001 RMW_BITS(bgp, id, temp_sensor_ctrl, bgap_soc_mask, 1); 987 RMW_BITS(bgp, id, temp_sensor_ctrl, bgap_soc_mask, 1);
1002 /* Wait until DTEMP is updated */
1003 temp = ti_bandgap_read_temp(bgp, id);
1004 988
1005 while ((temp == 0) && --counter) 989 /* Wait for EOCZ going up */
1006 temp = ti_bandgap_read_temp(bgp, id); 990 tsr = bgp->conf->sensors[id].registers;
1007 /* REVISIT: Check correct condition for end of conversion */ 991
992 while (--counter) {
993 if (ti_bandgap_readl(bgp, tsr->temp_sensor_ctrl) &
994 tsr->bgap_eocz_mask)
995 break;
996 }
1008 997
1009 /* Start of Conversion = 0 */ 998 /* Start of Conversion = 0 */
1010 RMW_BITS(bgp, id, temp_sensor_ctrl, bgap_soc_mask, 0); 999 RMW_BITS(bgp, id, temp_sensor_ctrl, bgap_soc_mask, 0);
1011 1000
1001 /* Wait for EOCZ going down */
1002 counter = 1000;
1003 while (--counter) {
1004 if (!(ti_bandgap_readl(bgp, tsr->temp_sensor_ctrl) &
1005 tsr->bgap_eocz_mask))
1006 break;
1007 }
1008
1012 return 0; 1009 return 0;
1013} 1010}
1014 1011
@@ -1294,11 +1291,10 @@ int ti_bandgap_probe(struct platform_device *pdev)
1294 goto free_irqs; 1291 goto free_irqs;
1295 } 1292 }
1296 1293
1297 bgp->div_clk = clk_get(NULL, bgp->conf->div_ck_name); 1294 bgp->div_clk = clk_get(NULL, bgp->conf->div_ck_name);
1298 ret = IS_ERR(bgp->div_clk); 1295 ret = IS_ERR(bgp->div_clk);
1299 if (ret) { 1296 if (ret) {
1300 dev_err(&pdev->dev, 1297 dev_err(&pdev->dev, "failed to request div_ts_ck clock ref\n");
1301 "failed to request div_ts_ck clock ref\n");
1302 ret = PTR_ERR(bgp->div_clk); 1298 ret = PTR_ERR(bgp->div_clk);
1303 goto free_irqs; 1299 goto free_irqs;
1304 } 1300 }
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index a38c1756442a..c7c5b3779dac 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -75,7 +75,7 @@ static inline int ti_thermal_hotspot_temperature(int t, int s, int c)
75} 75}
76 76
77/* thermal zone ops */ 77/* thermal zone ops */
78/* Get temperature callback function for thermal zone*/ 78/* Get temperature callback function for thermal zone */
79static inline int __ti_thermal_get_temp(void *devdata, long *temp) 79static inline int __ti_thermal_get_temp(void *devdata, long *temp)
80{ 80{
81 struct thermal_zone_device *pcb_tz = NULL; 81 struct thermal_zone_device *pcb_tz = NULL;
@@ -146,7 +146,8 @@ static int ti_thermal_bind(struct thermal_zone_device *thermal,
146 return thermal_zone_bind_cooling_device(thermal, 0, cdev, 146 return thermal_zone_bind_cooling_device(thermal, 0, cdev,
147 /* bind with min and max states defined by cpu_cooling */ 147 /* bind with min and max states defined by cpu_cooling */
148 THERMAL_NO_LIMIT, 148 THERMAL_NO_LIMIT,
149 THERMAL_NO_LIMIT); 149 THERMAL_NO_LIMIT,
150 THERMAL_WEIGHT_DEFAULT);
150} 151}
151 152
152/* Unbind callback functions for thermal zone */ 153/* Unbind callback functions for thermal zone */
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 9ea3d9d49ffc..50d1d2cb091a 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -68,7 +68,7 @@ struct phy_dev_entry {
68 struct thermal_zone_device *tzone; 68 struct thermal_zone_device *tzone;
69}; 69};
70 70
71static const struct thermal_zone_params pkg_temp_tz_params = { 71static struct thermal_zone_params pkg_temp_tz_params = {
72 .no_hwmon = true, 72 .no_hwmon = true,
73}; 73};
74 74