path: root/arch/x86/kernel/cpu/mcheck/therm_throt.c
author		Fenghua Yu <fenghua.yu@intel.com>	2010-07-29 20:13:45 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2010-08-03 18:58:56 -0400
commit		0199114c31798af5b83841b21759b64171060d9b (patch)
tree		a50a9eb0b1f2a43fb92d6143931fb0f688d766e8 /arch/x86/kernel/cpu/mcheck/therm_throt.c
parent		55d435a227bd28c77afab326de44dfacc0b15059 (diff)
x86, hwmon: Package Level Thermal/Power: power limit
The power limit notification feature is published in the Intel 64 and IA-32 Architectures SDM, Vol. 3A, Section 14.5.6 "Power Limit Notification", and is first implemented on the Intel Sandy Bridge platform.

This patch handles the notification interrupt. The interrupt handler dumps the power limit information into log_buf, logs the event in the mce log, and increments the event counters (core_power_limit and package_power_limit). Upper-level applications can use this data to detect system health or to diagnose functionality/performance issues. In the future, the event could be handled in a more sophisticated way.

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
LKML-Reference: <1280448826-12004-5-git-send-email-fenghua.yu@intel.com>
Reviewed-by: Len Brown <len.brown@intel.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
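[Editor's aside, not part of the patch] Since the changelog says upper-level applications can consume the new counters, here is a minimal userspace sketch of how a monitor might read them. It assumes the new core_power_limit_count and package_power_limit_count files appear next to the existing core_throttle_count file under /sys/devices/system/cpu/cpuN/thermal_throttle/ (the sysfs group this driver already registers); treat the path and the program itself as illustrative assumptions.

/*
 * Hypothetical monitor: read the per-CPU thermal/power-limit event
 * counters exported under the "thermal_throttle" sysfs group.
 */
#include <stdio.h>

static unsigned long read_counter(int cpu, const char *name)
{
	char path[128];
	unsigned long val = 0;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/thermal_throttle/%s", cpu, name);
	f = fopen(path, "r");
	if (!f)
		return 0;	/* file absent: counter not supported on this CPU */
	if (fscanf(f, "%lu", &val) != 1)
		val = 0;
	fclose(f);
	return val;
}

int main(void)
{
	printf("cpu0 core throttle events:       %lu\n",
	       read_counter(0, "core_throttle_count"));
	printf("cpu0 core power limit events:    %lu\n",
	       read_counter(0, "core_power_limit_count"));
	printf("cpu0 package power limit events: %lu\n",
	       read_counter(0, "package_power_limit_count"));
	return 0;
}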
Diffstat (limited to 'arch/x86/kernel/cpu/mcheck/therm_throt.c')
-rw-r--r--	arch/x86/kernel/cpu/mcheck/therm_throt.c | 183
1 file changed, 129 insertions(+), 54 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index d307f9f64c23..c2a8b26d4fea 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -34,20 +34,25 @@
 /* How long to wait between reporting thermal events */
 #define CHECK_INTERVAL		(300 * HZ)
 
+#define THERMAL_THROTTLING_EVENT	0
+#define POWER_LIMIT_EVENT		1
+
 /*
- * Current thermal throttling state:
+ * Current thermal event state:
  */
 struct _thermal_state {
-	bool			is_throttled;
-
+	bool			new_event;
+	int			event;
 	u64			next_check;
-	unsigned long		throttle_count;
-	unsigned long		last_throttle_count;
+	unsigned long		count;
+	unsigned long		last_count;
 };
 
 struct thermal_state {
-	struct _thermal_state core;
-	struct _thermal_state package;
+	struct _thermal_state core_throttle;
+	struct _thermal_state core_power_limit;
+	struct _thermal_state package_throttle;
+	struct _thermal_state package_power_limit;
 };
 
 static DEFINE_PER_CPU(struct thermal_state, thermal_state);
@@ -62,9 +67,9 @@ static u32 lvtthmr_init __read_mostly;
 			therm_throt_sysdev_show_##_name,	\
 			NULL)					\
 
-#define define_therm_throt_sysdev_show_func(level, name)	\
+#define define_therm_throt_sysdev_show_func(event, name)	\
 								\
-static ssize_t therm_throt_sysdev_show_##level##_##name(	\
+static ssize_t therm_throt_sysdev_show_##event##_##name(	\
 			struct sys_device *dev,			\
 			struct sysdev_attribute *attr,		\
 			char *buf)				\
@@ -75,7 +80,7 @@ static ssize_t therm_throt_sysdev_show_##level##_##name( \
 	preempt_disable();	/* CPU hotplug */		\
 	if (cpu_online(cpu)) {					\
 		ret = sprintf(buf, "%lu\n",			\
-			per_cpu(thermal_state, cpu).level.name);	\
+			per_cpu(thermal_state, cpu).event.name);	\
 	} else							\
 		ret = 0;					\
 	preempt_enable();					\
@@ -83,23 +88,32 @@ static ssize_t therm_throt_sysdev_show_##level##_##name( \
 	return ret;						\
 }
 
-define_therm_throt_sysdev_show_func(core, throttle_count);
+define_therm_throt_sysdev_show_func(core_throttle, count);
 define_therm_throt_sysdev_one_ro(core_throttle_count);
 
-define_therm_throt_sysdev_show_func(package, throttle_count);
+define_therm_throt_sysdev_show_func(core_power_limit, count);
+define_therm_throt_sysdev_one_ro(core_power_limit_count);
+
+define_therm_throt_sysdev_show_func(package_throttle, count);
 define_therm_throt_sysdev_one_ro(package_throttle_count);
 
+define_therm_throt_sysdev_show_func(package_power_limit, count);
+define_therm_throt_sysdev_one_ro(package_power_limit_count);
+
 static struct attribute *thermal_throttle_attrs[] = {
 	&attr_core_throttle_count.attr,
 	NULL
 };
 
-static struct attribute_group thermal_throttle_attr_group = {
+static struct attribute_group thermal_attr_group = {
 	.attrs	= thermal_throttle_attrs,
 	.name	= "thermal_throttle"
 };
 #endif /* CONFIG_SYSFS */
 
+#define CORE_LEVEL	0
+#define PACKAGE_LEVEL	1
+
 /***
  * therm_throt_process - Process thermal throttling event from interrupt
  * @curr: Whether the condition is current or not (boolean), since the
@@ -116,49 +130,70 @@ static struct attribute_group thermal_throttle_attr_group = {
  *          1 : Event should be logged further, and a message has been
  *              printed to the syslog.
  */
-#define CORE_LEVEL	0
-#define PACKAGE_LEVEL	1
-static int therm_throt_process(bool is_throttled, int level)
+static int therm_throt_process(bool new_event, int event, int level)
 {
 	struct _thermal_state *state;
-	unsigned int this_cpu;
-	bool was_throttled;
+	unsigned int this_cpu = smp_processor_id();
+	bool old_event;
 	u64 now;
+	struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
 
-	this_cpu = smp_processor_id();
 	now = get_jiffies_64();
-	if (level == CORE_LEVEL)
-		state = &per_cpu(thermal_state, this_cpu).core;
-	else
-		state = &per_cpu(thermal_state, this_cpu).package;
+	if (level == CORE_LEVEL) {
+		if (event == THERMAL_THROTTLING_EVENT)
+			state = &pstate->core_throttle;
+		else if (event == POWER_LIMIT_EVENT)
+			state = &pstate->core_power_limit;
+		else
+			return 0;
+	} else if (level == PACKAGE_LEVEL) {
+		if (event == THERMAL_THROTTLING_EVENT)
+			state = &pstate->package_throttle;
+		else if (event == POWER_LIMIT_EVENT)
+			state = &pstate->package_power_limit;
+		else
+			return 0;
+	} else
+		return 0;
 
-	was_throttled = state->is_throttled;
-	state->is_throttled = is_throttled;
+	old_event = state->new_event;
+	state->new_event = new_event;
 
-	if (is_throttled)
-		state->throttle_count++;
+	if (new_event)
+		state->count++;
 
 	if (time_before64(now, state->next_check) &&
-			state->throttle_count != state->last_throttle_count)
+			state->count != state->last_count)
 		return 0;
 
 	state->next_check = now + CHECK_INTERVAL;
-	state->last_throttle_count = state->throttle_count;
+	state->last_count = state->count;
 
 	/* if we just entered the thermal event */
-	if (is_throttled) {
-		printk(KERN_CRIT "CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
-		       this_cpu,
-		       level == CORE_LEVEL ? "Core" : "Package",
-		       state->throttle_count);
+	if (new_event) {
+		if (event == THERMAL_THROTTLING_EVENT)
+			printk(KERN_CRIT "CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
+				this_cpu,
+				level == CORE_LEVEL ? "Core" : "Package",
+				state->count);
+		else
+			printk(KERN_CRIT "CPU%d: %s power limit notification (total events = %lu)\n",
+				this_cpu,
+				level == CORE_LEVEL ? "Core" : "Package",
+				state->count);
 
 		add_taint(TAINT_MACHINE_CHECK);
 		return 1;
 	}
-	if (was_throttled) {
-		printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
-		       this_cpu,
-		       level == CORE_LEVEL ? "Core" : "Package");
+	if (old_event) {
+		if (event == THERMAL_THROTTLING_EVENT)
+			printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
+				this_cpu,
+				level == CORE_LEVEL ? "Core" : "Package");
+		else
+			printk(KERN_INFO "CPU%d: %s power limit normal\n",
+				this_cpu,
+				level == CORE_LEVEL ? "Core" : "Package");
 		return 1;
 	}
 
@@ -172,21 +207,29 @@ static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
 	int err;
 	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
 
-	err = sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group);
+	err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
 	if (err)
 		return err;
 
+	if (cpu_has(c, X86_FEATURE_PLN))
+		err = sysfs_add_file_to_group(&sys_dev->kobj,
+					      &attr_core_power_limit_count.attr,
+					      thermal_attr_group.name);
 	if (cpu_has(c, X86_FEATURE_PTS))
 		err = sysfs_add_file_to_group(&sys_dev->kobj,
 					      &attr_package_throttle_count.attr,
-					      thermal_throttle_attr_group.name);
+					      thermal_attr_group.name);
+	if (cpu_has(c, X86_FEATURE_PLN))
+		err = sysfs_add_file_to_group(&sys_dev->kobj,
+					      &attr_package_power_limit_count.attr,
+					      thermal_attr_group.name);
 
 	return err;
 }
 
 static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
 {
-	sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
+	sysfs_remove_group(&sys_dev->kobj, &thermal_attr_group);
 }
 
 /* Mutex protecting device creation against CPU hotplug: */
@@ -257,6 +300,17 @@ device_initcall(thermal_throttle_init_device);
 
 #endif /* CONFIG_SYSFS */
 
+/*
+ * Set up the most two significant bit to notify mce log that this thermal
+ * event type.
+ * This is a temp solution. May be changed in the future with mce log
+ * infrasture.
+ */
+#define CORE_THROTTLED		(0)
+#define CORE_POWER_LIMIT	((__u64)1 << 62)
+#define PACKAGE_THROTTLED	((__u64)2 << 62)
+#define PACKAGE_POWER_LIMIT	((__u64)3 << 62)
+
 /* Thermal transition interrupt handler */
 static void intel_thermal_interrupt(void)
 {
@@ -264,21 +318,31 @@ static void intel_thermal_interrupt(void)
 	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
 
 	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
+
 	if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
+				THERMAL_THROTTLING_EVENT,
 				CORE_LEVEL) != 0)
-		mce_log_therm_throt_event(msr_val);
+		mce_log_therm_throt_event(CORE_THROTTLED | msr_val);
+
+	if (cpu_has(c, X86_FEATURE_PLN))
+		if (therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
+					POWER_LIMIT_EVENT,
+					CORE_LEVEL) != 0)
+			mce_log_therm_throt_event(CORE_POWER_LIMIT | msr_val);
 
 	if (cpu_has(c, X86_FEATURE_PTS)) {
 		rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
 		if (therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
+					THERMAL_THROTTLING_EVENT,
 					PACKAGE_LEVEL) != 0)
-			/*
-			 * Set up the most significant bit to notify mce log
-			 * that this thermal event is a package level event.
-			 * This is a temp solution. May be changed in the future
-			 * with mce log infrasture.
-			 */
-			mce_log_therm_throt_event(((__u64)1 << 63) | msr_val);
+			mce_log_therm_throt_event(PACKAGE_THROTTLED | msr_val);
+		if (cpu_has(c, X86_FEATURE_PLN))
+			if (therm_throt_process(msr_val &
+					PACKAGE_THERM_STATUS_POWER_LIMIT,
+					POWER_LIMIT_EVENT,
+					PACKAGE_LEVEL) != 0)
+				mce_log_therm_throt_event(PACKAGE_POWER_LIMIT
+							  | msr_val);
 	}
 }
 
@@ -381,14 +445,25 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
 	apic_write(APIC_LVTTHMR, h);
 
 	rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
-	wrmsr(MSR_IA32_THERM_INTERRUPT,
-	      l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);
+	if (cpu_has(c, X86_FEATURE_PLN))
+		wrmsr(MSR_IA32_THERM_INTERRUPT,
+		      l | (THERM_INT_LOW_ENABLE
+			| THERM_INT_HIGH_ENABLE | THERM_INT_PLN_ENABLE), h);
+	else
+		wrmsr(MSR_IA32_THERM_INTERRUPT,
+		      l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);
 
 	if (cpu_has(c, X86_FEATURE_PTS)) {
 		rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
-		wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
-		      l | (PACKAGE_THERM_INT_LOW_ENABLE
-			| PACKAGE_THERM_INT_HIGH_ENABLE), h);
+		if (cpu_has(c, X86_FEATURE_PLN))
+			wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
+			      l | (PACKAGE_THERM_INT_LOW_ENABLE
+				| PACKAGE_THERM_INT_HIGH_ENABLE
+				| PACKAGE_THERM_INT_PLN_ENABLE), h);
+		else
+			wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
+			      l | (PACKAGE_THERM_INT_LOW_ENABLE
+				| PACKAGE_THERM_INT_HIGH_ENABLE), h);
 	}
 
 	smp_thermal_vector = intel_thermal_interrupt;
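[Editor's aside, not part of the patch] The interrupt handler tags the value it passes to mce_log_therm_throt_event() with the event type in bits 63:62 (the CORE_THROTTLED/CORE_POWER_LIMIT/PACKAGE_THROTTLED/PACKAGE_POWER_LIMIT constants above) and leaves the raw THERM_STATUS MSR contents in the lower bits. A standalone sketch of how a consumer of the mce log could decode such a value follows; the helper name and userspace framing are illustrative assumptions, not part of the kernel code.

/*
 * Illustration only: decode the event-type tag that the handler ORs into
 * the top two bits of the status value before logging it.
 */
#include <stdint.h>

#define CORE_THROTTLED		((uint64_t)0 << 62)
#define CORE_POWER_LIMIT	((uint64_t)1 << 62)
#define PACKAGE_THROTTLED	((uint64_t)2 << 62)
#define PACKAGE_POWER_LIMIT	((uint64_t)3 << 62)

static const char *therm_event_type(uint64_t logged)
{
	uint64_t tag = logged & ((uint64_t)3 << 62);	/* keep only the tag bits */

	/* The remaining bits, logged & ~((uint64_t)3 << 62), hold the raw
	 * IA32_(PACKAGE_)THERM_STATUS MSR value read in the interrupt handler. */
	if (tag == CORE_THROTTLED)
		return "core throttled";
	if (tag == CORE_POWER_LIMIT)
		return "core power limit";
	if (tag == PACKAGE_THROTTLED)
		return "package throttled";
	return "package power limit";
}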