author	Mike Travis <travis@sgi.com>	2009-01-04 08:18:10 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-06 03:05:43 -0500
commit	e39ad415ac15116df213dfa2aa2a4f1b0857af9c (patch)
tree	9db4cb172bbcf07cb9db381fbfc7e8cda33b4eda /arch/x86
parent	7503bfbae89eba07b46441a5d1594647f6b8ab7d (diff)
cpumask: use work_on_cpu in acpi-cpufreq.c for read_measured_perf_ctrs
Impact: use new cpumask API to reduce stack usage

Replace the saving of current->cpus_allowed and set_cpus_allowed_ptr() with
a work_on_cpu function for read_measured_perf_ctrs().

Basically splits off the work function from get_measured_perf which is run
on the designated cpu. Moves definition of struct perf_cur out of function
local namespace, and is used as the work function argument. References in
get_measured_perf use values in the perf_cur struct.

Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
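For readers unfamiliar with the API, here is a minimal sketch of the work_on_cpu() pattern this patch adopts. The names struct sample_data, sample_read_on_cpu() and sample_get() are hypothetical; only work_on_cpu() itself (declared in <linux/workqueue.h>, returning the callback's long result) is the real interface:

/*
 * Minimal sketch (hypothetical names) of the work_on_cpu() pattern:
 * the callback runs in a workqueue thread bound to the requested CPU,
 * so the caller never has to touch current->cpus_allowed.
 */
#include <linux/smp.h>
#include <linux/workqueue.h>

struct sample_data {			/* stand-in for struct perf_cur */
	unsigned long cpu_seen;
};

/* Runs on the requested CPU; its return value comes back to the caller. */
static long sample_read_on_cpu(void *_data)
{
	struct sample_data *data = _data;

	data->cpu_seen = smp_processor_id();	/* any per-CPU read goes here */
	return 0;				/* 0 = success, by this sketch's convention */
}

static unsigned long sample_get(unsigned int cpu)
{
	struct sample_data data;

	/* work_on_cpu() queues the callback on 'cpu' and waits for it. */
	if (work_on_cpu(cpu, sample_read_on_cpu, &data) != 0)
		return 0;			/* callback reported failure */

	return data.cpu_seen;
}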
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 83
1 file changed, 43 insertions(+), 40 deletions(-)
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 4e4f2b04dac2..06fcd8f9323c 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -245,6 +245,30 @@ static u32 get_cur_val(const struct cpumask *mask)
 	return cmd.val;
 }
 
+struct perf_cur {
+	union {
+		struct {
+			u32 lo;
+			u32 hi;
+		} split;
+		u64 whole;
+	} aperf_cur, mperf_cur;
+};
+
+
+static long read_measured_perf_ctrs(void *_cur)
+{
+	struct perf_cur *cur = _cur;
+
+	rdmsr(MSR_IA32_APERF, cur->aperf_cur.split.lo, cur->aperf_cur.split.hi);
+	rdmsr(MSR_IA32_MPERF, cur->mperf_cur.split.lo, cur->mperf_cur.split.hi);
+
+	wrmsr(MSR_IA32_APERF, 0, 0);
+	wrmsr(MSR_IA32_MPERF, 0, 0);
+
+	return 0;
+}
+
 /*
  * Return the measured active (C0) frequency on this CPU since last call
  * to this function.
@@ -261,31 +285,12 @@ static u32 get_cur_val(const struct cpumask *mask)
 static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 					unsigned int cpu)
 {
-	union {
-		struct {
-			u32 lo;
-			u32 hi;
-		} split;
-		u64 whole;
-	} aperf_cur, mperf_cur;
-
-	cpumask_t saved_mask;
+	struct perf_cur cur;
 	unsigned int perf_percent;
 	unsigned int retval;
 
-	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-	if (get_cpu() != cpu) {
-		/* We were not able to run on requested processor */
-		put_cpu();
+	if (!work_on_cpu(cpu, read_measured_perf_ctrs, &cur))
 		return 0;
-	}
-
-	rdmsr(MSR_IA32_APERF, aperf_cur.split.lo, aperf_cur.split.hi);
-	rdmsr(MSR_IA32_MPERF, mperf_cur.split.lo, mperf_cur.split.hi);
-
-	wrmsr(MSR_IA32_APERF, 0,0);
-	wrmsr(MSR_IA32_MPERF, 0,0);
 
 #ifdef __i386__
 	/*
@@ -293,37 +298,39 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 	 * Get an approximate value. Return failure in case we cannot get
 	 * an approximate value.
 	 */
-	if (unlikely(aperf_cur.split.hi || mperf_cur.split.hi)) {
+	if (unlikely(cur.aperf_cur.split.hi || cur.mperf_cur.split.hi)) {
 		int shift_count;
 		u32 h;
 
-		h = max_t(u32, aperf_cur.split.hi, mperf_cur.split.hi);
+		h = max_t(u32, cur.aperf_cur.split.hi, cur.mperf_cur.split.hi);
 		shift_count = fls(h);
 
-		aperf_cur.whole >>= shift_count;
-		mperf_cur.whole >>= shift_count;
+		cur.aperf_cur.whole >>= shift_count;
+		cur.mperf_cur.whole >>= shift_count;
 	}
 
-	if (((unsigned long)(-1) / 100) < aperf_cur.split.lo) {
+	if (((unsigned long)(-1) / 100) < cur.aperf_cur.split.lo) {
 		int shift_count = 7;
-		aperf_cur.split.lo >>= shift_count;
-		mperf_cur.split.lo >>= shift_count;
+		cur.aperf_cur.split.lo >>= shift_count;
+		cur.mperf_cur.split.lo >>= shift_count;
 	}
 
-	if (aperf_cur.split.lo && mperf_cur.split.lo)
-		perf_percent = (aperf_cur.split.lo * 100) / mperf_cur.split.lo;
+	if (cur.aperf_cur.split.lo && cur.mperf_cur.split.lo)
+		perf_percent = (cur.aperf_cur.split.lo * 100) /
+				cur.mperf_cur.split.lo;
 	else
 		perf_percent = 0;
 
 #else
-	if (unlikely(((unsigned long)(-1) / 100) < aperf_cur.whole)) {
+	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf_cur.whole)) {
 		int shift_count = 7;
-		aperf_cur.whole >>= shift_count;
-		mperf_cur.whole >>= shift_count;
+		cur.aperf_cur.whole >>= shift_count;
+		cur.mperf_cur.whole >>= shift_count;
 	}
 
-	if (aperf_cur.whole && mperf_cur.whole)
-		perf_percent = (aperf_cur.whole * 100) / mperf_cur.whole;
+	if (cur.aperf_cur.whole && cur.mperf_cur.whole)
+		perf_percent = (cur.aperf_cur.whole * 100) /
+				cur.mperf_cur.whole;
 	else
 		perf_percent = 0;
 
@@ -331,10 +338,6 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 
 	retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100;
 
-	put_cpu();
-	set_cpus_allowed_ptr(current, &saved_mask);
-
-	dprintk("cpu %d: performance percent %d\n", cpu, perf_percent);
 	return retval;
 }
 
@@ -352,7 +355,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 	}
 
 	cached_freq = data->freq_table[data->acpi_data->state].frequency;
-	freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
+	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
 	if (freq != cached_freq) {
 		/*
 		 * The dreaded BIOS frequency change behind our back.