author	Mike Travis <travis@sgi.com>	2009-01-04 08:18:08 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-06 03:05:37 -0500
commit	4d8bb5374924fc736ce275bfd1619b87a2106860 (patch)
tree	31c6068da861067ce17c673757e1b9f719e8ebd6 /arch/x86/kernel/cpu
parent	c74f31c035f46a095a0c72f80246a65b314205a5 (diff)
cpumask: use cpumask_var_t in acpi-cpufreq.c
Impact: cleanup, reduce stack usage, use new cpumask API.

Replace the cpumask_t in struct drv_cmd with a cpumask_var_t. Remove the
unneeded online_policy_cpus cpumask_t in acpi_cpufreq_target. Update
references to use the new cpumask API.

Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 58
1 file changed, 29 insertions(+), 29 deletions(-)
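For context before reading the hunks: a minimal sketch of the cpumask_var_t allocate/copy/iterate/free pattern that this patch adopts. This is not part of the patch; the function name example_walk_mask() and the debug message are purely illustrative.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/printk.h>

/*
 * Illustrative only.  With CONFIG_CPUMASK_OFFSTACK=y, cpumask_var_t is a
 * pointer and must be allocated with alloc_cpumask_var(); otherwise it is
 * a plain on-stack array and the allocation is a no-op that always
 * succeeds.  Either way the same calls work, which is what lets the patch
 * drop the large cpumask_t from struct drv_cmd and the function stack.
 */
static int example_walk_mask(const struct cpumask *src)
{
	cpumask_var_t mask;
	int cpu;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, src);	/* replaces: dst_mask = *src          */
	for_each_cpu(cpu, mask)		/* replaces: for_each_cpu_mask_nr()   */
		pr_debug("cpu %d is in the mask\n", cpu);

	free_cpumask_var(mask);
	return 0;
}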
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 0b31939862d6..fb594170dc53 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -145,7 +145,7 @@ typedef union {
 
 struct drv_cmd {
 	unsigned int type;
-	cpumask_t mask;
+	cpumask_var_t mask;
 	drv_addr_union addr;
 	u32 val;
 };
@@ -193,7 +193,7 @@ static void drv_read(struct drv_cmd *cmd)
 	cpumask_t saved_mask = current->cpus_allowed;
 	cmd->val = 0;
 
-	set_cpus_allowed_ptr(current, &cmd->mask);
+	set_cpus_allowed_ptr(current, cmd->mask);
 	do_drv_read(cmd);
 	set_cpus_allowed_ptr(current, &saved_mask);
 }
@@ -203,8 +203,8 @@ static void drv_write(struct drv_cmd *cmd)
 	cpumask_t saved_mask = current->cpus_allowed;
 	unsigned int i;
 
-	for_each_cpu_mask_nr(i, cmd->mask) {
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+	for_each_cpu(i, cmd->mask) {
+		set_cpus_allowed_ptr(current, cpumask_of(i));
 		do_drv_write(cmd);
 	}
 
@@ -212,22 +212,22 @@ static void drv_write(struct drv_cmd *cmd)
 	return;
 }
 
-static u32 get_cur_val(const cpumask_t *mask)
+static u32 get_cur_val(const struct cpumask *mask)
 {
 	struct acpi_processor_performance *perf;
 	struct drv_cmd cmd;
 
-	if (unlikely(cpus_empty(*mask)))
+	if (unlikely(cpumask_empty(mask)))
 		return 0;
 
-	switch (per_cpu(drv_data, first_cpu(*mask))->cpu_feature) {
+	switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
 		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
 		break;
 	case SYSTEM_IO_CAPABLE:
 		cmd.type = SYSTEM_IO_CAPABLE;
-		perf = per_cpu(drv_data, first_cpu(*mask))->acpi_data;
+		perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data;
 		cmd.addr.io.port = perf->control_register.address;
 		cmd.addr.io.bit_width = perf->control_register.bit_width;
 		break;
@@ -235,7 +235,7 @@ static u32 get_cur_val(const cpumask_t *mask)
 		return 0;
 	}
 
-	cmd.mask = *mask;
+	cpumask_copy(cmd.mask, mask);
 
 	drv_read(&cmd);
 
@@ -386,7 +386,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
 	struct acpi_processor_performance *perf;
 	struct cpufreq_freqs freqs;
-	cpumask_t online_policy_cpus;
 	struct drv_cmd cmd;
 	unsigned int next_state = 0; /* Index into freq_table */
 	unsigned int next_perf_state = 0; /* Index into perf table */
@@ -401,20 +400,18 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		return -ENODEV;
 	}
 
+	if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL)))
+		return -ENOMEM;
+
 	perf = data->acpi_data;
 	result = cpufreq_frequency_table_target(policy,
 					data->freq_table,
 					target_freq,
 					relation, &next_state);
-	if (unlikely(result))
-		return -ENODEV;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	/* cpufreq holds the hotplug lock, so we are safe from here on */
-	cpumask_and(&online_policy_cpus, cpu_online_mask, policy->cpus);
-#else
-	online_policy_cpus = policy->cpus;
-#endif
+	if (unlikely(result)) {
+		result = -ENODEV;
+		goto out;
+	}
 
 	next_perf_state = data->freq_table[next_state].index;
 	if (perf->state == next_perf_state) {
@@ -425,7 +422,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		} else {
 			dprintk("Already at target state (P%d)\n",
 				next_perf_state);
-			return 0;
+			goto out;
 		}
 	}
 
@@ -444,19 +441,19 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		cmd.val = (u32) perf->states[next_perf_state].control;
 		break;
 	default:
-		return -ENODEV;
+		result = -ENODEV;
+		goto out;
 	}
 
-	cpus_clear(cmd.mask);
-
+	/* cpufreq holds the hotplug lock, so we are safe from here on */
 	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
-		cmd.mask = online_policy_cpus;
+		cpumask_and(cmd.mask, cpu_online_mask, policy->cpus);
 	else
-		cpu_set(policy->cpu, cmd.mask);
+		cpumask_copy(cmd.mask, cpumask_of(policy->cpu));
 
 	freqs.old = perf->states[perf->state].core_frequency * 1000;
 	freqs.new = data->freq_table[next_state].frequency;
-	for_each_cpu_mask_nr(i, cmd.mask) {
+	for_each_cpu(i, cmd.mask) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -464,19 +461,22 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	drv_write(&cmd);
 
 	if (acpi_pstate_strict) {
-		if (!check_freqs(&cmd.mask, freqs.new, data)) {
+		if (!check_freqs(cmd.mask, freqs.new, data)) {
 			dprintk("acpi_cpufreq_target failed (%d)\n",
 				policy->cpu);
-			return -EAGAIN;
+			result = -EAGAIN;
+			goto out;
 		}
 	}
 
-	for_each_cpu_mask_nr(i, cmd.mask) {
+	for_each_cpu(i, cmd.mask) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
 	perf->state = next_perf_state;
 
+out:
+	free_cpumask_var(cmd.mask);
 	return result;
 }
 
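The hunks above also convert the early returns in acpi_cpufreq_target() into a single-exit error path, which is what keeps the newly allocated cpumask from leaking. The sketch below shows that shape in isolation; it is not part of the patch, and example_target() and some_setup_fails() are hypothetical names standing in for the real lookup and validation steps.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* Hypothetical stand-in for the steps that can fail after allocation. */
static bool some_setup_fails(void)
{
	return false;
}

/*
 * Illustrative only: allocate the mask once at entry, funnel every error
 * path through "out:", and free the mask exactly once before returning.
 */
static int example_target(void)
{
	cpumask_var_t mask;
	int result = 0;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	if (some_setup_fails()) {
		result = -ENODEV;
		goto out;
	}

	/* ... do the real work on "mask" here ... */

out:
	free_cpumask_var(mask);
	return result;
}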