path: root/arch/x86/kernel/cpu
author    Rusty Russell <rusty@rustcorp.com.au>    2009-03-13 00:19:50 -0400
committer Rusty Russell <rusty@rustcorp.com.au>    2009-03-13 00:19:50 -0400
commit    7ad728f98162cb1af06a85b2a5fc422dddd4fb78 (patch)
tree      85a326e35ff5d37d89aa7a687a623cded6fcb190 /arch/x86/kernel/cpu
parent    fcef8576d8a64fc603e719c97d423f9f6d4e0e8b (diff)
cpumask: x86: convert cpu_sibling_map/cpu_core_map to cpumask_var_t
Impact: reduce per-cpu size for CONFIG_CPUMASK_OFFSTACK=y

In most places it's cleaner to use the cpu_sibling_mask() and
cpu_core_mask() accessor wrappers, which already exist.

I couldn't avoid cleaning up the access in oprofile, either.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
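For context, the cpu_sibling_mask()/cpu_core_mask() accessors the message refers to are thin wrappers around the per-cpu maps. A minimal sketch of that pattern follows; it is illustrative only, the real declarations live in the x86 topology headers and may differ in detail.

/* Illustrative sketch, not the exact tree contents: once the maps are
 * cpumask_var_t, callers obtain the mask through an accessor instead of
 * taking &per_cpu(...) directly, so the storage can live off-stack when
 * CONFIG_CPUMASK_OFFSTACK=y without touching every call site.
 */
DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);

static inline struct cpumask *cpu_sibling_mask(int cpu)
{
	return per_cpu(cpu_sibling_map, cpu);
}

static inline struct cpumask *cpu_core_mask(int cpu)
{
	return per_cpu(cpu_core_map, cpu);
}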
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c      2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c     13
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-ich.c    2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c        6
-rw-r--r--  arch/x86/kernel/cpu/proc.c                     2
5 files changed, 14 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 3178c3acd97e..d8341d17c189 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -203,7 +203,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 	unsigned int i;
 
 #ifdef CONFIG_SMP
-	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
+	cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
 #endif
 
 	/* Errata workaround */
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 6428aa17b40e..e8fd76f98883 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -56,7 +56,10 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
 static int cpu_family = CPU_OPTERON;
 
 #ifndef CONFIG_SMP
-DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+static inline const struct cpumask *cpu_core_mask(int cpu)
+{
+	return cpumask_of(0);
+}
 #endif
 
 /* Return a frequency in MHz, given an input fid */
@@ -654,7 +657,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst,
 
 	dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
 	data->powernow_table = powernow_table;
-	if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
+	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
 		print_basics(data);
 
 	for (j = 0; j < data->numps; j++)
@@ -808,7 +811,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 
 	/* fill in data */
 	data->numps = data->acpi_data.state_count;
-	if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
+	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
 		print_basics(data);
 	powernow_k8_acpi_pst_values(data, 0);
 
@@ -1224,7 +1227,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	if (cpu_family == CPU_HW_PSTATE)
 		cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
 	else
-		cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
+		cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
 	data->available_cores = pol->cpus;
 
 	if (cpu_family == CPU_HW_PSTATE)
@@ -1286,7 +1289,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
 	unsigned int khz = 0;
 	unsigned int first;
 
-	first = first_cpu(per_cpu(cpu_core_map, cpu));
+	first = cpumask_first(cpu_core_mask(cpu));
 	data = per_cpu(powernow_data, first);
 
 	if (!data)
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index dedc1e98f168..1f0ec83d343b 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -322,7 +322,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 
 	/* only run on CPU to be set, or on its sibling */
 #ifdef CONFIG_SMP
-	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
+	cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
 #endif
 
 	cpus_allowed = current->cpus_allowed;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index c5a32f92d07e..1f429ee3477d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -477,7 +477,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 #ifdef CONFIG_SMP
 	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
-		i = cpumask_first(&per_cpu(cpu_core_map, cpu));
+		i = cpumask_first(cpu_core_mask(cpu));
 
 		/* first core not up yet */
 		if (cpu_data(i).cpu_core_id)
@@ -497,7 +497,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		if (err)
 			goto out;
 
-		cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
+		cpumask_copy(b->cpus, cpu_core_mask(cpu));
 		per_cpu(threshold_banks, cpu)[bank] = b;
 		goto out;
 	}
@@ -521,7 +521,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 #ifndef CONFIG_SMP
 	cpumask_setall(b->cpus);
 #else
-	cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
+	cpumask_copy(b->cpus, cpu_core_mask(cpu));
 #endif
 
 	per_cpu(threshold_banks, cpu)[bank] = b;
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index d67e0e48bc2d..4dd610e226e0 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -14,7 +14,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
 	if (c->x86_max_cores * smp_num_siblings > 1) {
 		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
 		seq_printf(m, "siblings\t: %d\n",
-			   cpus_weight(per_cpu(cpu_core_map, cpu)));
+			   cpumask_weight(cpu_sibling_mask(cpu)));
 		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
 		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
 		seq_printf(m, "apicid\t\t: %d\n", c->apicid);