author		Linus Torvalds <torvalds@linux-foundation.org>	2008-07-23 21:37:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-23 21:37:44 -0400
commit		26dcce0fabbef75ae426461edf21b5030bad60f3 (patch)
tree		56c64fa47dc29f7ea5a8fd0cab0459fb0a05a2bc /arch/x86/kernel/cpu
parent		d7b6de14a0ef8a376f9d57b867545b47302b7bfb (diff)
parent		eb6a12c2428d21a9f3e0f1a50e927d5fd80fc3d0 (diff)
Merge branch 'cpus4096-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'cpus4096-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (31 commits)
NR_CPUS: Replace NR_CPUS in speedstep-centrino.c
cpumask: Provide a generic set of CPUMASK_ALLOC macros, FIXUP
NR_CPUS: Replace NR_CPUS in cpufreq userspace routines
NR_CPUS: Replace per_cpu(..., smp_processor_id()) with __get_cpu_var
NR_CPUS: Replace NR_CPUS in arch/x86/kernel/genapic_flat_64.c
NR_CPUS: Replace NR_CPUS in arch/x86/kernel/genx2apic_uv_x.c
NR_CPUS: Replace NR_CPUS in arch/x86/kernel/cpu/proc.c
NR_CPUS: Replace NR_CPUS in arch/x86/kernel/cpu/mcheck/mce_64.c
cpumask: Optimize cpumask_of_cpu in lib/smp_processor_id.c, fix
cpumask: Use optimized CPUMASK_ALLOC macros in the centrino_target
cpumask: Provide a generic set of CPUMASK_ALLOC macros
cpumask: Optimize cpumask_of_cpu in lib/smp_processor_id.c
cpumask: Optimize cpumask_of_cpu in kernel/time/tick-common.c
cpumask: Optimize cpumask_of_cpu in drivers/misc/sgi-xp/xpc_main.c
cpumask: Optimize cpumask_of_cpu in arch/x86/kernel/ldt.c
cpumask: Optimize cpumask_of_cpu in arch/x86/kernel/io_apic_64.c
cpumask: Replace cpumask_of_cpu with cpumask_of_cpu_ptr
Revert "cpumask: introduce new APIs"
cpumask: make for_each_cpu_mask a bit smaller
net: Pass reference to cpumask variable in net/sunrpc/svc.c
...
Fix up trivial conflicts in drivers/cpufreq/cpufreq.c manually
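
The common thread in these commits: with CONFIG_MAXSMP raising NR_CPUS to 4096, a cpumask_t is 512 bytes, so on-stack mask copies and NR_CPUS-sized loops get expensive on machines with only a handful of CPUs. Below is a minimal sketch of the conversion pattern the diffs apply throughout — not code from this merge; pin_and_count() is a made-up example, but the APIs (cpumask_of_cpu_ptr, for_each_cpu_mask_nr, nr_cpu_ids) are the ones the series introduces or favours:

#include <linux/cpumask.h>
#include <linux/sched.h>

static int pin_and_count(unsigned int cpu, const cpumask_t *interesting)
{
        cpumask_t saved_mask = current->cpus_allowed;
        /* declares a const cpumask_t * pointing at cpu's one-bit mask;
         * avoids materialising a 512-byte cpumask_t on the stack */
        cpumask_of_cpu_ptr(cpu_mask, cpu);
        unsigned int i, n = 0;

        set_cpus_allowed_ptr(current, cpu_mask); /* was: &cpumask_of_cpu(cpu) */

        /* the _nr variant stops at nr_cpu_ids (the CPU ids possible on
         * this boot) instead of the compile-time NR_CPUS ceiling */
        for_each_cpu_mask_nr(i, *interesting)
                n++;

        set_cpus_allowed_ptr(current, &saved_mask);
        return n;
}
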
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c	16
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/p4-clockmod.c	6
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/powernow-k8.c	23
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c	157
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/speedstep-ich.c	7
-rw-r--r--	arch/x86/kernel/cpu/intel_cacheinfo.c	5
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_64.c	2
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_amd_64.c	4
-rw-r--r--	arch/x86/kernel/cpu/proc.c	2
9 files changed, 135 insertions, 87 deletions
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index b0c8208df9fa..ff2fff56f0a8 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -200,10 +200,12 @@ static void drv_read(struct drv_cmd *cmd)
 static void drv_write(struct drv_cmd *cmd)
 {
         cpumask_t saved_mask = current->cpus_allowed;
+        cpumask_of_cpu_ptr_declare(cpu_mask);
         unsigned int i;
 
-        for_each_cpu_mask(i, cmd->mask) {
-                set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+        for_each_cpu_mask_nr(i, cmd->mask) {
+                cpumask_of_cpu_ptr_next(cpu_mask, i);
+                set_cpus_allowed_ptr(current, cpu_mask);
                 do_drv_write(cmd);
         }
 
@@ -267,11 +269,12 @@ static unsigned int get_measured_perf(unsigned int cpu)
         } aperf_cur, mperf_cur;
 
         cpumask_t saved_mask;
+        cpumask_of_cpu_ptr(cpu_mask, cpu);
         unsigned int perf_percent;
         unsigned int retval;
 
         saved_mask = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+        set_cpus_allowed_ptr(current, cpu_mask);
         if (get_cpu() != cpu) {
                 /* We were not able to run on requested processor */
                 put_cpu();
@@ -337,6 +340,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
+        cpumask_of_cpu_ptr(cpu_mask, cpu);
         struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
         unsigned int freq;
         unsigned int cached_freq;
@@ -349,7 +353,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
         }
 
         cached_freq = data->freq_table[data->acpi_data->state].frequency;
-        freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
+        freq = extract_freq(get_cur_val(cpu_mask), data);
         if (freq != cached_freq) {
                 /*
                  * The dreaded BIOS frequency change behind our back.
@@ -451,7 +455,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 
         freqs.old = perf->states[perf->state].core_frequency * 1000;
         freqs.new = data->freq_table[next_state].frequency;
-        for_each_cpu_mask(i, cmd.mask) {
+        for_each_cpu_mask_nr(i, cmd.mask) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
         }
@@ -466,7 +470,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
         }
         }
 
-        for_each_cpu_mask(i, cmd.mask) {
+        for_each_cpu_mask_nr(i, cmd.mask) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
         }
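
The drv_write() hunk above shows the two-step variant of the new API: when the pointer has to be re-aimed inside a loop, cpumask_of_cpu_ptr_declare() declares it once outside and cpumask_of_cpu_ptr_next() re-points it per iteration, where the one-step cpumask_of_cpu_ptr() both declares and initializes. A hedged sketch of just that shape — the function and its body are illustrative, only the cpumask_of_cpu_ptr_* calls mirror the diff:

static void run_on_each(const cpumask_t *mask)
{
        cpumask_t saved_mask = current->cpus_allowed;
        cpumask_of_cpu_ptr_declare(cpu_mask);   /* declare once... */
        unsigned int i;

        for_each_cpu_mask_nr(i, *mask) {
                cpumask_of_cpu_ptr_next(cpu_mask, i); /* ...re-point per CPU */
                set_cpus_allowed_ptr(current, cpu_mask);
                /* per-CPU work would go here */
        }
        set_cpus_allowed_ptr(current, &saved_mask);
}
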
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 199e4e05e5dc..f1685fb91fbd 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -122,7 +122,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
                 return 0;
 
         /* notifiers */
-        for_each_cpu_mask(i, policy->cpus) {
+        for_each_cpu_mask_nr(i, policy->cpus) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
         }
@@ -130,11 +130,11 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
         /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
          * Developer's Manual, Volume 3
          */
-        for_each_cpu_mask(i, policy->cpus)
+        for_each_cpu_mask_nr(i, policy->cpus)
                 cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
 
         /* notifiers */
-        for_each_cpu_mask(i, policy->cpus) {
+        for_each_cpu_mask_nr(i, policy->cpus) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
         }
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 206791eb46e3..53c7b6936973 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -479,11 +479,12 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi
 static int check_supported_cpu(unsigned int cpu)
 {
         cpumask_t oldmask;
+        cpumask_of_cpu_ptr(cpu_mask, cpu);
         u32 eax, ebx, ecx, edx;
         unsigned int rc = 0;
 
         oldmask = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+        set_cpus_allowed_ptr(current, cpu_mask);
 
         if (smp_processor_id() != cpu) {
                 printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
@@ -966,7 +967,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
         freqs.old = find_khz_freq_from_fid(data->currfid);
         freqs.new = find_khz_freq_from_fid(fid);
 
-        for_each_cpu_mask(i, *(data->available_cores)) {
+        for_each_cpu_mask_nr(i, *(data->available_cores)) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
         }
@@ -974,7 +975,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
         res = transition_fid_vid(data, fid, vid);
         freqs.new = find_khz_freq_from_fid(data->currfid);
 
-        for_each_cpu_mask(i, *(data->available_cores)) {
+        for_each_cpu_mask_nr(i, *(data->available_cores)) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
         }
@@ -997,7 +998,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
         freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
         freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
 
-        for_each_cpu_mask(i, *(data->available_cores)) {
+        for_each_cpu_mask_nr(i, *(data->available_cores)) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
         }
@@ -1005,7 +1006,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
         res = transition_pstate(data, pstate);
         freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
 
-        for_each_cpu_mask(i, *(data->available_cores)) {
+        for_each_cpu_mask_nr(i, *(data->available_cores)) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
         }
@@ -1016,6 +1017,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
 static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
 {
         cpumask_t oldmask;
+        cpumask_of_cpu_ptr(cpu_mask, pol->cpu);
         struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
         u32 checkfid;
         u32 checkvid;
@@ -1030,7 +1032,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 
         /* only run on specific CPU from here on */
         oldmask = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
+        set_cpus_allowed_ptr(current, cpu_mask);
 
         if (smp_processor_id() != pol->cpu) {
                 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1105,6 +1107,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
         struct powernow_k8_data *data;
         cpumask_t oldmask;
+        cpumask_of_cpu_ptr_declare(newmask);
         int rc;
 
         if (!cpu_online(pol->cpu))
@@ -1156,7 +1159,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 
         /* only run on specific CPU from here on */
         oldmask = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
+        cpumask_of_cpu_ptr_next(newmask, pol->cpu);
+        set_cpus_allowed_ptr(current, newmask);
 
         if (smp_processor_id() != pol->cpu) {
                 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1178,7 +1182,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
         set_cpus_allowed_ptr(current, &oldmask);
 
         if (cpu_family == CPU_HW_PSTATE)
-                pol->cpus = cpumask_of_cpu(pol->cpu);
+                pol->cpus = *newmask;
         else
                 pol->cpus = per_cpu(cpu_core_map, pol->cpu);
         data->available_cores = &(pol->cpus);
@@ -1244,6 +1248,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
 {
         struct powernow_k8_data *data;
         cpumask_t oldmask = current->cpus_allowed;
+        cpumask_of_cpu_ptr(newmask, cpu);
         unsigned int khz = 0;
         unsigned int first;
 
@@ -1253,7 +1258,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
         if (!data)
                 return -EINVAL;
 
-        set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+        set_cpus_allowed_ptr(current, newmask);
         if (smp_processor_id() != cpu) {
                 printk(KERN_ERR PFX
                        "limiting to CPU %d failed in powernowk8_get\n", cpu);
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 908dd347c67e..ca2ac13b7af2 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -28,7 +28,8 @@
 #define PFX		"speedstep-centrino: "
 #define MAINTAINER	"cpufreq@lists.linux.org.uk"
 
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
+#define dprintk(msg...) \
+        cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
 
 #define INTEL_MSR_RANGE	(0xffff)
 
@@ -66,11 +67,12 @@ struct cpu_model
 
         struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */
 };
-static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x);
+static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
+                                  const struct cpu_id *x);
 
 /* Operating points for current CPU */
-static struct cpu_model *centrino_model[NR_CPUS];
-static const struct cpu_id *centrino_cpu[NR_CPUS];
+static DEFINE_PER_CPU(struct cpu_model *, centrino_model);
+static DEFINE_PER_CPU(const struct cpu_id *, centrino_cpu);
 
 static struct cpufreq_driver centrino_driver;
 
@@ -255,7 +257,7 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy)
                 return -ENOENT;
         }
 
-        centrino_model[policy->cpu] = model;
+        per_cpu(centrino_model, policy->cpu) = model;
 
         dprintk("found \"%s\": max frequency: %dkHz\n",
                model->model_name, model->max_freq);
@@ -264,10 +266,14 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy)
 }
 
 #else
-static inline int centrino_cpu_init_table(struct cpufreq_policy *policy) { return -ENODEV; }
+static inline int centrino_cpu_init_table(struct cpufreq_policy *policy)
+{
+        return -ENODEV;
+}
 #endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */
 
-static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x)
+static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
+                                  const struct cpu_id *x)
 {
         if ((c->x86 == x->x86) &&
             (c->x86_model == x->x86_model) &&
@@ -286,23 +292,28 @@ static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
          * for centrino, as some DSDTs are buggy.
          * Ideally, this can be done using the acpi_data structure.
          */
-        if ((centrino_cpu[cpu] == &cpu_ids[CPU_BANIAS]) ||
-            (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_A1]) ||
-            (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_B0])) {
+        if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||
+            (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||
+            (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {
                 msr = (msr >> 8) & 0xff;
                 return msr * 100000;
         }
 
-        if ((!centrino_model[cpu]) || (!centrino_model[cpu]->op_points))
+        if ((!per_cpu(centrino_model, cpu)) ||
+            (!per_cpu(centrino_model, cpu)->op_points))
                 return 0;
 
         msr &= 0xffff;
-        for (i=0;centrino_model[cpu]->op_points[i].frequency != CPUFREQ_TABLE_END; i++) {
-                if (msr == centrino_model[cpu]->op_points[i].index)
-                        return centrino_model[cpu]->op_points[i].frequency;
+        for (i = 0;
+             per_cpu(centrino_model, cpu)->op_points[i].frequency
+                                                        != CPUFREQ_TABLE_END;
+             i++) {
+                if (msr == per_cpu(centrino_model, cpu)->op_points[i].index)
+                        return per_cpu(centrino_model, cpu)->
+                                                        op_points[i].frequency;
         }
         if (failsafe)
-                return centrino_model[cpu]->op_points[i-1].frequency;
+                return per_cpu(centrino_model, cpu)->op_points[i-1].frequency;
         else
                 return 0;
 }
@@ -313,9 +324,10 @@ static unsigned int get_cur_freq(unsigned int cpu)
         unsigned l, h;
         unsigned clock_freq;
         cpumask_t saved_mask;
+        cpumask_of_cpu_ptr(new_mask, cpu);
 
         saved_mask = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+        set_cpus_allowed_ptr(current, new_mask);
         if (smp_processor_id() != cpu)
                 return 0;
 
@@ -347,7 +359,8 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
         int i;
 
         /* Only Intel makes Enhanced Speedstep-capable CPUs */
-        if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST))
+        if (cpu->x86_vendor != X86_VENDOR_INTEL ||
+            !cpu_has(cpu, X86_FEATURE_EST))
                 return -ENODEV;
 
         if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
@@ -361,9 +374,9 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
                         break;
 
         if (i != N_IDS)
-                centrino_cpu[policy->cpu] = &cpu_ids[i];
+                per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];
 
-        if (!centrino_cpu[policy->cpu]) {
+        if (!per_cpu(centrino_cpu, policy->cpu)) {
                 dprintk("found unsupported CPU with "
                 "Enhanced SpeedStep: send /proc/cpuinfo to "
                 MAINTAINER "\n");
@@ -386,23 +399,26 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
                 /* check to see if it stuck */
                 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
                 if (!(l & (1<<16))) {
-                        printk(KERN_INFO PFX "couldn't enable Enhanced SpeedStep\n");
+                        printk(KERN_INFO PFX
+                                "couldn't enable Enhanced SpeedStep\n");
                         return -ENODEV;
                 }
         }
 
         freq = get_cur_freq(policy->cpu);
-
-        policy->cpuinfo.transition_latency = 10000; /* 10uS transition latency */
+        policy->cpuinfo.transition_latency = 10000;
+                                                /* 10uS transition latency */
         policy->cur = freq;
 
         dprintk("centrino_cpu_init: cur=%dkHz\n", policy->cur);
 
-        ret = cpufreq_frequency_table_cpuinfo(policy, centrino_model[policy->cpu]->op_points);
+        ret = cpufreq_frequency_table_cpuinfo(policy,
+                per_cpu(centrino_model, policy->cpu)->op_points);
         if (ret)
                 return (ret);
 
-        cpufreq_frequency_table_get_attr(centrino_model[policy->cpu]->op_points, policy->cpu);
+        cpufreq_frequency_table_get_attr(
+                per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu);
 
         return 0;
 }
@@ -411,12 +427,12 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
 {
         unsigned int cpu = policy->cpu;
 
-        if (!centrino_model[cpu])
+        if (!per_cpu(centrino_model, cpu))
                 return -ENODEV;
 
         cpufreq_frequency_table_put_attr(cpu);
 
-        centrino_model[cpu] = NULL;
+        per_cpu(centrino_model, cpu) = NULL;
 
         return 0;
 }
@@ -430,17 +446,26 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
  */
 static int centrino_verify (struct cpufreq_policy *policy)
 {
-        return cpufreq_frequency_table_verify(policy, centrino_model[policy->cpu]->op_points);
+        return cpufreq_frequency_table_verify(policy,
+                        per_cpu(centrino_model, policy->cpu)->op_points);
 }
 
 /**
  * centrino_setpolicy - set a new CPUFreq policy
  * @policy: new policy
  * @target_freq: the target frequency
- * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
+ * @relation: how that frequency relates to achieved frequency
+ *	(CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
  *
  * Sets a new CPUFreq policy.
  */
+struct allmasks {
+        cpumask_t online_policy_cpus;
+        cpumask_t saved_mask;
+        cpumask_t set_mask;
+        cpumask_t covered_cpus;
+};
+
 static int centrino_target (struct cpufreq_policy *policy,
                             unsigned int target_freq,
                             unsigned int relation)
@@ -448,48 +473,55 @@ static int centrino_target (struct cpufreq_policy *policy,
         unsigned int newstate = 0;
         unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
         struct cpufreq_freqs freqs;
-        cpumask_t online_policy_cpus;
-        cpumask_t saved_mask;
-        cpumask_t set_mask;
-        cpumask_t covered_cpus;
         int retval = 0;
         unsigned int j, k, first_cpu, tmp;
-
-        if (unlikely(centrino_model[cpu] == NULL))
-                return -ENODEV;
+        CPUMASK_ALLOC(allmasks);
+        CPUMASK_PTR(online_policy_cpus, allmasks);
+        CPUMASK_PTR(saved_mask, allmasks);
+        CPUMASK_PTR(set_mask, allmasks);
+        CPUMASK_PTR(covered_cpus, allmasks);
+
+        if (unlikely(allmasks == NULL))
+                return -ENOMEM;
+
+        if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
+                retval = -ENODEV;
+                goto out;
+        }
 
         if (unlikely(cpufreq_frequency_table_target(policy,
-                        centrino_model[cpu]->op_points,
+                        per_cpu(centrino_model, cpu)->op_points,
                         target_freq,
                         relation,
                         &newstate))) {
-                return -EINVAL;
+                retval = -EINVAL;
+                goto out;
         }
 
 #ifdef CONFIG_HOTPLUG_CPU
         /* cpufreq holds the hotplug lock, so we are safe from here on */
-        cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
+        cpus_and(*online_policy_cpus, cpu_online_map, policy->cpus);
 #else
-        online_policy_cpus = policy->cpus;
+        *online_policy_cpus = policy->cpus;
 #endif
 
-        saved_mask = current->cpus_allowed;
+        *saved_mask = current->cpus_allowed;
         first_cpu = 1;
-        cpus_clear(covered_cpus);
-        for_each_cpu_mask(j, online_policy_cpus) {
+        cpus_clear(*covered_cpus);
+        for_each_cpu_mask_nr(j, *online_policy_cpus) {
                 /*
                  * Support for SMP systems.
                  * Make sure we are running on CPU that wants to change freq
                  */
-                cpus_clear(set_mask);
+                cpus_clear(*set_mask);
                 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-                        cpus_or(set_mask, set_mask, online_policy_cpus);
+                        cpus_or(*set_mask, *set_mask, *online_policy_cpus);
                 else
-                        cpu_set(j, set_mask);
+                        cpu_set(j, *set_mask);
 
-                set_cpus_allowed_ptr(current, &set_mask);
+                set_cpus_allowed_ptr(current, set_mask);
                 preempt_disable();
-                if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
+                if (unlikely(!cpu_isset(smp_processor_id(), *set_mask))) {
                         dprintk("couldn't limit to CPUs in this domain\n");
                         retval = -EAGAIN;
                         if (first_cpu) {
@@ -500,7 +532,7 @@ static int centrino_target (struct cpufreq_policy *policy,
                         break;
                 }
 
-                msr = centrino_model[cpu]->op_points[newstate].index;
+                msr = per_cpu(centrino_model, cpu)->op_points[newstate].index;
 
                 if (first_cpu) {
                         rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
@@ -517,7 +549,7 @@ static int centrino_target (struct cpufreq_policy *policy,
                 dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
                         target_freq, freqs.old, freqs.new, msr);
 
-                for_each_cpu_mask(k, online_policy_cpus) {
+                for_each_cpu_mask_nr(k, *online_policy_cpus) {
                         freqs.cpu = k;
                         cpufreq_notify_transition(&freqs,
                                 CPUFREQ_PRECHANGE);
@@ -536,11 +568,11 @@ static int centrino_target (struct cpufreq_policy *policy,
                         break;
                 }
 
-                cpu_set(j, covered_cpus);
+                cpu_set(j, *covered_cpus);
                 preempt_enable();
         }
 
-        for_each_cpu_mask(k, online_policy_cpus) {
+        for_each_cpu_mask_nr(k, *online_policy_cpus) {
                 freqs.cpu = k;
                 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
         }
@@ -553,10 +585,12 @@ static int centrino_target (struct cpufreq_policy *policy,
          * Best effort undo..
          */
 
-        if (!cpus_empty(covered_cpus)) {
-                for_each_cpu_mask(j, covered_cpus) {
-                        set_cpus_allowed_ptr(current,
-                                             &cpumask_of_cpu(j));
+        if (!cpus_empty(*covered_cpus)) {
+                cpumask_of_cpu_ptr_declare(new_mask);
+
+                for_each_cpu_mask_nr(j, *covered_cpus) {
+                        cpumask_of_cpu_ptr_next(new_mask, j);
+                        set_cpus_allowed_ptr(current, new_mask);
                         wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
                 }
         }
@@ -564,19 +598,22 @@ static int centrino_target (struct cpufreq_policy *policy,
                 tmp = freqs.new;
                 freqs.new = freqs.old;
                 freqs.old = tmp;
-                for_each_cpu_mask(j, online_policy_cpus) {
+                for_each_cpu_mask_nr(j, *online_policy_cpus) {
                         freqs.cpu = j;
                         cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
                         cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
                 }
         }
-        set_cpus_allowed_ptr(current, &saved_mask);
-        return 0;
+        set_cpus_allowed_ptr(current, saved_mask);
+        retval = 0;
+        goto out;
 
 migrate_end:
         preempt_enable();
-        set_cpus_allowed_ptr(current, &saved_mask);
-        return 0;
+        set_cpus_allowed_ptr(current, saved_mask);
+out:
+        CPUMASK_FREE(allmasks);
+        return retval;
 }
 
 static struct freq_attr* centrino_attr[] = {
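
The centrino_target() rework above is the main consumer of the new CPUMASK_ALLOC family: its four cpumask_t locals (2KB of stack at NR_CPUS=4096) move into one struct that the macros place on the heap when masks are large. A condensed, hedged sketch of that shape — error paths trimmed, function name hypothetical, and the expansions in the comments are my reading of the macros, not a quote from kernel.h:

struct allmasks {
        cpumask_t saved_mask;
        cpumask_t covered_cpus;
        /* ... */
};

static int sketch_target(void)
{
        int retval = 0;
        CPUMASK_ALLOC(allmasks);          /* struct allmasks *allmasks;
                                           * kmalloc'd for big NR_CPUS,
                                           * on-stack otherwise */
        CPUMASK_PTR(saved_mask, allmasks);   /* cpumask_t *saved_mask =
                                              * &allmasks->saved_mask */
        CPUMASK_PTR(covered_cpus, allmasks);

        if (unlikely(allmasks == NULL))
                return -ENOMEM;           /* allocation can fail, hence the
                                           * new error/out path above */

        *saved_mask = current->cpus_allowed;
        cpus_clear(*covered_cpus);
        /* ... frequency-transition work ... */
        set_cpus_allowed_ptr(current, saved_mask);

        CPUMASK_FREE(allmasks);           /* kfree in the kmalloc case,
                                           * a no-op otherwise */
        return retval;
}
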
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 1b50244b1fdf..2f3728dc24f6 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -244,7 +244,8 @@ static unsigned int _speedstep_get(const cpumask_t *cpus)
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-        return _speedstep_get(&cpumask_of_cpu(cpu));
+        cpumask_of_cpu_ptr(newmask, cpu);
+        return _speedstep_get(newmask);
 }
 
 /**
@@ -279,7 +280,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
 
         cpus_allowed = current->cpus_allowed;
 
-        for_each_cpu_mask(i, policy->cpus) {
+        for_each_cpu_mask_nr(i, policy->cpus) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
         }
@@ -292,7 +293,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
         /* allow to be run on all CPUs */
         set_cpus_allowed_ptr(current, &cpus_allowed);
 
-        for_each_cpu_mask(i, policy->cpus) {
+        for_each_cpu_mask_nr(i, policy->cpus) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
         }
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index ff517f0b8cc4..650d40f7912b 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -489,7 +489,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
         int sibling;
 
         this_leaf = CPUID4_INFO_IDX(cpu, index);
-        for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
+        for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
                 sibling_leaf = CPUID4_INFO_IDX(sibling, index);
                 cpu_clear(cpu, sibling_leaf->shared_cpu_map);
         }
@@ -516,6 +516,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
         unsigned long j;
         int retval;
         cpumask_t oldmask;
+        cpumask_of_cpu_ptr(newmask, cpu);
 
         if (num_cache_leaves == 0)
                 return -ENOENT;
@@ -526,7 +527,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
                 return -ENOMEM;
 
         oldmask = current->cpus_allowed;
-        retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+        retval = set_cpus_allowed_ptr(current, newmask);
         if (retval)
                 goto out;
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 9ab65be82427..65a339678ece 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -580,7 +580,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
         char __user *buf = ubuf;
         int i, err;
 
-        cpu_tsc = kmalloc(NR_CPUS * sizeof(long), GFP_KERNEL);
+        cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
         if (!cpu_tsc)
                 return -ENOMEM;
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 7c9a813e1193..88736cadbaa6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -527,7 +527,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
         if (err)
                 goto out_free;
 
-        for_each_cpu_mask(i, b->cpus) {
+        for_each_cpu_mask_nr(i, b->cpus) {
                 if (i == cpu)
                         continue;
 
@@ -617,7 +617,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 #endif
 
         /* remove all sibling symlinks before unregistering */
-        for_each_cpu_mask(i, b->cpus) {
+        for_each_cpu_mask_nr(i, b->cpus) {
                 if (i == cpu)
                         continue;
 
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 0d0d9057e7c0..a26c480b9491 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -160,7 +160,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
 {
         if (*pos == 0)	/* just in case, cpu 0 is not the first */
                 *pos = first_cpu(cpu_online_map);
-        if ((*pos) < NR_CPUS && cpu_online(*pos))
+        if ((*pos) < nr_cpu_ids && cpu_online(*pos))
                 return &cpu_data(*pos);
         return NULL;
 }
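
Finally, the mce_64.c and proc.c hunks show the NR_CPUS -> nr_cpu_ids substitutions: nr_cpu_ids is the runtime count of possible CPU ids on this boot, so allocations and bounds checks scale with the machine rather than with the kernel config. A hedged one-function sketch of the difference (not from the merge; with CONFIG_MAXSMP, NR_CPUS is 4096 even on a two-CPU box, while nr_cpu_ids would be 2 there):

#include <linux/cpumask.h>
#include <linux/slab.h>

static u64 *alloc_cpu_array(void)
{
        /* before: kmalloc(NR_CPUS * sizeof(u64), GFP_KERNEL) -- 32KB
         * no matter how many CPUs actually exist */
        return kmalloc(nr_cpu_ids * sizeof(u64), GFP_KERNEL);
}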