author     Paul Mackerras <paulus@samba.org>	2006-03-28 21:24:50 -0500
committer  Paul Mackerras <paulus@samba.org>	2006-03-28 21:24:50 -0500
commit     bac30d1a78d0f11c613968fc8b351a91ed465386 (patch)
tree       e52f3c876522a2f6047a6ec1c27df2e8a79486b8 /arch/i386
parent     e8222502ee6157e2713da9e0792c21f4ad458d50 (diff)
parent     ca9ba4471c1203bb6e759b76e83167fec54fe590 (diff)
Merge ../linux-2.6
Diffstat (limited to 'arch/i386')
 arch/i386/Kconfig                          |   9
 arch/i386/boot/video.S                     |   2
 arch/i386/kernel/cpu/common.c              |  10
 arch/i386/kernel/cpu/cpufreq/powernow-k8.c |   7
 arch/i386/kernel/cpu/cpufreq/powernow-k8.h |   4
 arch/i386/kernel/cpu/intel_cacheinfo.c     |  77
 arch/i386/kernel/io_apic.c                 |   4
 arch/i386/kernel/microcode.c               |  17
 arch/i386/kernel/nmi.c                     |   9
 arch/i386/kernel/setup.c                   |   1
 arch/i386/kernel/smpboot.c                 |  24
 arch/i386/kernel/syscall_table.S           |   2
 arch/i386/kernel/timers/timer_pm.c         | 104
 arch/i386/kernel/traps.c                   |  17
 arch/i386/mach-voyager/voyager_smp.c       |   2
 arch/i386/mm/discontig.c                   |  12
 arch/i386/mm/pgtable.c                     |   2
 arch/i386/oprofile/nmi_int.c               |   2
 18 files changed, 213 insertions(+), 92 deletions(-)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index f7db71d0b913..f17bd1d2707e 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -231,6 +231,15 @@ config SCHED_SMT
 	  cost of slightly increased overhead in some places. If unsure say
 	  N here.
 
+config SCHED_MC
+	bool "Multi-core scheduler support"
+	depends on SMP
+	default y
+	help
+	  Multi-core scheduler support improves the CPU scheduler's decision
+	  making when dealing with multi-core CPU chips at a cost of slightly
+	  increased overhead in some places. If unsure say N here.
+
 source "kernel/Kconfig.preempt"
 
 config X86_UP_APIC
diff --git a/arch/i386/boot/video.S b/arch/i386/boot/video.S
index 2ac40c8244c4..0000a2674537 100644
--- a/arch/i386/boot/video.S
+++ b/arch/i386/boot/video.S
@@ -1924,6 +1924,7 @@ skip10: movb %ah, %al
 	ret
 
 store_edid:
+#ifdef CONFIG_FB_FIRMWARE_EDID
 	pushw	%es			# just save all registers
 	pushw	%ax
 	pushw	%bx
@@ -1954,6 +1955,7 @@ store_edid:
 	popw	%bx
 	popw	%ax
 	popw	%es
+#endif
 	ret
 
 # VIDEO_SELECT-only variables
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 7e3d6b6a4e96..a06a49075f10 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -266,7 +266,7 @@ static void __init early_cpu_detect(void)
 void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 {
 	u32 tfms, xlvl;
-	int junk;
+	int ebx;
 
 	if (have_cpuid_p()) {
 		/* Get vendor name */
@@ -282,7 +282,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 		/* Intel-defined flags: level 0x00000001 */
 		if ( c->cpuid_level >= 0x00000001 ) {
 			u32 capability, excap;
-			cpuid(0x00000001, &tfms, &junk, &excap, &capability);
+			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
 			c->x86_capability[0] = capability;
 			c->x86_capability[4] = excap;
 			c->x86 = (tfms >> 8) & 15;
@@ -292,6 +292,11 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 			if (c->x86 >= 0x6)
 				c->x86_model += ((tfms >> 16) & 0xF) << 4;
 			c->x86_mask = tfms & 15;
+#ifdef CONFIG_SMP
+			c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
+#else
+			c->apicid = (ebx >> 24) & 0xFF;
+#endif
 		} else {
 			/* Have CPUID level 0 only - unheard of */
 			c->x86 = 4;
@@ -474,7 +479,6 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 	cpuid(1, &eax, &ebx, &ecx, &edx);
 
-	c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
 
 	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
 		return;
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index 1e70823e1cb5..712a26bd4457 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -1095,10 +1095,15 @@ static int __devexit powernowk8_cpu_exit (struct cpufreq_policy *pol)
 
 static unsigned int powernowk8_get (unsigned int cpu)
 {
-	struct powernow_k8_data *data = powernow_data[cpu];
+	struct powernow_k8_data *data;
 	cpumask_t oldmask = current->cpus_allowed;
 	unsigned int khz = 0;
 
+	data = powernow_data[first_cpu(cpu_core_map[cpu])];
+
+	if (!data)
+		return -EINVAL;
+
 	set_cpus_allowed(current, cpumask_of_cpu(cpu));
 	if (smp_processor_id() != cpu) {
 		printk(KERN_ERR PFX "limiting to CPU %d failed in powernowk8_get\n", cpu);
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
index 00ea899c17e1..79a7c5c87edc 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
@@ -182,10 +182,6 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
 
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
 
-#ifndef for_each_cpu_mask
-#define for_each_cpu_mask(i,mask) for (i=0;i<1;i++)
-#endif
-
 #ifdef CONFIG_SMP
 static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
 {
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index ce61921369e5..9df87b03612c 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -173,6 +173,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
 	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
 	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
+	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
+#ifdef CONFIG_SMP
+	unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
+#endif
 
 	if (c->cpuid_level > 3) {
 		static int is_initialized;
@@ -205,9 +209,15 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 					break;
 				case 2:
 					new_l2 = this_leaf.size/1024;
+					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
+					index_msb = get_count_order(num_threads_sharing);
+					l2_id = c->apicid >> index_msb;
 					break;
 				case 3:
 					new_l3 = this_leaf.size/1024;
+					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
+					index_msb = get_count_order(num_threads_sharing);
+					l3_id = c->apicid >> index_msb;
 					break;
 				default:
 					break;
@@ -215,11 +225,19 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 			}
 		}
 	}
-	if (c->cpuid_level > 1) {
+	/*
+	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
+	 * trace cache
+	 */
+	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
 		/* supports eax=2 call */
 		int i, j, n;
 		int regs[4];
 		unsigned char *dp = (unsigned char *)regs;
+		int only_trace = 0;
+
+		if (num_cache_leaves != 0 && c->x86 == 15)
+			only_trace = 1;
 
 		/* Number of times to iterate */
 		n = cpuid_eax(2) & 0xFF;
@@ -241,6 +259,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 				while (cache_table[k].descriptor != 0)
 				{
 					if (cache_table[k].descriptor == des) {
+						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
+							break;
 						switch (cache_table[k].cache_type) {
 						case LVL_1_INST:
 							l1i += cache_table[k].size;
@@ -266,34 +286,45 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 				}
 			}
 		}
+	}
 
 	if (new_l1d)
 		l1d = new_l1d;
 
 	if (new_l1i)
 		l1i = new_l1i;
 
-	if (new_l2)
+	if (new_l2) {
 		l2 = new_l2;
+#ifdef CONFIG_SMP
+		cpu_llc_id[cpu] = l2_id;
+#endif
+	}
 
-	if (new_l3)
+	if (new_l3) {
 		l3 = new_l3;
+#ifdef CONFIG_SMP
+		cpu_llc_id[cpu] = l3_id;
+#endif
+	}
 
-	if ( trace )
+	if (trace)
 		printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
 	else if ( l1i )
 		printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
-	if ( l1d )
-		printk(", L1 D cache: %dK\n", l1d);
-	else
-		printk("\n");
-	if ( l2 )
-		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
-	if ( l3 )
-		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
 
-	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
-	}
+	if (l1d)
+		printk(", L1 D cache: %dK\n", l1d);
+	else
+		printk("\n");
+
+	if (l2)
+		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
+
+	if (l3)
+		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
+
+	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 
 	return l2;
 }
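
As a worked illustration of the new l2_id/l3_id computation above (the concrete numbers are invented for the example, they are not taken from this patch), the shared-cache ID is just the initial APIC ID with the thread-sharing bits shifted off:

	/* Illustrative sketch only, not a hunk from this commit. */
	unsigned int apicid = 5;		/* CPUID.1 EBX[31:24], stored in c->apicid above */
	unsigned int num_threads_sharing = 1 + 3;	/* 1 + CPUID.4 EAX sharing field: 4 CPUs share */
	unsigned int index_msb = get_count_order(num_threads_sharing);	/* == 2 */
	unsigned int l3_id = apicid >> index_msb;	/* == 1; APIC IDs 4..7 all map to this L3 */

CPUs that end up with the same cpu_llc_id can then be grouped into one llc_shared_map by set_cpu_sibling_map() in the smpboot.c hunks further down.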
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 311b4e7266f1..3b329af4afc5 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -381,7 +381,7 @@ static void do_irq_balance(void)
 	unsigned long imbalance = 0;
 	cpumask_t allowed_mask, target_cpu_mask, tmp;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		int package_index;
 		CPU_IRQ(i) = 0;
 		if (!cpu_online(i))
@@ -632,7 +632,7 @@ static int __init balanced_irq_init(void)
 	else
 		printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
 failed:
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		kfree(irq_cpu_data[i].irq_delta);
 		irq_cpu_data[i].irq_delta = NULL;
 		kfree(irq_cpu_data[i].last_irq);
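
The for_each_cpu() to for_each_possible_cpu() renames here and in several of the later hunks are mechanical; the iterator still visits every possible CPU, so code that only cares about running CPUs keeps its own online check, roughly like this sketch (not a line from the patch):

	int i;

	for_each_possible_cpu(i) {	/* walks cpu_possible_map */
		if (!cpu_online(i))	/* skip CPUs that are not up */
			continue;
		/* ... per-CPU work for CPU i ... */
	}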
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index dd780a00553f..e7c138f66c5a 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -459,26 +459,9 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_
 	return ret;
 }
 
-static int microcode_ioctl (struct inode *inode, struct file *file,
-		unsigned int cmd, unsigned long arg)
-{
-	switch (cmd) {
-		/*
-		 * XXX: will be removed after microcode_ctl
-		 * is updated to ignore failure of this ioctl()
-		 */
-		case MICROCODE_IOCFREE:
-			return 0;
-		default:
-			return -EINVAL;
-	}
-	return -EINVAL;
-}
-
 static struct file_operations microcode_fops = {
 	.owner		= THIS_MODULE,
 	.write		= microcode_write,
-	.ioctl		= microcode_ioctl,
 	.open		= microcode_open,
 };
 
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 9074818b9473..d43b498ec745 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -138,12 +138,12 @@ static int __init check_nmi_watchdog(void)
 	if (nmi_watchdog == NMI_LOCAL_APIC)
 		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
 
-	for_each_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
 	local_irq_enable();
 	mdelay((10*1000)/nmi_hz); // wait 10 ticks
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 #ifdef CONFIG_SMP
 		/* Check cpu_callin_map here because that is set
 		   after the timer is started. */
@@ -510,7 +510,7 @@ void touch_nmi_watchdog (void)
 	 * Just reset the alert counters, (other CPUs might be
 	 * spinning on locks we hold):
 	 */
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		alert_counter[i] = 0;
 
 	/*
@@ -529,7 +529,8 @@ void nmi_watchdog_tick (struct pt_regs * regs)
 	 * always switch the stack NMI-atomically, it's safe to use
 	 * smp_processor_id().
 	 */
-	int sum, cpu = smp_processor_id();
+	unsigned int sum;
+	int cpu = smp_processor_id();
 
 	sum = per_cpu(irq_stat, cpu).apic_timer_irqs;
 
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 6917daa159ab..8c08660b4e5d 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -46,6 +46,7 @@
 #include <linux/kexec.h>
 #include <linux/crash_dump.h>
 #include <linux/dmi.h>
+#include <linux/pfn.h>
 
 #include <video/edid.h>
 
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 82371d83bfa9..a6969903f2d6 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -72,6 +72,9 @@ int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
 /* Core ID of each logical CPU */
 int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
 
+/* Last level cache ID of each logical CPU */
+int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
+
 /* representing HT siblings of each logical CPU */
 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_sibling_map);
@@ -440,6 +443,18 @@ static void __devinit smp_callin(void)
 
 static int cpucount;
 
+/* maps the cpu to the sched domain representing multi-core */
+cpumask_t cpu_coregroup_map(int cpu)
+{
+	struct cpuinfo_x86 *c = cpu_data + cpu;
+	/*
+	 * For perf, we return last level cache shared map.
+	 * TBD: when power saving sched policy is added, we will return
+	 *      cpu_core_map when power saving policy is enabled
+	 */
+	return c->llc_shared_map;
+}
+
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
 
@@ -459,12 +474,16 @@ set_cpu_sibling_map(int cpu)
 				cpu_set(cpu, cpu_sibling_map[i]);
 				cpu_set(i, cpu_core_map[cpu]);
 				cpu_set(cpu, cpu_core_map[i]);
+				cpu_set(i, c[cpu].llc_shared_map);
+				cpu_set(cpu, c[i].llc_shared_map);
 			}
 		}
 	} else {
 		cpu_set(cpu, cpu_sibling_map[cpu]);
 	}
 
+	cpu_set(cpu, c[cpu].llc_shared_map);
+
 	if (current_cpu_data.x86_max_cores == 1) {
 		cpu_core_map[cpu] = cpu_sibling_map[cpu];
 		c[cpu].booted_cores = 1;
@@ -472,6 +491,11 @@ set_cpu_sibling_map(int cpu)
 	}
 
 	for_each_cpu_mask(i, cpu_sibling_setup_map) {
+		if (cpu_llc_id[cpu] != BAD_APICID &&
+		    cpu_llc_id[cpu] == cpu_llc_id[i]) {
+			cpu_set(i, c[cpu].llc_shared_map);
+			cpu_set(cpu, c[i].llc_shared_map);
+		}
 		if (phys_proc_id[cpu] == phys_proc_id[i]) {
 			cpu_set(i, cpu_core_map[cpu]);
 			cpu_set(cpu, cpu_core_map[i]);
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index ac687d00a1ce..326595f3fa4d 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -310,3 +310,5 @@ ENTRY(sys_call_table)
 	.long sys_pselect6
 	.long sys_ppoll
 	.long sys_unshare		/* 310 */
+	.long sys_set_robust_list
+	.long sys_get_robust_list
diff --git a/arch/i386/kernel/timers/timer_pm.c b/arch/i386/kernel/timers/timer_pm.c
index 264edaaac315..144e94a04933 100644
--- a/arch/i386/kernel/timers/timer_pm.c
+++ b/arch/i386/kernel/timers/timer_pm.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/init.h>
+#include <linux/pci.h>
 #include <asm/types.h>
 #include <asm/timer.h>
 #include <asm/smp.h>
@@ -45,24 +46,31 @@ static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
 
 #define ACPI_PM_MASK 0xFFFFFF /* limit it to 24 bits */
 
+static int pmtmr_need_workaround __read_mostly = 1;
+
 /*helper function to safely read acpi pm timesource*/
 static inline u32 read_pmtmr(void)
 {
-	u32 v1=0,v2=0,v3=0;
-	/* It has been reported that because of various broken
-	 * chipsets (ICH4, PIIX4 and PIIX4E) where the ACPI PM time
-	 * source is not latched, so you must read it multiple
-	 * times to insure a safe value is read.
-	 */
-	do {
-		v1 = inl(pmtmr_ioport);
-		v2 = inl(pmtmr_ioport);
-		v3 = inl(pmtmr_ioport);
-	} while ((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
-		|| (v3 > v1 && v3 < v2));
-
-	/* mask the output to 24 bits */
-	return v2 & ACPI_PM_MASK;
+	if (pmtmr_need_workaround) {
+		u32 v1, v2, v3;
+
+		/* It has been reported that because of various broken
+		 * chipsets (ICH4, PIIX4 and PIIX4E) where the ACPI PM time
+		 * source is not latched, so you must read it multiple
+		 * times to insure a safe value is read.
+		 */
+		do {
+			v1 = inl(pmtmr_ioport);
+			v2 = inl(pmtmr_ioport);
+			v3 = inl(pmtmr_ioport);
+		} while ((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
+			|| (v3 > v1 && v3 < v2));
+
+		/* mask the output to 24 bits */
+		return v2 & ACPI_PM_MASK;
+	}
+
+	return inl(pmtmr_ioport) & ACPI_PM_MASK;
 }
 
 
@@ -263,6 +271,72 @@ struct init_timer_opts __initdata timer_pmtmr_init = {
 	.opts = &timer_pmtmr,
 };
 
+#ifdef CONFIG_PCI
+/*
+ * PIIX4 Errata:
+ *
+ * The power management timer may return improper results when read.
+ * Although the timer value settles properly after incrementing,
+ * while incrementing there is a 3 ns window every 69.8 ns where the
+ * timer value is indeterminate (a 4.2% chance that the data will be
+ * incorrect when read). As a result, the ACPI free running count up
+ * timer specification is violated due to erroneous reads.
+ */
+static int __init pmtmr_bug_check(void)
+{
+	static struct pci_device_id gray_list[] __initdata = {
+		/* these chipsets may have bug. */
+		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
+				PCI_DEVICE_ID_INTEL_82801DB_0) },
+		{ },
+	};
+	struct pci_dev *dev;
+	int pmtmr_has_bug = 0;
+	u8 rev;
+
+	if (cur_timer != &timer_pmtmr || !pmtmr_need_workaround)
+		return 0;
+
+	dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+				PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
+	if (dev) {
+		pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
+		/* the bug has been fixed in PIIX4M */
+		if (rev < 3) {
+			printk(KERN_WARNING "* Found PM-Timer Bug on this "
+				"chipset. Due to workarounds for a bug,\n"
+				"* this time source is slow. Consider trying "
+				"other time sources (clock=)\n");
+			pmtmr_has_bug = 1;
+		}
+		pci_dev_put(dev);
+	}
+
+	if (pci_dev_present(gray_list)) {
+		printk(KERN_WARNING "* This chipset may have PM-Timer Bug. Due"
+			" to workarounds for a bug,\n"
+			"* this time source is slow. If you are sure your timer"
+			" does not have\n"
+			"* this bug, please use \"pmtmr_good\" to disable the "
+			"workaround\n");
+		pmtmr_has_bug = 1;
+	}
+
+	if (!pmtmr_has_bug)
+		pmtmr_need_workaround = 0;
+
+	return 0;
+}
+device_initcall(pmtmr_bug_check);
+#endif
+
+static int __init pmtr_good_setup(char *__str)
+{
+	pmtmr_need_workaround = 0;
+	return 1;
+}
+__setup("pmtmr_good", pmtr_good_setup);
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
 MODULE_DESCRIPTION("Power Management Timer (PMTMR) as primary timing source for x86");
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 4624f8ca2459..6b63a5aa1e46 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -92,22 +92,21 @@ asmlinkage void spurious_interrupt_bug(void);
 asmlinkage void machine_check(void);
 
 static int kstack_depth_to_print = 24;
-struct notifier_block *i386die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
+ATOMIC_NOTIFIER_HEAD(i386die_chain);
 
 int register_die_notifier(struct notifier_block *nb)
 {
-	int err = 0;
-	unsigned long flags;
-
 	vmalloc_sync_all();
-	spin_lock_irqsave(&die_notifier_lock, flags);
-	err = notifier_chain_register(&i386die_chain, nb);
-	spin_unlock_irqrestore(&die_notifier_lock, flags);
-	return err;
+	return atomic_notifier_chain_register(&i386die_chain, nb);
 }
 EXPORT_SYMBOL(register_die_notifier);
 
+int unregister_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&i386die_chain, nb);
+}
+EXPORT_SYMBOL(unregister_die_notifier);
+
 static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
 {
 	return p > (void *)tinfo &&
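
The ATOMIC_NOTIFIER_HEAD() conversion above does not change what callers of register_die_notifier() look like; a hedged sketch of a typical consumer follows (the my_* names are made up for illustration, they are not part of this patch):

	static int my_die_handler(struct notifier_block *self, unsigned long val,
				  void *data)
	{
		/* called via notify_die() from trap/NMI context; just observe */
		return NOTIFY_DONE;
	}

	static struct notifier_block my_die_nb = {
		.notifier_call = my_die_handler,
	};

	static int __init my_die_notifier_init(void)
	{
		/* now backed by atomic_notifier_chain_register(), no private lock */
		return register_die_notifier(&my_die_nb);
	}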
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 8165626a5c30..70e560a1b79a 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -1700,7 +1700,7 @@ after_handle_vic_irq(unsigned int irq)
 
 		printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
 		       cpu, irq);
-		for_each_cpu(real_cpu, mask) {
+		for_each_possible_cpu(real_cpu, mask) {
 
 			outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
 			     VIC_PROCESSOR_ID);
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index c4af9638dbfa..fe6eb901326e 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -31,6 +31,7 @@
 #include <linux/nodemask.h>
 #include <linux/module.h>
 #include <linux/kexec.h>
+#include <linux/pfn.h>
 
 #include <asm/e820.h>
 #include <asm/setup.h>
@@ -352,17 +353,6 @@ void __init zone_sizes_init(void)
 {
 	int nid;
 
-	/*
-	 * Insert nodes into pgdat_list backward so they appear in order.
-	 * Clobber node 0's links and NULL out pgdat_list before starting.
-	 */
-	pgdat_list = NULL;
-	for (nid = MAX_NUMNODES - 1; nid >= 0; nid--) {
-		if (!node_online(nid))
-			continue;
-		NODE_DATA(nid)->pgdat_next = pgdat_list;
-		pgdat_list = NODE_DATA(nid);
-	}
 
 	for_each_online_node(nid) {
 		unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index 9db3242103be..2889567e21a1 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -36,7 +36,7 @@ void show_mem(void)
 	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas();
 	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-	for_each_pgdat(pgdat) {
+	for_each_online_pgdat(pgdat) {
 		pgdat_resize_lock(pgdat, &flags);
 		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
 			page = pgdat_page_nr(pgdat, i);
diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c
index 1accce50c2c7..1a2076ce6f6a 100644
--- a/arch/i386/oprofile/nmi_int.c
+++ b/arch/i386/oprofile/nmi_int.c
@@ -122,7 +122,7 @@ static void nmi_save_registers(void * dummy)
 static void free_msrs(void)
 {
 	int i;
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		kfree(cpu_msrs[i].counters);
 		cpu_msrs[i].counters = NULL;
 		kfree(cpu_msrs[i].controls);