author		Len Brown <len.brown@intel.com>	2006-06-29 19:57:46 -0400
committer	Len Brown <len.brown@intel.com>	2006-06-29 19:57:46 -0400
commit		d120cfb544ed6161b9d32fb6c4648c471807ee6b (patch)
tree		7757ad0198d8df76ff5c60f939a687687c41da00 /arch/i386/kernel
parent		9dce0e950dbfab4148f35ac6f297d8638cdc63c4 (diff)
parent		bf7e8511088963078484132636839b59e25cf14f (diff)
merge linus into release branch
Conflicts:

	drivers/acpi/acpi_memhotplug.c
Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--  arch/i386/kernel/asm-offsets.c        |    4
-rw-r--r--  arch/i386/kernel/cpu/amd.c            |    6
-rw-r--r--  arch/i386/kernel/cpu/common.c         |   25
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c |  16
-rw-r--r--  arch/i386/kernel/cpu/proc.c           |    8
-rw-r--r--  arch/i386/kernel/cpuid.c              |    2
-rw-r--r--  arch/i386/kernel/efi.c                |    6
-rw-r--r--  arch/i386/kernel/entry.S              |   22
-rw-r--r--  arch/i386/kernel/i8259.c              |    6
-rw-r--r--  arch/i386/kernel/io_apic.c            |   27
-rw-r--r--  arch/i386/kernel/irq.c                |   27
-rw-r--r--  arch/i386/kernel/microcode.c          |    1
-rw-r--r--  arch/i386/kernel/msr.c                |    2
-rw-r--r--  arch/i386/kernel/scx200.c             |   66
-rw-r--r--  arch/i386/kernel/setup.c              |    2
-rw-r--r--  arch/i386/kernel/signal.c             |    4
-rw-r--r--  arch/i386/kernel/smpboot.c            |   37
-rw-r--r--  arch/i386/kernel/sysenter.c           |  126
-rw-r--r--  arch/i386/kernel/topology.c           |   28
-rw-r--r--  arch/i386/kernel/vsyscall-sysenter.S  |    4
-rw-r--r--  arch/i386/kernel/vsyscall.lds.S       |    4
21 files changed, 270 insertions, 153 deletions
diff --git a/arch/i386/kernel/asm-offsets.c b/arch/i386/kernel/asm-offsets.c
index 1c3a809e6421..c80271f8f084 100644
--- a/arch/i386/kernel/asm-offsets.c
+++ b/arch/i386/kernel/asm-offsets.c
@@ -14,6 +14,7 @@
 #include <asm/fixmap.h>
 #include <asm/processor.h>
 #include <asm/thread_info.h>
+#include <asm/elf.h>
 
 #define DEFINE(sym, val) \
 	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -54,6 +55,7 @@ void foo(void)
 	OFFSET(TI_preempt_count, thread_info, preempt_count);
 	OFFSET(TI_addr_limit, thread_info, addr_limit);
 	OFFSET(TI_restart_block, thread_info, restart_block);
+	OFFSET(TI_sysenter_return, thread_info, sysenter_return);
 	BLANK();
 
 	OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
@@ -69,7 +71,7 @@ void foo(void)
 		sizeof(struct tss_struct));
 
 	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
-	DEFINE(VSYSCALL_BASE, __fix_to_virt(FIX_VSYSCALL));
+	DEFINE(VDSO_PRELINK, VDSO_PRELINK);
 
 	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
 }
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index fd0457c9c827..e6a2d6b80cda 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -235,10 +235,10 @@ static void __init init_amd(struct cpuinfo_x86 *c)
 			while ((1 << bits) < c->x86_max_cores)
 				bits++;
 		}
-		cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
-		phys_proc_id[cpu] >>= bits;
+		c->cpu_core_id = c->phys_proc_id & ((1<<bits)-1);
+		c->phys_proc_id >>= bits;
 		printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
-		       cpu, c->x86_max_cores, cpu_core_id[cpu]);
+		       cpu, c->x86_max_cores, c->cpu_core_id);
 	}
 #endif
 
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 44f2c5f2dda1..70c87de582c7 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -294,7 +294,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 			if (c->x86 >= 0x6)
 				c->x86_model += ((tfms >> 16) & 0xF) << 4;
 			c->x86_mask = tfms & 15;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_HT
 			c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
 #else
 			c->apicid = (ebx >> 24) & 0xFF;
@@ -319,7 +319,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 	early_intel_workaround(c);
 
 #ifdef CONFIG_X86_HT
-	phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
+	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
 #endif
 }
 
@@ -477,11 +477,9 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 {
 	u32 	eax, ebx, ecx, edx;
 	int 	index_msb, core_bits;
-	int 	cpu = smp_processor_id();
 
 	cpuid(1, &eax, &ebx, &ecx, &edx);
 
-
 	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
 		return;
 
@@ -492,16 +490,17 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	} else if (smp_num_siblings > 1 ) {
 
 		if (smp_num_siblings > NR_CPUS) {
-			printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
+			printk(KERN_WARNING "CPU: Unsupported number of the "
+					"siblings %d", smp_num_siblings);
 			smp_num_siblings = 1;
 			return;
 		}
 
 		index_msb = get_count_order(smp_num_siblings);
-		phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
+		c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
 
 		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
-		       phys_proc_id[cpu]);
+		       c->phys_proc_id);
 
 		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
@@ -509,12 +508,12 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 		core_bits = get_count_order(c->x86_max_cores);
 
-		cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
+		c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
 					       ((1 << core_bits) - 1);
 
 		if (c->x86_max_cores > 1)
 			printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
-				cpu_core_id[cpu]);
+				c->cpu_core_id);
 	}
 }
 #endif
@@ -613,6 +612,12 @@ void __cpuinit cpu_init(void)
 		set_in_cr4(X86_CR4_TSD);
 	}
 
+	/* The CPU hotplug case */
+	if (cpu_gdt_descr->address) {
+		gdt = (struct desc_struct *)cpu_gdt_descr->address;
+		memset(gdt, 0, PAGE_SIZE);
+		goto old_gdt;
+	}
 	/*
 	 * This is a horrible hack to allocate the GDT.  The problem
 	 * is that cpu_init() is called really early for the boot CPU
@@ -631,7 +636,7 @@ void __cpuinit cpu_init(void)
 			local_irq_enable();
 		}
 	}
-
+old_gdt:
 	/*
 	 * Initialize the per-CPU GDT with the boot GDT,
 	 * and set up the GDT descriptor:
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 6c37b4fd8ce2..e9f0b928b0a9 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -159,13 +159,13 @@ union l2_cache {
 	unsigned val;
 };
 
-static unsigned short assocs[] = {
+static const unsigned short assocs[] = {
 	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
 	[8] = 16,
 	[0xf] = 0xffff // ??
 };
-static unsigned char levels[] = { 1, 1, 2 };
-static unsigned char types[] = { 1, 2, 3 };
+static const unsigned char levels[] = { 1, 1, 2 };
+static const unsigned char types[] = { 1, 2, 3 };
 
 static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 		       union _cpuid4_leaf_ebx *ebx,
@@ -261,7 +261,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
 	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
 	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_HT
 	unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
 #endif
 
@@ -383,14 +383,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 
 	if (new_l2) {
 		l2 = new_l2;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_HT
 		cpu_llc_id[cpu] = l2_id;
 #endif
 	}
 
 	if (new_l3) {
 		l3 = new_l3;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_HT
 		cpu_llc_id[cpu] = l3_id;
 #endif
 	}
@@ -729,7 +729,7 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
 	return;
 }
 
-static int cacheinfo_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
@@ -747,7 +747,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block cacheinfo_cpu_notifier =
+static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
 {
 	.notifier_call = cacheinfo_cpu_callback,
 };
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index a19fcb262dbb..f54a15268ed7 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -18,7 +18,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	 * applications want to get the raw CPUID data, they should access
 	 * /dev/cpu/<cpu_nr>/cpuid instead.
 	 */
-	static char *x86_cap_flags[] = {
+	static const char * const x86_cap_flags[] = {
 		/* Intel-defined */
 	        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
 	        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
@@ -62,7 +62,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 	};
-	static char *x86_power_flags[] = {
+	static const char * const x86_power_flags[] = {
 		"ts",	/* temperature sensor */
 		"fid",  /* frequency id control */
 		"vid",  /* voltage id control */
@@ -109,9 +109,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
 #ifdef CONFIG_X86_HT
 	if (c->x86_max_cores * smp_num_siblings > 1) {
-		seq_printf(m, "physical id\t: %d\n", phys_proc_id[n]);
+		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
 		seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n]));
-		seq_printf(m, "core id\t\t: %d\n", cpu_core_id[n]);
+		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
 		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
 	}
 #endif
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index 1d9a4abcdfc7..f6dfa9fb675c 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -183,7 +183,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long ac
 	return NOTIFY_OK;
 }
 
-static struct notifier_block cpuid_class_cpu_notifier =
+static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier =
 {
 	.notifier_call = cpuid_class_cpu_callback,
 };
diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c
index 9202b67c4b2e..8beb0f07d999 100644
--- a/arch/i386/kernel/efi.c
+++ b/arch/i386/kernel/efi.c
@@ -601,8 +601,10 @@ efi_initialize_iomem_resources(struct resource *code_resource,
 		res->end = res->start + ((md->num_pages << EFI_PAGE_SHIFT) - 1);
 		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 		if (request_resource(&iomem_resource, res) < 0)
-			printk(KERN_ERR PFX "Failed to allocate res %s : 0x%lx-0x%lx\n",
-				res->name, res->start, res->end);
+			printk(KERN_ERR PFX "Failed to allocate res %s : "
+				"0x%llx-0x%llx\n", res->name,
+				(unsigned long long)res->start,
+				(unsigned long long)res->end);
 		/*
 		 * We don't know which region contains kernel data so we try
 		 * it repeatedly and let the resource manager test it.
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index e6e4506e749a..fbdb933251b6 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -83,6 +83,12 @@ VM_MASK = 0x00020000
 #define resume_kernel		restore_nocheck
 #endif
 
+#ifdef CONFIG_VM86
+#define resume_userspace_sig	check_userspace
+#else
+#define resume_userspace_sig	resume_userspace
+#endif
+
 #define SAVE_ALL \
 	cld; \
 	pushl %es; \
@@ -211,6 +217,7 @@ ret_from_exception:
 	preempt_stop
 ret_from_intr:
 	GET_THREAD_INFO(%ebp)
+check_userspace:
 	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
 	movb CS(%esp), %al
 	testl $(VM_MASK | 3), %eax
@@ -263,7 +270,12 @@ sysenter_past_esp:
 	pushl $(__USER_CS)
 	CFI_ADJUST_CFA_OFFSET 4
 	/*CFI_REL_OFFSET cs, 0*/
-	pushl $SYSENTER_RETURN
+	/*
+	 * Push current_thread_info()->sysenter_return to the stack.
+	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
+	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
+	 */
+	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
 	CFI_ADJUST_CFA_OFFSET 4
 	CFI_REL_OFFSET eip, 0
 
@@ -415,7 +427,7 @@ work_notifysig: # deal with pending signals and
 					# vm86-space
 	xorl %edx, %edx
 	call do_notify_resume
-	jmp resume_userspace
+	jmp resume_userspace_sig
 
 	ALIGN
 work_notifysig_v86:
@@ -428,7 +440,7 @@ work_notifysig_v86:
 	movl %eax, %esp
 	xorl %edx, %edx
 	call do_notify_resume
-	jmp resume_userspace
+	jmp resume_userspace_sig
 #endif
 
 	# perform syscall exit tracing
@@ -515,7 +527,7 @@ ENTRY(irq_entries_start)
 	.if vector
 	CFI_ADJUST_CFA_OFFSET -4
 	.endif
-1:	pushl $vector-256
+1:	pushl $~(vector)
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp common_interrupt
 .data
@@ -535,7 +547,7 @@ common_interrupt:
 #define BUILD_INTERRUPT(name, nr)	\
 ENTRY(name)				\
 	RING0_INT_FRAME;		\
-	pushl $nr-256;			\
+	pushl $~(nr);			\
 	CFI_ADJUST_CFA_OFFSET 4;	\
 	SAVE_ALL;			\
 	movl %esp,%eax;			\
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index c1a42feba286..3c6063671a9f 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -132,7 +132,7 @@ void make_8259A_irq(unsigned int irq)
 {
 	disable_irq_nosync(irq);
 	io_apic_irqs &= ~(1<<irq);
-	irq_desc[irq].handler = &i8259A_irq_type;
+	irq_desc[irq].chip = &i8259A_irq_type;
 	enable_irq(irq);
 }
 
@@ -386,12 +386,12 @@ void __init init_ISA_irqs (void)
 			/*
 			 * 16 old-style INTA-cycle interrupts:
 			 */
-			irq_desc[i].handler = &i8259A_irq_type;
+			irq_desc[i].chip = &i8259A_irq_type;
 		} else {
 			/*
 			 * 'high' PCI IRQs filled in on demand
 			 */
-			irq_desc[i].handler = &no_irq_type;
+			irq_desc[i].chip = &no_irq_type;
 		}
 	}
 }
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 72ae414e4d49..ec9ea0269d36 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -581,7 +581,7 @@ static int balanced_irq(void *unused)
 
 	/* push everything to CPU 0 to give us a starting point. */
 	for (i = 0 ; i < NR_IRQS ; i++) {
-		pending_irq_cpumask[i] = cpumask_of_cpu(0);
+		irq_desc[i].pending_mask = cpumask_of_cpu(0);
 		set_pending_irq(i, cpumask_of_cpu(0));
 	}
 
@@ -1205,15 +1205,17 @@ static struct hw_interrupt_type ioapic_edge_type;
 #define IOAPIC_EDGE	0
 #define IOAPIC_LEVEL	1
 
-static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
+static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
 {
-	unsigned idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
+	unsigned idx;
+
+	idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
 
 	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
 			trigger == IOAPIC_LEVEL)
-		irq_desc[idx].handler = &ioapic_level_type;
+		irq_desc[idx].chip = &ioapic_level_type;
 	else
-		irq_desc[idx].handler = &ioapic_edge_type;
+		irq_desc[idx].chip = &ioapic_edge_type;
 	set_intr_gate(vector, interrupt[idx]);
 }
@@ -1325,7 +1327,7 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in
 	 * The timer IRQ doesn't have to know that behind the
 	 * scene we have a 8259A-master in AEOI mode ...
 	 */
-	irq_desc[0].handler = &ioapic_edge_type;
+	irq_desc[0].chip = &ioapic_edge_type;
 
 	/*
 	 * Add it to the IO-APIC irq-routing table:
@@ -2069,6 +2071,13 @@ static void set_ioapic_affinity_vector (unsigned int vector,
 #endif
 #endif
 
+static int ioapic_retrigger(unsigned int irq)
+{
+	send_IPI_self(IO_APIC_VECTOR(irq));
+
+	return 1;
+}
+
 /*
  * Level and edge triggered IO-APIC interrupts need different handling,
  * so we use two separate IRQ descriptors. Edge triggered IRQs can be
@@ -2088,6 +2097,7 @@ static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
 #ifdef CONFIG_SMP
 	.set_affinity 	= set_ioapic_affinity,
 #endif
+	.retrigger	= ioapic_retrigger,
 };
 
 static struct hw_interrupt_type ioapic_level_type __read_mostly = {
@@ -2101,6 +2111,7 @@ static struct hw_interrupt_type ioapic_level_type __read_mostly = {
 #ifdef CONFIG_SMP
 	.set_affinity 	= set_ioapic_affinity,
 #endif
+	.retrigger	= ioapic_retrigger,
 };
 
 static inline void init_IO_APIC_traps(void)
@@ -2135,7 +2146,7 @@ static inline void init_IO_APIC_traps(void)
 				make_8259A_irq(irq);
 			else
 				/* Strange. Oh, well.. */
-				irq_desc[irq].handler = &no_irq_type;
+				irq_desc[irq].chip = &no_irq_type;
 		}
 	}
 }
@@ -2351,7 +2362,7 @@ static inline void check_timer(void)
 	printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
 
 	disable_8259A_irq(0);
-	irq_desc[0].handler = &lapic_irq_type;
+	irq_desc[0].chip = &lapic_irq_type;
 	apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector);	/* Fixed mode */
 	enable_8259A_irq(0);
 
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 061533e0cb5e..16b491703967 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -53,13 +53,19 @@ static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
  */
 fastcall unsigned int do_IRQ(struct pt_regs *regs)
 {	
-	/* high bits used in ret_from_ code */
-	int irq = regs->orig_eax & 0xff;
+	/* high bit used in ret_from_ code */
+	int irq = ~regs->orig_eax;
 #ifdef CONFIG_4KSTACKS
 	union irq_ctx *curctx, *irqctx;
 	u32 *isp;
 #endif
 
+	if (unlikely((unsigned)irq >= NR_IRQS)) {
+		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
+					__FUNCTION__, irq);
+		BUG();
+	}
+
 	irq_enter();
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 	/* Debugging check for stack overflow: is there less than 1KB free? */
@@ -76,6 +82,10 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
 	}
 #endif
 
+	if (!irq_desc[irq].handle_irq) {
+		__do_IRQ(irq, regs);
+		goto out_exit;
+	}
 #ifdef CONFIG_4KSTACKS
 
 	curctx = (union irq_ctx *) current_thread_info();
@@ -100,8 +110,8 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
 		 * softirq checks work in the hardirq context.
 		 */
 		irqctx->tinfo.preempt_count =
-			irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK |
-			curctx->tinfo.preempt_count & SOFTIRQ_MASK;
+			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
 
 		asm volatile(
 			"       xchgl   %%ebx,%%esp      \n"
@@ -115,6 +125,7 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
 #endif
 	__do_IRQ(irq, regs);
 
+out_exit:
 	irq_exit();
 
 	return 1;
@@ -243,7 +254,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
-		seq_printf(p, " %14s", irq_desc[i].handler->typename);
+		seq_printf(p, " %14s", irq_desc[i].chip->typename);
 		seq_printf(p, "  %s", action->name);
 
 		for (action=action->next; action; action = action->next)
@@ -285,13 +296,13 @@ void fixup_irqs(cpumask_t map)
 		if (irq == 2)
 			continue;
 
-		cpus_and(mask, irq_affinity[irq], map);
+		cpus_and(mask, irq_desc[irq].affinity, map);
 		if (any_online_cpu(mask) == NR_CPUS) {
 			printk("Breaking affinity for irq %i\n", irq);
 			mask = map;
 		}
-		if (irq_desc[irq].handler->set_affinity)
-			irq_desc[irq].handler->set_affinity(irq, mask);
+		if (irq_desc[irq].chip->set_affinity)
+			irq_desc[irq].chip->set_affinity(irq, mask);
 		else if (irq_desc[irq].action && !(warned++))
 			printk("Cannot set affinity for irq %i\n", irq);
 	}
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index 0a865889b2a9..40b44cc0d14b 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -493,7 +493,6 @@ static struct file_operations microcode_fops = {
 static struct miscdevice microcode_dev = {
 	.minor		= MICROCODE_MINOR,
 	.name		= "microcode",
-	.devfs_name	= "cpu/microcode",
 	.fops		= &microcode_fops,
 };
 
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index 7a328230e540..d022cb8fd725 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -266,7 +266,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb, unsigned long acti
 	return NOTIFY_OK;
 }
 
-static struct notifier_block msr_class_cpu_notifier =
+static struct notifier_block __cpuinitdata msr_class_cpu_notifier =
 {
 	.notifier_call = msr_class_cpu_callback,
 };
diff --git a/arch/i386/kernel/scx200.c b/arch/i386/kernel/scx200.c
index 321f5fd26e75..9bf590cefc7d 100644
--- a/arch/i386/kernel/scx200.c
+++ b/arch/i386/kernel/scx200.c
@@ -9,6 +9,7 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/mutex.h>
 #include <linux/pci.h>
 
 #include <linux/scx200.h>
@@ -45,11 +46,19 @@ static struct pci_driver scx200_pci_driver = {
 	.probe = scx200_probe,
 };
 
-static DEFINE_SPINLOCK(scx200_gpio_config_lock);
+static DEFINE_MUTEX(scx200_gpio_config_lock);
 
-static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static void __devinit scx200_init_shadow(void)
 {
 	int bank;
+
+	/* read the current values driven on the GPIO signals */
+	for (bank = 0; bank < 2; ++bank)
+		scx200_gpio_shadow[bank] = inl(scx200_gpio_base + 0x10 * bank);
+}
+
+static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
 	unsigned base;
 
 	if (pdev->device == PCI_DEVICE_ID_NS_SCx200_BRIDGE ||
@@ -63,10 +72,7 @@ static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_
 		}
 
 		scx200_gpio_base = base;
-
-		/* read the current values driven on the GPIO signals */
-		for (bank = 0; bank < 2; ++bank)
-			scx200_gpio_shadow[bank] = inl(scx200_gpio_base + 0x10 * bank);
+		scx200_init_shadow();
 
 	} else {
 		/* find the base of the Configuration Block */
@@ -87,12 +93,11 @@ static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_
 	return 0;
 }
 
-u32 scx200_gpio_configure(int index, u32 mask, u32 bits)
+u32 scx200_gpio_configure(unsigned index, u32 mask, u32 bits)
 {
 	u32 config, new_config;
-	unsigned long flags;
 
-	spin_lock_irqsave(&scx200_gpio_config_lock, flags);
+	mutex_lock(&scx200_gpio_config_lock);
 
 	outl(index, scx200_gpio_base + 0x20);
 	config = inl(scx200_gpio_base + 0x24);
@@ -100,45 +105,11 @@ u32 scx200_gpio_configure(int index, u32 mask, u32 bits)
 	new_config = (config & mask) | bits;
 	outl(new_config, scx200_gpio_base + 0x24);
 
-	spin_unlock_irqrestore(&scx200_gpio_config_lock, flags);
+	mutex_unlock(&scx200_gpio_config_lock);
 
 	return config;
 }
 
-#if 0
-void scx200_gpio_dump(unsigned index)
-{
-	u32 config = scx200_gpio_configure(index, ~0, 0);
-	printk(KERN_DEBUG "GPIO%02u: 0x%08lx", index, (unsigned long)config);
-
-	if (config & 1)
-		printk(" OE"); /* output enabled */
-	else
-		printk(" TS"); /* tristate */
-	if (config & 2)
-		printk(" PP"); /* push pull */
-	else
-		printk(" OD"); /* open drain */
-	if (config & 4)
-		printk(" PUE"); /* pull up enabled */
-	else
-		printk(" PUD"); /* pull up disabled */
-	if (config & 8)
-		printk(" LOCKED"); /* locked */
-	if (config & 16)
-		printk(" LEVEL"); /* level input */
-	else
-		printk(" EDGE"); /* edge input */
-	if (config & 32)
-		printk(" HI"); /* trigger on rising edge */
-	else
-		printk(" LO"); /* trigger on falling edge */
-	if (config & 64)
-		printk(" DEBOUNCE"); /* debounce */
-	printk("\n");
-}
-#endif  /* 0 */
-
 static int __init scx200_init(void)
 {
 	printk(KERN_INFO NAME ": NatSemi SCx200 Driver\n");
@@ -159,10 +130,3 @@ EXPORT_SYMBOL(scx200_gpio_base);
 EXPORT_SYMBOL(scx200_gpio_shadow);
 EXPORT_SYMBOL(scx200_gpio_configure);
 EXPORT_SYMBOL(scx200_cb_base);
-
-/*
-    Local variables:
-    compile-command: "make -k -C ../../.. SUBDIRS=arch/i386/kernel modules"
-    c-basic-offset: 8
-    End:
-*/
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 4a65040cc624..6712f0d2eb37 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -1314,8 +1314,10 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat
 	probe_roms();
 	for (i = 0; i < e820.nr_map; i++) {
 		struct resource *res;
+#ifndef CONFIG_RESOURCES_64BIT
 		if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
 			continue;
+#endif
 		res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
 		switch (e820.map[i].type) {
 		case E820_RAM:	res->name = "System RAM"; break;
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index 5c352c3a9e7f..43002cfb40c4 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -351,7 +351,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
 			goto give_sigsegv;
 	}
 
-	restorer = &__kernel_sigreturn;
+	restorer = (void *)VDSO_SYM(&__kernel_sigreturn);
 	if (ka->sa.sa_flags & SA_RESTORER)
 		restorer = ka->sa.sa_restorer;
 
@@ -447,7 +447,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		goto give_sigsegv;
 
 	/* Set up to return from userspace.  */
-	restorer = &__kernel_rt_sigreturn;
+	restorer = (void *)VDSO_SYM(&__kernel_rt_sigreturn);
 	if (ka->sa.sa_flags & SA_RESTORER)
 		restorer = ka->sa.sa_restorer;
 	err |= __put_user(restorer, &frame->pretcode);
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index bce5470ecb42..89e7315e539c 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -67,12 +67,6 @@ int smp_num_siblings = 1;
 EXPORT_SYMBOL(smp_num_siblings);
 #endif
 
-/* Package ID of each logical CPU */
-int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
-
-/* Core ID of each logical CPU */
-int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
-
 /* Last level cache ID of each logical CPU */
 int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
 
@@ -454,10 +448,12 @@ cpumask_t cpu_coregroup_map(int cpu)
 	struct cpuinfo_x86 *c = cpu_data + cpu;
 	/*
 	 * For perf, we return last level cache shared map.
-	 * TBD: when power saving sched policy is added, we will return
-	 *     cpu_core_map when power saving policy is enabled
+	 * And for power savings, we return cpu_core_map
 	 */
-	return c->llc_shared_map;
+	if (sched_mc_power_savings || sched_smt_power_savings)
+		return cpu_core_map[cpu];
+	else
+		return c->llc_shared_map;
 }
 
 /* representing cpus for which sibling maps can be computed */
@@ -473,8 +469,8 @@ set_cpu_sibling_map(int cpu)
 
 	if (smp_num_siblings > 1) {
 		for_each_cpu_mask(i, cpu_sibling_setup_map) {
-			if (phys_proc_id[cpu] == phys_proc_id[i] &&
-			    cpu_core_id[cpu] == cpu_core_id[i]) {
+			if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
+			    c[cpu].cpu_core_id == c[i].cpu_core_id) {
 				cpu_set(i, cpu_sibling_map[cpu]);
 				cpu_set(cpu, cpu_sibling_map[i]);
 				cpu_set(i, cpu_core_map[cpu]);
@@ -501,7 +497,7 @@ set_cpu_sibling_map(int cpu)
 			cpu_set(i, c[cpu].llc_shared_map);
 			cpu_set(cpu, c[i].llc_shared_map);
 		}
-		if (phys_proc_id[cpu] == phys_proc_id[i]) {
+		if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
 			cpu_set(i, cpu_core_map[cpu]);
 			cpu_set(cpu, cpu_core_map[i]);
 			/*
@@ -1056,6 +1052,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
 	struct warm_boot_cpu_info info;
 	struct work_struct task;
 	int	apicid, ret;
+	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
 
 	apicid = x86_cpu_to_apicid[cpu];
 	if (apicid == BAD_APICID) {
@@ -1063,6 +1060,18 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
 		goto exit;
 	}
 
+	/*
+	 * the CPU isn't initialized at boot time, allocate gdt table here.
+	 * cpu_init will initialize it
+	 */
+	if (!cpu_gdt_descr->address) {
+		cpu_gdt_descr->address = get_zeroed_page(GFP_KERNEL);
+		if (!cpu_gdt_descr->address)
+			printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
+		ret = -ENOMEM;
+		goto exit;
+	}
+
 	info.complete = &done;
 	info.apicid = apicid;
 	info.cpu = cpu;
@@ -1340,8 +1349,8 @@ remove_siblinginfo(int cpu)
 		cpu_clear(cpu, cpu_sibling_map[sibling]);
 	cpus_clear(cpu_sibling_map[cpu]);
 	cpus_clear(cpu_core_map[cpu]);
-	phys_proc_id[cpu] = BAD_APICID;
-	cpu_core_id[cpu] = BAD_APICID;
+	c[cpu].phys_proc_id = 0;
+	c[cpu].cpu_core_id = 0;
 	cpu_clear(cpu, cpu_sibling_setup_map);
 }
 
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 0bada1870bdf..713ba39d32c6 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -2,6 +2,8 @@
  * linux/arch/i386/kernel/sysenter.c
  *
  * (C) Copyright 2002 Linus Torvalds
+ * Portions based on the vdso-randomization code from exec-shield:
+ * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
  *
  * This file contains the needed initializations to support sysenter.
  */
@@ -13,12 +15,31 @@
 #include <linux/gfp.h>
 #include <linux/string.h>
 #include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/module.h>
 
 #include <asm/cpufeature.h>
 #include <asm/msr.h>
 #include <asm/pgtable.h>
 #include <asm/unistd.h>
 
+/*
+ * Should the kernel map a VDSO page into processes and pass its
+ * address down to glibc upon exec()?
+ */
+unsigned int __read_mostly vdso_enabled = 1;
+
+EXPORT_SYMBOL_GPL(vdso_enabled);
+
+static int __init vdso_setup(char *s)
+{
+	vdso_enabled = simple_strtoul(s, NULL, 0);
+
+	return 1;
+}
+
+__setup("vdso=", vdso_setup);
+
 extern asmlinkage void sysenter_entry(void);
 
 void enable_sep_cpu(void)
@@ -45,23 +66,120 @@ void enable_sep_cpu(void)
  */
 extern const char vsyscall_int80_start, vsyscall_int80_end;
 extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
+static void *syscall_page;
 
 int __init sysenter_setup(void)
 {
-	void *page = (void *)get_zeroed_page(GFP_ATOMIC);
+	syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
 
-	__set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY_EXEC);
+#ifdef CONFIG_COMPAT_VDSO
+	__set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY);
+	printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
+#else
+	/*
+	 * In the non-compat case the ELF coredumping code needs the fixmap:
+	 */
+	__set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_KERNEL_RO);
+#endif
 
 	if (!boot_cpu_has(X86_FEATURE_SEP)) {
-		memcpy(page,
+		memcpy(syscall_page,
 		       &vsyscall_int80_start,
 		       &vsyscall_int80_end - &vsyscall_int80_start);
 		return 0;
 	}
 
-	memcpy(page,
+	memcpy(syscall_page,
 	       &vsyscall_sysenter_start,
 	       &vsyscall_sysenter_end - &vsyscall_sysenter_start);
 
 	return 0;
 }
+
+static struct page *syscall_nopage(struct vm_area_struct *vma,
+				unsigned long adr, int *type)
+{
+	struct page *p = virt_to_page(adr - vma->vm_start + syscall_page);
+	get_page(p);
+	return p;
+}
+
+/* Prevent VMA merging */
+static void syscall_vma_close(struct vm_area_struct *vma)
+{
+}
+
+static struct vm_operations_struct syscall_vm_ops = {
+	.close = syscall_vma_close,
+	.nopage = syscall_nopage,
+};
+
+/* Defined in vsyscall-sysenter.S */
+extern void SYSENTER_RETURN;
+
+/* Setup a VMA at program startup for the vsyscall page */
+int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr;
+	int ret;
+
+	down_write(&mm->mmap_sem);
+	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+	if (IS_ERR_VALUE(addr)) {
+		ret = addr;
+		goto up_fail;
+	}
+
+	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+	if (!vma) {
+		ret = -ENOMEM;
+		goto up_fail;
+	}
+
+	vma->vm_start = addr;
+	vma->vm_end = addr + PAGE_SIZE;
+	/* MAYWRITE to allow gdb to COW and set breakpoints */
+	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
+	vma->vm_flags |= mm->def_flags;
+	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+	vma->vm_ops = &syscall_vm_ops;
+	vma->vm_mm = mm;
+
+	ret = insert_vm_struct(mm, vma);
+	if (unlikely(ret)) {
+		kmem_cache_free(vm_area_cachep, vma);
+		goto up_fail;
+	}
+
+	current->mm->context.vdso = (void *)addr;
+	current_thread_info()->sysenter_return =
+		(void *)VDSO_SYM(&SYSENTER_RETURN);
+	mm->total_vm++;
+up_fail:
+	up_write(&mm->mmap_sem);
+	return ret;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
+		return "[vdso]";
+	return NULL;
+}
+
+struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+{
+	return NULL;
+}
+
+int in_gate_area(struct task_struct *task, unsigned long addr)
+{
+	return 0;
+}
+
+int in_gate_area_no_task(unsigned long addr)
+{
+	return 0;
+}
diff --git a/arch/i386/kernel/topology.c b/arch/i386/kernel/topology.c
index 296355292c7c..e2e281d4bcc8 100644
--- a/arch/i386/kernel/topology.c
+++ b/arch/i386/kernel/topology.c
@@ -32,15 +32,8 @@
 
 static struct i386_cpu cpu_devices[NR_CPUS];
 
-int arch_register_cpu(int num){
-	struct node *parent = NULL;
-
-#ifdef CONFIG_NUMA
-	int node = cpu_to_node(num);
-	if (node_online(node))
-		parent = &node_devices[node].node;
-#endif /* CONFIG_NUMA */
-
+int arch_register_cpu(int num)
+{
 	/*
 	 * CPU0 cannot be offlined due to several
 	 * restrictions and assumptions in kernel. This basically
@@ -50,21 +43,13 @@ int arch_register_cpu(int num){
 	if (!num)
 		cpu_devices[num].cpu.no_control = 1;
 
-	return register_cpu(&cpu_devices[num].cpu, num, parent);
+	return register_cpu(&cpu_devices[num].cpu, num);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 
 void arch_unregister_cpu(int num) {
-	struct node *parent = NULL;
-
-#ifdef CONFIG_NUMA
-	int node = cpu_to_node(num);
-	if (node_online(node))
-		parent = &node_devices[node].node;
-#endif /* CONFIG_NUMA */
-
-	return unregister_cpu(&cpu_devices[num].cpu, parent);
+	return unregister_cpu(&cpu_devices[num].cpu);
 }
 EXPORT_SYMBOL(arch_register_cpu);
 EXPORT_SYMBOL(arch_unregister_cpu);
@@ -74,16 +59,13 @@ EXPORT_SYMBOL(arch_unregister_cpu);
 
 #ifdef CONFIG_NUMA
 #include <linux/mmzone.h>
-#include <asm/node.h>
-
-struct i386_node node_devices[MAX_NUMNODES];
 
 static int __init topology_init(void)
 {
 	int i;
 
 	for_each_online_node(i)
-		arch_register_node(i);
+		register_one_node(i);
 
 	for_each_present_cpu(i)
 		arch_register_cpu(i);
diff --git a/arch/i386/kernel/vsyscall-sysenter.S b/arch/i386/kernel/vsyscall-sysenter.S
index 3b62baa6a371..1a36d26e15eb 100644
--- a/arch/i386/kernel/vsyscall-sysenter.S
+++ b/arch/i386/kernel/vsyscall-sysenter.S
@@ -42,10 +42,10 @@ __kernel_vsyscall:
 	/* 7: align return point with nop's to make disassembly easier */
 	.space 7,0x90
 
-	/* 14: System call restart point is here! (SYSENTER_RETURN - 2) */
+	/* 14: System call restart point is here! (SYSENTER_RETURN-2) */
 	jmp .Lenter_kernel
 	/* 16: System call normal return point is here! */
-	.globl SYSENTER_RETURN	/* Symbol used by entry.S. */
+	.globl SYSENTER_RETURN	/* Symbol used by sysenter.c */
 SYSENTER_RETURN:
 	pop %ebp
 .Lpop_ebp:
diff --git a/arch/i386/kernel/vsyscall.lds.S b/arch/i386/kernel/vsyscall.lds.S
index 98699ca6e52d..e26975fc68b6 100644
--- a/arch/i386/kernel/vsyscall.lds.S
+++ b/arch/i386/kernel/vsyscall.lds.S
@@ -7,7 +7,7 @@
 
 SECTIONS
 {
-  . = VSYSCALL_BASE + SIZEOF_HEADERS;
+  . = VDSO_PRELINK + SIZEOF_HEADERS;
 
   .hash           : { *(.hash) } :text
   .dynsym         : { *(.dynsym) }
@@ -20,7 +20,7 @@ SECTIONS
      For the layouts to match, we need to skip more than enough
      space for the dynamic symbol table et al.  If this amount
      is insufficient, ld -shared will barf.  Just increase it here. */
-  . = VSYSCALL_BASE + 0x400;
+  . = VDSO_PRELINK + 0x400;
 
   .text           : { *(.text) } :text =0x90909090
   .note           : { *(.note.*) } :text :note