author		Ingo Molnar <mingo@elte.hu>	2008-07-18 16:50:34 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-18 16:50:34 -0400
commit		a208f37a465e222218974ab20a31b42b7b4893b2 (patch)
tree		77c6acdd4be32024330a14f2618b814126ce7a20 /arch/x86/kernel
parent		511d9d34183662aada3890883e860b151d707e22 (diff)
parent		5b664cb235e97afbf34db9c4d77f08ebd725335e (diff)
Merge branch 'linus' into x86/x2apic
Diffstat (limited to 'arch/x86/kernel')
54 files changed, 611 insertions, 384 deletions
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 81280e93e792..673f1d12b420 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -6,6 +6,12 @@ extra-y := head_$(BITS).o head$(BITS).o head.o init_task.o vmlinu
 
 CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
 
+ifdef CONFIG_FTRACE
+# Do not profile debug utilities
+CFLAGS_REMOVE_tsc.o = -pg
+CFLAGS_REMOVE_rtc.o = -pg
+endif
+
 #
 # vsyscalls (which work on the user stack) should have
 # no stack-protector checks:
@@ -57,6 +63,7 @@ obj-$(CONFIG_X86_MPPARSE) += mpparse.o
 obj-$(CONFIG_X86_LOCAL_APIC) += apic_$(BITS).o nmi.o
 obj-$(CONFIG_X86_IO_APIC) += io_apic_$(BITS).o
 obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
+obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
 obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
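A note on the CFLAGS_REMOVE hunk above: with CONFIG_FTRACE the kernel is built with gcc's -pg switch, which plants a call to mcount() in every function prologue; tsc.o and rtc.o opt out, presumably because the tracer itself leans on these timekeeping helpers and tracing them would recurse. The same instrumentation is visible in user space; a minimal demo (glibc's gprof runtime stands in for ftrace here):

/* Build with:  gcc -pg demo.c -o demo
 * gcc emits "call mcount" in every function prologue; the gprof
 * runtime records each hit and writes gmon.out on exit. */
#include <stdio.h>

static int add(int a, int b)	/* compiled as: call mcount; ... */
{
	return a + b;
}

int main(void)
{
	printf("%d\n", add(2, 3));
	return 0;
}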
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index b314bcd08406..b41b27af33e6 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1411,7 +1411,6 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
 {
 	pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n", d->ident);
 	acpi_skip_timer_override = 1;
-	force_mask_ioapic_irq_2();
 	return 0;
 }
 
diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c
index de2d2e4ebad9..7c074eec39fb 100644
--- a/arch/x86/kernel/acpi/processor.c
+++ b/arch/x86/kernel/acpi/processor.c
@@ -56,6 +56,12 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
 	if (cpu_has(c, X86_FEATURE_ACPI))
 		buf[2] |= ACPI_PDC_T_FFH;
 
+	/*
+	 * If mwait/monitor is unsupported, C2/C3_FFH will be disabled
+	 */
+	if (!cpu_has(c, X86_FEATURE_MWAIT))
+		buf[2] &= ~(ACPI_PDC_C_C2C3_FFH);
+
 	obj->type = ACPI_TYPE_BUFFER;
 	obj->buffer.length = 12;
 	obj->buffer.pointer = (u8 *) buf;
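X86_FEATURE_MWAIT mirrors CPUID leaf 1, ECX bit 3 (MONITOR/MWAIT); without it the _PDC buffer must not advertise FFH-based C2/C3 entry, since on Intel "functional fixed hardware" means mwait. A user-space probe of the same bit, as a sketch:

#include <stdio.h>
#include <cpuid.h>	/* GCC/Clang helper around the CPUID instruction */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;
	/* CPUID.01H:ECX[3] = MONITOR/MWAIT available */
	printf("mwait: %s\n", (ecx & (1u << 3)) ? "yes" : "no");
	return 0;
}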
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index e6a4b564ccaa..868de3d5c39d 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -23,6 +23,15 @@ static unsigned long acpi_realmode;
 static char temp_stack[10240];
 #endif
 
+/* XXX: this macro should move to asm-x86/segment.h and be shared with the
+   boot code... */
+#define GDT_ENTRY(flags, base, limit)		\
+	(((u64)(base & 0xff000000) << 32) |	\
+	 ((u64)flags << 40) |			\
+	 ((u64)(limit & 0x00ff0000) << 32) |	\
+	 ((u64)(base & 0x00ffffff) << 16) |	\
+	 ((u64)(limit & 0x0000ffff)))
+
 /**
  * acpi_save_state_mem - save kernel state
  *
@@ -51,18 +60,27 @@ int acpi_save_state_mem(void)
 	header->video_mode = saved_video_mode;
 
 	header->wakeup_jmp_seg = acpi_wakeup_address >> 4;
+
+	/*
+	 * Set up the wakeup GDT. We set these up as Big Real Mode,
+	 * that is, with limits set to 4 GB. At least the Lenovo
+	 * Thinkpad X61 is known to need this for the video BIOS
+	 * initialization quirk to work; this is likely to also
+	 * be the case for other laptops or integrated video devices.
+	 */
+
 	/* GDT[0]: GDT self-pointer */
 	header->wakeup_gdt[0] =
 		(u64)(sizeof(header->wakeup_gdt) - 1) +
 		((u64)(acpi_wakeup_address +
 			((char *)&header->wakeup_gdt - (char *)acpi_realmode))
				<< 16);
-	/* GDT[1]: real-mode-like code segment */
-	header->wakeup_gdt[1] = (0x009bULL << 40) +
-		((u64)acpi_wakeup_address << 16) + 0xffff;
-	/* GDT[2]: real-mode-like data segment */
-	header->wakeup_gdt[2] = (0x0093ULL << 40) +
-		((u64)acpi_wakeup_address << 16) + 0xffff;
+	/* GDT[1]: big real mode-like code segment */
+	header->wakeup_gdt[1] =
+		GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff);
+	/* GDT[2]: big real mode-like data segment */
+	header->wakeup_gdt[2] =
+		GDT_ENTRY(0x8093, acpi_wakeup_address, 0xfffff);
 
 #ifndef CONFIG_64BIT
 	store_gdt((struct desc_ptr *)&header->pmode_gdt);
@@ -140,6 +158,8 @@ static int __init acpi_sleep_setup(char *str)
 			acpi_realmode_flags |= 2;
 		if (strncmp(str, "s3_beep", 7) == 0)
 			acpi_realmode_flags |= 4;
+		if (strncmp(str, "old_ordering", 12) == 0)
+			acpi_old_suspend_ordering();
 		str = strchr(str, ',');
 		if (str != NULL)
 			str += strspn(str, ", \t");
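For readers decoding GDT_ENTRY: an x86 segment descriptor scatters the 32-bit base and 20-bit limit across the 8-byte entry (limit 15:0 at bits 0-15, base 23:0 at bits 16-39, access byte at 40-47, limit 19:16 at 48-51, flags nibble at 52-55, base 31:24 at 56-63). The 16-bit flags argument 0x809b covers bits 40-55, with its zero nibble at 48-51 left free for the limit's high bits. A standalone sketch, with a hypothetical wakeup address, that reproduces the macro and prints the packed descriptor:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

/* Same packing as the kernel macro above */
#define GDT_ENTRY(flags, base, limit)		\
	(((u64)((base) & 0xff000000) << 32) |	\
	 ((u64)(flags) << 40) |			\
	 ((u64)((limit) & 0x00ff0000) << 32) |	\
	 ((u64)((base) & 0x00ffffff) << 16) |	\
	 ((u64)((limit) & 0x0000ffff)))

int main(void)
{
	/* hypothetical wakeup address; 0x809b = present code segment with
	 * G=1 (4K granularity), so limit 0xfffff spans 4 GB: big real mode */
	u64 code = GDT_ENTRY(0x809b, 0x9a000UL, 0xfffff);

	printf("GDT[1] = %#018llx\n", (unsigned long long)code);
	return 0;
}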
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 65c7857a90dd..2763cb37b553 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -1,6 +1,6 @@
 #include <linux/module.h>
 #include <linux/sched.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/kprobes.h>
 #include <linux/mm.h>
@@ -143,7 +143,7 @@ static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = {
 #ifdef CONFIG_X86_64
 
 extern char __vsyscall_0;
-static inline const unsigned char*const * find_nop_table(void)
+const unsigned char *const *find_nop_table(void)
 {
 	return boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
 		boot_cpu_data.x86 < 6 ? k8_nops : p6_nops;
@@ -162,7 +162,7 @@ static const struct nop {
 	{ -1, NULL }
 };
 
-static const unsigned char*const * find_nop_table(void)
+const unsigned char *const *find_nop_table(void)
 {
 	const unsigned char *const *noptable = intel_nops;
 	int i;
@@ -279,7 +279,7 @@ struct smp_alt_module {
 	struct list_head next;
 };
 static LIST_HEAD(smp_alt_modules);
-static DEFINE_SPINLOCK(smp_alt);
+static DEFINE_MUTEX(smp_alt);
 static int smp_mode = 1;	/* protected by smp_alt */
 
 void alternatives_smp_module_add(struct module *mod, char *name,
@@ -312,12 +312,12 @@ void alternatives_smp_module_add(struct module *mod, char *name,
 		 __func__, smp->locks, smp->locks_end,
 		 smp->text, smp->text_end, smp->name);
 
-	spin_lock(&smp_alt);
+	mutex_lock(&smp_alt);
 	list_add_tail(&smp->next, &smp_alt_modules);
 	if (boot_cpu_has(X86_FEATURE_UP))
 		alternatives_smp_unlock(smp->locks, smp->locks_end,
 					smp->text, smp->text_end);
-	spin_unlock(&smp_alt);
+	mutex_unlock(&smp_alt);
 }
 
 void alternatives_smp_module_del(struct module *mod)
@@ -327,17 +327,17 @@ void alternatives_smp_module_del(struct module *mod)
 	if (smp_alt_once || noreplace_smp)
 		return;
 
-	spin_lock(&smp_alt);
+	mutex_lock(&smp_alt);
 	list_for_each_entry(item, &smp_alt_modules, next) {
 		if (mod != item->mod)
 			continue;
 		list_del(&item->next);
-		spin_unlock(&smp_alt);
+		mutex_unlock(&smp_alt);
 		DPRINTK("%s: %s\n", __func__, item->name);
 		kfree(item);
 		return;
 	}
-	spin_unlock(&smp_alt);
+	mutex_unlock(&smp_alt);
 }
 
 void alternatives_smp_switch(int smp)
@@ -359,7 +359,7 @@ void alternatives_smp_switch(int smp)
 		return;
 	BUG_ON(!smp && (num_online_cpus() > 1));
 
-	spin_lock(&smp_alt);
+	mutex_lock(&smp_alt);
 
 	/*
	 * Avoid unnecessary switches because it forces JIT based VMs to
@@ -383,7 +383,7 @@ void alternatives_smp_switch(int smp)
 				mod->text, mod->text_end);
 	}
 	smp_mode = smp;
-	spin_unlock(&smp_alt);
+	mutex_unlock(&smp_alt);
 }
 
 #endif
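On the spinlock-to-mutex conversion above: the SMP-alternatives paths end up patching kernel text, which can sleep, and sleeping is illegal under a spinlock; a mutex allows it. A trivial module-style sketch of the primitive (hypothetical, not part of the patch):

/* Minimal sketch: a sleepable critical section under DEFINE_MUTEX. */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/delay.h>

static DEFINE_MUTEX(demo_lock);

static int __init demo_init(void)
{
	mutex_lock(&demo_lock);
	msleep(1);		/* legal under a mutex; a bug under a spinlock */
	mutex_unlock(&demo_lock);
	return 0;
}

static void __exit demo_exit(void) { }

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");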
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index cb54d9e20f94..34101962fb0e 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -1374,6 +1374,10 @@ void __init smp_intr_init(void)
 
 	/* IPI for generic function call */
 	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+
+	/* IPI for single call function */
+	set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+		      call_function_single_interrupt);
 }
 #endif
 
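CALL_FUNCTION_SINGLE_VECTOR is the IPI behind the new generic smp_call_function_single() fast path, so targeting one CPU no longer broadcasts to all of them. A hypothetical module-style sketch of a caller using the reworked four-argument form:

/* Hypothetical sketch, not part of the patch: run a function on CPU 0. */
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kernel.h>

static void on_cpu0(void *info)
{
	printk(KERN_INFO "running on CPU %d\n", smp_processor_id());
}

static int __init demo_init(void)
{
	/* (cpu, func, info, wait) - the old "retry" argument is gone */
	smp_call_function_single(0, on_cpu0, NULL, 1);
	return 0;
}

static void __exit demo_exit(void) { }

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");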
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 00e6d1370954..bf9b441331e9 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -204,6 +204,7 @@
 #include <linux/module.h>
 
 #include <linux/poll.h>
+#include <linux/smp_lock.h>
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/timer.h>
@@ -1212,9 +1213,9 @@ static int suspend(int vetoable)
 	if (err != APM_SUCCESS)
 		apm_error("suspend", err);
 	err = (err == APM_SUCCESS) ? 0 : -EIO;
-	device_power_up();
+	device_power_up(PMSG_RESUME);
 	local_irq_enable();
-	device_resume();
+	device_resume(PMSG_RESUME);
 	queue_event(APM_NORMAL_RESUME, NULL);
 	spin_lock(&user_list_lock);
 	for (as = user_list; as != NULL; as = as->next) {
@@ -1239,7 +1240,7 @@ static void standby(void)
 		apm_error("standby", err);
 
 	local_irq_disable();
-	device_power_up();
+	device_power_up(PMSG_RESUME);
 	local_irq_enable();
 }
 
@@ -1325,7 +1326,7 @@ static void check_events(void)
 			ignore_bounce = 1;
 		if ((event != APM_NORMAL_RESUME)
 		    || (ignore_normal_resume == 0)) {
-			device_resume();
+			device_resume(PMSG_RESUME);
 			queue_event(event, NULL);
 		}
 		ignore_normal_resume = 0;
@@ -1549,10 +1550,12 @@ static int do_open(struct inode *inode, struct file *filp)
 {
 	struct apm_user *as;
 
+	lock_kernel();
 	as = kmalloc(sizeof(*as), GFP_KERNEL);
 	if (as == NULL) {
 		printk(KERN_ERR "apm: cannot allocate struct of size %d bytes\n",
 		       sizeof(*as));
+		unlock_kernel();
 		return -ENOMEM;
 	}
 	as->magic = APM_BIOS_MAGIC;
@@ -1574,6 +1577,7 @@ static int do_open(struct inode *inode, struct file *filp)
 	user_list = as;
 	spin_unlock(&user_list_lock);
 	filp->private_data = as;
+	unlock_kernel();
 	return 0;
 }
 
diff --git a/arch/x86/kernel/cpu/amd_64.c b/arch/x86/kernel/cpu/amd_64.c
index bd182b7616ee..7c36fb8a28d4 100644
--- a/arch/x86/kernel/cpu/amd_64.c
+++ b/arch/x86/kernel/cpu/amd_64.c
@@ -200,6 +200,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	 * benefit in doing so.
 	 */
 	if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
+		printk(KERN_DEBUG "tseg: %010llx\n", tseg);
 		if ((tseg>>PMD_SHIFT) <
 		    (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
 		    ((tseg>>PMD_SHIFT) <
diff --git a/arch/x86/kernel/cpu/centaur_64.c b/arch/x86/kernel/cpu/centaur_64.c
index 2026d2119cdb..1d181c40e2e1 100644
--- a/arch/x86/kernel/cpu/centaur_64.c
+++ b/arch/x86/kernel/cpu/centaur_64.c
@@ -16,16 +16,6 @@ static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
 
 static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 {
-	/* Cache sizes */
-	unsigned n;
-
-	n = c->extended_cpuid_level;
-	if (n >= 0x80000008) {
-		unsigned eax = cpuid_eax(0x80000008);
-		c->x86_virt_bits = (eax >> 8) & 0xff;
-		c->x86_phys_bits = eax & 0xff;
-	}
-
 	if (c->x86 == 0x6 && c->x86_model >= 0xf) {
 		c->x86_cache_alignment = c->x86_clflush_size * 2;
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c
index e7bf3c2dc5fe..c6bee77ca9e6 100644
--- a/arch/x86/kernel/cpu/common_64.c
+++ b/arch/x86/kernel/cpu/common_64.c
@@ -98,7 +98,7 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 
 void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
-	unsigned int n, dummy, eax, ebx, ecx, edx;
+	unsigned int n, dummy, ebx, ecx, edx;
 
 	n = c->extended_cpuid_level;
 
@@ -121,11 +121,6 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
 			c->x86_cache_size, ecx & 0xFF);
 	}
-	if (n >= 0x80000008) {
-		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
-		c->x86_virt_bits = (eax >> 8) & 0xff;
-		c->x86_phys_bits = eax & 0xff;
-	}
 }
 
 void __cpuinit detect_ht(struct cpuinfo_x86 *c)
@@ -314,6 +309,13 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 	if (c->extended_cpuid_level >= 0x80000007)
 		c->x86_power = cpuid_edx(0x80000007);
 
+	if (c->extended_cpuid_level >= 0x80000008) {
+		u32 eax = cpuid_eax(0x80000008);
+
+		c->x86_virt_bits = (eax >> 8) & 0xff;
+		c->x86_phys_bits = eax & 0xff;
+	}
+
 	/* Assume all 64-bit CPUs support 32-bit syscall */
 	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
 
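The consolidated probe (here and in the centaur_64/intel_64 hunks that drop their private copies) reads CPUID leaf 0x80000008, which reports the physical address width in EAX[7:0] and the linear (virtual) width in EAX[15:8]. The same query from user space, as a sketch:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x80000008: EAX[7:0] = physical bits, EAX[15:8] = virtual bits */
	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
		return 1;
	printf("phys bits: %u, virt bits: %u\n",
	       eax & 0xff, (eax >> 8) & 0xff);
	return 0;
}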
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index fe9224c51d37..70609efdf1da 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -226,6 +226,10 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 
 	if (cpu_has_bts)
 		ds_init_intel(c);
+
+#ifdef CONFIG_X86_NUMAQ
+	numaq_tsc_disable();
+#endif
 }
 
 static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
diff --git a/arch/x86/kernel/cpu/intel_64.c b/arch/x86/kernel/cpu/intel_64.c
index 02f773399e39..1019c58d39f0 100644
--- a/arch/x86/kernel/cpu/intel_64.c
+++ b/arch/x86/kernel/cpu/intel_64.c
@@ -54,9 +54,6 @@ static void __cpuinit srat_detect_node(void)
 
 static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 {
-	/* Cache sizes */
-	unsigned n;
-
 	init_intel_cacheinfo(c);
 	if (c->cpuid_level > 9) {
 		unsigned eax = cpuid_eax(10);
@@ -78,13 +75,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 	if (cpu_has_bts)
 		ds_init_intel(c);
 
-	n = c->extended_cpuid_level;
-	if (n >= 0x80000008) {
-		unsigned eax = cpuid_eax(0x80000008);
-		c->x86_virt_bits = (eax >> 8) & 0xff;
-		c->x86_phys_bits = eax & 0xff;
-	}
-
 	if (c->x86 == 15)
 		c->x86_cache_alignment = c->x86_clflush_size * 2;
 	if (c->x86 == 6)
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 501ca1cea27d..c4a7ec31394c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -9,6 +9,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/smp_lock.h>
 #include <linux/string.h>
 #include <linux/rcupdate.h>
 #include <linux/kallsyms.h>
@@ -363,7 +364,7 @@ static void mcheck_check_cpu(void *info)
 
 static void mcheck_timer(struct work_struct *work)
 {
-	on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
+	on_each_cpu(mcheck_check_cpu, NULL, 1);
 
 	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the
@@ -532,10 +533,12 @@ static int open_exclu;	/* already open exclusive? */
 
 static int mce_open(struct inode *inode, struct file *file)
 {
+	lock_kernel();
 	spin_lock(&mce_state_lock);
 
 	if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
 		spin_unlock(&mce_state_lock);
+		unlock_kernel();
 		return -EBUSY;
 	}
 
@@ -544,6 +547,7 @@ static int mce_open(struct inode *inode, struct file *file)
 	open_count++;
 
 	spin_unlock(&mce_state_lock);
+	unlock_kernel();
 
 	return nonseekable_open(inode, file);
 }
@@ -617,7 +621,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
 	 * Collect entries that were still getting written before the
 	 * synchronize.
 	 */
-	on_each_cpu(collect_tscs, cpu_tsc, 1, 1);
+	on_each_cpu(collect_tscs, cpu_tsc, 1);
 	for (i = next; i < MCE_LOG_LEN; i++) {
 		if (mcelog.entry[i].finished &&
 		    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
@@ -742,7 +746,7 @@ static void mce_restart(void)
 	if (next_interval)
 		cancel_delayed_work(&mcheck_work);
 	/* Timer race is harmless here */
-	on_each_cpu(mce_init, NULL, 1, 1);
+	on_each_cpu(mce_init, NULL, 1);
 	next_interval = check_interval * HZ;
 	if (next_interval)
 		schedule_delayed_work(&mcheck_work,
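The on_each_cpu() changes in this file (and in non-fatal.c, perfctr-watchdog.c and the io_apic files below) are mechanical fallout of the 2.6.27 generic-IPI rework: the unused nonatomic/retry argument was dropped, leaving on_each_cpu(func, info, wait). A hypothetical module-style sketch of the new form:

/* Sketch of the 3-argument on_each_cpu(); not part of the patch. */
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <asm/atomic.h>

static atomic_t visits = ATOMIC_INIT(0);

static void count_cpu(void *info)
{
	atomic_inc(&visits);
}

static int __init demo_init(void)
{
	on_each_cpu(count_cpu, NULL, 1);	/* wait == 1 */
	printk(KERN_INFO "ran on %d CPUs\n", atomic_read(&visits));
	return 0;
}

static void __exit demo_exit(void) { }

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");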
diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c
index 00ccb6c14ec2..cc1fccdd31e0 100644
--- a/arch/x86/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/x86/kernel/cpu/mcheck/non-fatal.c
@@ -59,7 +59,7 @@ static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
 
 static void mce_work_fn(struct work_struct *work)
 {
-	on_each_cpu(mce_checkregs, NULL, 1, 1);
+	on_each_cpu(mce_checkregs, NULL, 1);
 	schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
 }
 
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 105afe12beb0..6f23969c8faf 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -223,7 +223,7 @@ static void set_mtrr(unsigned int reg, unsigned long base,
 	atomic_set(&data.gate,0);
 
 	/*  Start the ball rolling on other CPUs  */
-	if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
+	if (smp_call_function(ipi_handler, &data, 0) != 0)
 		panic("mtrr: timed out waiting for other CPUs\n");
 
 	local_irq_save(flags);
@@ -1682,7 +1682,7 @@ void mtrr_ap_init(void)
  */
 void mtrr_save_state(void)
 {
-	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
+	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
 }
 
 static int __init mtrr_init_finialize(void)
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 2e9bef6e3aa3..6d4bdc02388a 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -189,7 +189,7 @@ void disable_lapic_nmi_watchdog(void)
 	if (atomic_read(&nmi_active) <= 0)
 		return;
 
-	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
+	on_each_cpu(stop_apic_nmi_watchdog, NULL, 1);
 
 	if (wd_ops)
 		wd_ops->unreserve();
@@ -213,7 +213,7 @@ void enable_lapic_nmi_watchdog(void)
 		return;
 	}
 
-	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
+	on_each_cpu(setup_apic_nmi_watchdog, NULL, 1);
 	touch_nmi_watchdog();
 }
 
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index daff52a62248..2de5fa2bbf77 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -33,6 +33,7 @@
 #include <linux/init.h>
 #include <linux/poll.h>
 #include <linux/smp.h>
+#include <linux/smp_lock.h>
 #include <linux/major.h>
 #include <linux/fs.h>
 #include <linux/smp_lock.h>
@@ -95,7 +96,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
 	for (; count; count -= 16) {
 		cmd.eax = pos;
 		cmd.ecx = pos >> 32;
-		smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1);
+		smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1);
 		if (copy_to_user(tmp, &cmd, 16))
 			return -EFAULT;
 		tmp += 16;
@@ -107,15 +108,23 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
 
 static int cpuid_open(struct inode *inode, struct file *file)
 {
-	unsigned int cpu = iminor(file->f_path.dentry->d_inode);
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-	if (cpu >= NR_CPUS || !cpu_online(cpu))
-		return -ENXIO;	/* No such CPU */
+	unsigned int cpu;
+	struct cpuinfo_x86 *c;
+	int ret = 0;
+
+	lock_kernel();
+
+	cpu = iminor(file->f_path.dentry->d_inode);
+	if (cpu >= NR_CPUS || !cpu_online(cpu)) {
+		ret = -ENXIO;	/* No such CPU */
+		goto out;
+	}
+	c = &cpu_data(cpu);
 	if (c->cpuid_level < 0)
-		return -EIO;	/* CPUID not supported */
-
-	return 0;
+		ret = -EIO;	/* CPUID not supported */
+out:
+	unlock_kernel();
+	return ret;
 }
 
 /*
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index a5383ae2cbe3..28c29180b380 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1049,11 +1049,6 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
 #endif
 
 /*
- * Last pfn which the user wants to use.
- */
-unsigned long __initdata end_user_pfn = MAX_ARCH_PFN;
-
-/*
  * Find the highest page frame number we have available
  */
 static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
@@ -1085,8 +1080,6 @@ static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
 
 	if (last_pfn > max_arch_pfn)
 		last_pfn = max_arch_pfn;
-	if (last_pfn > end_user_pfn)
-		last_pfn = end_user_pfn;
 
 	printk(KERN_INFO "last_pfn = %#lx max_arch_pfn = %#lx\n",
			 last_pfn, max_arch_pfn);
@@ -1131,12 +1124,6 @@ int __init e820_find_active_region(const struct e820entry *ei,
 	if (*ei_endpfn > last_pfn)
 		*ei_endpfn = last_pfn;
 
-	/* Obey end_user_pfn to save on memmap */
-	if (*ei_startpfn >= end_user_pfn)
-		return 0;
-	if (*ei_endpfn > end_user_pfn)
-		*ei_endpfn = end_user_pfn;
-
 	return 1;
 }
 
@@ -1201,7 +1188,6 @@ static int __init parse_memopt(char *p)
 
 	userdef = 1;
 	mem_size = memparse(p, &p);
-	end_user_pfn = mem_size>>PAGE_SHIFT;
 	e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);
 
 	return 0;
@@ -1245,10 +1231,9 @@ static int __init parse_memmap_opt(char *p)
 	} else if (*p == '$') {
 		start_at = memparse(p+1, &p);
 		e820_add_region(start_at, mem_size, E820_RESERVED);
-	} else {
-		end_user_pfn = (mem_size >> PAGE_SHIFT);
+	} else
 		e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);
-	}
+
 	return *p == '\0' ? 0 : -EINVAL;
 }
 early_param("memmap", parse_memmap_opt);
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index a4665f37cfc5..a0e11c0cc872 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -120,7 +120,18 @@ static struct chipset early_qrk[] __initdata = {
 	{}
 };
 
-static void __init check_dev_quirk(int num, int slot, int func)
+/**
+ * check_dev_quirk - apply early quirks to a given PCI device
+ * @num: bus number
+ * @slot: slot number
+ * @func: PCI function
+ *
+ * Check the vendor & device ID against the early quirks table.
+ *
+ * If the device is single function, let early_quirks() know so we don't
+ * poke at this device again.
+ */
+static int __init check_dev_quirk(int num, int slot, int func)
 {
 	u16 class;
 	u16 vendor;
@@ -131,7 +142,7 @@ static void __init check_dev_quirk(int num, int slot, int func)
 	class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE);
 
 	if (class == 0xffff)
-		return;
+		return -1; /* no class, treat as single function */
 
 	vendor = read_pci_config_16(num, slot, func, PCI_VENDOR_ID);
 
@@ -154,7 +165,9 @@ static void __init check_dev_quirk(int num, int slot, int func)
 	type = read_pci_config_byte(num, slot, func,
 				    PCI_HEADER_TYPE);
 	if (!(type & 0x80))
-		return;
+		return -1;
+
+	return 0;
 }
 
 void __init early_quirks(void)
@@ -167,6 +180,9 @@ void __init early_quirks(void)
 	/* Poor man's PCI discovery */
 	for (num = 0; num < 32; num++)
 		for (slot = 0; slot < 32; slot++)
-			for (func = 0; func < 8; func++)
-				check_dev_quirk(num, slot, func);
+			for (func = 0; func < 8; func++) {
+				/* Only probe function 0 on single fn devices */
+				if (check_dev_quirk(num, slot, func))
+					break;
+			}
 }
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 643fd861b724..ff9e7350da54 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -196,7 +196,7 @@ static struct console simnow_console = {
 static struct console *early_console = &early_vga_console;
 static int early_console_initialized;
 
-void early_printk(const char *fmt, ...)
+asmlinkage void early_printk(const char *fmt, ...)
 {
 	char buf[512];
 	int n;
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index cfe28a715434..6bc07f0f1202 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -51,6 +51,7 @@
 #include <asm/percpu.h>
 #include <asm/dwarf2.h>
 #include <asm/processor-flags.h>
+#include <asm/ftrace.h>
 #include <asm/irq_vectors.h>
 
 /*
@@ -1111,6 +1112,77 @@ ENDPROC(xen_failsafe_callback)
 
 #endif	/* CONFIG_XEN */
 
+#ifdef CONFIG_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+ENTRY(mcount)
+	pushl %eax
+	pushl %ecx
+	pushl %edx
+	movl 0xc(%esp), %eax
+	subl $MCOUNT_INSN_SIZE, %eax
+
+.globl mcount_call
+mcount_call:
+	call ftrace_stub
+
+	popl %edx
+	popl %ecx
+	popl %eax
+
+	ret
+END(mcount)
+
+ENTRY(ftrace_caller)
+	pushl %eax
+	pushl %ecx
+	pushl %edx
+	movl 0xc(%esp), %eax
+	movl 0x4(%ebp), %edx
+	subl $MCOUNT_INSN_SIZE, %eax
+
+.globl ftrace_call
+ftrace_call:
+	call ftrace_stub
+
+	popl %edx
+	popl %ecx
+	popl %eax
+
+.globl ftrace_stub
+ftrace_stub:
+	ret
+END(ftrace_caller)
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
+ENTRY(mcount)
+	cmpl $ftrace_stub, ftrace_trace_function
+	jnz trace
+.globl ftrace_stub
+ftrace_stub:
+	ret
+
+	/* taken from glibc */
+trace:
+	pushl %eax
+	pushl %ecx
+	pushl %edx
+	movl 0xc(%esp), %eax
+	movl 0x4(%ebp), %edx
+	subl $MCOUNT_INSN_SIZE, %eax
+
+	call *ftrace_trace_function
+
+	popl %edx
+	popl %ecx
+	popl %eax
+
+	jmp ftrace_stub
+END(mcount)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FTRACE */
+
 .section .rodata,"a"
 #include "syscall_table_32.S"
 
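In C terms, the non-dynamic mcount stub above does: if a tracer is registered, call it with the address of the instrumented function (the return address on the stack minus the size of the call instruction) and the caller's return address taken from the saved frame pointer. A user-space model of that dispatch, with hypothetical addresses:

#include <stdio.h>

#define MCOUNT_INSN_SIZE 5	/* size of "call mcount" on x86 */

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

static void ftrace_stub(unsigned long ip, unsigned long parent_ip) { }

/* tracer hook; points at ftrace_stub while tracing is off */
static ftrace_func_t ftrace_trace_function = ftrace_stub;

static void my_tracer(unsigned long ip, unsigned long parent_ip)
{
	printf("traced %#lx called from %#lx\n", ip, parent_ip);
}

/* models the asm: ip = return address - MCOUNT_INSN_SIZE */
static void mcount(unsigned long ret_addr, unsigned long parent)
{
	if (ftrace_trace_function != ftrace_stub)
		ftrace_trace_function(ret_addr - MCOUNT_INSN_SIZE, parent);
}

int main(void)
{
	ftrace_trace_function = my_tracer;
	mcount(0x1005, 0x2000);	/* made-up addresses */
	return 0;
}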
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index bb4e22f4892f..ae63e584c340 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -51,9 +51,115 @@
 #include <asm/page.h>
 #include <asm/irqflags.h>
 #include <asm/paravirt.h>
+#include <asm/ftrace.h>
 
 	.code64
 
+#ifdef CONFIG_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(mcount)
+
+	subq $0x38, %rsp
+	movq %rax, (%rsp)
+	movq %rcx, 8(%rsp)
+	movq %rdx, 16(%rsp)
+	movq %rsi, 24(%rsp)
+	movq %rdi, 32(%rsp)
+	movq %r8, 40(%rsp)
+	movq %r9, 48(%rsp)
+
+	movq 0x38(%rsp), %rdi
+	subq $MCOUNT_INSN_SIZE, %rdi
+
+.globl mcount_call
+mcount_call:
+	call ftrace_stub
+
+	movq 48(%rsp), %r9
+	movq 40(%rsp), %r8
+	movq 32(%rsp), %rdi
+	movq 24(%rsp), %rsi
+	movq 16(%rsp), %rdx
+	movq 8(%rsp), %rcx
+	movq (%rsp), %rax
+	addq $0x38, %rsp
+
+	retq
+END(mcount)
+
+ENTRY(ftrace_caller)
+
+	/* taken from glibc */
+	subq $0x38, %rsp
+	movq %rax, (%rsp)
+	movq %rcx, 8(%rsp)
+	movq %rdx, 16(%rsp)
+	movq %rsi, 24(%rsp)
+	movq %rdi, 32(%rsp)
+	movq %r8, 40(%rsp)
+	movq %r9, 48(%rsp)
+
+	movq 0x38(%rsp), %rdi
+	movq 8(%rbp), %rsi
+	subq $MCOUNT_INSN_SIZE, %rdi
+
+.globl ftrace_call
+ftrace_call:
+	call ftrace_stub
+
+	movq 48(%rsp), %r9
+	movq 40(%rsp), %r8
+	movq 32(%rsp), %rdi
+	movq 24(%rsp), %rsi
+	movq 16(%rsp), %rdx
+	movq 8(%rsp), %rcx
+	movq (%rsp), %rax
+	addq $0x38, %rsp
+
+.globl ftrace_stub
+ftrace_stub:
+	retq
+END(ftrace_caller)
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+ENTRY(mcount)
+	cmpq $ftrace_stub, ftrace_trace_function
+	jnz trace
+.globl ftrace_stub
+ftrace_stub:
+	retq
+
+trace:
+	/* taken from glibc */
+	subq $0x38, %rsp
+	movq %rax, (%rsp)
+	movq %rcx, 8(%rsp)
+	movq %rdx, 16(%rsp)
+	movq %rsi, 24(%rsp)
+	movq %rdi, 32(%rsp)
+	movq %r8, 40(%rsp)
+	movq %r9, 48(%rsp)
+
+	movq 0x38(%rsp), %rdi
+	movq 8(%rbp), %rsi
+	subq $MCOUNT_INSN_SIZE, %rdi
+
+	call *ftrace_trace_function
+
+	movq 48(%rsp), %r9
+	movq 40(%rsp), %r8
+	movq 32(%rsp), %rdi
+	movq 24(%rsp), %rsi
+	movq 16(%rsp), %rdx
+	movq 8(%rsp), %rcx
+	movq (%rsp), %rax
+	addq $0x38, %rsp
+
+	jmp ftrace_stub
+END(mcount)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FTRACE */
+
 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
 #endif
@@ -710,6 +816,9 @@ END(invalidate_interrupt\num)
 ENTRY(call_function_interrupt)
 	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
 END(call_function_interrupt)
+ENTRY(call_function_single_interrupt)
+	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
+END(call_function_single_interrupt)
 ENTRY(irq_move_cleanup_interrupt)
 	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
 END(irq_move_cleanup_interrupt)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
new file mode 100644
index 000000000000..ab115cd15fdf
--- /dev/null
+++ b/arch/x86/kernel/ftrace.c
@@ -0,0 +1,141 @@
+/*
+ * Code for replacing ftrace calls with jumps.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ *
+ * Thanks goes to Ingo Molnar, for suggesting the idea.
+ * Mathieu Desnoyers, for suggesting postponing the modifications.
+ * Arjan van de Ven, for keeping me straight, and explaining to me
+ * the dangers of modifying code on the run.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/hardirq.h>
+#include <linux/ftrace.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/list.h>
+
+#include <asm/alternative.h>
+#include <asm/ftrace.h>
+
+
+/* Long is fine, even if it is only 4 bytes ;-) */
+static long *ftrace_nop;
+
+union ftrace_code_union {
+	char code[MCOUNT_INSN_SIZE];
+	struct {
+		char e8;
+		int offset;
+	} __attribute__((packed));
+};
+
+
+static int notrace ftrace_calc_offset(long ip, long addr)
+{
+	return (int)(addr - ip);
+}
+
+notrace unsigned char *ftrace_nop_replace(void)
+{
+	return (char *)ftrace_nop;
+}
+
+notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+{
+	static union ftrace_code_union calc;
+
+	calc.e8 = 0xe8;
+	calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
+
+	/*
+	 * No locking needed, this must be called via kstop_machine
+	 * which in essence is like running on a uniprocessor machine.
+	 */
+	return calc.code;
+}
+
+notrace int
+ftrace_modify_code(unsigned long ip, unsigned char *old_code,
+		   unsigned char *new_code)
+{
+	unsigned replaced;
+	unsigned old = *(unsigned *)old_code; /* 4 bytes */
+	unsigned new = *(unsigned *)new_code; /* 4 bytes */
+	unsigned char newch = new_code[4];
+	int faulted = 0;
+
+	/*
+	 * Note: Due to modules and __init, code can
+	 *  disappear and change, we need to protect against faulting
+	 *  as well as code changing.
+	 *
+	 * No real locking needed, this code is run through
+	 * kstop_machine.
+	 */
+	asm volatile (
+		"1: lock\n"
+		"   cmpxchg %3, (%2)\n"
+		"   jnz 2f\n"
+		"   movb %b4, 4(%2)\n"
+		"2:\n"
+		".section .fixup, \"ax\"\n"
+		"3:	movl $1, %0\n"
+		"	jmp 2b\n"
+		".previous\n"
+		_ASM_EXTABLE(1b, 3b)
+		: "=r"(faulted), "=a"(replaced)
+		: "r"(ip), "r"(new), "c"(newch),
+		  "0"(faulted), "a"(old)
+		: "memory");
+	sync_core();
+
+	if (replaced != old && replaced != new)
+		faulted = 2;
+
+	return faulted;
+}
+
+notrace int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	unsigned long ip = (unsigned long)(&ftrace_call);
+	unsigned char old[MCOUNT_INSN_SIZE], *new;
+	int ret;
+
+	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
+	new = ftrace_call_replace(ip, (unsigned long)func);
+	ret = ftrace_modify_code(ip, old, new);
+
+	return ret;
+}
+
+notrace int ftrace_mcount_set(unsigned long *data)
+{
+	unsigned long ip = (long)(&mcount_call);
+	unsigned long *addr = data;
+	unsigned char old[MCOUNT_INSN_SIZE], *new;
+
+	/*
+	 * Replace the mcount stub with a pointer to the
+	 * ip recorder function.
+	 */
+	memcpy(old, &mcount_call, MCOUNT_INSN_SIZE);
+	new = ftrace_call_replace(ip, *addr);
+	*addr = ftrace_modify_code(ip, old, new);
+
+	return 0;
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+	const unsigned char *const *noptable = find_nop_table();
+
+	/* This is running in kstop_machine */
+
+	ftrace_mcount_set(data);
+
+	ftrace_nop = (unsigned long *)noptable[MCOUNT_INSN_SIZE];
+
+	return 0;
+}
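ftrace_call_replace() assembles a 5-byte x86 near call: opcode 0xe8 followed by a 32-bit displacement measured from the end of the instruction, which is why MCOUNT_INSN_SIZE is added to ip before the subtraction. A standalone sketch of the encoding with made-up addresses:

#include <stdio.h>

#define MCOUNT_INSN_SIZE 5

union ftrace_code_union {
	unsigned char code[MCOUNT_INSN_SIZE];
	struct {
		unsigned char e8;
		int offset;
	} __attribute__((packed));
};

int main(void)
{
	unsigned long ip = 0x1000;	/* hypothetical call site */
	unsigned long addr = 0x2000;	/* hypothetical target */
	union ftrace_code_union calc;
	int i;

	calc.e8 = 0xe8;
	/* displacement is relative to the end of the 5-byte instruction */
	calc.offset = (int)(addr - (ip + MCOUNT_INSN_SIZE));

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printf("%02x ", calc.code[i]);
	printf("\n");	/* prints: e8 fb 0f 00 00 */
	return 0;
}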
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index ea230ec69057..0ea6a19bfdfe 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -36,26 +36,15 @@ static inline void hpet_writel(unsigned long d, unsigned long a)
 }
 
 #ifdef CONFIG_X86_64
-
 #include <asm/pgtable.h>
-
-static inline void hpet_set_mapping(void)
-{
-	set_fixmap_nocache(FIX_HPET_BASE, hpet_address);
-	__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);
-	hpet_virt_address = (void __iomem *)fix_to_virt(FIX_HPET_BASE);
-}
-
-static inline void hpet_clear_mapping(void)
-{
-	hpet_virt_address = NULL;
-}
-
-#else
+#endif
 
 static inline void hpet_set_mapping(void)
 {
 	hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
+#ifdef CONFIG_X86_64
+	__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);
+#endif
 }
 
 static inline void hpet_clear_mapping(void)
@@ -63,7 +52,6 @@ static inline void hpet_clear_mapping(void)
 	iounmap(hpet_virt_address);
 	hpet_virt_address = NULL;
 }
-#endif
 
 /*
 * HPET command line enable / disable
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index deb43785e923..dd7ebee446af 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -1,7 +1,14 @@
 #include <linux/module.h>
+
 #include <asm/checksum.h>
-#include <asm/desc.h>
 #include <asm/pgtable.h>
+#include <asm/desc.h>
+#include <asm/ftrace.h>
+
+#ifdef CONFIG_FTRACE
+/* mcount is defined in assembly */
+EXPORT_SYMBOL(mcount);
+#endif
 
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_partial_copy_generic);
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 382208d11f8d..a82065b0699e 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -59,13 +59,6 @@ static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 static DEFINE_SPINLOCK(ioapic_lock);
 static DEFINE_SPINLOCK(vector_lock);
 
-static bool mask_ioapic_irq_2 __initdata;
-
-void __init force_mask_ioapic_irq_2(void)
-{
-	mask_ioapic_irq_2 = true;
-}
-
 int timer_through_8259 __initdata;
 
 /*
@@ -1576,7 +1569,7 @@ void /*__init*/ print_local_APIC(void *dummy)
 
 void print_all_local_APICs(void)
 {
-	on_each_cpu(print_local_APIC, NULL, 1, 1);
+	on_each_cpu(print_local_APIC, NULL, 1);
 }
 
 void /*__init*/ print_PIC(void)
@@ -2186,9 +2179,6 @@ static inline void __init check_timer(void)
 	printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
 		vector, apic1, pin1, apic2, pin2);
 
-	if (mask_ioapic_irq_2)
-		mask_IO_APIC_irq(2);
-
 	/*
	 * Some BIOS writers are clueless and report the ExtINTA
	 * I/O APIC input from the cascaded 8259A as the timer
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 2db0f98e2af5..39f0be37e9a1 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -96,13 +96,6 @@ static int no_timer_check;
 
 static int disable_timer_pin_1 __initdata;
 
-static bool mask_ioapic_irq_2 __initdata;
-
-void __init force_mask_ioapic_irq_2(void)
-{
-	mask_ioapic_irq_2 = true;
-}
-
 int timer_through_8259 __initdata;
 
 /* Where if anywhere is the i8259 connect in external int mode */
@@ -1314,7 +1307,7 @@ void __apicdebuginit print_local_APIC(void * dummy)
 
 void print_all_local_APICs (void)
 {
-	on_each_cpu(print_local_APIC, NULL, 1, 1);
+	on_each_cpu(print_local_APIC, NULL, 1);
 }
 
 void __apicdebuginit print_PIC(void)
@@ -2020,9 +2013,6 @@ static inline void __init check_timer(void)
 	apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
 		cfg->vector, apic1, pin1, apic2, pin2);
 
-	if (mask_ioapic_irq_2)
-		mask_IO_APIC_irq(2);
-
 	/*
	 * Some BIOS writers are clueless and report the ExtINTA
	 * I/O APIC input from the cascaded 8259A as the timer
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index 31f49e8f46a7..0373e88de95a 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -199,6 +199,10 @@ void __init native_init_IRQ(void)
 	/* IPI for generic function call */
 	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
 
+	/* IPI for generic single function call */
+	alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+			call_function_single_interrupt);
+
 	/* Low priority IPI to cleanup after moving an irq */
 	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
 #endif
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 21f2bae98c15..a8449571858a 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -68,7 +68,7 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 		load_LDT(pc);
 		mask = cpumask_of_cpu(smp_processor_id());
 		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
-			smp_call_function(flush_ldt, current->mm, 1, 1);
+			smp_call_function(flush_ldt, current->mm, 1);
 		preempt_enable();
 #else
 		load_LDT(pc);
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index f4960171bc66..8864230d55af 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c | |||
@@ -11,6 +11,8 @@ | |||
11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/numa.h> | 13 | #include <linux/numa.h> |
14 | #include <linux/ftrace.h> | ||
15 | |||
14 | #include <asm/pgtable.h> | 16 | #include <asm/pgtable.h> |
15 | #include <asm/pgalloc.h> | 17 | #include <asm/pgalloc.h> |
16 | #include <asm/tlbflush.h> | 18 | #include <asm/tlbflush.h> |
@@ -107,6 +109,8 @@ NORET_TYPE void machine_kexec(struct kimage *image) | |||
107 | unsigned long page_list[PAGES_NR]; | 109 | unsigned long page_list[PAGES_NR]; |
108 | void *control_page; | 110 | void *control_page; |
109 | 111 | ||
112 | tracer_disable(); | ||
113 | |||
110 | /* Interrupts aren't acceptable while we reboot */ | 114 | /* Interrupts aren't acceptable while we reboot */ |
111 | local_irq_disable(); | 115 | local_irq_disable(); |
112 | 116 | ||
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 7830dc4a8380..9dd9262693a3 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c | |||
@@ -11,6 +11,8 @@ | |||
11 | #include <linux/string.h> | 11 | #include <linux/string.h> |
12 | #include <linux/reboot.h> | 12 | #include <linux/reboot.h> |
13 | #include <linux/numa.h> | 13 | #include <linux/numa.h> |
14 | #include <linux/ftrace.h> | ||
15 | |||
14 | #include <asm/pgtable.h> | 16 | #include <asm/pgtable.h> |
15 | #include <asm/tlbflush.h> | 17 | #include <asm/tlbflush.h> |
16 | #include <asm/mmu_context.h> | 18 | #include <asm/mmu_context.h> |
@@ -184,6 +186,8 @@ NORET_TYPE void machine_kexec(struct kimage *image) | |||
184 | unsigned long page_list[PAGES_NR]; | 186 | unsigned long page_list[PAGES_NR]; |
185 | void *control_page; | 187 | void *control_page; |
186 | 188 | ||
189 | tracer_disable(); | ||
190 | |||
187 | /* Interrupts aren't acceptable while we reboot */ | 191 | /* Interrupts aren't acceptable while we reboot */ |
188 | local_irq_disable(); | 192 | local_irq_disable(); |
189 | 193 | ||
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c index 9758fea87c5b..56b933119a04 100644 --- a/arch/x86/kernel/microcode.c +++ b/arch/x86/kernel/microcode.c | |||
@@ -76,6 +76,7 @@ | |||
76 | #include <linux/kernel.h> | 76 | #include <linux/kernel.h> |
77 | #include <linux/init.h> | 77 | #include <linux/init.h> |
78 | #include <linux/sched.h> | 78 | #include <linux/sched.h> |
79 | #include <linux/smp_lock.h> | ||
79 | #include <linux/cpumask.h> | 80 | #include <linux/cpumask.h> |
80 | #include <linux/module.h> | 81 | #include <linux/module.h> |
81 | #include <linux/slab.h> | 82 | #include <linux/slab.h> |
@@ -423,6 +424,7 @@ out: | |||
423 | 424 | ||
424 | static int microcode_open (struct inode *unused1, struct file *unused2) | 425 | static int microcode_open (struct inode *unused1, struct file *unused2) |
425 | { | 426 | { |
427 | cycle_kernel_lock(); | ||
426 | return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; | 428 | return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; |
427 | } | 429 | } |
428 | 430 | ||
@@ -489,7 +491,7 @@ MODULE_ALIAS_MISCDEV(MICROCODE_MINOR); | |||
489 | #define microcode_dev_exit() do { } while(0) | 491 | #define microcode_dev_exit() do { } while(0) |
490 | #endif | 492 | #endif |
491 | 493 | ||
492 | static long get_next_ucode_from_buffer(void **mc, void *buf, | 494 | static long get_next_ucode_from_buffer(void **mc, const u8 *buf, |
493 | unsigned long size, long offset) | 495 | unsigned long size, long offset) |
494 | { | 496 | { |
495 | microcode_header_t *mc_header; | 497 | microcode_header_t *mc_header; |
@@ -523,7 +525,7 @@ static int cpu_request_microcode(int cpu) | |||
523 | char name[30]; | 525 | char name[30]; |
524 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 526 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
525 | const struct firmware *firmware; | 527 | const struct firmware *firmware; |
526 | void *buf; | 528 | const u8 *buf; |
527 | unsigned long size; | 529 | unsigned long size; |
528 | long offset = 0; | 530 | long offset = 0; |
529 | int error; | 531 | int error; |
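Note: cycle_kernel_lock() here is part of the BKL pushdown: open() used to be called by the VFS with the big kernel lock held, and drivers that still depend on that ordering now take and immediately release the lock themselves. A minimal sketch of the idiom in a character-device open (the function name is hypothetical):

#include <linux/fs.h>
#include <linux/smp_lock.h>

static int example_open(struct inode *inode, struct file *file)
{
	/*
	 * Briefly take and release the BKL so this open serializes
	 * against any driver setup still running under lock_kernel(),
	 * exactly as it did when the VFS held the lock for us.
	 */
	cycle_kernel_lock();
	return 0;
}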
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 1f3abe048e93..a153b3905f60 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c | |||
@@ -117,12 +117,20 @@ static int msr_open(struct inode *inode, struct file *file) | |||
117 | { | 117 | { |
118 | unsigned int cpu = iminor(file->f_path.dentry->d_inode); | 118 | unsigned int cpu = iminor(file->f_path.dentry->d_inode); |
119 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 119 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
120 | int ret = 0; | ||
120 | 121 | ||
121 | if (cpu >= NR_CPUS || !cpu_online(cpu)) | 122 | lock_kernel(); |
122 | return -ENXIO; /* No such CPU */ | 123 | cpu = iminor(file->f_path.dentry->d_inode); |
123 | if (!cpu_has(c, X86_FEATURE_MSR)) | ||
124 | return -EIO; /* MSR not supported */ | ||
125 | 124 | ||
125 | if (cpu >= NR_CPUS || !cpu_online(cpu)) { | ||
126 | ret = -ENXIO; /* No such CPU */ | ||
127 | goto out; | ||
128 | } | ||
129 | c = &cpu_data(cpu); | ||
130 | if (!cpu_has(c, X86_FEATURE_MSR)) | ||
131 | ret = -EIO; /* MSR not supported */ | ||
132 | out: | ||
133 | unlock_kernel(); | ||
126 | return 0; | 134 | return ret; |
127 | } | 135 | } |
128 | 136 | ||
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 716b89284be0..ec024b3baad0 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c | |||
@@ -130,7 +130,7 @@ int __init check_nmi_watchdog(void) | |||
130 | 130 | ||
131 | #ifdef CONFIG_SMP | 131 | #ifdef CONFIG_SMP |
132 | if (nmi_watchdog == NMI_LOCAL_APIC) | 132 | if (nmi_watchdog == NMI_LOCAL_APIC) |
133 | smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0); | 133 | smp_call_function(nmi_cpu_busy, (void *)&endflag, 0); |
134 | #endif | 134 | #endif |
135 | 135 | ||
136 | for_each_possible_cpu(cpu) | 136 | for_each_possible_cpu(cpu) |
@@ -272,7 +272,7 @@ static void __acpi_nmi_enable(void *__unused) | |||
272 | void acpi_nmi_enable(void) | 272 | void acpi_nmi_enable(void) |
273 | { | 273 | { |
274 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) | 274 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) |
275 | on_each_cpu(__acpi_nmi_enable, NULL, 0, 1); | 275 | on_each_cpu(__acpi_nmi_enable, NULL, 1); |
276 | } | 276 | } |
277 | 277 | ||
278 | static void __acpi_nmi_disable(void *__unused) | 278 | static void __acpi_nmi_disable(void *__unused) |
@@ -286,7 +286,7 @@ static void __acpi_nmi_disable(void *__unused) | |||
286 | void acpi_nmi_disable(void) | 286 | void acpi_nmi_disable(void) |
287 | { | 287 | { |
288 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) | 288 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) |
289 | on_each_cpu(__acpi_nmi_disable, NULL, 0, 1); | 289 | on_each_cpu(__acpi_nmi_disable, NULL, 1); |
290 | } | 290 | } |
291 | 291 | ||
292 | void setup_apic_nmi_watchdog(void *unused) | 292 | void setup_apic_nmi_watchdog(void *unused) |
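Note: the nmi.c call above passes wait=0, the fire-and-forget form of the new API. A short sketch of the lifetime rule that implies (names are illustrative):

#include <linux/smp.h>

static int busy_flag;	/* static: must outlive the asynchronous call */

static void set_flag(void *info)
{
	*(int *)info = 1;
}

static void kick_other_cpus(void)
{
	/*
	 * wait=0: returns as soon as the IPI is queued, so set_flag()
	 * may run later; the object @info points to must remain valid
	 * until the handler has run on every target CPU.
	 */
	smp_call_function(set_flag, &busy_flag, 0);
}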
diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index f0f1de1c4a1d..a23e8233b9ac 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c | |||
@@ -93,12 +93,13 @@ int __init get_memcfg_numaq(void) | |||
93 | return 1; | 93 | return 1; |
94 | } | 94 | } |
95 | 95 | ||
96 | static int __init numaq_tsc_disable(void) | 96 | void __init numaq_tsc_disable(void) |
97 | { | 97 | { |
98 | if (!found_numaq) | ||
99 | return; | ||
100 | |||
98 | if (num_online_nodes() > 1) { | 101 | if (num_online_nodes() > 1) { |
99 | printk(KERN_DEBUG "NUMAQ: disabling TSC\n"); | 102 | printk(KERN_DEBUG "NUMAQ: disabling TSC\n"); |
100 | setup_clear_cpu_cap(X86_FEATURE_TSC); | 103 | setup_clear_cpu_cap(X86_FEATURE_TSC); |
101 | } | 104 | } |
102 | return 0; | ||
103 | } | 105 | } |
104 | arch_initcall(numaq_tsc_disable); | ||
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index d0d18db5d2a4..c3fe78406d18 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
@@ -630,6 +630,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info) | |||
630 | struct pci_dev *dev; | 630 | struct pci_dev *dev; |
631 | void *gatt; | 631 | void *gatt; |
632 | int i, error; | 632 | int i, error; |
633 | unsigned long start_pfn, end_pfn; | ||
633 | 634 | ||
634 | printk(KERN_INFO "PCI-DMA: Disabling AGP.\n"); | 635 | printk(KERN_INFO "PCI-DMA: Disabling AGP.\n"); |
635 | aper_size = aper_base = info->aper_size = 0; | 636 | aper_size = aper_base = info->aper_size = 0; |
@@ -674,6 +675,13 @@ static __init int init_k8_gatt(struct agp_kern_info *info) | |||
674 | 675 | ||
675 | printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n", | 676 | printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n", |
676 | aper_base, aper_size>>10); | 677 | aper_base, aper_size>>10); |
678 | |||
679 | /* need to map that range */ | ||
680 | end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT); | ||
681 | if (end_pfn > max_low_pfn_mapped) { | ||
682 | start_pfn = (aper_base>>PAGE_SHIFT); | ||
683 | init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT); | ||
684 | } | ||
677 | return 0; | 685 | return 0; |
678 | 686 | ||
679 | nommu: | 687 | nommu: |
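Note: the new hunk extends the kernel direct mapping when the GART aperture lies above the range mapped at boot. The pfn arithmetic is plain shifting; a small worked sketch with made-up numbers (a 64 MB aperture based at 0xa0000000):

#include <linux/kernel.h>
#include <asm/page.h>

static void aperture_pfn_example(void)
{
	unsigned long aper_base = 0xa0000000UL;	/* illustrative base */
	unsigned long aper_size = 64UL << 20;	/* 64 MB */
	unsigned long start_pfn = aper_base >> PAGE_SHIFT;		/* 0xa0000 */
	unsigned long end_pfn = start_pfn + (aper_size >> PAGE_SHIFT);	/* 0xa4000 */

	printk(KERN_DEBUG "would map pfns 0x%lx-0x%lx\n", start_pfn, end_pfn);
	/* the patch then calls, when end_pfn > max_low_pfn_mapped:
	 * init_memory_mapping(start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
	 */
}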
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 4061d63aabe7..4d629c62f4f8 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -7,6 +7,12 @@ | |||
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | #include <linux/pm.h> | 8 | #include <linux/pm.h> |
9 | #include <linux/clockchips.h> | 9 | #include <linux/clockchips.h> |
10 | #include <asm/system.h> | ||
11 | |||
12 | unsigned long idle_halt; | ||
13 | EXPORT_SYMBOL(idle_halt); | ||
14 | unsigned long idle_nomwait; | ||
15 | EXPORT_SYMBOL(idle_nomwait); | ||
10 | 16 | ||
11 | struct kmem_cache *task_xstate_cachep; | 17 | struct kmem_cache *task_xstate_cachep; |
12 | 18 | ||
@@ -132,7 +138,7 @@ void cpu_idle_wait(void) | |||
132 | { | 138 | { |
133 | smp_mb(); | 139 | smp_mb(); |
134 | /* kick all the CPUs so that they exit out of pm_idle */ | 140 | /* kick all the CPUs so that they exit out of pm_idle */ |
135 | smp_call_function(do_nothing, NULL, 0, 1); | 141 | smp_call_function(do_nothing, NULL, 1); |
136 | } | 142 | } |
137 | EXPORT_SYMBOL_GPL(cpu_idle_wait); | 143 | EXPORT_SYMBOL_GPL(cpu_idle_wait); |
138 | 144 | ||
@@ -325,7 +331,27 @@ static int __init idle_setup(char *str) | |||
325 | pm_idle = poll_idle; | 331 | pm_idle = poll_idle; |
326 | } else if (!strcmp(str, "mwait")) | 332 | } else if (!strcmp(str, "mwait")) |
327 | force_mwait = 1; | 333 | force_mwait = 1; |
328 | else | 334 | else if (!strcmp(str, "halt")) { |
335 | /* | ||
336 | * With "idle=halt" on the command line, halt is forced | ||
337 | * as the idle method, so the CPU C2/C3 states are never | ||
338 | * entered. | ||
339 | * Leave boot_option_idle_override untouched so the CPU | ||
340 | * idle driver can still be loaded. | ||
341 | */ | ||
342 | pm_idle = default_idle; | ||
343 | idle_halt = 1; | ||
344 | return 0; | ||
345 | } else if (!strcmp(str, "nomwait")) { | ||
346 | /* | ||
347 | * With "idle=nomwait" on the command line, mwait is | ||
348 | * disabled for the CPU C2/C3 states. As above, leave | ||
349 | * boot_option_idle_override untouched so the CPU idle | ||
350 | * driver can still be loaded. | ||
351 | */ | ||
352 | idle_nomwait = 1; | ||
353 | return 0; | ||
354 | } else | ||
329 | return -1; | 355 | return -1; |
330 | 356 | ||
331 | boot_option_idle_override = 1; | 357 | boot_option_idle_override = 1; |
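Note: idle_setup() above is the boot-time parser, hooked up elsewhere in process.c via early_param("idle", idle_setup); booting with "idle=halt" pins pm_idle to default_idle, and "idle=nomwait" keeps ACPI from using MWAIT for the C2/C3 states. A minimal sketch of registering such a handler (the parameter name is hypothetical):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>

static int __init example_idle_setup(char *str)
{
	if (!str)
		return -EINVAL;
	if (!strcmp(str, "halt"))
		printk(KERN_INFO "example: would force halt for idle\n");
	return 0;
}
early_param("example_idle", example_idle_setup);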
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 9a139f6c9df3..0c3927accb00 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -142,7 +142,10 @@ void cpu_idle(void) | |||
142 | 142 | ||
143 | local_irq_disable(); | 143 | local_irq_disable(); |
144 | __get_cpu_var(irq_stat).idle_timestamp = jiffies; | 144 | __get_cpu_var(irq_stat).idle_timestamp = jiffies; |
145 | /* Don't trace irqs off for idle */ | ||
146 | stop_critical_timings(); | ||
145 | pm_idle(); | 147 | pm_idle(); |
148 | start_critical_timings(); | ||
146 | } | 149 | } |
147 | tick_nohz_restart_sched_tick(); | 150 | tick_nohz_restart_sched_tick(); |
148 | preempt_enable_no_resched(); | 151 | preempt_enable_no_resched(); |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index db5eb963e4df..a8e53626ac9a 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -134,7 +134,10 @@ void cpu_idle(void) | |||
134 | */ | 134 | */ |
135 | local_irq_disable(); | 135 | local_irq_disable(); |
136 | enter_idle(); | 136 | enter_idle(); |
137 | /* Don't trace irqs off for idle */ | ||
138 | stop_critical_timings(); | ||
137 | pm_idle(); | 139 | pm_idle(); |
140 | start_critical_timings(); | ||
138 | /* In many cases the interrupt that ended idle | 141 | /* In many cases the interrupt that ended idle |
139 | has already called exit_idle. But some idle | 142 | has already called exit_idle. But some idle |
140 | loops can be woken up without interrupt. */ | 143 | loops can be woken up without interrupt. */ |
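Note: both idle loops now bracket pm_idle() with stop_critical_timings()/start_critical_timings(), so the irqsoff latency tracer does not report the idle halt, where interrupts are deliberately off for a long time, as a latency. A sketch of the same bracketing around an explicit halt (x86-64, illustrative):

#include <linux/ftrace.h>
#include <linux/irqflags.h>

static void quiet_halt(void)
{
	local_irq_disable();
	stop_critical_timings();	/* tracer ignores the section below */
	/* sti immediately followed by hlt: sleep until the next interrupt */
	asm volatile("sti; hlt" : : : "memory");
	start_critical_timings();	/* irqs are back on after the wake-up */
}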
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 79bdcd11c66e..d13858818100 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c | |||
@@ -266,6 +266,8 @@ static void old_ich_force_enable_hpet_user(struct pci_dev *dev) | |||
266 | hpet_print_force_info(); | 266 | hpet_print_force_info(); |
267 | } | 267 | } |
268 | 268 | ||
269 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, | ||
270 | old_ich_force_enable_hpet_user); | ||
269 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, | 271 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, |
270 | old_ich_force_enable_hpet_user); | 272 | old_ich_force_enable_hpet_user); |
271 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, | 273 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 2e78a143dec3..6121ffd46b9e 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -684,6 +684,11 @@ void __init setup_arch(char **cmdline_p) | |||
684 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC); | 684 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC); |
685 | } | 685 | } |
686 | 686 | ||
687 | #ifdef CONFIG_PCI | ||
688 | if (pci_early_dump_regs) | ||
689 | early_dump_pci_devices(); | ||
690 | #endif | ||
691 | |||
687 | finish_e820_parsing(); | 692 | finish_e820_parsing(); |
688 | 693 | ||
689 | #ifdef CONFIG_X86_32 | 694 | #ifdef CONFIG_X86_32 |
@@ -851,6 +856,14 @@ void __init setup_arch(char **cmdline_p) | |||
851 | init_cpu_to_node(); | 856 | init_cpu_to_node(); |
852 | #endif | 857 | #endif |
853 | 858 | ||
859 | #ifdef CONFIG_X86_NUMAQ | ||
860 | /* | ||
861 | * numaq_tsc_disable() checks the number of online nodes, | ||
862 | * so call it here, before time_init()/tsc_init() | ||
863 | */ | ||
864 | numaq_tsc_disable(); | ||
865 | #endif | ||
866 | |||
854 | init_apic_mappings(); | 867 | init_apic_mappings(); |
855 | ioapic_init_mappings(); | 868 | ioapic_init_mappings(); |
856 | 869 | ||
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 5fc310f746fc..cac68430d31f 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
@@ -343,23 +343,23 @@ static const cpumask_t cpu_mask_none; | |||
343 | /* | 343 | /* |
344 | * Returns a pointer to the bitmask of CPUs on Node 'node'. | 344 | * Returns a pointer to the bitmask of CPUs on Node 'node'. |
345 | */ | 345 | */ |
346 | cpumask_t *_node_to_cpumask_ptr(int node) | 346 | const cpumask_t *_node_to_cpumask_ptr(int node) |
347 | { | 347 | { |
348 | if (node_to_cpumask_map == NULL) { | 348 | if (node_to_cpumask_map == NULL) { |
349 | printk(KERN_WARNING | 349 | printk(KERN_WARNING |
350 | "_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n", | 350 | "_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n", |
351 | node); | 351 | node); |
352 | dump_stack(); | 352 | dump_stack(); |
353 | return &cpu_online_map; | 353 | return (const cpumask_t *)&cpu_online_map; |
354 | } | 354 | } |
355 | if (node >= nr_node_ids) { | 355 | if (node >= nr_node_ids) { |
356 | printk(KERN_WARNING | 356 | printk(KERN_WARNING |
357 | "_node_to_cpumask_ptr(%d): node > nr_node_ids(%d)\n", | 357 | "_node_to_cpumask_ptr(%d): node > nr_node_ids(%d)\n", |
358 | node, nr_node_ids); | 358 | node, nr_node_ids); |
359 | dump_stack(); | 359 | dump_stack(); |
360 | return (cpumask_t *)&cpu_mask_none; | 360 | return &cpu_mask_none; |
361 | } | 361 | } |
362 | return (cpumask_t *)&node_to_cpumask_map[node]; | 362 | return &node_to_cpumask_map[node]; |
363 | } | 363 | } |
364 | EXPORT_SYMBOL(_node_to_cpumask_ptr); | 364 | EXPORT_SYMBOL(_node_to_cpumask_ptr); |
365 | 365 | ||
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 0cb7aadc87cd..361b7a4c640c 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c | |||
@@ -121,132 +121,23 @@ static void native_smp_send_reschedule(int cpu) | |||
121 | send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); | 121 | send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); |
122 | } | 122 | } |
123 | 123 | ||
124 | /* | 124 | void native_send_call_func_single_ipi(int cpu) |
125 | * Structure and data for smp_call_function(). This is designed to minimise | ||
126 | * static memory requirements. It also looks cleaner. | ||
127 | */ | ||
128 | static DEFINE_SPINLOCK(call_lock); | ||
129 | |||
130 | struct call_data_struct { | ||
131 | void (*func) (void *info); | ||
132 | void *info; | ||
133 | atomic_t started; | ||
134 | atomic_t finished; | ||
135 | int wait; | ||
136 | }; | ||
137 | |||
138 | void lock_ipi_call_lock(void) | ||
139 | { | 125 | { |
140 | spin_lock_irq(&call_lock); | 126 | send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR); |
141 | } | ||
142 | |||
143 | void unlock_ipi_call_lock(void) | ||
144 | { | ||
145 | spin_unlock_irq(&call_lock); | ||
146 | } | ||
147 | |||
148 | static struct call_data_struct *call_data; | ||
149 | |||
150 | static void __smp_call_function(void (*func) (void *info), void *info, | ||
151 | int nonatomic, int wait) | ||
152 | { | ||
153 | struct call_data_struct data; | ||
154 | int cpus = num_online_cpus() - 1; | ||
155 | |||
156 | if (!cpus) | ||
157 | return; | ||
158 | |||
159 | data.func = func; | ||
160 | data.info = info; | ||
161 | atomic_set(&data.started, 0); | ||
162 | data.wait = wait; | ||
163 | if (wait) | ||
164 | atomic_set(&data.finished, 0); | ||
165 | |||
166 | call_data = &data; | ||
167 | mb(); | ||
168 | |||
169 | /* Send a message to all other CPUs and wait for them to respond */ | ||
170 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | ||
171 | |||
172 | /* Wait for response */ | ||
173 | while (atomic_read(&data.started) != cpus) | ||
174 | cpu_relax(); | ||
175 | |||
176 | if (wait) | ||
177 | while (atomic_read(&data.finished) != cpus) | ||
178 | cpu_relax(); | ||
179 | } | 127 | } |
180 | 128 | ||
181 | 129 | void native_send_call_func_ipi(cpumask_t mask) | |
182 | /** | ||
183 | * smp_call_function_mask(): Run a function on a set of other CPUs. | ||
184 | * @mask: The set of cpus to run on. Must not include the current cpu. | ||
185 | * @func: The function to run. This must be fast and non-blocking. | ||
186 | * @info: An arbitrary pointer to pass to the function. | ||
187 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
188 | * | ||
189 | * Returns 0 on success, else a negative status code. | ||
190 | * | ||
191 | * If @wait is true, then returns once @func has returned; otherwise | ||
192 | * it returns just before the target cpu calls @func. | ||
193 | * | ||
194 | * You must not call this function with disabled interrupts or from a | ||
195 | * hardware interrupt handler or from a bottom half handler. | ||
196 | */ | ||
197 | static int | ||
198 | native_smp_call_function_mask(cpumask_t mask, | ||
199 | void (*func)(void *), void *info, | ||
200 | int wait) | ||
201 | { | 130 | { |
202 | struct call_data_struct data; | ||
203 | cpumask_t allbutself; | 131 | cpumask_t allbutself; |
204 | int cpus; | ||
205 | |||
206 | /* Can deadlock when called with interrupts disabled */ | ||
207 | WARN_ON(irqs_disabled()); | ||
208 | |||
209 | /* Holding any lock stops cpus from going down. */ | ||
210 | spin_lock(&call_lock); | ||
211 | 132 | ||
212 | allbutself = cpu_online_map; | 133 | allbutself = cpu_online_map; |
213 | cpu_clear(smp_processor_id(), allbutself); | 134 | cpu_clear(smp_processor_id(), allbutself); |
214 | 135 | ||
215 | cpus_and(mask, mask, allbutself); | ||
216 | cpus = cpus_weight(mask); | ||
217 | |||
218 | if (!cpus) { | ||
219 | spin_unlock(&call_lock); | ||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | data.func = func; | ||
224 | data.info = info; | ||
225 | atomic_set(&data.started, 0); | ||
226 | data.wait = wait; | ||
227 | if (wait) | ||
228 | atomic_set(&data.finished, 0); | ||
229 | |||
230 | call_data = &data; | ||
231 | wmb(); | ||
232 | |||
233 | /* Send a message to other CPUs */ | ||
234 | if (cpus_equal(mask, allbutself) && | 136 | if (cpus_equal(mask, allbutself) && |
235 | cpus_equal(cpu_online_map, cpu_callout_map)) | 137 | cpus_equal(cpu_online_map, cpu_callout_map)) |
236 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | 138 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); |
237 | else | 139 | else |
238 | send_IPI_mask(mask, CALL_FUNCTION_VECTOR); | 140 | send_IPI_mask(mask, CALL_FUNCTION_VECTOR); |
239 | |||
240 | /* Wait for response */ | ||
241 | while (atomic_read(&data.started) != cpus) | ||
242 | cpu_relax(); | ||
243 | |||
244 | if (wait) | ||
245 | while (atomic_read(&data.finished) != cpus) | ||
246 | cpu_relax(); | ||
247 | spin_unlock(&call_lock); | ||
248 | |||
249 | return 0; | ||
250 | } | 141 | } |
251 | 142 | ||
252 | static void stop_this_cpu(void *dummy) | 143 | static void stop_this_cpu(void *dummy) |
@@ -268,18 +159,13 @@ static void stop_this_cpu(void *dummy) | |||
268 | 159 | ||
269 | static void native_smp_send_stop(void) | 160 | static void native_smp_send_stop(void) |
270 | { | 161 | { |
271 | int nolock; | ||
272 | unsigned long flags; | 162 | unsigned long flags; |
273 | 163 | ||
274 | if (reboot_force) | 164 | if (reboot_force) |
275 | return; | 165 | return; |
276 | 166 | ||
277 | /* Don't deadlock on the call lock in panic */ | 167 | smp_call_function(stop_this_cpu, NULL, 0); |
278 | nolock = !spin_trylock(&call_lock); | ||
279 | local_irq_save(flags); | 168 | local_irq_save(flags); |
280 | __smp_call_function(stop_this_cpu, NULL, 0, 0); | ||
281 | if (!nolock) | ||
282 | spin_unlock(&call_lock); | ||
283 | disable_local_APIC(); | 169 | disable_local_APIC(); |
284 | local_irq_restore(flags); | 170 | local_irq_restore(flags); |
285 | } | 171 | } |
@@ -301,33 +187,28 @@ void smp_reschedule_interrupt(struct pt_regs *regs) | |||
301 | 187 | ||
302 | void smp_call_function_interrupt(struct pt_regs *regs) | 188 | void smp_call_function_interrupt(struct pt_regs *regs) |
303 | { | 189 | { |
304 | void (*func) (void *info) = call_data->func; | ||
305 | void *info = call_data->info; | ||
306 | int wait = call_data->wait; | ||
307 | |||
308 | ack_APIC_irq(); | 190 | ack_APIC_irq(); |
309 | /* | ||
310 | * Notify initiating CPU that I've grabbed the data and am | ||
311 | * about to execute the function | ||
312 | */ | ||
313 | mb(); | ||
314 | atomic_inc(&call_data->started); | ||
315 | /* | ||
316 | * At this point the info structure may be out of scope unless wait==1 | ||
317 | */ | ||
318 | irq_enter(); | 191 | irq_enter(); |
319 | (*func)(info); | 192 | generic_smp_call_function_interrupt(); |
320 | #ifdef CONFIG_X86_32 | 193 | #ifdef CONFIG_X86_32 |
321 | __get_cpu_var(irq_stat).irq_call_count++; | 194 | __get_cpu_var(irq_stat).irq_call_count++; |
322 | #else | 195 | #else |
323 | add_pda(irq_call_count, 1); | 196 | add_pda(irq_call_count, 1); |
324 | #endif | 197 | #endif |
325 | irq_exit(); | 198 | irq_exit(); |
199 | } | ||
326 | 200 | ||
327 | if (wait) { | 201 | void smp_call_function_single_interrupt(struct pt_regs *regs) |
328 | mb(); | 202 | { |
329 | atomic_inc(&call_data->finished); | 203 | ack_APIC_irq(); |
330 | } | 204 | irq_enter(); |
205 | generic_smp_call_function_single_interrupt(); | ||
206 | #ifdef CONFIG_X86_32 | ||
207 | __get_cpu_var(irq_stat).irq_call_count++; | ||
208 | #else | ||
209 | add_pda(irq_call_count, 1); | ||
210 | #endif | ||
211 | irq_exit(); | ||
331 | } | 212 | } |
332 | 213 | ||
333 | struct smp_ops smp_ops = { | 214 | struct smp_ops smp_ops = { |
@@ -338,7 +219,8 @@ struct smp_ops smp_ops = { | |||
338 | 219 | ||
339 | .smp_send_stop = native_smp_send_stop, | 220 | .smp_send_stop = native_smp_send_stop, |
340 | .smp_send_reschedule = native_smp_send_reschedule, | 221 | .smp_send_reschedule = native_smp_send_reschedule, |
341 | .smp_call_function_mask = native_smp_call_function_mask, | 222 | |
223 | .send_call_func_ipi = native_send_call_func_ipi, | ||
224 | .send_call_func_single_ipi = native_send_call_func_single_ipi, | ||
342 | }; | 225 | }; |
343 | EXPORT_SYMBOL_GPL(smp_ops); | 226 | EXPORT_SYMBOL_GPL(smp_ops); |
344 | |||
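Note: this hunk is the core of the conversion: the private call_data/call_lock machinery is deleted and x86 now supplies only the two IPI senders; queueing, waiting, and invoking the function all live in the generic kernel/smp.c code, reached through generic_smp_call_function_interrupt() and its single-call variant. From a caller's point of view the result is the simpler four-argument single-CPU API; a small sketch (names are illustrative):

#include <linux/smp.h>
#include <linux/kernel.h>

static void report_cpu(void *info)
{
	printk(KERN_INFO "running on CPU %d\n", smp_processor_id());
}

static void greet_cpu(int cpu)
{
	/* generic API after this merge: (cpu, func, info, wait) */
	smp_call_function_single(cpu, report_cpu, NULL, 1);
}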
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 6cd002f3e20e..23c3b3d1f4cc 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -328,12 +328,12 @@ static void __cpuinit start_secondary(void *unused) | |||
328 | * lock helps us to not include this cpu in a currently in progress | 328 | * lock helps us to not include this cpu in a currently in progress |
329 | * smp_call_function(). | 329 | * smp_call_function(). |
330 | */ | 330 | */ |
331 | lock_ipi_call_lock(); | 331 | ipi_call_lock_irq(); |
332 | #ifdef CONFIG_X86_IO_APIC | 332 | #ifdef CONFIG_X86_IO_APIC |
333 | setup_vector_irq(smp_processor_id()); | 333 | setup_vector_irq(smp_processor_id()); |
334 | #endif | 334 | #endif |
335 | cpu_set(smp_processor_id(), cpu_online_map); | 335 | cpu_set(smp_processor_id(), cpu_online_map); |
336 | unlock_ipi_call_lock(); | 336 | ipi_call_unlock_irq(); |
337 | per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; | 337 | per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; |
338 | 338 | ||
339 | setup_secondary_clock(); | 339 | setup_secondary_clock(); |
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c index 3449064d141a..99941b37eca0 100644 --- a/arch/x86/kernel/smpcommon.c +++ b/arch/x86/kernel/smpcommon.c | |||
@@ -25,59 +25,3 @@ __cpuinit void init_gdt(int cpu) | |||
25 | per_cpu(cpu_number, cpu) = cpu; | 25 | per_cpu(cpu_number, cpu) = cpu; |
26 | } | 26 | } |
27 | #endif | 27 | #endif |
28 | |||
29 | /** | ||
30 | * smp_call_function(): Run a function on all other CPUs. | ||
31 | * @func: The function to run. This must be fast and non-blocking. | ||
32 | * @info: An arbitrary pointer to pass to the function. | ||
33 | * @nonatomic: Unused. | ||
34 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
35 | * | ||
36 | * Returns 0 on success, else a negative status code. | ||
37 | * | ||
38 | * If @wait is true, then returns once @func has returned; otherwise | ||
39 | * it returns just before the target cpu calls @func. | ||
40 | * | ||
41 | * You must not call this function with disabled interrupts or from a | ||
42 | * hardware interrupt handler or from a bottom half handler. | ||
43 | */ | ||
44 | int smp_call_function(void (*func) (void *info), void *info, int nonatomic, | ||
45 | int wait) | ||
46 | { | ||
47 | return smp_call_function_mask(cpu_online_map, func, info, wait); | ||
48 | } | ||
49 | EXPORT_SYMBOL(smp_call_function); | ||
50 | |||
51 | /** | ||
52 | * smp_call_function_single - Run a function on a specific CPU | ||
53 | * @cpu: The target CPU. Cannot be the calling CPU. | ||
54 | * @func: The function to run. This must be fast and non-blocking. | ||
55 | * @info: An arbitrary pointer to pass to the function. | ||
56 | * @nonatomic: Unused. | ||
57 | * @wait: If true, wait until function has completed on other CPUs. | ||
58 | * | ||
59 | * Returns 0 on success, else a negative status code. | ||
60 | * | ||
61 | * If @wait is true, then returns once @func has returned; otherwise | ||
62 | * it returns just before the target cpu calls @func. | ||
63 | */ | ||
64 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
65 | int nonatomic, int wait) | ||
66 | { | ||
67 | /* prevent preemption and reschedule on another processor */ | ||
68 | int ret; | ||
69 | int me = get_cpu(); | ||
70 | if (cpu == me) { | ||
71 | local_irq_disable(); | ||
72 | func(info); | ||
73 | local_irq_enable(); | ||
74 | put_cpu(); | ||
75 | return 0; | ||
76 | } | ||
77 | |||
78 | ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); | ||
79 | |||
80 | put_cpu(); | ||
81 | return ret; | ||
82 | } | ||
83 | EXPORT_SYMBOL(smp_call_function_single); | ||
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index c28c342c162f..a03e7f6d90c3 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c | |||
@@ -74,6 +74,7 @@ void save_stack_trace(struct stack_trace *trace) | |||
74 | if (trace->nr_entries < trace->max_entries) | 74 | if (trace->nr_entries < trace->max_entries) |
75 | trace->entries[trace->nr_entries++] = ULONG_MAX; | 75 | trace->entries[trace->nr_entries++] = ULONG_MAX; |
76 | } | 76 | } |
77 | EXPORT_SYMBOL_GPL(save_stack_trace); | ||
77 | 78 | ||
78 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | 79 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) |
79 | { | 80 | { |
@@ -81,3 +82,4 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | |||
81 | if (trace->nr_entries < trace->max_entries) | 82 | if (trace->nr_entries < trace->max_entries) |
82 | trace->entries[trace->nr_entries++] = ULONG_MAX; | 83 | trace->entries[trace->nr_entries++] = ULONG_MAX; |
83 | } | 84 | } |
85 | EXPORT_SYMBOL_GPL(save_stack_trace_tsk); | ||
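Note: exporting save_stack_trace()/save_stack_trace_tsk() makes the stack-capture API usable from modules (the ftrace tracers are the immediate consumers). A minimal module-side sketch (module name and depth are illustrative):

#include <linux/module.h>
#include <linux/stacktrace.h>
#include <linux/kernel.h>

#define DEMO_DEPTH 16

static int __init stacktrace_demo_init(void)
{
	unsigned long entries[DEMO_DEPTH];
	struct stack_trace trace = {
		.max_entries	= DEMO_DEPTH,
		.entries	= entries,
	};
	int i;

	save_stack_trace(&trace);	/* now callable from a module */
	for (i = 0; i < trace.nr_entries; i++)
		printk(KERN_DEBUG "  [<%016lx>]\n", entries[i]);
	return 0;
}

static void __exit stacktrace_demo_exit(void)
{
}

module_init(stacktrace_demo_init);
module_exit(stacktrace_demo_exit);
MODULE_LICENSE("GPL");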
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c index 9bb2363851af..fec1ecedc9b7 100644 --- a/arch/x86/kernel/tlb_32.c +++ b/arch/x86/kernel/tlb_32.c | |||
@@ -238,6 +238,6 @@ static void do_flush_tlb_all(void *info) | |||
238 | 238 | ||
239 | void flush_tlb_all(void) | 239 | void flush_tlb_all(void) |
240 | { | 240 | { |
241 | on_each_cpu(do_flush_tlb_all, NULL, 1, 1); | 241 | on_each_cpu(do_flush_tlb_all, NULL, 1); |
242 | } | 242 | } |
243 | 243 | ||
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c index 5039d0f097a2..dcbf7a1159ea 100644 --- a/arch/x86/kernel/tlb_64.c +++ b/arch/x86/kernel/tlb_64.c | |||
@@ -275,5 +275,5 @@ static void do_flush_tlb_all(void *info) | |||
275 | 275 | ||
276 | void flush_tlb_all(void) | 276 | void flush_tlb_all(void) |
277 | { | 277 | { |
278 | on_each_cpu(do_flush_tlb_all, NULL, 1, 1); | 278 | on_each_cpu(do_flush_tlb_all, NULL, 1); |
279 | } | 279 | } |
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c index 74e992957ff6..2696a6837782 100644 --- a/arch/x86/kernel/traps_64.c +++ b/arch/x86/kernel/traps_64.c | |||
@@ -105,30 +105,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs) | |||
105 | 105 | ||
106 | void printk_address(unsigned long address, int reliable) | 106 | void printk_address(unsigned long address, int reliable) |
107 | { | 107 | { |
108 | #ifdef CONFIG_KALLSYMS | 108 | printk(" [<%016lx>] %s%pS\n", address, reliable ? "": "? ", (void *) address); |
109 | unsigned long offset = 0, symsize; | ||
110 | const char *symname; | ||
111 | char *modname; | ||
112 | char *delim = ":"; | ||
113 | char namebuf[KSYM_NAME_LEN]; | ||
114 | char reliab[4] = ""; | ||
115 | |||
116 | symname = kallsyms_lookup(address, &symsize, &offset, | ||
117 | &modname, namebuf); | ||
118 | if (!symname) { | ||
119 | printk(" [<%016lx>]\n", address); | ||
120 | return; | ||
121 | } | ||
122 | if (!reliable) | ||
123 | strcpy(reliab, "? "); | ||
124 | |||
125 | if (!modname) | ||
126 | modname = delim = ""; | ||
127 | printk(" [<%016lx>] %s%s%s%s%s+0x%lx/0x%lx\n", | ||
128 | address, reliab, delim, modname, delim, symname, offset, symsize); | ||
129 | #else | ||
130 | printk(" [<%016lx>]\n", address); | ||
131 | #endif | ||
132 | } | 109 | } |
133 | 110 | ||
134 | static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, | 111 | static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, |
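Note: the hand-rolled kallsyms lookup in printk_address() collapses to a single printk() because vsprintf now understands the %pS pointer extension, which resolves a text address to symbol+offset/size. A tiny sketch of using it (the helper is hypothetical):

#include <linux/kernel.h>

static void show_caller(void *ret_addr)
{
	/* %pS resolves the address via kallsyms, e.g. "foo+0x1a/0x40" */
	printk(KERN_DEBUG "called from %pS\n", ret_addr);
}

Usage would be show_caller(__builtin_return_address(0)) from the function of interest.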
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 3c36f92160c9..7603c0553909 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -358,6 +358,7 @@ static cycle_t read_tsc(void) | |||
358 | ret : clocksource_tsc.cycle_last; | 358 | ret : clocksource_tsc.cycle_last; |
359 | } | 359 | } |
360 | 360 | ||
361 | #ifdef CONFIG_X86_64 | ||
361 | static cycle_t __vsyscall_fn vread_tsc(void) | 362 | static cycle_t __vsyscall_fn vread_tsc(void) |
362 | { | 363 | { |
363 | cycle_t ret = (cycle_t)vget_cycles(); | 364 | cycle_t ret = (cycle_t)vget_cycles(); |
@@ -365,6 +366,7 @@ static cycle_t __vsyscall_fn vread_tsc(void) | |||
365 | return ret >= __vsyscall_gtod_data.clock.cycle_last ? | 366 | return ret >= __vsyscall_gtod_data.clock.cycle_last ? |
366 | ret : __vsyscall_gtod_data.clock.cycle_last; | 367 | ret : __vsyscall_gtod_data.clock.cycle_last; |
367 | } | 368 | } |
369 | #endif | ||
368 | 370 | ||
369 | static struct clocksource clocksource_tsc = { | 371 | static struct clocksource clocksource_tsc = { |
370 | .name = "tsc", | 372 | .name = "tsc", |
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S index 2674f5796275..cdb2363697d2 100644 --- a/arch/x86/kernel/vmlinux_32.lds.S +++ b/arch/x86/kernel/vmlinux_32.lds.S | |||
@@ -49,16 +49,14 @@ SECTIONS | |||
49 | _etext = .; /* End of text section */ | 49 | _etext = .; /* End of text section */ |
50 | } :text = 0x9090 | 50 | } :text = 0x9090 |
51 | 51 | ||
52 | NOTES :text :note | ||
53 | |||
52 | . = ALIGN(16); /* Exception table */ | 54 | . = ALIGN(16); /* Exception table */ |
53 | __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { | 55 | __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { |
54 | __start___ex_table = .; | 56 | __start___ex_table = .; |
55 | *(__ex_table) | 57 | *(__ex_table) |
56 | __stop___ex_table = .; | 58 | __stop___ex_table = .; |
57 | } | 59 | } :text = 0x9090 |
58 | |||
59 | NOTES :text :note | ||
60 | |||
61 | BUG_TABLE :text | ||
62 | 60 | ||
63 | RODATA | 61 | RODATA |
64 | 62 | ||
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S index fd246e22fe6b..63e5c1a22e88 100644 --- a/arch/x86/kernel/vmlinux_64.lds.S +++ b/arch/x86/kernel/vmlinux_64.lds.S | |||
@@ -19,7 +19,7 @@ PHDRS { | |||
19 | data PT_LOAD FLAGS(7); /* RWE */ | 19 | data PT_LOAD FLAGS(7); /* RWE */ |
20 | user PT_LOAD FLAGS(7); /* RWE */ | 20 | user PT_LOAD FLAGS(7); /* RWE */ |
21 | data.init PT_LOAD FLAGS(7); /* RWE */ | 21 | data.init PT_LOAD FLAGS(7); /* RWE */ |
22 | note PT_NOTE FLAGS(4); /* R__ */ | 22 | note PT_NOTE FLAGS(0); /* ___ */ |
23 | } | 23 | } |
24 | SECTIONS | 24 | SECTIONS |
25 | { | 25 | { |
@@ -40,16 +40,14 @@ SECTIONS | |||
40 | _etext = .; /* End of text section */ | 40 | _etext = .; /* End of text section */ |
41 | } :text = 0x9090 | 41 | } :text = 0x9090 |
42 | 42 | ||
43 | NOTES :text :note | ||
44 | |||
43 | . = ALIGN(16); /* Exception table */ | 45 | . = ALIGN(16); /* Exception table */ |
44 | __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { | 46 | __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { |
45 | __start___ex_table = .; | 47 | __start___ex_table = .; |
46 | *(__ex_table) | 48 | *(__ex_table) |
47 | __stop___ex_table = .; | 49 | __stop___ex_table = .; |
48 | } | 50 | } :text = 0x9090 |
49 | |||
50 | NOTES :text :note | ||
51 | |||
52 | BUG_TABLE :text | ||
53 | 51 | ||
54 | RODATA | 52 | RODATA |
55 | 53 | ||
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index c87cbd84c3e5..0b8b6690a86d 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -42,7 +42,8 @@ | |||
42 | #include <asm/topology.h> | 42 | #include <asm/topology.h> |
43 | #include <asm/vgtod.h> | 43 | #include <asm/vgtod.h> |
44 | 44 | ||
45 | #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr))) | 45 | #define __vsyscall(nr) \ |
46 | __attribute__ ((unused, __section__(".vsyscall_" #nr))) notrace | ||
46 | #define __syscall_clobber "r11","cx","memory" | 47 | #define __syscall_clobber "r11","cx","memory" |
47 | 48 | ||
48 | /* | 49 | /* |
@@ -278,7 +279,7 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) | |||
278 | { | 279 | { |
279 | long cpu = (long)arg; | 280 | long cpu = (long)arg; |
280 | if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) | 281 | if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) |
281 | smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1); | 282 | smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1); |
282 | return NOTIFY_DONE; | 283 | return NOTIFY_DONE; |
283 | } | 284 | } |
284 | 285 | ||
@@ -301,7 +302,7 @@ static int __init vsyscall_init(void) | |||
301 | #ifdef CONFIG_SYSCTL | 302 | #ifdef CONFIG_SYSCTL |
302 | register_sysctl_table(kernel_root_table2); | 303 | register_sysctl_table(kernel_root_table2); |
303 | #endif | 304 | #endif |
304 | on_each_cpu(cpu_vsyscall_init, NULL, 0, 1); | 305 | on_each_cpu(cpu_vsyscall_init, NULL, 1); |
305 | hotcpu_notifier(cpu_vsyscall_notifier, 0); | 306 | hotcpu_notifier(cpu_vsyscall_notifier, 0); |
306 | return 0; | 307 | return 0; |
307 | } | 308 | } |
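Note: adding notrace to the __vsyscall section attribute keeps gcc's -pg instrumentation out of vsyscall text, which runs in user context where a call into the kernel's mcount would fault. The same annotation works for any function that must never be traced; a sketch (x86-64, illustrative):

#include <linux/linkage.h>	/* defines notrace */

static notrace unsigned long rdtsc_example(void)
{
	unsigned long lo, hi;

	/* read the TSC without ever entering mcount */
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return lo | (hi << 32);
}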
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c index 2f306a826897..b545f371b5f5 100644 --- a/arch/x86/kernel/x8664_ksyms_64.c +++ b/arch/x86/kernel/x8664_ksyms_64.c | |||
@@ -2,13 +2,20 @@ | |||
2 | All C exports should go in the respective C files. */ | 2 | All C exports should go in the respective C files. */ |
3 | 3 | ||
4 | #include <linux/module.h> | 4 | #include <linux/module.h> |
5 | #include <net/checksum.h> | ||
6 | #include <linux/smp.h> | 5 | #include <linux/smp.h> |
7 | 6 | ||
7 | #include <net/checksum.h> | ||
8 | |||
8 | #include <asm/processor.h> | 9 | #include <asm/processor.h> |
9 | #include <asm/uaccess.h> | ||
10 | #include <asm/pgtable.h> | 10 | #include <asm/pgtable.h> |
11 | #include <asm/uaccess.h> | ||
11 | #include <asm/desc.h> | 12 | #include <asm/desc.h> |
13 | #include <asm/ftrace.h> | ||
14 | |||
15 | #ifdef CONFIG_FTRACE | ||
16 | /* mcount is defined in assembly */ | ||
17 | EXPORT_SYMBOL(mcount); | ||
18 | #endif | ||
12 | 19 | ||
13 | EXPORT_SYMBOL(kernel_thread); | 20 | EXPORT_SYMBOL(kernel_thread); |
14 | 21 | ||