Diffstat (limited to 'arch/x86/kernel')
32 files changed, 624 insertions, 450 deletions
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index c611ad64137f..145cce75cda7 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -66,7 +66,8 @@ obj-$(CONFIG_X86_MPPARSE) += mpparse.o
 obj-y += apic/
 obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
 obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 4c80f1557433..f57658702571 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -5,6 +5,7 @@
 #include <linux/kprobes.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
+#include <linux/memory.h>
 #include <asm/alternative.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
@@ -12,7 +13,9 @@
 #include <asm/nmi.h>
 #include <asm/vsyscall.h>
 #include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
 #include <asm/io.h>
+#include <asm/fixmap.h>
 
 #define MAX_PATCH_LEN (255-1)
 
@@ -226,6 +229,7 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 {
	u8 **ptr;
 
+	mutex_lock(&text_mutex);
	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
@@ -234,6 +238,7 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
		/* turn DS segment override prefix into lock prefix */
		text_poke(*ptr, ((unsigned char []){0xf0}), 1);
	};
+	mutex_unlock(&text_mutex);
 }
 
 static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
@@ -243,6 +248,7 @@ static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
	if (noreplace_smp)
		return;
 
+	mutex_lock(&text_mutex);
	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
@@ -251,6 +257,7 @@ static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
		/* turn lock prefix into DS segment override prefix */
		text_poke(*ptr, ((unsigned char []){0x3E}), 1);
	};
+	mutex_unlock(&text_mutex);
 }
 
 struct smp_alt_module {
@@ -500,15 +507,16 @@ void *text_poke_early(void *addr, const void *opcode, size_t len)
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
+ *
+ * Note: Must be called under text_mutex.
 */
 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
 {
+	unsigned long flags;
	char *vaddr;
-	int nr_pages = 2;
	struct page *pages[2];
	int i;
 
-	might_sleep();
	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
@@ -518,18 +526,21 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
-	if (!pages[1])
-		nr_pages = 1;
-	vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
-	BUG_ON(!vaddr);
-	local_irq_disable();
+	local_irq_save(flags);
+	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
+	if (pages[1])
+		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
+	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
-	local_irq_enable();
-	vunmap(vaddr);
+	clear_fixmap(FIX_TEXT_POKE0);
+	if (pages[1])
+		clear_fixmap(FIX_TEXT_POKE1);
+	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
+	local_irq_restore(flags);
	return addr;
 }
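
The alternative.c changes above replace the sleeping vmap()-based text_poke() with one that maps the target page(s) at the FIX_TEXT_POKE0/1 fixmap slots with interrupts off, and move serialisation to the callers via text_mutex. A minimal sketch of the resulting caller contract (hypothetical patch site, not part of this commit):

#include <linux/memory.h>	/* text_mutex */
#include <asm/alternative.h>	/* text_poke() */

/* Overwrite one instruction byte with a NOP (0x90). text_poke() itself
 * no longer sleeps; the only remaining requirement is that the caller
 * holds text_mutex to serialise against other code patchers. */
static void patch_one_nop(void *ip)
{
	static const unsigned char nop = 0x90;

	mutex_lock(&text_mutex);
	text_poke(ip, &nop, 1);
	mutex_unlock(&text_mutex);
}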
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index d806ecaa948f..676cdac385c0 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -26,12 +26,12 @@ static int bigsmp_apic_id_registered(void)
	return 1;
 }
 
-static const cpumask_t *bigsmp_target_cpus(void)
+static const struct cpumask *bigsmp_target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return &cpu_online_map;
+	return cpu_online_mask;
 #else
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 #endif
 }
 
@@ -118,9 +118,9 @@ static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
 }
 
 /* As we are using single CPU as destination, pick only one CPU here */
-static unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-	return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
+	return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask));
 }
 
 static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -188,10 +188,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
	{ } /* NULL entry stops DMI scanning */
 };
 
-static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpus_clear(*retmask);
-	cpu_set(cpu, *retmask);
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
 
 static int probe_bigsmp(void)
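
This file follows the tree-wide cpumask conversion: value-based cpumask_t operators, which copy an NR_CPUS-bit mask on the stack, give way to pointer-based struct cpumask operators. The idiom, sketched with a hypothetical helper:

#include <linux/cpumask.h>

/* Build a mask containing exactly one CPU, new-style. */
static void one_cpu_mask(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);		/* was: cpus_clear(*retmask); */
	cpumask_set_cpu(cpu, retmask);	/* was: cpu_set(cpu, *retmask); */
}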
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 19588f2770ee..1c11b819f245 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -410,7 +410,7 @@ static void es7000_enable_apic_mode(void)
		WARN(1, "Command failed, status = %x\n", mip_status);
 }
 
-static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void es7000_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
	/* Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
@@ -420,7 +420,8 @@ static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt desitination.
	 */
-	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
 }
 
 
@@ -455,14 +456,14 @@ static int es7000_apic_id_registered(void)
	return 1;
 }
 
-static const cpumask_t *target_cpus_cluster(void)
+static const struct cpumask *target_cpus_cluster(void)
 {
-	return &CPU_MASK_ALL;
+	return cpu_all_mask;
 }
 
-static const cpumask_t *es7000_target_cpus(void)
+static const struct cpumask *es7000_target_cpus(void)
 {
-	return &cpumask_of_cpu(smp_processor_id());
+	return cpumask_of(smp_processor_id());
 }
 
 static unsigned long
@@ -517,7 +518,7 @@ static void es7000_setup_apic_routing(void)
		"Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
		(apic_version[apic] == 0x14) ?
		"Physical Cluster" : "Logical Cluster",
-		nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
+		nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
 }
 
 static int es7000_apicid_to_node(int logical_apicid)
@@ -572,7 +573,7 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
	return 1;
 }
 
-static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
	unsigned int round = 0;
	int cpu, uninitialized_var(apicid);
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index bdfad80c3cf1..d6bd62407152 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -39,7 +39,7 @@
 int unknown_nmi_panic;
 int nmi_watchdog_enabled;
 
-static cpumask_t backtrace_mask = CPU_MASK_NONE;
+static cpumask_var_t backtrace_mask;
 
 /* nmi_active:
  * >0: the lapic NMI watchdog is active, but can be disabled
@@ -138,6 +138,7 @@ int __init check_nmi_watchdog(void)
	if (!prev_nmi_count)
		goto error;
 
+	alloc_cpumask_var(&backtrace_mask, GFP_KERNEL);
	printk(KERN_INFO "Testing NMI watchdog ... ");
 
 #ifdef CONFIG_SMP
@@ -413,14 +414,14 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
		touched = 1;
	}
 
-	if (cpu_isset(cpu, backtrace_mask)) {
+	if (cpumask_test_cpu(cpu, backtrace_mask)) {
		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */
 
		spin_lock(&lock);
		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
		dump_stack();
		spin_unlock(&lock);
-		cpu_clear(cpu, backtrace_mask);
+		cpumask_clear_cpu(cpu, backtrace_mask);
	}
 
	/* Could check oops_in_progress here too, but it's safer not to */
@@ -554,10 +555,10 @@ void __trigger_all_cpu_backtrace(void)
 {
	int i;
 
-	backtrace_mask = cpu_online_map;
+	cpumask_copy(backtrace_mask, cpu_online_mask);
	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
-		if (cpus_empty(backtrace_mask))
+		if (cpumask_empty(backtrace_mask))
			break;
		mdelay(1);
	}
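
backtrace_mask becomes a cpumask_var_t, which is a pointer to a separately allocated bitmap when CONFIG_CPUMASK_OFFSTACK=y and degenerates to an embedded cpumask (with a no-op allocator that cannot fail) otherwise; hence the unchecked alloc_cpumask_var() in check_nmi_watchdog(). The general pattern, as a sketch with hypothetical names:

#include <linux/cpumask.h>
#include <linux/gfp.h>		/* GFP_KERNEL */

static cpumask_var_t my_mask;

static int __init my_mask_init(void)
{
	/* Can only fail when CONFIG_CPUMASK_OFFSTACK=y. */
	if (!alloc_cpumask_var(&my_mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(my_mask, cpu_online_mask);
	return 0;
}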
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index ba2fc6465534..533e59c6fc82 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -334,9 +334,9 @@ static inline void numaq_smp_callin_clear_local_apic(void)
	clear_local_APIC();
 }
 
-static inline const cpumask_t *numaq_target_cpus(void)
+static inline const struct cpumask *numaq_target_cpus(void)
 {
-	return &CPU_MASK_ALL;
+	return cpu_all_mask;
 }
 
 static inline unsigned long
@@ -427,7 +427,7 @@ static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid)
 * We use physical apicids here, not logical, so just return the default
 * physical broadcast to stop people from breaking us
 */
-static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int numaq_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
	return 0x0F;
 }
@@ -462,7 +462,7 @@ static int probe_numaq(void)
	return found_numaq;
 }
 
-static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void numaq_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
	/* Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
@@ -472,7 +472,8 @@ static void numaq_vector_allocation_domain(int cpu, struct cpumask *retmask)
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt desitination.
	 */
-	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
 }
 
 static void numaq_setup_portio_remap(void)
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 141c99a1c264..01eda2ac65e4 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -83,7 +83,8 @@ static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt desitination.
	 */
-	*retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
 }
 
 /* should be called last. */
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index aac52fa873ff..9cfe1f415d81 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -53,23 +53,19 @@ static unsigned summit_get_apic_id(unsigned long x)
	return (x >> 24) & 0xFF;
 }
 
-static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector)
+static inline void summit_send_IPI_mask(const struct cpumask *mask, int vector)
 {
	default_send_IPI_mask_sequence_logical(mask, vector);
 }
 
 static void summit_send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-
-	if (!cpus_empty(mask))
-		summit_send_IPI_mask(&mask, vector);
+	default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector);
 }
 
 static void summit_send_IPI_all(int vector)
 {
-	summit_send_IPI_mask(&cpu_online_map, vector);
+	summit_send_IPI_mask(cpu_online_mask, vector);
 }
 
 #include <asm/tsc.h>
@@ -186,13 +182,13 @@ static inline int is_WPEG(struct rio_detail *rio){
 
 #define SUMMIT_APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
 
-static const cpumask_t *summit_target_cpus(void)
+static const struct cpumask *summit_target_cpus(void)
 {
	/* CPU_MASK_ALL (0xff) has undefined behaviour with
	 * dest_LowestPrio mode logical clustered apic interrupt routing
	 * Just start on cpu 0.  IRQ balancing will spread load
	 */
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 }
 
 static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid)
@@ -289,7 +285,7 @@ static int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
	return 1;
 }
 
-static unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
	unsigned int round = 0;
	int cpu, apicid = 0;
@@ -346,7 +342,7 @@ static int probe_summit(void)
	return 0;
 }
 
-static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void summit_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
	/* Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
@@ -356,7 +352,8 @@ static void summit_vector_allocation_domain(int cpu, struct cpumask *retmask)
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt desitination.
	 */
-	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
 }
 
 #ifdef CONFIG_X86_SUMMIT_NUMA
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index ac7783a67432..49e0939bac42 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -466,7 +466,7 @@ static const lookup_t error_table[] = {
 *	@err: APM BIOS return code
 *
 *	Write a meaningful log entry to the kernel log in the event of
- *	an APM error.
+ *	an APM error.  Note that this also handles (negative) kernel errors.
 */
 
 static void apm_error(char *str, int err)
@@ -478,43 +478,14 @@ static void apm_error(char *str, int err)
			break;
	if (i < ERROR_COUNT)
		printk(KERN_NOTICE "apm: %s: %s\n", str, error_table[i].msg);
+	else if (err < 0)
+		printk(KERN_NOTICE "apm: %s: linux error code %i\n", str, err);
	else
		printk(KERN_NOTICE "apm: %s: unknown error code %#2.2x\n",
		       str, err);
 }
 
 /*
- * Lock APM functionality to physical CPU 0
- */
-
-#ifdef CONFIG_SMP
-
-static cpumask_t apm_save_cpus(void)
-{
-	cpumask_t x = current->cpus_allowed;
-	/* Some bioses don't like being called from CPU != 0 */
-	set_cpus_allowed(current, cpumask_of_cpu(0));
-	BUG_ON(smp_processor_id() != 0);
-	return x;
-}
-
-static inline void apm_restore_cpus(cpumask_t mask)
-{
-	set_cpus_allowed(current, mask);
-}
-
-#else
-
-/*
- * No CPU lockdown needed on a uniprocessor
- */
-
-#define apm_save_cpus()		(current->cpus_allowed)
-#define apm_restore_cpus(x)	(void)(x)
-
-#endif
-
-/*
  * These are the actual BIOS calls. Depending on APM_ZERO_SEGS and
  * apm_info.allow_ints, we are being really paranoid here! Not only
  * are interrupts disabled, but all the segment registers (except SS)
@@ -568,16 +539,23 @@ static inline void apm_irq_restore(unsigned long flags)
 # define APM_DO_RESTORE_SEGS
 #endif
 
+struct apm_bios_call {
+	u32 func;
+	/* In and out */
+	u32 ebx;
+	u32 ecx;
+	/* Out only */
+	u32 eax;
+	u32 edx;
+	u32 esi;
+
+	/* Error: -ENOMEM, or bits 8-15 of eax */
+	int err;
+};
+
 /**
- *	apm_bios_call - Make an APM BIOS 32bit call
- *	@func: APM function to execute
- *	@ebx_in: EBX register for call entry
- *	@ecx_in: ECX register for call entry
- *	@eax: EAX register return
- *	@ebx: EBX register return
- *	@ecx: ECX register return
- *	@edx: EDX register return
- *	@esi: ESI register return
+ *	__apm_bios_call - Make an APM BIOS 32bit call
+ *	@_call: pointer to struct apm_bios_call.
 *
 *	Make an APM call using the 32bit protected mode interface. The
 *	caller is responsible for knowing if APM BIOS is configured and
@@ -586,80 +564,142 @@ static inline void apm_irq_restore(unsigned long flags)
 *	flag is loaded into AL.  If there is an error, then the error
 *	code is returned in AH (bits 8-15 of eax) and this function
 *	returns non-zero.
+ *
+ *	Note: this makes the call on the current CPU.
 */
-
-static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
-	u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, u32 *esi)
+static long __apm_bios_call(void *_call)
 {
	APM_DECL_SEGS
	unsigned long flags;
-	cpumask_t cpus;
	int cpu;
	struct desc_struct save_desc_40;
	struct desc_struct *gdt;
-
-	cpus = apm_save_cpus();
+	struct apm_bios_call *call = _call;
 
	cpu = get_cpu();
+	BUG_ON(cpu != 0);
	gdt = get_cpu_gdt_table(cpu);
	save_desc_40 = gdt[0x40 / 8];
	gdt[0x40 / 8] = bad_bios_desc;
 
	apm_irq_save(flags);
	APM_DO_SAVE_SEGS;
-	apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi);
+	apm_bios_call_asm(call->func, call->ebx, call->ecx,
+			  &call->eax, &call->ebx, &call->ecx, &call->edx,
+			  &call->esi);
	APM_DO_RESTORE_SEGS;
	apm_irq_restore(flags);
	gdt[0x40 / 8] = save_desc_40;
	put_cpu();
-	apm_restore_cpus(cpus);
 
-	return *eax & 0xff;
+	return call->eax & 0xff;
+}
+
+/* Run __apm_bios_call or __apm_bios_call_simple on CPU 0 */
+static int on_cpu0(long (*fn)(void *), struct apm_bios_call *call)
+{
+	int ret;
+
+	/* Don't bother with work_on_cpu in the common case, so we don't
+	 * have to worry about OOM or overhead. */
+	if (get_cpu() == 0) {
+		ret = fn(call);
+		put_cpu();
+	} else {
+		put_cpu();
+		ret = work_on_cpu(0, fn, call);
+	}
+
+	/* work_on_cpu can fail with -ENOMEM */
+	if (ret < 0)
+		call->err = ret;
+	else
+		call->err = (call->eax >> 8) & 0xff;
+
+	return ret;
 }
 
 /**
- *	apm_bios_call_simple - make a simple APM BIOS 32bit call
- *	@func: APM function to invoke
- *	@ebx_in: EBX register value for BIOS call
- *	@ecx_in: ECX register value for BIOS call
- *	@eax: EAX register on return from the BIOS call
+ *	apm_bios_call - Make an APM BIOS 32bit call (on CPU 0)
+ *	@call: the apm_bios_call registers.
+ *
+ *	If there is an error, it is returned in @call.err.
+ */
+static int apm_bios_call(struct apm_bios_call *call)
+{
+	return on_cpu0(__apm_bios_call, call);
+}
+
+/**
+ *	__apm_bios_call_simple - Make an APM BIOS 32bit call (on CPU 0)
+ *	@_call: pointer to struct apm_bios_call.
 *
 *	Make a BIOS call that returns one value only, or just status.
 *	If there is an error, then the error code is returned in AH
- *	(bits 8-15 of eax) and this function returns non-zero. This is
- *	used for simpler BIOS operations.  This call may hold interrupts
- *	off for a long time on some laptops.
+ *	(bits 8-15 of eax) and this function returns non-zero (it can
+ *	also return -ENOMEM). This is used for simpler BIOS operations.
+ *	This call may hold interrupts off for a long time on some laptops.
+ *
+ *	Note: this makes the call on the current CPU.
 */
-
-static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
+static long __apm_bios_call_simple(void *_call)
 {
	u8 error;
	APM_DECL_SEGS
	unsigned long flags;
-	cpumask_t cpus;
	int cpu;
	struct desc_struct save_desc_40;
	struct desc_struct *gdt;
-
-	cpus = apm_save_cpus();
+	struct apm_bios_call *call = _call;
 
	cpu = get_cpu();
+	BUG_ON(cpu != 0);
	gdt = get_cpu_gdt_table(cpu);
	save_desc_40 = gdt[0x40 / 8];
	gdt[0x40 / 8] = bad_bios_desc;
 
	apm_irq_save(flags);
	APM_DO_SAVE_SEGS;
-	error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax);
+	error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx,
+					 &call->eax);
	APM_DO_RESTORE_SEGS;
	apm_irq_restore(flags);
	gdt[0x40 / 8] = save_desc_40;
	put_cpu();
-	apm_restore_cpus(cpus);
	return error;
 }
 
 /**
+ *	apm_bios_call_simple - make a simple APM BIOS 32bit call
+ *	@func: APM function to invoke
+ *	@ebx_in: EBX register value for BIOS call
+ *	@ecx_in: ECX register value for BIOS call
+ *	@eax: EAX register on return from the BIOS call
+ *	@err: bits
+ *
+ *	Make a BIOS call that returns one value only, or just status.
+ *	If there is an error, then the error code is returned in @err
+ *	and this function returns non-zero. This is used for simpler
+ *	BIOS operations.  This call may hold interrupts off for a long
+ *	time on some laptops.
+ */
+static int apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax,
+				int *err)
+{
+	struct apm_bios_call call;
+	int ret;
+
+	call.func = func;
+	call.ebx = ebx_in;
+	call.ecx = ecx_in;
+
+	ret = on_cpu0(__apm_bios_call_simple, &call);
+	*eax = call.eax;
+	*err = call.err;
+	return ret;
+}
+
+/**
 *	apm_driver_version - APM driver version
 *	@val: loaded with the APM version on return
 *
@@ -678,9 +718,10 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
 static int apm_driver_version(u_short *val)
 {
	u32 eax;
+	int err;
 
-	if (apm_bios_call_simple(APM_FUNC_VERSION, 0, *val, &eax))
-		return (eax >> 8) & 0xff;
+	if (apm_bios_call_simple(APM_FUNC_VERSION, 0, *val, &eax, &err))
+		return err;
	*val = eax;
	return APM_SUCCESS;
 }
@@ -701,22 +742,21 @@ static int apm_driver_version(u_short *val)
 *	that APM 1.2 is in use.  If no messges are pending the value 0x80
 *	is returned (No power management events pending).
 */
-
 static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info)
 {
-	u32 eax;
-	u32 ebx;
-	u32 ecx;
-	u32 dummy;
+	struct apm_bios_call call;
 
-	if (apm_bios_call(APM_FUNC_GET_EVENT, 0, 0, &eax, &ebx, &ecx,
-			  &dummy, &dummy))
-		return (eax >> 8) & 0xff;
-	*event = ebx;
+	call.func = APM_FUNC_GET_EVENT;
+	call.ebx = call.ecx = 0;
+
+	if (apm_bios_call(&call))
+		return call.err;
+
+	*event = call.ebx;
	if (apm_info.connection_version < 0x0102)
		*info = ~0; /* indicate info not valid */
	else
-		*info = ecx;
+		*info = call.ecx;
	return APM_SUCCESS;
 }
 
@@ -737,9 +777,10 @@ static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info)
 static int set_power_state(u_short what, u_short state)
 {
	u32 eax;
+	int err;
 
-	if (apm_bios_call_simple(APM_FUNC_SET_STATE, what, state, &eax))
-		return (eax >> 8) & 0xff;
+	if (apm_bios_call_simple(APM_FUNC_SET_STATE, what, state, &eax, &err))
+		return err;
	return APM_SUCCESS;
 }
 
@@ -770,6 +811,7 @@ static int apm_do_idle(void)
	u8 ret = 0;
	int idled = 0;
	int polling;
+	int err;
 
	polling = !!(current_thread_info()->status & TS_POLLING);
	if (polling) {
@@ -782,7 +824,7 @@ static int apm_do_idle(void)
	}
	if (!need_resched()) {
		idled = 1;
-		ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax);
+		ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax, &err);
	}
	if (polling)
		current_thread_info()->status |= TS_POLLING;
@@ -797,8 +839,7 @@ static int apm_do_idle(void)
		 * Only report the failure the first 5 times.
		 */
		if (++t < 5) {
-			printk(KERN_DEBUG "apm_do_idle failed (%d)\n",
-			       (eax >> 8) & 0xff);
+			printk(KERN_DEBUG "apm_do_idle failed (%d)\n", err);
			t = jiffies;
		}
		return -1;
@@ -816,9 +857,10 @@ static int apm_do_idle(void)
 static void apm_do_busy(void)
 {
	u32 dummy;
+	int err;
 
	if (clock_slowed || ALWAYS_CALL_BUSY) {
-		(void)apm_bios_call_simple(APM_FUNC_BUSY, 0, 0, &dummy);
+		(void)apm_bios_call_simple(APM_FUNC_BUSY, 0, 0, &dummy, &err);
		clock_slowed = 0;
	}
 }
@@ -937,7 +979,7 @@ static void apm_power_off(void)
 
	/* Some bioses don't like being called from CPU != 0 */
	if (apm_info.realmode_power_off) {
-		(void)apm_save_cpus();
+		set_cpus_allowed_ptr(current, cpumask_of(0));
		machine_real_restart(po_bios_call, sizeof(po_bios_call));
	} else {
		(void)set_system_power_state(APM_STATE_OFF);
@@ -956,12 +998,13 @@ static void apm_power_off(void)
 static int apm_enable_power_management(int enable)
 {
	u32 eax;
+	int err;
 
	if ((enable == 0) && (apm_info.bios.flags & APM_BIOS_DISENGAGED))
		return APM_NOT_ENGAGED;
	if (apm_bios_call_simple(APM_FUNC_ENABLE_PM, APM_DEVICE_BALL,
-				 enable, &eax))
-		return (eax >> 8) & 0xff;
+				 enable, &eax, &err))
+		return err;
	if (enable)
		apm_info.bios.flags &= ~APM_BIOS_DISABLED;
	else
@@ -986,24 +1029,23 @@ static int apm_enable_power_management(int enable)
 
 static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
 {
-	u32 eax;
-	u32 ebx;
-	u32 ecx;
-	u32 edx;
-	u32 dummy;
+	struct apm_bios_call call;
+
+	call.func = APM_FUNC_GET_STATUS;
+	call.ebx = APM_DEVICE_ALL;
+	call.ecx = 0;
 
	if (apm_info.get_power_status_broken)
		return APM_32_UNSUPPORTED;
-	if (apm_bios_call(APM_FUNC_GET_STATUS, APM_DEVICE_ALL, 0,
-			  &eax, &ebx, &ecx, &edx, &dummy))
-		return (eax >> 8) & 0xff;
-	*status = ebx;
-	*bat = ecx;
+	if (apm_bios_call(&call))
+		return call.err;
+	*status = call.ebx;
+	*bat = call.ecx;
	if (apm_info.get_power_status_swabinminutes) {
-		*life = swab16((u16)edx);
+		*life = swab16((u16)call.edx);
		*life |= 0x8000;
	} else
-		*life = edx;
+		*life = call.edx;
	return APM_SUCCESS;
 }
 
@@ -1048,12 +1090,14 @@ static int apm_get_battery_status(u_short which, u_short *status,
 static int apm_engage_power_management(u_short device, int enable)
 {
	u32 eax;
+	int err;
 
	if ((enable == 0) && (device == APM_DEVICE_ALL)
	    && (apm_info.bios.flags & APM_BIOS_DISABLED))
		return APM_DISABLED;
-	if (apm_bios_call_simple(APM_FUNC_ENGAGE_PM, device, enable, &eax))
-		return (eax >> 8) & 0xff;
+	if (apm_bios_call_simple(APM_FUNC_ENGAGE_PM, device, enable,
+				 &eax, &err))
+		return err;
	if (device == APM_DEVICE_ALL) {
		if (enable)
			apm_info.bios.flags &= ~APM_BIOS_DISENGAGED;
@@ -1689,16 +1733,14 @@ static int apm(void *unused)
	char *power_stat;
	char *bat_stat;
 
-#ifdef CONFIG_SMP
	/* 2002/08/01 - WT
	 * This is to avoid random crashes at boot time during initialization
	 * on SMP systems in case of "apm=power-off" mode. Seen on ASUS A7M266D.
	 * Some bioses don't like being called from CPU != 0.
	 * Method suggested by Ingo Molnar.
	 */
-	set_cpus_allowed(current, cpumask_of_cpu(0));
+	set_cpus_allowed_ptr(current, cpumask_of(0));
	BUG_ON(smp_processor_id() != 0);
-#endif
 
	if (apm_info.connection_version == 0) {
		apm_info.connection_version = apm_info.bios.version;
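
Instead of migrating the calling task to CPU 0 around every BIOS call (the deleted apm_save_cpus()/apm_restore_cpus() pair), each call's register set now travels in a struct apm_bios_call and is executed on CPU 0 through work_on_cpu() when the caller happens to be elsewhere. The work_on_cpu() pattern itself, sketched with hypothetical names:

#include <linux/workqueue.h>	/* work_on_cpu() */

/* Callback signature required by work_on_cpu(): long (*)(void *). */
static long query_bios(void *arg)
{
	/* Runs on the requested CPU; the return value is passed back. */
	return 0;
}

static long query_bios_on_cpu0(void)
{
	/* May sleep, and may return -ENOMEM if the helper cannot run,
	 * which is why on_cpu0() above folds that case into call->err. */
	return work_on_cpu(0, query_bios, NULL);
}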
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e2962cc1e27b..c4f667896c28 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -41,8 +41,6 @@
 
 #include "cpu.h"
 
-#ifdef CONFIG_X86_64
-
 /* all of these masks are initialized in setup_cpu_local_masks() */
 cpumask_var_t cpu_initialized_mask;
 cpumask_var_t cpu_callout_mask;
@@ -60,16 +58,6 @@ void __init setup_cpu_local_masks(void)
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 }
 
-#else /* CONFIG_X86_32 */
-
-cpumask_t cpu_sibling_setup_map;
-cpumask_t cpu_callout_map;
-cpumask_t cpu_initialized;
-cpumask_t cpu_callin_map;
-
-#endif /* CONFIG_X86_32 */
-
-
 static const struct cpu_dev *this_cpu __cpuinitdata;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
@@ -859,6 +847,7 @@ static void vgetcpu_set_mode(void)
 void __init identify_boot_cpu(void)
 {
	identify_cpu(&boot_cpu_data);
+	init_c1e_mask();
 #ifdef CONFIG_X86_32
	sysenter_setup();
	enable_sep_cpu();
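
Removing the #ifdef makes 32-bit use the same cpumask_var_t masks as 64-bit, allocated from bootmem before the normal allocators exist. The boot-time variant, sketched with a hypothetical mask:

#include <linux/cpumask.h>

static cpumask_var_t early_mask;

/* alloc_bootmem_cpumask_var() is for masks needed before kmalloc()
 * works; unlike alloc_cpumask_var() it returns void and cannot fail. */
void __init setup_early_mask(void)
{
	alloc_bootmem_cpumask_var(&early_mask);
}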
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 23da96e57b17..05209b5cc6ca 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -33,7 +33,7 @@
 #include <linux/cpufreq.h>
 #include <linux/compiler.h>
 #include <linux/dmi.h>
-#include <linux/ftrace.h>
+#include <trace/power.h>
 
 #include <linux/acpi.h>
 #include <linux/io.h>
@@ -72,6 +72,8 @@ struct acpi_cpufreq_data {
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
 
+DEFINE_TRACE(power_mark);
+
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance *acpi_perf_data;
 
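
The include switch reflects the tracepoint scheme of this era: trace/power.h declares the power tracepoints, and each tracepoint must be instantiated with DEFINE_TRACE() in exactly one translation unit, which the hunk above adds for power_mark. The generic pairing, sketched with a hypothetical tracepoint (the real power_mark prototype is not shown here):

#include <linux/tracepoint.h>

/* In a shared header: declares trace_my_event() and its
 * register/unregister hooks. */
DECLARE_TRACE(my_event,
	TP_PROTO(unsigned int state),
	TP_ARGS(state));

/* In exactly one .c file: emits the tracepoint's definition. */
DEFINE_TRACE(my_event);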
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 41ed94915f97..6ac55bd341ae 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -211,7 +211,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
	unsigned int i;
 
 #ifdef CONFIG_SMP
-	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
+	cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
 #endif
 
	/* Errata workaround */
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index a15ac94e0b9b..4709ead2db52 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -54,7 +54,10 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
 static int cpu_family = CPU_OPTERON;
 
 #ifndef CONFIG_SMP
-DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+static inline const struct cpumask *cpu_core_mask(int cpu)
+{
+	return cpumask_of(0);
+}
 #endif
 
 /* Return a frequency in MHz, given an input fid */
@@ -699,7 +702,7 @@ static int fill_powernow_table(struct powernow_k8_data *data,
 
	dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
	data->powernow_table = powernow_table;
-	if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
+	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
		print_basics(data);
 
	for (j = 0; j < data->numps; j++)
@@ -862,7 +865,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 
	/* fill in data */
	data->numps = data->acpi_data.state_count;
-	if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
+	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
		print_basics(data);
	powernow_k8_acpi_pst_values(data, 0);
 
@@ -1300,7 +1303,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
	if (cpu_family == CPU_HW_PSTATE)
		cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
	else
-		cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
+		cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
	data->available_cores = pol->cpus;
 
	if (cpu_family == CPU_HW_PSTATE)
@@ -1365,7 +1368,7 @@ static unsigned int powernowk8_get(unsigned int cpu)
	unsigned int khz = 0;
	unsigned int first;
 
-	first = first_cpu(per_cpu(cpu_core_map, cpu));
+	first = cpumask_first(cpu_core_mask(cpu));
	data = per_cpu(powernow_data, first);
 
	if (!data)
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 8bbb11adb315..016c1a4fa3fc 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -321,7 +321,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 
	/* only run on CPU to be set, or on its sibling */
 #ifdef CONFIG_SMP
-	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
+	cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
 #endif
 
	cpus_allowed = current->cpus_allowed;
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index c471eb1a389c..483eda96e102 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -159,7 +159,7 @@ struct _cpuid4_info_regs {
	unsigned long can_disable;
 };
 
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) && defined(CONFIG_SYSFS)
 static struct pci_device_id k8_nb_id[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
@@ -324,15 +324,6 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
	return 0;
 }
 
-static int
-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
-{
-	struct _cpuid4_info_regs *leaf_regs =
-		(struct _cpuid4_info_regs *)this_leaf;
-
-	return cpuid4_cache_lookup_regs(index, leaf_regs);
-}
-
 static int __cpuinit find_num_cache_leaves(void)
 {
	unsigned int eax, ebx, ecx, edx;
@@ -508,6 +499,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
	return l2;
 }
 
+#ifdef CONFIG_SYSFS
+
 /* pointer to _cpuid4_info array (for each cache leaf) */
 static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
 #define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))
@@ -571,6 +564,15 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
	per_cpu(cpuid4_info, cpu) = NULL;
 }
 
+static int
+__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+{
+	struct _cpuid4_info_regs *leaf_regs =
+		(struct _cpuid4_info_regs *)this_leaf;
+
+	return cpuid4_cache_lookup_regs(index, leaf_regs);
+}
+
 static void __cpuinit get_cpu_leaves(void *_retval)
 {
	int j, *retval = _retval, cpu = smp_processor_id();
@@ -612,8 +614,6 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
	return retval;
 }
 
-#ifdef CONFIG_SYSFS
-
 #include <linux/kobject.h>
 #include <linux/sysfs.h>
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index ca14604611ec..863f89568b1a 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c | |||
| @@ -990,7 +990,7 @@ static struct sysdev_attribute *mce_attributes[] = { | |||
| 990 | NULL | 990 | NULL |
| 991 | }; | 991 | }; |
| 992 | 992 | ||
| 993 | static cpumask_t mce_device_initialized = CPU_MASK_NONE; | 993 | static cpumask_var_t mce_device_initialized; |
| 994 | 994 | ||
| 995 | /* Per cpu sysdev init. All of the cpus still share the same ctl bank */ | 995 | /* Per cpu sysdev init. All of the cpus still share the same ctl bank */ |
| 996 | static __cpuinit int mce_create_device(unsigned int cpu) | 996 | static __cpuinit int mce_create_device(unsigned int cpu) |
| @@ -1021,7 +1021,7 @@ static __cpuinit int mce_create_device(unsigned int cpu) | |||
| 1021 | if (err) | 1021 | if (err) |
| 1022 | goto error2; | 1022 | goto error2; |
| 1023 | } | 1023 | } |
| 1024 | cpu_set(cpu, mce_device_initialized); | 1024 | cpumask_set_cpu(cpu, mce_device_initialized); |
| 1025 | 1025 | ||
| 1026 | return 0; | 1026 | return 0; |
| 1027 | error2: | 1027 | error2: |
| @@ -1043,7 +1043,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu) | |||
| 1043 | { | 1043 | { |
| 1044 | int i; | 1044 | int i; |
| 1045 | 1045 | ||
| 1046 | if (!cpu_isset(cpu, mce_device_initialized)) | 1046 | if (!cpumask_test_cpu(cpu, mce_device_initialized)) |
| 1047 | return; | 1047 | return; |
| 1048 | 1048 | ||
| 1049 | for (i = 0; mce_attributes[i]; i++) | 1049 | for (i = 0; mce_attributes[i]; i++) |
| @@ -1053,7 +1053,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu) | |||
| 1053 | sysdev_remove_file(&per_cpu(device_mce, cpu), | 1053 | sysdev_remove_file(&per_cpu(device_mce, cpu), |
| 1054 | &bank_attrs[i]); | 1054 | &bank_attrs[i]); |
| 1055 | sysdev_unregister(&per_cpu(device_mce,cpu)); | 1055 | sysdev_unregister(&per_cpu(device_mce,cpu)); |
| 1056 | cpu_clear(cpu, mce_device_initialized); | 1056 | cpumask_clear_cpu(cpu, mce_device_initialized); |
| 1057 | } | 1057 | } |
| 1058 | 1058 | ||
| 1059 | /* Make sure there are no machine checks on offlined CPUs. */ | 1059 | /* Make sure there are no machine checks on offlined CPUs. */ |
| @@ -1162,6 +1162,8 @@ static __init int mce_init_device(void) | |||
| 1162 | if (!mce_available(&boot_cpu_data)) | 1162 | if (!mce_available(&boot_cpu_data)) |
| 1163 | return -EIO; | 1163 | return -EIO; |
| 1164 | 1164 | ||
| 1165 | alloc_cpumask_var(&mce_device_initialized, GFP_KERNEL); | ||
| 1166 | |||
| 1165 | err = mce_init_banks(); | 1167 | err = mce_init_banks(); |
| 1166 | if (err) | 1168 | if (err) |
| 1167 | return err; | 1169 | return err; |
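The mce_device_initialized conversion follows the standard cpumask_var_t recipe: switch the definition, allocate once at init, and replace the cpu_*() accessors with their cpumask_*() counterparts. A minimal sketch of the pattern (driver names hypothetical):

static cpumask_var_t mydrv_initialized;

static int __init mydrv_init(void)
{
        /* with CONFIG_CPUMASK_OFFSTACK=y this kmallocs nr_cpu_ids bits;
         * with it off, it is a no-op on static storage and returns true */
        if (!alloc_cpumask_var(&mydrv_initialized, GFP_KERNEL))
                return -ENOMEM;
        cpumask_clear(mydrv_initialized);       /* alloc does not zero */
        return 0;
}

Note that the hunk above does not check alloc_cpumask_var()'s return value; that is harmless only on !CPUMASK_OFFSTACK builds, where the call cannot fail.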
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index 7d01be868870..56dde9c4bc96 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c | |||
| @@ -485,7 +485,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
| 485 | 485 | ||
| 486 | #ifdef CONFIG_SMP | 486 | #ifdef CONFIG_SMP |
| 487 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ | 487 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ |
| 488 | i = cpumask_first(&per_cpu(cpu_core_map, cpu)); | 488 | i = cpumask_first(cpu_core_mask(cpu)); |
| 489 | 489 | ||
| 490 | /* first core not up yet */ | 490 | /* first core not up yet */ |
| 491 | if (cpu_data(i).cpu_core_id) | 491 | if (cpu_data(i).cpu_core_id) |
| @@ -505,7 +505,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
| 505 | if (err) | 505 | if (err) |
| 506 | goto out; | 506 | goto out; |
| 507 | 507 | ||
| 508 | cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu)); | 508 | cpumask_copy(b->cpus, cpu_core_mask(cpu)); |
| 509 | per_cpu(threshold_banks, cpu)[bank] = b; | 509 | per_cpu(threshold_banks, cpu)[bank] = b; |
| 510 | goto out; | 510 | goto out; |
| 511 | } | 511 | } |
| @@ -529,7 +529,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
| 529 | #ifndef CONFIG_SMP | 529 | #ifndef CONFIG_SMP |
| 530 | cpumask_setall(b->cpus); | 530 | cpumask_setall(b->cpus); |
| 531 | #else | 531 | #else |
| 532 | cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu)); | 532 | cpumask_copy(b->cpus, cpu_core_mask(cpu)); |
| 533 | #endif | 533 | #endif |
| 534 | 534 | ||
| 535 | per_cpu(threshold_banks, cpu)[bank] = b; | 535 | per_cpu(threshold_banks, cpu)[bank] = b; |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c index 57df3d383470..d6b72df89d69 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c | |||
| @@ -249,7 +249,7 @@ void cmci_rediscover(int dying) | |||
| 249 | for_each_online_cpu (cpu) { | 249 | for_each_online_cpu (cpu) { |
| 250 | if (cpu == dying) | 250 | if (cpu == dying) |
| 251 | continue; | 251 | continue; |
| 252 | if (set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu))) | 252 | if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) |
| 253 | continue; | 253 | continue; |
| 254 | /* Recheck banks in case CPUs don't all have the same */ | 254 | /* Recheck banks in case CPUs don't all have the same */ |
| 255 | if (cmci_supported(&banks)) | 255 | if (cmci_supported(&banks)) |
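cpumask_of(cpu) returns a const pointer into a prebuilt table of single-bit masks, so unlike the old &cpumask_of_cpu(cpu) idiom nothing NR_CPUS-sized lands on the stack. A sketch of the pin-and-work pattern cmci_rediscover() uses (error handling trimmed):

static int run_on(int cpu)
{
        /* migrate the current task to the target CPU */
        if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
                return -EINVAL;         /* cpu likely went offline */

        /* now executing on 'cpu'; do the per-cpu poking here */
        return 0;
}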
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 37f28fc7cf95..0b776c09aff3 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c | |||
| @@ -462,9 +462,6 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base, | |||
| 462 | *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT; | 462 | *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT; |
| 463 | *type = base_lo & 0xff; | 463 | *type = base_lo & 0xff; |
| 464 | 464 | ||
| 465 | printk(KERN_DEBUG " get_mtrr: cpu%d reg%02d base=%010lx size=%010lx %s\n", | ||
| 466 | cpu, reg, *base, *size, | ||
| 467 | mtrr_attrib_to_str(*type & 0xff)); | ||
| 468 | out_put_cpu: | 465 | out_put_cpu: |
| 469 | put_cpu(); | 466 | put_cpu(); |
| 470 | } | 467 | } |
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index d67e0e48bc2d..f93047fed791 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c | |||
| @@ -14,7 +14,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, | |||
| 14 | if (c->x86_max_cores * smp_num_siblings > 1) { | 14 | if (c->x86_max_cores * smp_num_siblings > 1) { |
| 15 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); | 15 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); |
| 16 | seq_printf(m, "siblings\t: %d\n", | 16 | seq_printf(m, "siblings\t: %d\n", |
| 17 | cpus_weight(per_cpu(cpu_core_map, cpu))); | 17 | cpumask_weight(cpu_sibling_mask(cpu))); |
| 18 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); | 18 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); |
| 19 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); | 19 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); |
| 20 | seq_printf(m, "apicid\t\t: %d\n", c->apicid); | 20 | seq_printf(m, "apicid\t\t: %d\n", c->apicid); |
| @@ -143,9 +143,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
| 143 | static void *c_start(struct seq_file *m, loff_t *pos) | 143 | static void *c_start(struct seq_file *m, loff_t *pos) |
| 144 | { | 144 | { |
| 145 | if (*pos == 0) /* just in case, cpu 0 is not the first */ | 145 | if (*pos == 0) /* just in case, cpu 0 is not the first */ |
| 146 | *pos = first_cpu(cpu_online_map); | 146 | *pos = cpumask_first(cpu_online_mask); |
| 147 | else | 147 | else |
| 148 | *pos = next_cpu_nr(*pos - 1, cpu_online_map); | 148 | *pos = cpumask_next(*pos - 1, cpu_online_mask); |
| 149 | if ((*pos) < nr_cpu_ids) | 149 | if ((*pos) < nr_cpu_ids) |
| 150 | return &cpu_data(*pos); | 150 | return &cpu_data(*pos); |
| 151 | return NULL; | 151 | return NULL; |
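One subtlety in c_start(): cpumask_next(n, mask) returns the first set cpu strictly greater than n, so the iterator passes *pos - 1 to make position p resume the scan at cpu p itself. A sketch of that off-by-one:

static int next_online(int pos)
{
        /* cpumask_next() is exclusive of its first argument; step
         * back one so the scan includes 'pos' itself */
        unsigned int cpu = cpumask_next(pos - 1, cpu_online_mask);

        return cpu < nr_cpu_ids ? (int)cpu : -1;        /* -1: exhausted */
}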
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index dd2130b0fb3e..95ea5fa7d444 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/bug.h> | 15 | #include <linux/bug.h> |
| 16 | #include <linux/nmi.h> | 16 | #include <linux/nmi.h> |
| 17 | #include <linux/sysfs.h> | 17 | #include <linux/sysfs.h> |
| 18 | #include <linux/ftrace.h> | ||
| 18 | 19 | ||
| 19 | #include <asm/stacktrace.h> | 20 | #include <asm/stacktrace.h> |
| 20 | 21 | ||
| @@ -196,6 +197,11 @@ unsigned __kprobes long oops_begin(void) | |||
| 196 | int cpu; | 197 | int cpu; |
| 197 | unsigned long flags; | 198 | unsigned long flags; |
| 198 | 199 | ||
| 200 | /* notify the hw-branch tracer so it may disable tracing and | ||
| 201 | add the last trace to the trace buffer - | ||
| 202 | the earlier this happens, the more useful the trace. */ | ||
| 203 | trace_hw_branch_oops(); | ||
| 204 | |||
| 199 | oops_enter(); | 205 | oops_enter(); |
| 200 | 206 | ||
| 201 | /* racy, but better than risking deadlock. */ | 207 | /* racy, but better than risking deadlock. */ |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 76f7141e0f91..61df77532120 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
| 19 | #include <linux/list.h> | 19 | #include <linux/list.h> |
| 20 | 20 | ||
| 21 | #include <asm/cacheflush.h> | ||
| 21 | #include <asm/ftrace.h> | 22 | #include <asm/ftrace.h> |
| 22 | #include <linux/ftrace.h> | 23 | #include <linux/ftrace.h> |
| 23 | #include <asm/nops.h> | 24 | #include <asm/nops.h> |
| @@ -26,6 +27,18 @@ | |||
| 26 | 27 | ||
| 27 | #ifdef CONFIG_DYNAMIC_FTRACE | 28 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 28 | 29 | ||
| 30 | int ftrace_arch_code_modify_prepare(void) | ||
| 31 | { | ||
| 32 | set_kernel_text_rw(); | ||
| 33 | return 0; | ||
| 34 | } | ||
| 35 | |||
| 36 | int ftrace_arch_code_modify_post_process(void) | ||
| 37 | { | ||
| 38 | set_kernel_text_ro(); | ||
| 39 | return 0; | ||
| 40 | } | ||
| 41 | |||
| 29 | union ftrace_code_union { | 42 | union ftrace_code_union { |
| 30 | char code[MCOUNT_INSN_SIZE]; | 43 | char code[MCOUNT_INSN_SIZE]; |
| 31 | struct { | 44 | struct { |
| @@ -66,11 +79,11 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) | |||
| 66 | * | 79 | * |
| 67 | * 1) Put the instruction pointer into the IP buffer | 80 | * 1) Put the instruction pointer into the IP buffer |
| 68 | * and the new code into the "code" buffer. | 81 | * and the new code into the "code" buffer. |
| 69 | * 2) Set a flag that says we are modifying code | 82 | * 2) Wait for any running NMIs to finish and set a flag that says |
| 70 | * 3) Wait for any running NMIs to finish. | 83 | * we are modifying code; it is done in an atomic operation. |
| 71 | * 4) Write the code | 84 | * 3) Write the code |
| 72 | * 5) clear the flag. | 85 | * 4) clear the flag. |
| 73 | * 6) Wait for any running NMIs to finish. | 86 | * 5) Wait for any running NMIs to finish. |
| 74 | * | 87 | * |
| 75 | * If an NMI is executed, the first thing it does is to call | 88 | * If an NMI is executed, the first thing it does is to call |
| 76 | * "ftrace_nmi_enter". This will check if the flag is set to write | 89 | * "ftrace_nmi_enter". This will check if the flag is set to write |
| @@ -82,9 +95,9 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) | |||
| 82 | * are the same as what exists. | 95 | * are the same as what exists. |
| 83 | */ | 96 | */ |
| 84 | 97 | ||
| 85 | static atomic_t in_nmi = ATOMIC_INIT(0); | 98 | #define MOD_CODE_WRITE_FLAG (1 << 31) /* set when NMI should do the write */ |
| 99 | static atomic_t nmi_running = ATOMIC_INIT(0); | ||
| 86 | static int mod_code_status; /* holds return value of text write */ | 100 | static int mod_code_status; /* holds return value of text write */ |
| 87 | static int mod_code_write; /* set when NMI should do the write */ | ||
| 88 | static void *mod_code_ip; /* holds the IP to write to */ | 101 | static void *mod_code_ip; /* holds the IP to write to */ |
| 89 | static void *mod_code_newcode; /* holds the text to write to the IP */ | 102 | static void *mod_code_newcode; /* holds the text to write to the IP */ |
| 90 | 103 | ||
| @@ -101,6 +114,20 @@ int ftrace_arch_read_dyn_info(char *buf, int size) | |||
| 101 | return r; | 114 | return r; |
| 102 | } | 115 | } |
| 103 | 116 | ||
| 117 | static void clear_mod_flag(void) | ||
| 118 | { | ||
| 119 | int old = atomic_read(&nmi_running); | ||
| 120 | |||
| 121 | for (;;) { | ||
| 122 | int new = old & ~MOD_CODE_WRITE_FLAG; | ||
| 123 | |||
| 124 | if (old == new) | ||
| 125 | break; | ||
| 126 | |||
| 127 | old = atomic_cmpxchg(&nmi_running, old, new); | ||
| 128 | } | ||
| 129 | } | ||
| 130 | |||
| 104 | static void ftrace_mod_code(void) | 131 | static void ftrace_mod_code(void) |
| 105 | { | 132 | { |
| 106 | /* | 133 | /* |
| @@ -111,37 +138,52 @@ static void ftrace_mod_code(void) | |||
| 111 | */ | 138 | */ |
| 112 | mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode, | 139 | mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode, |
| 113 | MCOUNT_INSN_SIZE); | 140 | MCOUNT_INSN_SIZE); |
| 141 | |||
| 142 | /* if we fail, then kill any new writers */ | ||
| 143 | if (mod_code_status) | ||
| 144 | clear_mod_flag(); | ||
| 114 | } | 145 | } |
| 115 | 146 | ||
| 116 | void ftrace_nmi_enter(void) | 147 | void ftrace_nmi_enter(void) |
| 117 | { | 148 | { |
| 118 | atomic_inc(&in_nmi); | 149 | if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) { |
| 119 | /* Must have in_nmi seen before reading write flag */ | 150 | smp_rmb(); |
| 120 | smp_mb(); | ||
| 121 | if (mod_code_write) { | ||
| 122 | ftrace_mod_code(); | 151 | ftrace_mod_code(); |
| 123 | atomic_inc(&nmi_update_count); | 152 | atomic_inc(&nmi_update_count); |
| 124 | } | 153 | } |
| 154 | /* Must have previous changes seen before executions */ | ||
| 155 | smp_mb(); | ||
| 125 | } | 156 | } |
| 126 | 157 | ||
| 127 | void ftrace_nmi_exit(void) | 158 | void ftrace_nmi_exit(void) |
| 128 | { | 159 | { |
| 129 | /* Finish all executions before clearing in_nmi */ | 160 | /* Finish all executions before clearing nmi_running */ |
| 130 | smp_wmb(); | 161 | smp_mb(); |
| 131 | atomic_dec(&in_nmi); | 162 | atomic_dec(&nmi_running); |
| 163 | } | ||
| 164 | |||
| 165 | static void wait_for_nmi_and_set_mod_flag(void) | ||
| 166 | { | ||
| 167 | if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG)) | ||
| 168 | return; | ||
| 169 | |||
| 170 | do { | ||
| 171 | cpu_relax(); | ||
| 172 | } while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG)); | ||
| 173 | |||
| 174 | nmi_wait_count++; | ||
| 132 | } | 175 | } |
| 133 | 176 | ||
| 134 | static void wait_for_nmi(void) | 177 | static void wait_for_nmi(void) |
| 135 | { | 178 | { |
| 136 | int waited = 0; | 179 | if (!atomic_read(&nmi_running)) |
| 180 | return; | ||
| 137 | 181 | ||
| 138 | while (atomic_read(&in_nmi)) { | 182 | do { |
| 139 | waited = 1; | ||
| 140 | cpu_relax(); | 183 | cpu_relax(); |
| 141 | } | 184 | } while (atomic_read(&nmi_running)); |
| 142 | 185 | ||
| 143 | if (waited) | 186 | nmi_wait_count++; |
| 144 | nmi_wait_count++; | ||
| 145 | } | 187 | } |
| 146 | 188 | ||
| 147 | static int | 189 | static int |
| @@ -151,14 +193,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code) | |||
| 151 | mod_code_newcode = new_code; | 193 | mod_code_newcode = new_code; |
| 152 | 194 | ||
| 153 | /* The buffers need to be visible before we let NMIs write them */ | 195 | /* The buffers need to be visible before we let NMIs write them */ |
| 154 | smp_wmb(); | ||
| 155 | |||
| 156 | mod_code_write = 1; | ||
| 157 | |||
| 158 | /* Make sure write bit is visible before we wait on NMIs */ | ||
| 159 | smp_mb(); | 196 | smp_mb(); |
| 160 | 197 | ||
| 161 | wait_for_nmi(); | 198 | wait_for_nmi_and_set_mod_flag(); |
| 162 | 199 | ||
| 163 | /* Make sure all running NMIs have finished before we write the code */ | 200 | /* Make sure all running NMIs have finished before we write the code */ |
| 164 | smp_mb(); | 201 | smp_mb(); |
| @@ -166,13 +203,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code) | |||
| 166 | ftrace_mod_code(); | 203 | ftrace_mod_code(); |
| 167 | 204 | ||
| 168 | /* Make sure the write happens before clearing the bit */ | 205 | /* Make sure the write happens before clearing the bit */ |
| 169 | smp_wmb(); | ||
| 170 | |||
| 171 | mod_code_write = 0; | ||
| 172 | |||
| 173 | /* make sure NMIs see the cleared bit */ | ||
| 174 | smp_mb(); | 206 | smp_mb(); |
| 175 | 207 | ||
| 208 | clear_mod_flag(); | ||
| 176 | wait_for_nmi(); | 209 | wait_for_nmi(); |
| 177 | 210 | ||
| 178 | return mod_code_status; | 211 | return mod_code_status; |
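To make the reworked protocol concrete: nmi_running now carries the NMI nesting count in its low bits and the writer flag in bit 31, so an NMI racing with a writer samples the flag in the very atomic_inc_return() that registers its entry, and the writer can only claim the flag while the count is zero. A compressed sketch of the scheme, not the code above verbatim (do_pending_write() and clear_write_flag() stand in for ftrace_mod_code() and clear_mod_flag()):

#define WRITE_FLAG      (1 << 31)
static atomic_t nmi_running = ATOMIC_INIT(0);

static void do_pending_write(void);     /* writes mod_code_newcode at ip */
static void clear_write_flag(void);     /* strips bit 31, keeps the count */

void nmi_side_enter(void)
{
        /* one atomic op both counts this NMI in and samples the flag */
        if (atomic_inc_return(&nmi_running) & WRITE_FLAG) {
                smp_rmb();              /* see ip/newcode before writing */
                do_pending_write();     /* the NMI performs the patch */
        }
        smp_mb();                       /* patch visible before we run on */
}

void nmi_side_exit(void)
{
        smp_mb();                       /* finish executions first */
        atomic_dec(&nmi_running);
}

void writer_side(void)
{
        /* cmpxchg succeeds only when no NMI is in flight */
        while (atomic_cmpxchg(&nmi_running, 0, WRITE_FLAG))
                cpu_relax();
        do_pending_write();
        smp_mb();                       /* write lands before flag clears */
        clear_write_flag();
        while (atomic_read(&nmi_running))
                cpu_relax();            /* drain straggling NMIs */
}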
| @@ -368,25 +401,6 @@ int ftrace_disable_ftrace_graph_caller(void) | |||
| 368 | return ftrace_mod_jmp(ip, old_offset, new_offset); | 401 | return ftrace_mod_jmp(ip, old_offset, new_offset); |
| 369 | } | 402 | } |
| 370 | 403 | ||
| 371 | #else /* CONFIG_DYNAMIC_FTRACE */ | ||
| 372 | |||
| 373 | /* | ||
| 374 | * These functions are picked from those used on | ||
| 375 | * this page for dynamic ftrace. They have been | ||
| 376 | * simplified to ignore all traces in NMI context. | ||
| 377 | */ | ||
| 378 | static atomic_t in_nmi; | ||
| 379 | |||
| 380 | void ftrace_nmi_enter(void) | ||
| 381 | { | ||
| 382 | atomic_inc(&in_nmi); | ||
| 383 | } | ||
| 384 | |||
| 385 | void ftrace_nmi_exit(void) | ||
| 386 | { | ||
| 387 | atomic_dec(&in_nmi); | ||
| 388 | } | ||
| 389 | |||
| 390 | #endif /* !CONFIG_DYNAMIC_FTRACE */ | 404 | #endif /* !CONFIG_DYNAMIC_FTRACE */ |
| 391 | 405 | ||
| 392 | /* | 406 | /* |
| @@ -396,14 +410,13 @@ void ftrace_nmi_exit(void) | |||
| 396 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | 410 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) |
| 397 | { | 411 | { |
| 398 | unsigned long old; | 412 | unsigned long old; |
| 399 | unsigned long long calltime; | ||
| 400 | int faulted; | 413 | int faulted; |
| 401 | struct ftrace_graph_ent trace; | 414 | struct ftrace_graph_ent trace; |
| 402 | unsigned long return_hooker = (unsigned long) | 415 | unsigned long return_hooker = (unsigned long) |
| 403 | &return_to_handler; | 416 | &return_to_handler; |
| 404 | 417 | ||
| 405 | /* NMIs are currently unsupported */ | 418 | /* NMIs are currently unsupported */ |
| 406 | if (unlikely(atomic_read(&in_nmi))) | 419 | if (unlikely(in_nmi())) |
| 407 | return; | 420 | return; |
| 408 | 421 | ||
| 409 | if (unlikely(atomic_read(&current->tracing_graph_pause))) | 422 |
| @@ -439,17 +452,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
| 439 | return; | 452 | return; |
| 440 | } | 453 | } |
| 441 | 454 | ||
| 442 | if (unlikely(!__kernel_text_address(old))) { | 455 | if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) { |
| 443 | ftrace_graph_stop(); | ||
| 444 | *parent = old; | ||
| 445 | WARN_ON(1); | ||
| 446 | return; | ||
| 447 | } | ||
| 448 | |||
| 449 | calltime = cpu_clock(raw_smp_processor_id()); | ||
| 450 | |||
| 451 | if (ftrace_push_return_trace(old, calltime, | ||
| 452 | self_addr, &trace.depth) == -EBUSY) { | ||
| 453 | *parent = old; | 456 | *parent = old; |
| 454 | return; | 457 | return; |
| 455 | } | 458 | } |
| @@ -463,3 +466,66 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
| 463 | } | 466 | } |
| 464 | } | 467 | } |
| 465 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 468 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
| 469 | |||
| 470 | #ifdef CONFIG_FTRACE_SYSCALLS | ||
| 471 | |||
| 472 | extern unsigned long __start_syscalls_metadata[]; | ||
| 473 | extern unsigned long __stop_syscalls_metadata[]; | ||
| 474 | extern unsigned long *sys_call_table; | ||
| 475 | |||
| 476 | static struct syscall_metadata **syscalls_metadata; | ||
| 477 | |||
| 478 | static struct syscall_metadata *find_syscall_meta(unsigned long *syscall) | ||
| 479 | { | ||
| 480 | struct syscall_metadata *start; | ||
| 481 | struct syscall_metadata *stop; | ||
| 482 | char str[KSYM_SYMBOL_LEN]; | ||
| 483 | |||
| 484 | |||
| 485 | start = (struct syscall_metadata *)__start_syscalls_metadata; | ||
| 486 | stop = (struct syscall_metadata *)__stop_syscalls_metadata; | ||
| 487 | kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str); | ||
| 488 | |||
| 489 | for ( ; start < stop; start++) { | ||
| 490 | if (start->name && !strcmp(start->name, str)) | ||
| 491 | return start; | ||
| 492 | } | ||
| 493 | return NULL; | ||
| 494 | } | ||
| 495 | |||
| 496 | struct syscall_metadata *syscall_nr_to_meta(int nr) | ||
| 497 | { | ||
| 498 | if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0) | ||
| 499 | return NULL; | ||
| 500 | |||
| 501 | return syscalls_metadata[nr]; | ||
| 502 | } | ||
| 503 | |||
| 504 | void arch_init_ftrace_syscalls(void) | ||
| 505 | { | ||
| 506 | int i; | ||
| 507 | struct syscall_metadata *meta; | ||
| 508 | unsigned long **psys_syscall_table = &sys_call_table; | ||
| 509 | static atomic_t refs; | ||
| 510 | |||
| 511 | if (atomic_inc_return(&refs) != 1) | ||
| 512 | goto end; | ||
| 513 | |||
| 514 | syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * | ||
| 515 | FTRACE_SYSCALL_MAX, GFP_KERNEL); | ||
| 516 | if (!syscalls_metadata) { | ||
| 517 | WARN_ON(1); | ||
| 518 | return; | ||
| 519 | } | ||
| 520 | |||
| 521 | for (i = 0; i < FTRACE_SYSCALL_MAX; i++) { | ||
| 522 | meta = find_syscall_meta(psys_syscall_table[i]); | ||
| 523 | syscalls_metadata[i] = meta; | ||
| 524 | } | ||
| 525 | return; | ||
| 526 | |||
| 527 | /* Paranoid: avoid overflow */ | ||
| 528 | end: | ||
| 529 | atomic_dec(&refs); | ||
| 530 | } | ||
| 531 | #endif | ||
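A hedged sketch of how the tracer side consumes the table arch_init_ftrace_syscalls() builds; the function below is illustrative rather than the actual ftrace_syscall_enter() body, and it assumes the usual syscall_metadata fields (name, nb_args, types[], args[]):

static void record_syscall(struct pt_regs *regs)
{
        struct syscall_metadata *meta;
        int nr;

        nr = syscall_get_nr(current, regs);     /* <asm/syscall.h> helper */
        meta = syscall_nr_to_meta(nr);
        if (!meta)
                return;         /* kallsyms name did not resolve */

        /* meta->name plus meta->nb_args/types[]/args[] describe the
         * call well enough to pretty-print it into the ring buffer */
}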
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 55b94614e348..7b5169d2b000 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c | |||
| @@ -638,13 +638,13 @@ static void __used __kprobes kretprobe_trampoline_holder(void) | |||
| 638 | #else | 638 | #else |
| 639 | " pushf\n" | 639 | " pushf\n" |
| 640 | /* | 640 | /* |
| 641 | * Skip cs, ip, orig_ax. | 641 | * Skip cs, ip, orig_ax and gs. |
| 642 | * trampoline_handler() will plug in these values | 642 | * trampoline_handler() will plug in these values |
| 643 | */ | 643 | */ |
| 644 | " subl $12, %esp\n" | 644 | " subl $16, %esp\n" |
| 645 | " pushl %fs\n" | 645 | " pushl %fs\n" |
| 646 | " pushl %ds\n" | ||
| 647 | " pushl %es\n" | 646 | " pushl %es\n" |
| 647 | " pushl %ds\n" | ||
| 648 | " pushl %eax\n" | 648 | " pushl %eax\n" |
| 649 | " pushl %ebp\n" | 649 | " pushl %ebp\n" |
| 650 | " pushl %edi\n" | 650 | " pushl %edi\n" |
| @@ -655,10 +655,10 @@ static void __used __kprobes kretprobe_trampoline_holder(void) | |||
| 655 | " movl %esp, %eax\n" | 655 | " movl %esp, %eax\n" |
| 656 | " call trampoline_handler\n" | 656 | " call trampoline_handler\n" |
| 657 | /* Move flags to cs */ | 657 | /* Move flags to cs */ |
| 658 | " movl 52(%esp), %edx\n" | 658 | " movl 56(%esp), %edx\n" |
| 659 | " movl %edx, 48(%esp)\n" | 659 | " movl %edx, 52(%esp)\n" |
| 660 | /* Replace saved flags with true return address. */ | 660 | /* Replace saved flags with true return address. */ |
| 661 | " movl %eax, 52(%esp)\n" | 661 | " movl %eax, 56(%esp)\n" |
| 662 | " popl %ebx\n" | 662 | " popl %ebx\n" |
| 663 | " popl %ecx\n" | 663 | " popl %ecx\n" |
| 664 | " popl %edx\n" | 664 | " popl %edx\n" |
| @@ -666,8 +666,8 @@ static void __used __kprobes kretprobe_trampoline_holder(void) | |||
| 666 | " popl %edi\n" | 666 | " popl %edi\n" |
| 667 | " popl %ebp\n" | 667 | " popl %ebp\n" |
| 668 | " popl %eax\n" | 668 | " popl %eax\n" |
| 669 | /* Skip ip, orig_ax, es, ds, fs */ | 669 | /* Skip ds, es, fs, gs, orig_ax and ip */ |
| 670 | " addl $20, %esp\n" | 670 | " addl $24, %esp\n" |
| 671 | " popf\n" | 671 | " popf\n" |
| 672 | #endif | 672 | #endif |
| 673 | " ret\n"); | 673 | " ret\n"); |
| @@ -691,6 +691,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) | |||
| 691 | regs->cs = __KERNEL_CS; | 691 | regs->cs = __KERNEL_CS; |
| 692 | #else | 692 | #else |
| 693 | regs->cs = __KERNEL_CS | get_kernel_rpl(); | 693 | regs->cs = __KERNEL_CS | get_kernel_rpl(); |
| 694 | regs->gs = 0; | ||
| 694 | #endif | 695 | #endif |
| 695 | regs->ip = trampoline_address; | 696 | regs->ip = trampoline_address; |
| 696 | regs->orig_ax = ~0UL; | 697 | regs->orig_ax = ~0UL; |
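All the adjusted constants above ($12 to $16, 52 to 56, $20 to $24) fall out of one change: gs now gets a slot in the saved frame. With every push applied, the 32-bit frame looks like this (byte offsets from the final %esp):

/*
 *   0 bx    16 di    32 es    48 ip
 *   4 cx    20 bp    36 fs    52 cs
 *   8 dx    24 ax    40 gs    56 flags
 *  12 si    28 ds    44 orig_ax
 *
 * "subl $16" reserves gs/orig_ax/ip/cs beneath the pushed flags.
 * On the way out, flags are copied down into the cs slot (56 -> 52)
 * and the real return address is planted at 56, so after seven pops
 * and "addl $24" (skipping ds/es/fs/gs/orig_ax/ip), popf restores
 * the flags and ret consumes the return address.
 */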
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index c25fdb382292..453b5795a5c6 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c | |||
| @@ -12,31 +12,30 @@ | |||
| 12 | * | 12 | * |
| 13 | * Licensed under the terms of the GNU General Public | 13 | * Licensed under the terms of the GNU General Public |
| 14 | * License version 2. See file COPYING for details. | 14 | * License version 2. See file COPYING for details. |
| 15 | */ | 15 | */ |
| 16 | 16 | #include <linux/platform_device.h> | |
| 17 | #include <linux/capability.h> | 17 | #include <linux/capability.h> |
| 18 | #include <linux/kernel.h> | ||
| 19 | #include <linux/init.h> | ||
| 20 | #include <linux/sched.h> | ||
| 21 | #include <linux/cpumask.h> | ||
| 22 | #include <linux/module.h> | ||
| 23 | #include <linux/slab.h> | ||
| 24 | #include <linux/vmalloc.h> | ||
| 25 | #include <linux/miscdevice.h> | 18 | #include <linux/miscdevice.h> |
| 19 | #include <linux/firmware.h> | ||
| 26 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
| 27 | #include <linux/mm.h> | 21 | #include <linux/cpumask.h> |
| 28 | #include <linux/fs.h> | 22 | #include <linux/pci_ids.h> |
| 23 | #include <linux/uaccess.h> | ||
| 24 | #include <linux/vmalloc.h> | ||
| 25 | #include <linux/kernel.h> | ||
| 26 | #include <linux/module.h> | ||
| 29 | #include <linux/mutex.h> | 27 | #include <linux/mutex.h> |
| 28 | #include <linux/sched.h> | ||
| 29 | #include <linux/init.h> | ||
| 30 | #include <linux/slab.h> | ||
| 30 | #include <linux/cpu.h> | 31 | #include <linux/cpu.h> |
| 31 | #include <linux/firmware.h> | ||
| 32 | #include <linux/platform_device.h> | ||
| 33 | #include <linux/pci.h> | 32 | #include <linux/pci.h> |
| 34 | #include <linux/pci_ids.h> | 33 | #include <linux/fs.h> |
| 35 | #include <linux/uaccess.h> | 34 | #include <linux/mm.h> |
| 36 | 35 | ||
| 37 | #include <asm/msr.h> | ||
| 38 | #include <asm/processor.h> | ||
| 39 | #include <asm/microcode.h> | 36 | #include <asm/microcode.h> |
| 37 | #include <asm/processor.h> | ||
| 38 | #include <asm/msr.h> | ||
| 40 | 39 | ||
| 41 | MODULE_DESCRIPTION("AMD Microcode Update Driver"); | 40 | MODULE_DESCRIPTION("AMD Microcode Update Driver"); |
| 42 | MODULE_AUTHOR("Peter Oruba"); | 41 | MODULE_AUTHOR("Peter Oruba"); |
| @@ -72,8 +71,8 @@ struct microcode_header_amd { | |||
| 72 | } __attribute__((packed)); | 71 | } __attribute__((packed)); |
| 73 | 72 | ||
| 74 | struct microcode_amd { | 73 | struct microcode_amd { |
| 75 | struct microcode_header_amd hdr; | 74 | struct microcode_header_amd hdr; |
| 76 | unsigned int mpb[0]; | 75 | unsigned int mpb[0]; |
| 77 | }; | 76 | }; |
| 78 | 77 | ||
| 79 | #define UCODE_MAX_SIZE 2048 | 78 | #define UCODE_MAX_SIZE 2048 |
| @@ -184,8 +183,8 @@ static int get_ucode_data(void *to, const u8 *from, size_t n) | |||
| 184 | return 0; | 183 | return 0; |
| 185 | } | 184 | } |
| 186 | 185 | ||
| 187 | static void *get_next_ucode(const u8 *buf, unsigned int size, | 186 | static void * |
| 188 | unsigned int *mc_size) | 187 | get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size) |
| 189 | { | 188 | { |
| 190 | unsigned int total_size; | 189 | unsigned int total_size; |
| 191 | u8 section_hdr[UCODE_CONTAINER_SECTION_HDR]; | 190 | u8 section_hdr[UCODE_CONTAINER_SECTION_HDR]; |
| @@ -223,7 +222,6 @@ static void *get_next_ucode(const u8 *buf, unsigned int size, | |||
| 223 | return mc; | 222 | return mc; |
| 224 | } | 223 | } |
| 225 | 224 | ||
| 226 | |||
| 227 | static int install_equiv_cpu_table(const u8 *buf) | 225 | static int install_equiv_cpu_table(const u8 *buf) |
| 228 | { | 226 | { |
| 229 | u8 *container_hdr[UCODE_CONTAINER_HEADER_SIZE]; | 227 | u8 *container_hdr[UCODE_CONTAINER_HEADER_SIZE]; |
| @@ -372,4 +370,3 @@ struct microcode_ops * __init init_amd_microcode(void) | |||
| 372 | { | 370 | { |
| 373 | return &microcode_amd_ops; | 371 | return &microcode_amd_ops; |
| 374 | } | 372 | } |
| 375 | |||
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index c9b721ba968c..a0f3851ef310 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c | |||
| @@ -70,67 +70,78 @@ | |||
| 70 | * Fix sigmatch() macro to handle old CPUs with pf == 0. | 70 | * Fix sigmatch() macro to handle old CPUs with pf == 0. |
| 71 | * Thanks to Stuart Swales for pointing out this bug. | 71 | * Thanks to Stuart Swales for pointing out this bug. |
| 72 | */ | 72 | */ |
| 73 | #include <linux/platform_device.h> | ||
| 73 | #include <linux/capability.h> | 74 | #include <linux/capability.h> |
| 74 | #include <linux/kernel.h> | 75 | #include <linux/miscdevice.h> |
| 75 | #include <linux/init.h> | 76 | #include <linux/firmware.h> |
| 76 | #include <linux/sched.h> | ||
| 77 | #include <linux/smp_lock.h> | 77 | #include <linux/smp_lock.h> |
| 78 | #include <linux/spinlock.h> | ||
| 78 | #include <linux/cpumask.h> | 79 | #include <linux/cpumask.h> |
| 79 | #include <linux/module.h> | 80 | #include <linux/uaccess.h> |
| 80 | #include <linux/slab.h> | ||
| 81 | #include <linux/vmalloc.h> | 81 | #include <linux/vmalloc.h> |
| 82 | #include <linux/miscdevice.h> | 82 | #include <linux/kernel.h> |
| 83 | #include <linux/spinlock.h> | 83 | #include <linux/module.h> |
| 84 | #include <linux/mm.h> | ||
| 85 | #include <linux/fs.h> | ||
| 86 | #include <linux/mutex.h> | 84 | #include <linux/mutex.h> |
| 85 | #include <linux/sched.h> | ||
| 86 | #include <linux/init.h> | ||
| 87 | #include <linux/slab.h> | ||
| 87 | #include <linux/cpu.h> | 88 | #include <linux/cpu.h> |
| 88 | #include <linux/firmware.h> | 89 | #include <linux/fs.h> |
| 89 | #include <linux/platform_device.h> | 90 | #include <linux/mm.h> |
| 90 | 91 | ||
| 91 | #include <asm/msr.h> | ||
| 92 | #include <asm/uaccess.h> | ||
| 93 | #include <asm/processor.h> | ||
| 94 | #include <asm/microcode.h> | 92 | #include <asm/microcode.h> |
| 93 | #include <asm/processor.h> | ||
| 94 | #include <asm/msr.h> | ||
| 95 | 95 | ||
| 96 | MODULE_DESCRIPTION("Microcode Update Driver"); | 96 | MODULE_DESCRIPTION("Microcode Update Driver"); |
| 97 | MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>"); | 97 | MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>"); |
| 98 | MODULE_LICENSE("GPL"); | 98 | MODULE_LICENSE("GPL"); |
| 99 | 99 | ||
| 100 | #define MICROCODE_VERSION "2.00" | 100 | #define MICROCODE_VERSION "2.00" |
| 101 | 101 | ||
| 102 | static struct microcode_ops *microcode_ops; | 102 | static struct microcode_ops *microcode_ops; |
| 103 | 103 | ||
| 104 | /* no concurrent ->write()s are allowed on /dev/cpu/microcode */ | 104 | /* no concurrent ->write()s are allowed on /dev/cpu/microcode */ |
| 105 | static DEFINE_MUTEX(microcode_mutex); | 105 | static DEFINE_MUTEX(microcode_mutex); |
| 106 | 106 | ||
| 107 | struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; | 107 | struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; |
| 108 | EXPORT_SYMBOL_GPL(ucode_cpu_info); | 108 | EXPORT_SYMBOL_GPL(ucode_cpu_info); |
| 109 | 109 | ||
| 110 | #ifdef CONFIG_MICROCODE_OLD_INTERFACE | 110 | #ifdef CONFIG_MICROCODE_OLD_INTERFACE |
| 111 | struct update_for_cpu { | ||
| 112 | const void __user *buf; | ||
| 113 | size_t size; | ||
| 114 | }; | ||
| 115 | |||
| 116 | static long update_for_cpu(void *_ufc) | ||
| 117 | { | ||
| 118 | struct update_for_cpu *ufc = _ufc; | ||
| 119 | int error; | ||
| 120 | |||
| 121 | error = microcode_ops->request_microcode_user(smp_processor_id(), | ||
| 122 | ufc->buf, ufc->size); | ||
| 123 | if (error < 0) | ||
| 124 | return error; | ||
| 125 | if (!error) | ||
| 126 | microcode_ops->apply_microcode(smp_processor_id()); | ||
| 127 | return error; | ||
| 128 | } | ||
| 129 | |||
| 111 | static int do_microcode_update(const void __user *buf, size_t size) | 130 | static int do_microcode_update(const void __user *buf, size_t size) |
| 112 | { | 131 | { |
| 113 | cpumask_t old; | ||
| 114 | int error = 0; | 132 | int error = 0; |
| 115 | int cpu; | 133 | int cpu; |
| 116 | 134 | struct update_for_cpu ufc = { .buf = buf, .size = size }; | |
| 117 | old = current->cpus_allowed; | ||
| 118 | 135 | ||
| 119 | for_each_online_cpu(cpu) { | 136 | for_each_online_cpu(cpu) { |
| 120 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 137 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
| 121 | 138 | ||
| 122 | if (!uci->valid) | 139 | if (!uci->valid) |
| 123 | continue; | 140 | continue; |
| 124 | 141 | error = work_on_cpu(cpu, update_for_cpu, &ufc); | |
| 125 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | ||
| 126 | error = microcode_ops->request_microcode_user(cpu, buf, size); | ||
| 127 | if (error < 0) | 142 | if (error < 0) |
| 128 | goto out; | 143 | break; |
| 129 | if (!error) | ||
| 130 | microcode_ops->apply_microcode(cpu); | ||
| 131 | } | 144 | } |
| 132 | out: | ||
| 133 | set_cpus_allowed_ptr(current, &old); | ||
| 134 | return error; | 145 | return error; |
| 135 | } | 146 | } |
| 136 | 147 | ||
| @@ -198,18 +209,33 @@ static void microcode_dev_exit(void) | |||
| 198 | 209 | ||
| 199 | MODULE_ALIAS_MISCDEV(MICROCODE_MINOR); | 210 | MODULE_ALIAS_MISCDEV(MICROCODE_MINOR); |
| 200 | #else | 211 | #else |
| 201 | #define microcode_dev_init() 0 | 212 | #define microcode_dev_init() 0 |
| 202 | #define microcode_dev_exit() do { } while (0) | 213 | #define microcode_dev_exit() do { } while (0) |
| 203 | #endif | 214 | #endif |
| 204 | 215 | ||
| 205 | /* fake device for request_firmware */ | 216 | /* fake device for request_firmware */ |
| 206 | static struct platform_device *microcode_pdev; | 217 | static struct platform_device *microcode_pdev; |
| 218 | |||
| 219 | static long reload_for_cpu(void *unused) | ||
| 220 | { | ||
| 221 | struct ucode_cpu_info *uci = ucode_cpu_info + smp_processor_id(); | ||
| 222 | int err = 0; | ||
| 223 | |||
| 224 | mutex_lock(&microcode_mutex); | ||
| 225 | if (uci->valid) { | ||
| 226 | err = microcode_ops->request_microcode_fw(smp_processor_id(), | ||
| 227 | &microcode_pdev->dev); | ||
| 228 | if (!err) | ||
| 229 | microcode_ops->apply_microcode(smp_processor_id()); | ||
| 230 | } | ||
| 231 | mutex_unlock(&microcode_mutex); | ||
| 232 | return err; | ||
| 233 | } | ||
| 207 | 234 | ||
| 208 | static ssize_t reload_store(struct sys_device *dev, | 235 | static ssize_t reload_store(struct sys_device *dev, |
| 209 | struct sysdev_attribute *attr, | 236 | struct sysdev_attribute *attr, |
| 210 | const char *buf, size_t sz) | 237 | const char *buf, size_t sz) |
| 211 | { | 238 | { |
| 212 | struct ucode_cpu_info *uci = ucode_cpu_info + dev->id; | ||
| 213 | char *end; | 239 | char *end; |
| 214 | unsigned long val = simple_strtoul(buf, &end, 0); | 240 | unsigned long val = simple_strtoul(buf, &end, 0); |
| 215 | int err = 0; | 241 | int err = 0; |
| @@ -218,21 +244,9 @@ static ssize_t reload_store(struct sys_device *dev, | |||
| 218 | if (end == buf) | 244 | if (end == buf) |
| 219 | return -EINVAL; | 245 | return -EINVAL; |
| 220 | if (val == 1) { | 246 | if (val == 1) { |
| 221 | cpumask_t old = current->cpus_allowed; | ||
| 222 | |||
| 223 | get_online_cpus(); | 247 | get_online_cpus(); |
| 224 | if (cpu_online(cpu)) { | 248 | if (cpu_online(cpu)) |
| 225 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | 249 | err = work_on_cpu(cpu, reload_for_cpu, NULL); |
| 226 | mutex_lock(&microcode_mutex); | ||
| 227 | if (uci->valid) { | ||
| 228 | err = microcode_ops->request_microcode_fw(cpu, | ||
| 229 | &microcode_pdev->dev); | ||
| 230 | if (!err) | ||
| 231 | microcode_ops->apply_microcode(cpu); | ||
| 232 | } | ||
| 233 | mutex_unlock(&microcode_mutex); | ||
| 234 | set_cpus_allowed_ptr(current, &old); | ||
| 235 | } | ||
| 236 | put_online_cpus(); | 250 | put_online_cpus(); |
| 237 | } | 251 | } |
| 238 | if (err) | 252 | if (err) |
| @@ -268,8 +282,8 @@ static struct attribute *mc_default_attrs[] = { | |||
| 268 | }; | 282 | }; |
| 269 | 283 | ||
| 270 | static struct attribute_group mc_attr_group = { | 284 | static struct attribute_group mc_attr_group = { |
| 271 | .attrs = mc_default_attrs, | 285 | .attrs = mc_default_attrs, |
| 272 | .name = "microcode", | 286 | .name = "microcode", |
| 273 | }; | 287 | }; |
| 274 | 288 | ||
| 275 | static void __microcode_fini_cpu(int cpu) | 289 | static void __microcode_fini_cpu(int cpu) |
| @@ -328,9 +342,9 @@ static int microcode_resume_cpu(int cpu) | |||
| 328 | return 0; | 342 | return 0; |
| 329 | } | 343 | } |
| 330 | 344 | ||
| 331 | static void microcode_update_cpu(int cpu) | 345 | static long microcode_update_cpu(void *unused) |
| 332 | { | 346 | { |
| 333 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 347 | struct ucode_cpu_info *uci = ucode_cpu_info + smp_processor_id(); |
| 334 | int err = 0; | 348 | int err = 0; |
| 335 | 349 | ||
| 336 | /* | 350 | /* |
| @@ -338,30 +352,27 @@ static void microcode_update_cpu(int cpu) | |||
| 338 | * otherwise just request a firmware: | 352 | * otherwise just request a firmware: |
| 339 | */ | 353 | */ |
| 340 | if (uci->valid) { | 354 | if (uci->valid) { |
| 341 | err = microcode_resume_cpu(cpu); | 355 | err = microcode_resume_cpu(smp_processor_id()); |
| 342 | } else { | 356 | } else { |
| 343 | collect_cpu_info(cpu); | 357 | collect_cpu_info(smp_processor_id()); |
| 344 | if (uci->valid && system_state == SYSTEM_RUNNING) | 358 | if (uci->valid && system_state == SYSTEM_RUNNING) |
| 345 | err = microcode_ops->request_microcode_fw(cpu, | 359 | err = microcode_ops->request_microcode_fw( |
| 360 | smp_processor_id(), | ||
| 346 | &microcode_pdev->dev); | 361 | &microcode_pdev->dev); |
| 347 | } | 362 | } |
| 348 | if (!err) | 363 | if (!err) |
| 349 | microcode_ops->apply_microcode(cpu); | 364 | microcode_ops->apply_microcode(smp_processor_id()); |
| 365 | return err; | ||
| 350 | } | 366 | } |
| 351 | 367 | ||
| 352 | static void microcode_init_cpu(int cpu) | 368 | static int microcode_init_cpu(int cpu) |
| 353 | { | 369 | { |
| 354 | cpumask_t old = current->cpus_allowed; | 370 | int err; |
| 355 | |||
| 356 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | ||
| 357 | /* We should bind the task to the CPU */ | ||
| 358 | BUG_ON(raw_smp_processor_id() != cpu); | ||
| 359 | |||
| 360 | mutex_lock(&microcode_mutex); | 371 | mutex_lock(&microcode_mutex); |
| 361 | microcode_update_cpu(cpu); | 372 | err = work_on_cpu(cpu, microcode_update_cpu, NULL); |
| 362 | mutex_unlock(&microcode_mutex); | 373 | mutex_unlock(&microcode_mutex); |
| 363 | 374 | ||
| 364 | set_cpus_allowed_ptr(current, &old); | 375 | return err; |
| 365 | } | 376 | } |
| 366 | 377 | ||
| 367 | static int mc_sysdev_add(struct sys_device *sys_dev) | 378 | static int mc_sysdev_add(struct sys_device *sys_dev) |
| @@ -379,8 +390,11 @@ static int mc_sysdev_add(struct sys_device *sys_dev) | |||
| 379 | if (err) | 390 | if (err) |
| 380 | return err; | 391 | return err; |
| 381 | 392 | ||
| 382 | microcode_init_cpu(cpu); | 393 | err = microcode_init_cpu(cpu); |
| 383 | return 0; | 394 | if (err) |
| 395 | sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); | ||
| 396 | |||
| 397 | return err; | ||
| 384 | } | 398 | } |
| 385 | 399 | ||
| 386 | static int mc_sysdev_remove(struct sys_device *sys_dev) | 400 | static int mc_sysdev_remove(struct sys_device *sys_dev) |
| @@ -404,14 +418,14 @@ static int mc_sysdev_resume(struct sys_device *dev) | |||
| 404 | return 0; | 418 | return 0; |
| 405 | 419 | ||
| 406 | /* only CPU 0 will apply ucode here */ | 420 | /* only CPU 0 will apply ucode here */ |
| 407 | microcode_update_cpu(0); | 421 | microcode_update_cpu(NULL); |
| 408 | return 0; | 422 | return 0; |
| 409 | } | 423 | } |
| 410 | 424 | ||
| 411 | static struct sysdev_driver mc_sysdev_driver = { | 425 | static struct sysdev_driver mc_sysdev_driver = { |
| 412 | .add = mc_sysdev_add, | 426 | .add = mc_sysdev_add, |
| 413 | .remove = mc_sysdev_remove, | 427 | .remove = mc_sysdev_remove, |
| 414 | .resume = mc_sysdev_resume, | 428 | .resume = mc_sysdev_resume, |
| 415 | }; | 429 | }; |
| 416 | 430 | ||
| 417 | static __cpuinit int | 431 | static __cpuinit int |
| @@ -424,7 +438,9 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) | |||
| 424 | switch (action) { | 438 | switch (action) { |
| 425 | case CPU_ONLINE: | 439 | case CPU_ONLINE: |
| 426 | case CPU_ONLINE_FROZEN: | 440 | case CPU_ONLINE_FROZEN: |
| 427 | microcode_init_cpu(cpu); | 441 | if (microcode_init_cpu(cpu)) |
| 442 | printk(KERN_ERR "microcode: failed to init CPU%d\n", | ||
| 443 | cpu); | ||
| 428 | case CPU_DOWN_FAILED: | 444 | case CPU_DOWN_FAILED: |
| 429 | case CPU_DOWN_FAILED_FROZEN: | 445 | case CPU_DOWN_FAILED_FROZEN: |
| 430 | pr_debug("microcode: CPU%d added\n", cpu); | 446 | pr_debug("microcode: CPU%d added\n", cpu); |
| @@ -448,7 +464,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) | |||
| 448 | } | 464 | } |
| 449 | 465 | ||
| 450 | static struct notifier_block __refdata mc_cpu_notifier = { | 466 | static struct notifier_block __refdata mc_cpu_notifier = { |
| 451 | .notifier_call = mc_cpu_callback, | 467 | .notifier_call = mc_cpu_callback, |
| 452 | }; | 468 | }; |
| 453 | 469 | ||
| 454 | static int __init microcode_init(void) | 470 | static int __init microcode_init(void) |
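work_on_cpu() replaces every set_cpus_allowed_ptr() round-trip in this file: it runs the callback from the target CPU's bound workqueue thread and sleeps until it returns, so the caller's own affinity mask is never saved and restored. A minimal usage sketch (probe function hypothetical):

static long probe_one(void *arg)
{
        /* executes pinned to the CPU handed to work_on_cpu(), so
         * smp_processor_id() is safe here */
        return smp_processor_id();
}

static int probe_all_cpus(void)
{
        long ret = 0;
        int cpu;

        get_online_cpus();              /* keep the online set stable */
        for_each_online_cpu(cpu) {
                ret = work_on_cpu(cpu, probe_one, NULL);
                if (ret < 0)
                        break;
        }
        put_online_cpus();
        return ret < 0 ? (int)ret : 0;
}

Because work_on_cpu() sleeps, it is process-context only, which is why reload_store() still brackets the call with get/put_online_cpus() instead of disabling preemption.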
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c index 5e9f4fc51385..149b9ec7c1ab 100644 --- a/arch/x86/kernel/microcode_intel.c +++ b/arch/x86/kernel/microcode_intel.c | |||
| @@ -70,28 +70,28 @@ | |||
| 70 | * Fix sigmatch() macro to handle old CPUs with pf == 0. | 70 | * Fix sigmatch() macro to handle old CPUs with pf == 0. |
| 71 | * Thanks to Stuart Swales for pointing out this bug. | 71 | * Thanks to Stuart Swales for pointing out this bug. |
| 72 | */ | 72 | */ |
| 73 | #include <linux/platform_device.h> | ||
| 73 | #include <linux/capability.h> | 74 | #include <linux/capability.h> |
| 74 | #include <linux/kernel.h> | 75 | #include <linux/miscdevice.h> |
| 75 | #include <linux/init.h> | 76 | #include <linux/firmware.h> |
| 76 | #include <linux/sched.h> | ||
| 77 | #include <linux/smp_lock.h> | 77 | #include <linux/smp_lock.h> |
| 78 | #include <linux/spinlock.h> | ||
| 78 | #include <linux/cpumask.h> | 79 | #include <linux/cpumask.h> |
| 79 | #include <linux/module.h> | 80 | #include <linux/uaccess.h> |
| 80 | #include <linux/slab.h> | ||
| 81 | #include <linux/vmalloc.h> | 81 | #include <linux/vmalloc.h> |
| 82 | #include <linux/miscdevice.h> | 82 | #include <linux/kernel.h> |
| 83 | #include <linux/spinlock.h> | 83 | #include <linux/module.h> |
| 84 | #include <linux/mm.h> | ||
| 85 | #include <linux/fs.h> | ||
| 86 | #include <linux/mutex.h> | 84 | #include <linux/mutex.h> |
| 85 | #include <linux/sched.h> | ||
| 86 | #include <linux/init.h> | ||
| 87 | #include <linux/slab.h> | ||
| 87 | #include <linux/cpu.h> | 88 | #include <linux/cpu.h> |
| 88 | #include <linux/firmware.h> | 89 | #include <linux/fs.h> |
| 89 | #include <linux/platform_device.h> | 90 | #include <linux/mm.h> |
| 90 | #include <linux/uaccess.h> | ||
| 91 | 91 | ||
| 92 | #include <asm/msr.h> | ||
| 93 | #include <asm/processor.h> | ||
| 94 | #include <asm/microcode.h> | 92 | #include <asm/microcode.h> |
| 93 | #include <asm/processor.h> | ||
| 94 | #include <asm/msr.h> | ||
| 95 | 95 | ||
| 96 | MODULE_DESCRIPTION("Microcode Update Driver"); | 96 | MODULE_DESCRIPTION("Microcode Update Driver"); |
| 97 | MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>"); | 97 | MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>"); |
| @@ -129,12 +129,13 @@ struct extended_sigtable { | |||
| 129 | struct extended_signature sigs[0]; | 129 | struct extended_signature sigs[0]; |
| 130 | }; | 130 | }; |
| 131 | 131 | ||
| 132 | #define DEFAULT_UCODE_DATASIZE (2000) | 132 | #define DEFAULT_UCODE_DATASIZE (2000) |
| 133 | #define MC_HEADER_SIZE (sizeof(struct microcode_header_intel)) | 133 | #define MC_HEADER_SIZE (sizeof(struct microcode_header_intel)) |
| 134 | #define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) | 134 | #define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) |
| 135 | #define EXT_HEADER_SIZE (sizeof(struct extended_sigtable)) | 135 | #define EXT_HEADER_SIZE (sizeof(struct extended_sigtable)) |
| 136 | #define EXT_SIGNATURE_SIZE (sizeof(struct extended_signature)) | 136 | #define EXT_SIGNATURE_SIZE (sizeof(struct extended_signature)) |
| 137 | #define DWSIZE (sizeof(u32)) | 137 | #define DWSIZE (sizeof(u32)) |
| 138 | |||
| 138 | #define get_totalsize(mc) \ | 139 | #define get_totalsize(mc) \ |
| 139 | (((struct microcode_intel *)mc)->hdr.totalsize ? \ | 140 | (((struct microcode_intel *)mc)->hdr.totalsize ? \ |
| 140 | ((struct microcode_intel *)mc)->hdr.totalsize : \ | 141 | ((struct microcode_intel *)mc)->hdr.totalsize : \ |
| @@ -197,30 +198,31 @@ static inline int update_match_cpu(struct cpu_signature *csig, int sig, int pf) | |||
| 197 | } | 198 | } |
| 198 | 199 | ||
| 199 | static inline int | 200 | static inline int |
| 200 | update_match_revision(struct microcode_header_intel *mc_header, int rev) | 201 | update_match_revision(struct microcode_header_intel *mc_header, int rev) |
| 201 | { | 202 | { |
| 202 | return (mc_header->rev <= rev) ? 0 : 1; | 203 | return (mc_header->rev <= rev) ? 0 : 1; |
| 203 | } | 204 | } |
| 204 | 205 | ||
| 205 | static int microcode_sanity_check(void *mc) | 206 | static int microcode_sanity_check(void *mc) |
| 206 | { | 207 | { |
| 208 | unsigned long total_size, data_size, ext_table_size; | ||
| 207 | struct microcode_header_intel *mc_header = mc; | 209 | struct microcode_header_intel *mc_header = mc; |
| 208 | struct extended_sigtable *ext_header = NULL; | 210 | struct extended_sigtable *ext_header = NULL; |
| 209 | struct extended_signature *ext_sig; | ||
| 210 | unsigned long total_size, data_size, ext_table_size; | ||
| 211 | int sum, orig_sum, ext_sigcount = 0, i; | 211 | int sum, orig_sum, ext_sigcount = 0, i; |
| 212 | struct extended_signature *ext_sig; | ||
| 212 | 213 | ||
| 213 | total_size = get_totalsize(mc_header); | 214 | total_size = get_totalsize(mc_header); |
| 214 | data_size = get_datasize(mc_header); | 215 | data_size = get_datasize(mc_header); |
| 216 | |||
| 215 | if (data_size + MC_HEADER_SIZE > total_size) { | 217 | if (data_size + MC_HEADER_SIZE > total_size) { |
| 216 | printk(KERN_ERR "microcode: error! " | 218 | printk(KERN_ERR "microcode: error! " |
| 217 | "Bad data size in microcode data file\n"); | 219 | "Bad data size in microcode data file\n"); |
| 218 | return -EINVAL; | 220 | return -EINVAL; |
| 219 | } | 221 | } |
| 220 | 222 | ||
| 221 | if (mc_header->ldrver != 1 || mc_header->hdrver != 1) { | 223 | if (mc_header->ldrver != 1 || mc_header->hdrver != 1) { |
| 222 | printk(KERN_ERR "microcode: error! " | 224 | printk(KERN_ERR "microcode: error! " |
| 223 | "Unknown microcode update format\n"); | 225 | "Unknown microcode update format\n"); |
| 224 | return -EINVAL; | 226 | return -EINVAL; |
| 225 | } | 227 | } |
| 226 | ext_table_size = total_size - (MC_HEADER_SIZE + data_size); | 228 | ext_table_size = total_size - (MC_HEADER_SIZE + data_size); |
| @@ -318,11 +320,15 @@ get_matching_microcode(struct cpu_signature *cpu_sig, void *mc, int rev) | |||
| 318 | 320 | ||
| 319 | static void apply_microcode(int cpu) | 321 | static void apply_microcode(int cpu) |
| 320 | { | 322 | { |
| 323 | struct microcode_intel *mc_intel; | ||
| 324 | struct ucode_cpu_info *uci; | ||
| 321 | unsigned long flags; | 325 | unsigned long flags; |
| 322 | unsigned int val[2]; | 326 | unsigned int val[2]; |
| 323 | int cpu_num = raw_smp_processor_id(); | 327 | int cpu_num; |
| 324 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 328 | |
| 325 | struct microcode_intel *mc_intel = uci->mc; | 329 | cpu_num = raw_smp_processor_id(); |
| 330 | uci = ucode_cpu_info + cpu; | ||
| 331 | mc_intel = uci->mc; | ||
| 326 | 332 | ||
| 327 | /* We should bind the task to the CPU */ | 333 | /* We should bind the task to the CPU */ |
| 328 | BUG_ON(cpu_num != cpu); | 334 | BUG_ON(cpu_num != cpu); |
| @@ -348,15 +354,17 @@ static void apply_microcode(int cpu) | |||
| 348 | spin_unlock_irqrestore(&microcode_update_lock, flags); | 354 | spin_unlock_irqrestore(&microcode_update_lock, flags); |
| 349 | if (val[1] != mc_intel->hdr.rev) { | 355 | if (val[1] != mc_intel->hdr.rev) { |
| 350 | printk(KERN_ERR "microcode: CPU%d update from revision " | 356 | printk(KERN_ERR "microcode: CPU%d update from revision " |
| 351 | "0x%x to 0x%x failed\n", cpu_num, uci->cpu_sig.rev, val[1]); | 357 | "0x%x to 0x%x failed\n", |
| 358 | cpu_num, uci->cpu_sig.rev, val[1]); | ||
| 352 | return; | 359 | return; |
| 353 | } | 360 | } |
| 354 | printk(KERN_INFO "microcode: CPU%d updated from revision " | 361 | printk(KERN_INFO "microcode: CPU%d updated from revision " |
| 355 | "0x%x to 0x%x, date = %04x-%02x-%02x \n", | 362 | "0x%x to 0x%x, date = %04x-%02x-%02x \n", |
| 356 | cpu_num, uci->cpu_sig.rev, val[1], | 363 | cpu_num, uci->cpu_sig.rev, val[1], |
| 357 | mc_intel->hdr.date & 0xffff, | 364 | mc_intel->hdr.date & 0xffff, |
| 358 | mc_intel->hdr.date >> 24, | 365 | mc_intel->hdr.date >> 24, |
| 359 | (mc_intel->hdr.date >> 16) & 0xff); | 366 | (mc_intel->hdr.date >> 16) & 0xff); |
| 367 | |||
| 360 | uci->cpu_sig.rev = val[1]; | 368 | uci->cpu_sig.rev = val[1]; |
| 361 | } | 369 | } |
| 362 | 370 | ||
| @@ -404,18 +412,23 @@ static int generic_load_microcode(int cpu, void *data, size_t size, | |||
| 404 | leftover -= mc_size; | 412 | leftover -= mc_size; |
| 405 | } | 413 | } |
| 406 | 414 | ||
| 407 | if (new_mc) { | 415 | if (!new_mc) |
| 408 | if (!leftover) { | 416 | goto out; |
| 409 | if (uci->mc) | 417 | |
| 410 | vfree(uci->mc); | 418 | if (leftover) { |
| 411 | uci->mc = (struct microcode_intel *)new_mc; | 419 | vfree(new_mc); |
| 412 | pr_debug("microcode: CPU%d found a matching microcode update with" | 420 | goto out; |
| 413 | " version 0x%x (current=0x%x)\n", | ||
| 414 | cpu, new_rev, uci->cpu_sig.rev); | ||
| 415 | } else | ||
| 416 | vfree(new_mc); | ||
| 417 | } | 421 | } |
| 418 | 422 | ||
| 423 | if (uci->mc) | ||
| 424 | vfree(uci->mc); | ||
| 425 | uci->mc = (struct microcode_intel *)new_mc; | ||
| 426 | |||
| 427 | pr_debug("microcode: CPU%d found a matching microcode update with" | ||
| 428 | " version 0x%x (current=0x%x)\n", | ||
| 429 | cpu, new_rev, uci->cpu_sig.rev); | ||
| 430 | |||
| 431 | out: | ||
| 419 | return (int)leftover; | 432 | return (int)leftover; |
| 420 | } | 433 | } |
| 421 | 434 | ||
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 156f87582c6c..ca989158e847 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
| @@ -8,7 +8,7 @@ | |||
| 8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
| 9 | #include <linux/pm.h> | 9 | #include <linux/pm.h> |
| 10 | #include <linux/clockchips.h> | 10 | #include <linux/clockchips.h> |
| 11 | #include <linux/ftrace.h> | 11 | #include <trace/power.h> |
| 12 | #include <asm/system.h> | 12 | #include <asm/system.h> |
| 13 | #include <asm/apic.h> | 13 | #include <asm/apic.h> |
| 14 | #include <asm/idle.h> | 14 | #include <asm/idle.h> |
| @@ -22,6 +22,9 @@ EXPORT_SYMBOL(idle_nomwait); | |||
| 22 | 22 | ||
| 23 | struct kmem_cache *task_xstate_cachep; | 23 | struct kmem_cache *task_xstate_cachep; |
| 24 | 24 | ||
| 25 | DEFINE_TRACE(power_start); | ||
| 26 | DEFINE_TRACE(power_end); | ||
| 27 | |||
| 25 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) | 28 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) |
| 26 | { | 29 | { |
| 27 | *dst = *src; | 30 | *dst = *src; |
| @@ -325,7 +328,7 @@ void stop_this_cpu(void *dummy) | |||
| 325 | /* | 328 | /* |
| 326 | * Remove this CPU: | 329 | * Remove this CPU: |
| 327 | */ | 330 | */ |
| 328 | cpu_clear(smp_processor_id(), cpu_online_map); | 331 | set_cpu_online(smp_processor_id(), false); |
| 329 | disable_local_APIC(); | 332 | disable_local_APIC(); |
| 330 | 333 | ||
| 331 | for (;;) { | 334 | for (;;) { |
| @@ -475,12 +478,13 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c) | |||
| 475 | return 1; | 478 | return 1; |
| 476 | } | 479 | } |
| 477 | 480 | ||
| 478 | static cpumask_t c1e_mask = CPU_MASK_NONE; | 481 | static cpumask_var_t c1e_mask; |
| 479 | static int c1e_detected; | 482 | static int c1e_detected; |
| 480 | 483 | ||
| 481 | void c1e_remove_cpu(int cpu) | 484 | void c1e_remove_cpu(int cpu) |
| 482 | { | 485 | { |
| 483 | cpu_clear(cpu, c1e_mask); | 486 | if (c1e_mask != NULL) |
| 487 | cpumask_clear_cpu(cpu, c1e_mask); | ||
| 484 | } | 488 | } |
| 485 | 489 | ||
| 486 | /* | 490 | /* |
| @@ -509,8 +513,8 @@ static void c1e_idle(void) | |||
| 509 | if (c1e_detected) { | 513 | if (c1e_detected) { |
| 510 | int cpu = smp_processor_id(); | 514 | int cpu = smp_processor_id(); |
| 511 | 515 | ||
| 512 | if (!cpu_isset(cpu, c1e_mask)) { | 516 | if (!cpumask_test_cpu(cpu, c1e_mask)) { |
| 513 | cpu_set(cpu, c1e_mask); | 517 | cpumask_set_cpu(cpu, c1e_mask); |
| 514 | /* | 518 | /* |
| 515 | * Force broadcast so ACPI can not interfere. Needs | 519 | * Force broadcast so ACPI can not interfere. Needs |
| 516 | * to run with interrupts enabled as it uses | 520 | * to run with interrupts enabled as it uses |
| @@ -562,6 +566,15 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) | |||
| 562 | pm_idle = default_idle; | 566 | pm_idle = default_idle; |
| 563 | } | 567 | } |
| 564 | 568 | ||
| 569 | void __init init_c1e_mask(void) | ||
| 570 | { | ||
| 571 | /* If we're using c1e_idle, we need to allocate c1e_mask. */ | ||
| 572 | if (pm_idle == c1e_idle) { | ||
| 573 | alloc_cpumask_var(&c1e_mask, GFP_KERNEL); | ||
| 574 | cpumask_clear(c1e_mask); | ||
| 575 | } | ||
| 576 | } | ||
| 577 | |||
| 565 | static int __init idle_setup(char *str) | 578 | static int __init idle_setup(char *str) |
| 566 | { | 579 | { |
| 567 | if (!str) | 580 | if (!str) |
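The NULL test added to c1e_remove_cpu() looks odd for a mask but is deliberate: CPU hotplug paths can reach it before init_c1e_mask() has allocated anything, and the two build flavors of cpumask_var_t behave differently in that window. For reference, the definition in <linux/cpumask.h> is, as best I recall:

#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;          /* NULL until allocated */
#else
typedef struct cpumask cpumask_var_t[1];        /* static storage, never NULL */
#endif

With OFFSTACK off, the array form decays to a non-NULL pointer and the guard compiles away; with it on, the guard is what prevents a pre-init dereference.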
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index b7cc21bc6ae0..fe9345c967de 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/audit.h> | 21 | #include <linux/audit.h> |
| 22 | #include <linux/seccomp.h> | 22 | #include <linux/seccomp.h> |
| 23 | #include <linux/signal.h> | 23 | #include <linux/signal.h> |
| 24 | #include <linux/ftrace.h> | ||
| 24 | 25 | ||
| 25 | #include <asm/uaccess.h> | 26 | #include <asm/uaccess.h> |
| 26 | #include <asm/pgtable.h> | 27 | #include <asm/pgtable.h> |
| @@ -1415,6 +1416,9 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs) | |||
| 1415 | tracehook_report_syscall_entry(regs)) | 1416 | tracehook_report_syscall_entry(regs)) |
| 1416 | ret = -1L; | 1417 | ret = -1L; |
| 1417 | 1418 | ||
| 1419 | if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) | ||
| 1420 | ftrace_syscall_enter(regs); | ||
| 1421 | |||
| 1418 | if (unlikely(current->audit_context)) { | 1422 | if (unlikely(current->audit_context)) { |
| 1419 | if (IS_IA32) | 1423 | if (IS_IA32) |
| 1420 | audit_syscall_entry(AUDIT_ARCH_I386, | 1424 | audit_syscall_entry(AUDIT_ARCH_I386, |
| @@ -1438,6 +1442,9 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs) | |||
| 1438 | if (unlikely(current->audit_context)) | 1442 | if (unlikely(current->audit_context)) |
| 1439 | audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); | 1443 | audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); |
| 1440 | 1444 | ||
| 1445 | if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) | ||
| 1446 | ftrace_syscall_exit(regs); | ||
| 1447 | |||
| 1441 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | 1448 | if (test_thread_flag(TIF_SYSCALL_TRACE)) |
| 1442 | tracehook_report_syscall_exit(regs, 0); | 1449 | tracehook_report_syscall_exit(regs, 0); |
| 1443 | 1450 | ||
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 400331b50a53..3a97a4cf1872 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
| @@ -153,7 +153,6 @@ static struct page * __init pcpur_get_page(unsigned int cpu, int pageno) | |||
| 153 | static ssize_t __init setup_pcpu_remap(size_t static_size) | 153 | static ssize_t __init setup_pcpu_remap(size_t static_size) |
| 154 | { | 154 | { |
| 155 | static struct vm_struct vm; | 155 | static struct vm_struct vm; |
| 156 | pg_data_t *last; | ||
| 157 | size_t ptrs_size, dyn_size; | 156 | size_t ptrs_size, dyn_size; |
| 158 | unsigned int cpu; | 157 | unsigned int cpu; |
| 159 | ssize_t ret; | 158 | ssize_t ret; |
| @@ -162,22 +161,9 @@ static ssize_t __init setup_pcpu_remap(size_t static_size) | |||
| 162 | * If large page isn't supported, there's no benefit in doing | 161 | * If large page isn't supported, there's no benefit in doing |
| 163 | * this. Also, on non-NUMA, embedding is better. | 162 | * this. Also, on non-NUMA, embedding is better. |
| 164 | */ | 163 | */ |
| 165 | if (!cpu_has_pse || pcpu_need_numa()) | 164 | if (!cpu_has_pse || !pcpu_need_numa()) |
| 166 | return -EINVAL; | 165 | return -EINVAL; |
| 167 | 166 | ||
| 168 | last = NULL; | ||
| 169 | for_each_possible_cpu(cpu) { | ||
| 170 | int node = early_cpu_to_node(cpu); | ||
| 171 | |||
| 172 | if (node_online(node) && NODE_DATA(node) && | ||
| 173 | last && last != NODE_DATA(node)) | ||
| 174 | goto proceed; | ||
| 175 | |||
| 176 | last = NODE_DATA(node); | ||
| 177 | } | ||
| 178 | return -EINVAL; | ||
| 179 | |||
| 180 | proceed: | ||
| 181 | /* | 167 | /* |
| 182 | * Currently supports only single page. Supporting multiple | 168 | * Currently supports only single page. Supporting multiple |
| 183 | * pages won't be too difficult if it ever becomes necessary. | 169 | * pages won't be too difficult if it ever becomes necessary. |
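
Two things happen in this setup_percpu.c hunk. First, the old guard bailed out of the remap allocator when pcpu_need_numa() reported that NUMA placement was needed, the opposite of the intent stated in the comment; the new guard inverts the test. Second, the open-coded scan for two possible CPUs on distinct online nodes is dropped, since it only re-derived the answer pcpu_need_numa() already gives. A standalone sketch of that predicate, using a hypothetical cpu-to-node table in place of early_cpu_to_node():

    #include <stdbool.h>
    #include <stdio.h>

    #define NCPUS 8
    static const int node_of_cpu[NCPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

    /* true iff at least two possible CPUs sit on different nodes */
    static bool need_numa(void)
    {
        for (int cpu = 1; cpu < NCPUS; cpu++)
            if (node_of_cpu[cpu] != node_of_cpu[0])
                return true;
        return false;
    }

    int main(void)
    {
        if (!need_numa()) {             /* the fixed sense of the guard */
            puts("non-NUMA: embedding is better, skip remap");
            return 1;
        }
        puts("NUMA: use the large-page remap allocator");
        return 0;
    }
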
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index dfcc74ab0ab6..14425166b8e3 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
| @@ -221,7 +221,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, | |||
| 221 | if (!onsigstack) { | 221 | if (!onsigstack) { |
| 222 | /* This is the X/Open sanctioned signal stack switching. */ | 222 | /* This is the X/Open sanctioned signal stack switching. */ |
| 223 | if (ka->sa.sa_flags & SA_ONSTACK) { | 223 | if (ka->sa.sa_flags & SA_ONSTACK) { |
| 224 | if (sas_ss_flags(sp) == 0) | 224 | if (current->sas_ss_size) |
| 225 | sp = current->sas_ss_sp + current->sas_ss_size; | 225 | sp = current->sas_ss_sp + current->sas_ss_size; |
| 226 | } else { | 226 | } else { |
| 227 | #ifdef CONFIG_X86_32 | 227 | #ifdef CONFIG_X86_32 |
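
The signal.c change reads as a simplification rather than a behaviour change: get_sigframe() only reaches this branch when the task is not already on its alternate stack (!onsigstack), and given that, sas_ss_flags(sp) == 0 reduces to "an alternate stack is configured at all", which is exactly current->sas_ss_size != 0. The new condition states the remaining question directly instead of re-deriving the on-stack test from a user-supplied sp. A user-space model of the resulting decision, with simplified types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct task { uintptr_t sas_ss_sp; uintptr_t sas_ss_size; };

    static bool on_sig_stack(const struct task *t, uintptr_t sp)
    {
        /* mirrors the kernel's stack-grows-down test */
        return sp > t->sas_ss_sp && sp - t->sas_ss_sp <= t->sas_ss_size;
    }

    static uintptr_t pick_sp(const struct task *t, uintptr_t sp, bool sa_onstack)
    {
        bool onsigstack = on_sig_stack(t, sp);

        /* alt stack requested, configured, and not already on it */
        if (!onsigstack && sa_onstack && t->sas_ss_size)
            sp = t->sas_ss_sp + t->sas_ss_size;     /* start at its top */
        return sp;
    }

    int main(void)
    {
        struct task t = { .sas_ss_sp = 0x10000, .sas_ss_size = 0x4000 };

        printf("%#lx\n", (unsigned long)pick_sp(&t, 0x7fff0000, true));
        return 0;
    }
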
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index ef7d10170c30..58d24ef917d8 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
| @@ -101,11 +101,11 @@ EXPORT_SYMBOL(smp_num_siblings); | |||
| 101 | DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; | 101 | DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; |
| 102 | 102 | ||
| 103 | /* representing HT siblings of each logical CPU */ | 103 | /* representing HT siblings of each logical CPU */ |
| 104 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); | 104 | DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); |
| 105 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); | 105 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); |
| 106 | 106 | ||
| 107 | /* representing HT and core siblings of each logical CPU */ | 107 | /* representing HT and core siblings of each logical CPU */ |
| 108 | DEFINE_PER_CPU(cpumask_t, cpu_core_map); | 108 | DEFINE_PER_CPU(cpumask_var_t, cpu_core_map); |
| 109 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); | 109 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); |
| 110 | 110 | ||
| 111 | /* Per CPU bogomips and other parameters */ | 111 | /* Per CPU bogomips and other parameters */ |
| @@ -115,11 +115,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info); | |||
| 115 | atomic_t init_deasserted; | 115 | atomic_t init_deasserted; |
| 116 | 116 | ||
| 117 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) | 117 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) |
| 118 | |||
| 119 | /* which logical CPUs are on which nodes */ | ||
| 120 | cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly = | ||
| 121 | { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE }; | ||
| 122 | EXPORT_SYMBOL(node_to_cpumask_map); | ||
| 123 | /* which node each logical CPU is on */ | 118 | /* which node each logical CPU is on */ |
| 124 | int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 }; | 119 | int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 }; |
| 125 | EXPORT_SYMBOL(cpu_to_node_map); | 120 | EXPORT_SYMBOL(cpu_to_node_map); |
| @@ -128,7 +123,7 @@ EXPORT_SYMBOL(cpu_to_node_map); | |||
| 128 | static void map_cpu_to_node(int cpu, int node) | 123 | static void map_cpu_to_node(int cpu, int node) |
| 129 | { | 124 | { |
| 130 | printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node); | 125 | printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node); |
| 131 | cpumask_set_cpu(cpu, &node_to_cpumask_map[node]); | 126 | cpumask_set_cpu(cpu, node_to_cpumask_map[node]); |
| 132 | cpu_to_node_map[cpu] = node; | 127 | cpu_to_node_map[cpu] = node; |
| 133 | } | 128 | } |
| 134 | 129 | ||
| @@ -139,7 +134,7 @@ static void unmap_cpu_to_node(int cpu) | |||
| 139 | 134 | ||
| 140 | printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu); | 135 | printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu); |
| 141 | for (node = 0; node < MAX_NUMNODES; node++) | 136 | for (node = 0; node < MAX_NUMNODES; node++) |
| 142 | cpumask_clear_cpu(cpu, &node_to_cpumask_map[node]); | 137 | cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); |
| 143 | cpu_to_node_map[cpu] = 0; | 138 | cpu_to_node_map[cpu] = 0; |
| 144 | } | 139 | } |
| 145 | #else /* !(CONFIG_NUMA && CONFIG_X86_32) */ | 140 | #else /* !(CONFIG_NUMA && CONFIG_X86_32) */ |
| @@ -301,7 +296,7 @@ notrace static void __cpuinit start_secondary(void *unused) | |||
| 301 | __flush_tlb_all(); | 296 | __flush_tlb_all(); |
| 302 | #endif | 297 | #endif |
| 303 | 298 | ||
| 304 | /* This must be done before setting cpu_online_map */ | 299 | /* This must be done before setting cpu_online_mask */ |
| 305 | set_cpu_sibling_map(raw_smp_processor_id()); | 300 | set_cpu_sibling_map(raw_smp_processor_id()); |
| 306 | wmb(); | 301 | wmb(); |
| 307 | 302 | ||
| @@ -334,6 +329,23 @@ notrace static void __cpuinit start_secondary(void *unused) | |||
| 334 | cpu_idle(); | 329 | cpu_idle(); |
| 335 | } | 330 | } |
| 336 | 331 | ||
| 332 | #ifdef CONFIG_CPUMASK_OFFSTACK | ||
| 333 | /* In this case, llc_shared_map is a pointer to a cpumask. */ | ||
| 334 | static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst, | ||
| 335 | const struct cpuinfo_x86 *src) | ||
| 336 | { | ||
| 337 | struct cpumask *llc = dst->llc_shared_map; | ||
| 338 | *dst = *src; | ||
| 339 | dst->llc_shared_map = llc; | ||
| 340 | } | ||
| 341 | #else | ||
| 342 | static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst, | ||
| 343 | const struct cpuinfo_x86 *src) | ||
| 344 | { | ||
| 345 | *dst = *src; | ||
| 346 | } | ||
| 347 | #endif /* CONFIG_CPUMASK_OFFSTACK */ | ||
| 348 | |||
| 337 | /* | 349 | /* |
| 338 | * The bootstrap kernel entry code has set these up. Save them for | 350 | * The bootstrap kernel entry code has set these up. Save them for |
| 339 | * a given CPU | 351 | * a given CPU |
| @@ -343,7 +355,7 @@ void __cpuinit smp_store_cpu_info(int id) | |||
| 343 | { | 355 | { |
| 344 | struct cpuinfo_x86 *c = &cpu_data(id); | 356 | struct cpuinfo_x86 *c = &cpu_data(id); |
| 345 | 357 | ||
| 346 | *c = boot_cpu_data; | 358 | copy_cpuinfo_x86(c, &boot_cpu_data); |
| 347 | c->cpu_index = id; | 359 | c->cpu_index = id; |
| 348 | if (id != 0) | 360 | if (id != 0) |
| 349 | identify_secondary_cpu(c); | 361 | identify_secondary_cpu(c); |
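
The copy_cpuinfo_x86() helper added here guards against an easy-to-miss consequence of the cpumask conversion: once llc_shared_map is a pointer (CONFIG_CPUMASK_OFFSTACK=y), the plain structure assignment formerly in smp_store_cpu_info() would overwrite the secondary CPU's mask pointer with the boot CPU's, aliasing one allocation and leaking the other. The helper saves the destination's pointer across the copy. The same idiom in self-contained form:

    #include <assert.h>
    #include <stdlib.h>

    struct info { int family; unsigned long *shared_map; };

    static void copy_info(struct info *dst, const struct info *src)
    {
        unsigned long *map = dst->shared_map;   /* keep dst's own allocation */
        *dst = *src;
        dst->shared_map = map;
    }

    int main(void)
    {
        struct info boot = { .family = 6, .shared_map = calloc(4, sizeof(unsigned long)) };
        struct info cpu1 = { .family = 0, .shared_map = calloc(4, sizeof(unsigned long)) };
        unsigned long *own = cpu1.shared_map;

        if (!boot.shared_map || !cpu1.shared_map)
            return 1;
        copy_info(&cpu1, &boot);
        assert(cpu1.family == 6);               /* data copied */
        assert(cpu1.shared_map == own);         /* but not the mask pointer */
        free(boot.shared_map);
        free(cpu1.shared_map);
        return 0;
    }
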
| @@ -367,15 +379,15 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
| 367 | cpumask_set_cpu(cpu, cpu_sibling_mask(i)); | 379 | cpumask_set_cpu(cpu, cpu_sibling_mask(i)); |
| 368 | cpumask_set_cpu(i, cpu_core_mask(cpu)); | 380 | cpumask_set_cpu(i, cpu_core_mask(cpu)); |
| 369 | cpumask_set_cpu(cpu, cpu_core_mask(i)); | 381 | cpumask_set_cpu(cpu, cpu_core_mask(i)); |
| 370 | cpumask_set_cpu(i, &c->llc_shared_map); | 382 | cpumask_set_cpu(i, c->llc_shared_map); |
| 371 | cpumask_set_cpu(cpu, &o->llc_shared_map); | 383 | cpumask_set_cpu(cpu, o->llc_shared_map); |
| 372 | } | 384 | } |
| 373 | } | 385 | } |
| 374 | } else { | 386 | } else { |
| 375 | cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); | 387 | cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); |
| 376 | } | 388 | } |
| 377 | 389 | ||
| 378 | cpumask_set_cpu(cpu, &c->llc_shared_map); | 390 | cpumask_set_cpu(cpu, c->llc_shared_map); |
| 379 | 391 | ||
| 380 | if (current_cpu_data.x86_max_cores == 1) { | 392 | if (current_cpu_data.x86_max_cores == 1) { |
| 381 | cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); | 393 | cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); |
| @@ -386,8 +398,8 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
| 386 | for_each_cpu(i, cpu_sibling_setup_mask) { | 398 | for_each_cpu(i, cpu_sibling_setup_mask) { |
| 387 | if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && | 399 | if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && |
| 388 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { | 400 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { |
| 389 | cpumask_set_cpu(i, &c->llc_shared_map); | 401 | cpumask_set_cpu(i, c->llc_shared_map); |
| 390 | cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map); | 402 | cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map); |
| 391 | } | 403 | } |
| 392 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { | 404 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { |
| 393 | cpumask_set_cpu(i, cpu_core_mask(cpu)); | 405 | cpumask_set_cpu(i, cpu_core_mask(cpu)); |
| @@ -425,12 +437,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu) | |||
| 425 | if (sched_mc_power_savings || sched_smt_power_savings) | 437 | if (sched_mc_power_savings || sched_smt_power_savings) |
| 426 | return cpu_core_mask(cpu); | 438 | return cpu_core_mask(cpu); |
| 427 | else | 439 | else |
| 428 | return &c->llc_shared_map; | 440 | return c->llc_shared_map; |
| 429 | } | ||
| 430 | |||
| 431 | cpumask_t cpu_coregroup_map(int cpu) | ||
| 432 | { | ||
| 433 | return *cpu_coregroup_mask(cpu); | ||
| 434 | } | 441 | } |
| 435 | 442 | ||
| 436 | static void impress_friends(void) | 443 | static void impress_friends(void) |
| @@ -897,9 +904,8 @@ int __cpuinit native_cpu_up(unsigned int cpu) | |||
| 897 | */ | 904 | */ |
| 898 | static __init void disable_smp(void) | 905 | static __init void disable_smp(void) |
| 899 | { | 906 | { |
| 900 | /* use the read/write pointers to the present and possible maps */ | 907 | init_cpu_present(cpumask_of(0)); |
| 901 | cpumask_copy(&cpu_present_map, cpumask_of(0)); | 908 | init_cpu_possible(cpumask_of(0)); |
| 902 | cpumask_copy(&cpu_possible_map, cpumask_of(0)); | ||
| 903 | smpboot_clear_io_apic_irqs(); | 909 | smpboot_clear_io_apic_irqs(); |
| 904 | 910 | ||
| 905 | if (smp_found_config) | 911 | if (smp_found_config) |
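
disable_smp() also stops writing through the old read/write map aliases and seeds the masks via the init_cpu_present()/init_cpu_possible() accessors instead, part of confining mutation of the possible/present masks to core code. A toy model of that accessor pattern; only the two init names come from the patch, everything else is invented:

    #include <stdio.h>

    static unsigned long present_bits, possible_bits;

    /* the rest of the code only ever sees read-only views */
    const unsigned long *cpu_present_mask  = &present_bits;
    const unsigned long *cpu_possible_mask = &possible_bits;

    static void init_cpu_present(unsigned long src)  { present_bits = src; }
    static void init_cpu_possible(unsigned long src) { possible_bits = src; }

    int main(void)
    {
        init_cpu_present(1UL << 0);          /* cpumask_of(0): boot CPU only */
        init_cpu_possible(1UL << 0);
        printf("present=%#lx possible=%#lx\n", *cpu_present_mask, *cpu_possible_mask);
        return 0;
    }
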
| @@ -1031,6 +1037,8 @@ static void __init smp_cpu_index_default(void) | |||
| 1031 | */ | 1037 | */ |
| 1032 | void __init native_smp_prepare_cpus(unsigned int max_cpus) | 1038 | void __init native_smp_prepare_cpus(unsigned int max_cpus) |
| 1033 | { | 1039 | { |
| 1040 | unsigned int i; | ||
| 1041 | |||
| 1034 | preempt_disable(); | 1042 | preempt_disable(); |
| 1035 | smp_cpu_index_default(); | 1043 | smp_cpu_index_default(); |
| 1036 | current_cpu_data = boot_cpu_data; | 1044 | current_cpu_data = boot_cpu_data; |
| @@ -1044,6 +1052,14 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
| 1044 | boot_cpu_logical_apicid = logical_smp_processor_id(); | 1052 | boot_cpu_logical_apicid = logical_smp_processor_id(); |
| 1045 | #endif | 1053 | #endif |
| 1046 | current_thread_info()->cpu = 0; /* needed? */ | 1054 | current_thread_info()->cpu = 0; /* needed? */ |
| 1055 | for_each_possible_cpu(i) { | ||
| 1056 | alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); | ||
| 1057 | alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); | ||
| 1058 | alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL); | ||
| 1059 | cpumask_clear(per_cpu(cpu_core_map, i)); | ||
| 1060 | cpumask_clear(per_cpu(cpu_sibling_map, i)); | ||
| 1061 | cpumask_clear(cpu_data(i).llc_shared_map); | ||
| 1062 | } | ||
| 1047 | set_cpu_sibling_map(0); | 1063 | set_cpu_sibling_map(0); |
| 1048 | 1064 | ||
| 1049 | enable_IR_x2apic(); | 1065 | enable_IR_x2apic(); |
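
Before set_cpu_sibling_map(0) can run, each possible CPU needs its sibling, core, and llc_shared masks allocated and cleared; when CONFIG_CPUMASK_OFFSTACK=n all six calls per CPU collapse to cheap inline operations. The alloc-then-clear pairs above are the usual shape; a sketch of folding them into one zeroing helper, along the lines of the zalloc_cpumask_var() that later kernels provide:

    #include <stdbool.h>
    #include <stdlib.h>

    #define NCPUS 4
    #define NBITS 128
    #define BPL   (8 * sizeof(unsigned long))

    static unsigned long *sibling[NCPUS], *core[NCPUS], *llc[NCPUS];

    /* alloc + clear in one step */
    static bool zalloc_mask(unsigned long **mask)
    {
        *mask = calloc((NBITS + BPL - 1) / BPL, sizeof(unsigned long));
        return *mask != NULL;
    }

    int main(void)
    {
        for (int cpu = 0; cpu < NCPUS; cpu++)
            if (!zalloc_mask(&sibling[cpu]) ||
                !zalloc_mask(&core[cpu]) ||
                !zalloc_mask(&llc[cpu]))
                return 1;               /* boot-time allocation failure */
        return 0;
    }
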
| @@ -1132,11 +1148,11 @@ early_param("possible_cpus", _setup_possible_cpus); | |||
| 1132 | 1148 | ||
| 1133 | 1149 | ||
| 1134 | /* | 1150 | /* |
| 1135 | * cpu_possible_map should be static, it cannot change as CPUs | 1151 | * cpu_possible_mask should be static, it cannot change as CPUs |
| 1136 | * are onlined, or offlined. The reason is per-cpu data-structures | 1152 | * are onlined, or offlined. The reason is per-cpu data-structures |
| 1137 | * are allocated by some modules at init time, and don't expect to | 1153 | * are allocated by some modules at init time, and don't expect to |
| 1138 | * do this dynamically on cpu arrival/departure. | 1154 | * do this dynamically on cpu arrival/departure. |
| 1139 | * cpu_present_map on the other hand can change dynamically. | 1155 | * cpu_present_mask on the other hand can change dynamically. |
| 1140 | * In case when cpu_hotplug is not compiled, then we resort to current | 1156 | * In case when cpu_hotplug is not compiled, then we resort to current |
| 1141 | * behaviour, which is cpu_possible == cpu_present. | 1157 | * behaviour, which is cpu_possible == cpu_present. |
| 1142 | * - Ashok Raj | 1158 | * - Ashok Raj |
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index 79c073247284..deb5ebb32c3b 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c | |||
| @@ -275,6 +275,8 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade, | |||
| 275 | return NULL; | 275 | return NULL; |
| 276 | } | 276 | } |
| 277 | 277 | ||
| 278 | static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask); | ||
| 279 | |||
| 278 | /** | 280 | /** |
| 279 | * uv_flush_tlb_others - globally purge translation cache of a virtual | 281 | * uv_flush_tlb_others - globally purge translation cache of a virtual |
| 280 | * address or all TLB's | 282 | * address or all TLB's |
| @@ -304,8 +306,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | |||
| 304 | struct mm_struct *mm, | 306 | struct mm_struct *mm, |
| 305 | unsigned long va, unsigned int cpu) | 307 | unsigned long va, unsigned int cpu) |
| 306 | { | 308 | { |
| 307 | static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask); | 309 | struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask); |
| 308 | struct cpumask *flush_mask = &__get_cpu_var(flush_tlb_mask); | ||
| 309 | int i; | 310 | int i; |
| 310 | int bit; | 311 | int bit; |
| 311 | int blade; | 312 | int blade; |
| @@ -755,6 +756,10 @@ static int __init uv_bau_init(void) | |||
| 755 | if (!is_uv_system()) | 756 | if (!is_uv_system()) |
| 756 | return 0; | 757 | return 0; |
| 757 | 758 | ||
| 759 | for_each_possible_cpu(cur_cpu) | ||
| 760 | alloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), | ||
| 761 | GFP_KERNEL, cpu_to_node(cur_cpu)); | ||
| 762 | |||
| 758 | uv_bau_retry_limit = 1; | 763 | uv_bau_retry_limit = 1; |
| 759 | uv_nshift = uv_hub_info->n_val; | 764 | uv_nshift = uv_hub_info->n_val; |
| 760 | uv_mmask = (1UL << uv_hub_info->n_val) - 1; | 765 | uv_mmask = (1UL << uv_hub_info->n_val) - 1; |
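
Two changes in tlb_uv.c: the flush mask leaves the static per-cpu data area and becomes a per-cpu cpumask_var_t, and uv_bau_init() allocates each CPU's copy with alloc_cpumask_var_node() keyed by cpu_to_node(), so the mask a CPU rewrites on every TLB shootdown lives in its own node's memory. A sketch of node-keyed allocation; node_alloc() here is a hypothetical stand-in (plain calloc), since portable user space has no placement allocator:

    #include <stdlib.h>

    #define NCPUS 8
    static const int node_of_cpu[NCPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };
    static unsigned long *flush_mask[NCPUS];

    /* hypothetical node-keyed allocator; the node is a placement hint only */
    static void *node_alloc(size_t size, int node)
    {
        (void)node;
        return calloc(1, size);
    }

    int main(void)
    {
        for (int cpu = 0; cpu < NCPUS; cpu++) {
            flush_mask[cpu] = node_alloc(64, node_of_cpu[cpu]);
            if (!flush_mask[cpu])
                return 1;
        }
        return 0;
    }
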
