Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/Kconfig            | 13
-rw-r--r--  arch/x86_64/ia32/ia32entry.S   |  2
-rw-r--r--  arch/x86_64/kernel/process.c   | 17
-rw-r--r--  arch/x86_64/kernel/setup.c     |  3
-rw-r--r--  arch/x86_64/kernel/smpboot.c   | 24
-rw-r--r--  arch/x86_64/kernel/time.c      | 41
-rw-r--r--  arch/x86_64/kernel/traps.c     | 18
-rw-r--r--  arch/x86_64/mm/init.c          |  2
-rw-r--r--  arch/x86_64/mm/numa.c          | 15
9 files changed, 68 insertions(+), 67 deletions(-)
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 45efe0ca88f8..4310b4a311a5 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -250,6 +250,15 @@ config SCHED_SMT
 	  cost of slightly increased overhead in some places. If unsure say
 	  N here.
 
+config SCHED_MC
+	bool "Multi-core scheduler support"
+	depends on SMP
+	default y
+	help
+	  Multi-core scheduler support improves the CPU scheduler's decision
+	  making when dealing with multi-core CPU chips at a cost of slightly
+	  increased overhead in some places. If unsure say N here.
+
 source "kernel/Kconfig.preempt"
 
 config NUMA
@@ -325,6 +334,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
 	def_bool y
 	depends on NUMA
 
+config OUT_OF_LINE_PFN_TO_PAGE
+	def_bool y
+	depends on DISCONTIGMEM
+
 config NR_CPUS
 	int "Maximum number of CPUs (2-256)"
 	range 2 255
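
With the two entries above in place, a multi-core x86_64 SMP build picks the new scheduler option up by default. A minimal sketch of the relevant fragment of a resulting .config (the surrounding options and their values are illustrative, not mandated by this patch):

CONFIG_SMP=y
CONFIG_SCHED_SMT=y
CONFIG_SCHED_MC=y
# DISCONTIGMEM only appears on NUMA configurations; when it is set,
# OUT_OF_LINE_PFN_TO_PAGE follows automatically from the def_bool above:
CONFIG_DISCONTIGMEM=y
CONFIG_OUT_OF_LINE_PFN_TO_PAGE=y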
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 7549a4389fbf..35b2faccdc6c 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -688,6 +688,8 @@ ia32_sys_call_table:
 	.quad sys_ni_syscall		/* pselect6 for now */
 	.quad sys_ni_syscall		/* ppoll for now */
 	.quad sys_unshare		/* 310 */
+	.quad compat_sys_set_robust_list
+	.quad compat_sys_get_robust_list
 ia32_syscall_end:
 	.rept IA32_NR_syscalls-(ia32_syscall_end-ia32_sys_call_table)/8
 		.quad ni_syscall
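
The two new slots wire the 32-bit robust-futex calls into the compat syscall table; the .rept that follows still pads every remaining slot up to IA32_NR_syscalls with the "not implemented" stub. A standalone C sketch of that table-plus-fallback idea (the function names and the table size here are made up for the example, they are not the kernel's):

/* Every slot past the last wired-up call points at a "not implemented"
 * stub, which is the effect the .rept padding has in the .S file. */
#include <stdio.h>

#define NR_SYSCALLS 8

static long sys_ni_syscall(void)      { return -38; }	/* -ENOSYS */
static long sys_unshare(void)         { return 0; }
static long sys_set_robust_list(void) { return 0; }
static long sys_get_robust_list(void) { return 0; }

static long (*const call_table[NR_SYSCALLS])(void) = {
	[0] = sys_unshare,
	[1] = sys_set_robust_list,
	[2] = sys_get_robust_list,
	[3 ... NR_SYSCALLS - 1] = sys_ni_syscall,	/* GCC range initializer */
};

static long do_syscall(unsigned nr)
{
	if (nr >= NR_SYSCALLS)		/* out-of-range numbers also fall back */
		return sys_ni_syscall();
	return call_table[nr]();
}

int main(void)
{
	printf("nr 1 -> %ld, nr 7 -> %ld\n", do_syscall(1), do_syscall(7));
	return 0;
}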
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 0370720515f1..70dd8e5c6889 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -66,24 +66,17 @@ EXPORT_SYMBOL(boot_option_idle_override);
 void (*pm_idle)(void);
 static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
-static struct notifier_block *idle_notifier;
-static DEFINE_SPINLOCK(idle_notifier_lock);
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
 
 void idle_notifier_register(struct notifier_block *n)
 {
-	unsigned long flags;
-	spin_lock_irqsave(&idle_notifier_lock, flags);
-	notifier_chain_register(&idle_notifier, n);
-	spin_unlock_irqrestore(&idle_notifier_lock, flags);
+	atomic_notifier_chain_register(&idle_notifier, n);
 }
 EXPORT_SYMBOL_GPL(idle_notifier_register);
 
 void idle_notifier_unregister(struct notifier_block *n)
 {
-	unsigned long flags;
-	spin_lock_irqsave(&idle_notifier_lock, flags);
-	notifier_chain_unregister(&idle_notifier, n);
-	spin_unlock_irqrestore(&idle_notifier_lock, flags);
+	atomic_notifier_chain_unregister(&idle_notifier, n);
 }
 EXPORT_SYMBOL(idle_notifier_unregister);
 
@@ -93,13 +86,13 @@ static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
 void enter_idle(void)
 {
 	__get_cpu_var(idle_state) = CPU_IDLE;
-	notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
 }
 
 static void __exit_idle(void)
 {
 	__get_cpu_var(idle_state) = CPU_NOT_IDLE;
-	notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
 }
 
 /* Called from interrupts to signify idle end */
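
The conversion above swaps the hand-rolled, spinlock-guarded notifier list for the generic atomic notifier chain API, which is safe to call from the idle path. A minimal sketch of a client of the converted interface follows; the callback signature is the standard notifier one, while the printk text and the assumption that IDLE_START/IDLE_END and idle_notifier_register() are visible to the caller are illustrative:

/* Sketch of an idle-notifier client; runs in atomic context, must not sleep. */
#include <linux/kernel.h>
#include <linux/notifier.h>

static int my_idle_notify(struct notifier_block *nb,
			  unsigned long action, void *data)
{
	if (action == IDLE_START)
		printk(KERN_DEBUG "cpu entering idle\n");
	else if (action == IDLE_END)
		printk(KERN_DEBUG "cpu leaving idle\n");
	return NOTIFY_OK;
}

static struct notifier_block my_idle_nb = {
	.notifier_call = my_idle_notify,
};

/* somewhere in init code:
 *	idle_notifier_register(&my_idle_nb);
 */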
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index a57eec8311a7..d1f3e9272c05 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -962,7 +962,6 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 	cpuid(1, &eax, &ebx, &ecx, &edx);
 
-	c->apicid = phys_pkg_id(0);
 
 	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
 		return;
@@ -1171,6 +1170,8 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 		c->x86_capability[2] = cpuid_edx(0x80860001);
 	}
 
+	c->apicid = phys_pkg_id(0);
+
 	/*
 	 * Vendor-specific initialization. In this section we
 	 * canonicalize the feature flags, meaning if there are
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 66e98659d077..ea48fa638070 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -68,6 +68,9 @@ u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
 /* core ID of each logical CPU */
 u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
 
+/* Last level cache ID of each logical CPU */
+u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
+
 /* Bitmask of currently online CPUs */
 cpumask_t cpu_online_map __read_mostly;
 
@@ -445,6 +448,18 @@ void __cpuinit smp_callin(void)
 	cpu_set(cpuid, cpu_callin_map);
 }
 
+/* maps the cpu to the sched domain representing multi-core */
+cpumask_t cpu_coregroup_map(int cpu)
+{
+	struct cpuinfo_x86 *c = cpu_data + cpu;
+	/*
+	 * For perf, we return last level cache shared map.
+	 * TBD: when power saving sched policy is added, we will return
+	 *      cpu_core_map when power saving policy is enabled
+	 */
+	return c->llc_shared_map;
+}
+
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
 
@@ -463,12 +478,16 @@ static inline void set_cpu_sibling_map(int cpu)
 				cpu_set(cpu, cpu_sibling_map[i]);
 				cpu_set(i, cpu_core_map[cpu]);
 				cpu_set(cpu, cpu_core_map[i]);
+				cpu_set(i, c[cpu].llc_shared_map);
+				cpu_set(cpu, c[i].llc_shared_map);
 			}
 		}
 	} else {
 		cpu_set(cpu, cpu_sibling_map[cpu]);
 	}
 
+	cpu_set(cpu, c[cpu].llc_shared_map);
+
 	if (current_cpu_data.x86_max_cores == 1) {
 		cpu_core_map[cpu] = cpu_sibling_map[cpu];
 		c[cpu].booted_cores = 1;
@@ -476,6 +495,11 @@ static inline void set_cpu_sibling_map(int cpu)
 	}
 
 	for_each_cpu_mask(i, cpu_sibling_setup_map) {
+		if (cpu_llc_id[cpu] != BAD_APICID &&
+		    cpu_llc_id[cpu] == cpu_llc_id[i]) {
+			cpu_set(i, c[cpu].llc_shared_map);
+			cpu_set(cpu, c[i].llc_shared_map);
+		}
 		if (phys_proc_id[cpu] == phys_proc_id[i]) {
 			cpu_set(i, cpu_core_map[cpu]);
 			cpu_set(cpu, cpu_core_map[i]);
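
Taken together, the smpboot.c changes record each CPU's last-level-cache ID, fold CPUs with a matching ID into one another's llc_shared_map, and expose that mask through cpu_coregroup_map() for the new CONFIG_SCHED_MC domain. A standalone mock of the grouping logic (the llc_id values and the 4-CPU layout are made up for the example):

/* CPUs that report the same last-level-cache ID end up in each other's
 * shared-cache mask, which is what cpu_coregroup_map() hands back. */
#include <stdio.h>

#define NCPUS 4
#define BAD_APICID 0xff

static unsigned char llc_id[NCPUS] = { 0, 0, 1, 1 };	/* two dual-core packages */
static unsigned long llc_shared_map[NCPUS];		/* bit i set => shares LLC with cpu i */

int main(void)
{
	for (int cpu = 0; cpu < NCPUS; cpu++) {
		/* a cpu always shares a cache with itself (mirrors the
		 * unconditional cpu_set in set_cpu_sibling_map above) */
		llc_shared_map[cpu] |= 1UL << cpu;
		for (int i = 0; i < NCPUS; i++) {
			if (llc_id[cpu] != BAD_APICID && llc_id[cpu] == llc_id[i]) {
				llc_shared_map[cpu] |= 1UL << i;
				llc_shared_map[i]   |= 1UL << cpu;
			}
		}
	}
	for (int cpu = 0; cpu < NCPUS; cpu++)
		printf("cpu%d coregroup mask: 0x%lx\n", cpu, llc_shared_map[cpu]);
	return 0;
}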
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 7f58fa682491..473b514b66e4 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -504,42 +504,25 @@ unsigned long long sched_clock(void)
 
 static unsigned long get_cmos_time(void)
 {
-	unsigned int timeout = 1000000, year, mon, day, hour, min, sec;
-	unsigned char uip = 0, this = 0;
+	unsigned int year, mon, day, hour, min, sec;
 	unsigned long flags;
 	unsigned extyear = 0;
 
-/*
- * The Linux interpretation of the CMOS clock register contents: When the
- * Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the
- * second which has precisely just started. Waiting for this can take up to 1
- * second, we timeout approximately after 2.4 seconds on a machine with
- * standard 8.3 MHz ISA bus.
- */
-
 	spin_lock_irqsave(&rtc_lock, flags);
 
-	while (timeout && (!uip || this)) {
-		uip |= this;
-		this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
-		timeout--;
-	}
-
-	/*
-	 * Here we are safe to assume the registers won't change for a whole
-	 * second, so we just go ahead and read them.
-	 */
-	sec = CMOS_READ(RTC_SECONDS);
-	min = CMOS_READ(RTC_MINUTES);
-	hour = CMOS_READ(RTC_HOURS);
-	day = CMOS_READ(RTC_DAY_OF_MONTH);
-	mon = CMOS_READ(RTC_MONTH);
-	year = CMOS_READ(RTC_YEAR);
-
+	do {
+		sec = CMOS_READ(RTC_SECONDS);
+		min = CMOS_READ(RTC_MINUTES);
+		hour = CMOS_READ(RTC_HOURS);
+		day = CMOS_READ(RTC_DAY_OF_MONTH);
+		mon = CMOS_READ(RTC_MONTH);
+		year = CMOS_READ(RTC_YEAR);
 #ifdef CONFIG_ACPI
-	if (acpi_fadt.revision >= FADT2_REVISION_ID && acpi_fadt.century)
-		extyear = CMOS_READ(acpi_fadt.century);
+		if (acpi_fadt.revision >= FADT2_REVISION_ID &&
+		    acpi_fadt.century)
+			extyear = CMOS_READ(acpi_fadt.century);
 #endif
+	} while (sec != CMOS_READ(RTC_SECONDS));
 
 	spin_unlock_irqrestore(&rtc_lock, flags);
 
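
The rewritten get_cmos_time() drops the UIP polling and instead re-reads the RTC until the seconds register is unchanged across one pass, which guarantees a self-consistent snapshot even if the clock ticks mid-read. A standalone illustration of that read-until-stable pattern against an ordinary wall clock (read_reg() is a stand-in for CMOS_READ(); the register names mirror the RTC ones):

/* Re-read all fields until the seconds value is stable; otherwise the
 * snapshot could straddle a minute or hour rollover. */
#include <stdio.h>
#include <time.h>

enum rtc_reg { RTC_SECONDS, RTC_MINUTES, RTC_HOURS };

static unsigned read_reg(enum rtc_reg r)
{
	time_t now = time(NULL);
	struct tm tm;

	localtime_r(&now, &tm);
	switch (r) {
	case RTC_SECONDS: return tm.tm_sec;
	case RTC_MINUTES: return tm.tm_min;
	default:          return tm.tm_hour;
	}
}

int main(void)
{
	unsigned sec, min, hour;

	do {
		sec  = read_reg(RTC_SECONDS);
		min  = read_reg(RTC_MINUTES);
		hour = read_reg(RTC_HOURS);
	} while (sec != read_reg(RTC_SECONDS));

	printf("%02u:%02u:%02u\n", hour, min, sec);
	return 0;
}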
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 7b148309c529..edaa9fe654dc 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -69,20 +69,20 @@ asmlinkage void alignment_check(void);
 asmlinkage void machine_check(void);
 asmlinkage void spurious_interrupt_bug(void);
 
-struct notifier_block *die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
+ATOMIC_NOTIFIER_HEAD(die_chain);
 
 int register_die_notifier(struct notifier_block *nb)
 {
-	int err = 0;
-	unsigned long flags;
-
 	vmalloc_sync_all();
-	spin_lock_irqsave(&die_notifier_lock, flags);
-	err = notifier_chain_register(&die_chain, nb);
-	spin_unlock_irqrestore(&die_notifier_lock, flags);
-	return err;
+	return atomic_notifier_chain_register(&die_chain, nb);
+}
+EXPORT_SYMBOL(register_die_notifier);
+
+int unregister_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&die_chain, nb);
 }
+EXPORT_SYMBOL(unregister_die_notifier);
 
 static inline void conditional_sti(struct pt_regs *regs)
 {
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index b04415625442..e5f7f1c34462 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -72,7 +72,7 @@ void show_mem(void)
 	show_free_areas();
 	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
 
-	for_each_pgdat(pgdat) {
+	for_each_online_pgdat(pgdat) {
 		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
 			page = pfn_to_page(pgdat->node_start_pfn + i);
 			total++;
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 63c72641b737..4be82d6e2b48 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -377,21 +377,6 @@ EXPORT_SYMBOL(node_data);
  * Should do that.
  */
 
-/* Requires pfn_valid(pfn) to be true */
-struct page *pfn_to_page(unsigned long pfn)
-{
-	int nid = phys_to_nid(((unsigned long)(pfn)) << PAGE_SHIFT);
-	return (pfn - node_start_pfn(nid)) + NODE_DATA(nid)->node_mem_map;
-}
-EXPORT_SYMBOL(pfn_to_page);
-
-unsigned long page_to_pfn(struct page *page)
-{
-	return (long)(((page) - page_zone(page)->zone_mem_map) +
-		      page_zone(page)->zone_start_pfn);
-}
-EXPORT_SYMBOL(page_to_pfn);
-
 int pfn_valid(unsigned long pfn)
 {
 	unsigned nid;
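
Removing the out-of-line pfn_to_page()/page_to_pfn() here pairs with the new OUT_OF_LINE_PFN_TO_PAGE Kconfig entry above, the idea presumably being that a generic out-of-line copy takes over when DISCONTIGMEM is enabled. A standalone sketch of the arithmetic the removed helper implemented (the two-node layout is made up for the example):

/* With DISCONTIGMEM each node carries its own mem_map, so a pfn is resolved
 * to a node first and then used as an offset into that node's page array. */
#include <stdio.h>

struct page { unsigned long flags; };

static struct page node0_map[1024], node1_map[1024];

static struct {
	unsigned long node_start_pfn;
	struct page *node_mem_map;
} node_data[2] = {
	{ .node_start_pfn = 0,    .node_mem_map = node0_map },
	{ .node_start_pfn = 1024, .node_mem_map = node1_map },
};

static int pfn_to_nid(unsigned long pfn)
{
	return pfn < 1024 ? 0 : 1;	/* stand-in for phys_to_nid() */
}

static struct page *pfn_to_page(unsigned long pfn)
{
	int nid = pfn_to_nid(pfn);

	return (pfn - node_data[nid].node_start_pfn) + node_data[nid].node_mem_map;
}

int main(void)
{
	printf("pfn 1500 -> node %d, index %ld in that node's mem_map\n",
	       pfn_to_nid(1500),
	       (long)(pfn_to_page(1500) - node_data[1].node_mem_map));
	return 0;
}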