Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c   |  2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c    |  2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c    | 10
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-ich.c  |  2
-rw-r--r--  arch/x86/kernel/cpu/proc.c                   |  3
-rw-r--r--  arch/x86/kernel/io_apic_32.c                 |  4
-rw-r--r--  arch/x86/kernel/kprobes_32.c                 | 10
-rw-r--r--  arch/x86/kernel/kprobes_64.c                 |  8
-rw-r--r--  arch/x86/kernel/mce_amd_64.c                 |  6
-rw-r--r--  arch/x86/kernel/process_64.c                 |  2
-rw-r--r--  arch/x86/kernel/ptrace_32.c                  |  5
-rw-r--r--  arch/x86/kernel/ptrace_64.c                  |  5
-rw-r--r--  arch/x86/kernel/setup_64.c                   |  3
-rw-r--r--  arch/x86/kernel/smpboot_32.c                 | 68
-rw-r--r--  arch/x86/kernel/smpboot_64.c                 | 48
-rw-r--r--  arch/x86/mm/fault_32.c                       | 45
-rw-r--r--  arch/x86/mm/fault_64.c                       | 44
-rw-r--r--  arch/x86/mm/init_32.c                        |  5
-rw-r--r--  arch/x86/mm/init_64.c                        | 51
-rw-r--r--  arch/x86/oprofile/op_model_p4.c              |  2
-rw-r--r--  arch/x86/xen/smp.c                           | 18
21 files changed, 190 insertions(+), 153 deletions(-)
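Most of this diff mechanically converts the NR_CPUS-sized cpu_sibling_map and cpu_core_map
arrays into per-CPU variables, so each CPU's sibling/core mask lives in that CPU's own
per-cpu area. A minimal sketch of the pattern (illustrative, not part of the patch):

    /* before: one statically sized array, indexed by cpu number */
    cpumask_t cpu_core_map[NR_CPUS];
    mask = cpu_core_map[cpu];

    /* after: a per-CPU variable, reached through the per_cpu() accessor */
    DEFINE_PER_CPU(cpumask_t, cpu_core_map);
    mask = per_cpu(cpu_core_map, cpu);

Exports switch from EXPORT_SYMBOL() to EXPORT_PER_CPU_SYMBOL() to match, and per-cpu
storage is zeroed when the per-cpu areas are set up, which the Xen hunks at the end rely on.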
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index ffd01e5dcb52..2ca43ba32bc0 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -595,7 +595,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
         dmi_check_system(sw_any_bug_dmi_table);
         if (bios_with_sw_any_bug && cpus_weight(policy->cpus) == 1) {
                 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-                policy->cpus = cpu_core_map[cpu];
+                policy->cpus = per_cpu(cpu_core_map, cpu);
         }
 #endif
 
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 8eb414b906d2..793eae854f4f 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -200,7 +200,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
         unsigned int i;
 
 #ifdef CONFIG_SMP
-        policy->cpus = cpu_sibling_map[policy->cpu];
+        policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
 #endif
 
         /* Errata workaround */
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index b273b69cfddf..c06ac680c9ca 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -57,7 +57,7 @@ static struct powernow_k8_data *powernow_data[NR_CPUS];
 static int cpu_family = CPU_OPTERON;
 
 #ifndef CONFIG_SMP
-static cpumask_t cpu_core_map[1];
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
 #endif
 
 /* Return a frequency in MHz, given an input fid */
@@ -667,7 +667,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst,
 
         dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
         data->powernow_table = powernow_table;
-        if (first_cpu(cpu_core_map[data->cpu]) == data->cpu)
+        if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
                 print_basics(data);
 
         for (j = 0; j < data->numps; j++)
@@ -821,7 +821,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 
         /* fill in data */
         data->numps = data->acpi_data.state_count;
-        if (first_cpu(cpu_core_map[data->cpu]) == data->cpu)
+        if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
                 print_basics(data);
         powernow_k8_acpi_pst_values(data, 0);
 
@@ -1214,7 +1214,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
         if (cpu_family == CPU_HW_PSTATE)
                 pol->cpus = cpumask_of_cpu(pol->cpu);
         else
-                pol->cpus = cpu_core_map[pol->cpu];
+                pol->cpus = per_cpu(cpu_core_map, pol->cpu);
         data->available_cores = &(pol->cpus);
 
         /* Take a crude guess here.
@@ -1281,7 +1281,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
         cpumask_t oldmask = current->cpus_allowed;
         unsigned int khz = 0;
 
-        data = powernow_data[first_cpu(cpu_core_map[cpu])];
+        data = powernow_data[first_cpu(per_cpu(cpu_core_map, cpu))];
 
         if (!data)
                 return -EINVAL;
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 36685e8f7be1..14d68aa301ee 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -322,7 +322,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 
         /* only run on CPU to be set, or on its sibling */
 #ifdef CONFIG_SMP
-        policy->cpus = cpu_sibling_map[policy->cpu];
+        policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
 #endif
 
         cpus_allowed = current->cpus_allowed;
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 1e31b6caffb1..879a0f789b1e 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -122,7 +122,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 #ifdef CONFIG_X86_HT
         if (c->x86_max_cores * smp_num_siblings > 1) {
                 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-                seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n]));
+                seq_printf(m, "siblings\t: %d\n",
+                           cpus_weight(per_cpu(cpu_core_map, n)));
                 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
                 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
         }
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index e2f4a1c68547..4ee1e5ee9b57 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -378,7 +378,7 @@ static struct irq_cpu_info {
 
 #define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask)
 
-#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
+#define CPU_TO_PACKAGEINDEX(i) (first_cpu(per_cpu(cpu_sibling_map, i)))
 
 static cpumask_t balance_irq_affinity[NR_IRQS] = {
         [0 ... NR_IRQS-1] = CPU_MASK_ALL
@@ -598,7 +598,7 @@ tryanotherirq:
          * (A+B)/2 vs B
          */
         load = CPU_IRQ(min_loaded) >> 1;
-        for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) {
+        for_each_cpu_mask(j, per_cpu(cpu_sibling_map, min_loaded)) {
                 if (load > CPU_IRQ(j)) {
                         /* This won't change cpu_sibling_map[min_loaded] */
                         load = CPU_IRQ(j);
diff --git a/arch/x86/kernel/kprobes_32.c b/arch/x86/kernel/kprobes_32.c
index e7d0d3c2ef64..90f778c04b3f 100644
--- a/arch/x86/kernel/kprobes_32.c
+++ b/arch/x86/kernel/kprobes_32.c
@@ -41,6 +41,13 @@ void jprobe_return_end(void);
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
+struct kretprobe_blackpoint kretprobe_blacklist[] = {
+        {"__switch_to", }, /* This function switches only current task, but
+                              doesn't switch kernel stack.*/
+        {NULL, NULL}    /* Terminator */
+};
+const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
+
 /* insert a jmp code */
 static __always_inline void set_jmp_op(void *from, void *to)
 {
@@ -584,7 +591,7 @@ out:
         return 1;
 }
 
-static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
         struct kprobe *cur = kprobe_running();
         struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -666,7 +673,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                 ret = NOTIFY_STOP;
                 break;
         case DIE_GPF:
-        case DIE_PAGE_FAULT:
                 /* kprobe_running() needs smp_processor_id() */
                 preempt_disable();
                 if (kprobe_running() &&
diff --git a/arch/x86/kernel/kprobes_64.c b/arch/x86/kernel/kprobes_64.c
index 62e28e52d784..681b801c5e26 100644
--- a/arch/x86/kernel/kprobes_64.c
+++ b/arch/x86/kernel/kprobes_64.c
@@ -48,6 +48,13 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p);
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
+struct kretprobe_blackpoint kretprobe_blacklist[] = {
+        {"__switch_to", }, /* This function switches only current task, but
+                              doesn't switch kernel stack.*/
+        {NULL, NULL}    /* Terminator */
+};
+const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
+
 /*
  * returns non-zero if opcode modifies the interrupt flag.
  */
@@ -657,7 +664,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                 ret = NOTIFY_STOP;
                 break;
         case DIE_GPF:
-        case DIE_PAGE_FAULT:
                 /* kprobe_running() needs smp_processor_id() */
                 preempt_disable();
                 if (kprobe_running() &&
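Both kprobes files now define a kretprobe_blacklist naming functions that must never get
return probes: __switch_to qualifies because it switches the current task without switching
kernel stacks, so a return-probe trampoline planted there would fire in the wrong context.
The generic kprobes core consumes the NULL-terminated table; a hedged sketch of how the
names might be resolved once at init time (the function name here is illustrative, the real
lookup lives in kernel/kprobes.c):

    /* resolve each blacklisted symbol name to an address once, at init */
    static void resolve_kretprobe_blacklist(void)
    {
            int i;

            for (i = 0; kretprobe_blacklist[i].name != NULL; i++)
                    kretprobe_blacklist[i].addr = (void *)
                            kallsyms_lookup_name(kretprobe_blacklist[i].name);
    }

register_kretprobe() can then refuse any probe whose target address matches an entry.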
diff --git a/arch/x86/kernel/mce_amd_64.c b/arch/x86/kernel/mce_amd_64.c
index 2f8a7f18b0fe..805b62b1e0df 100644
--- a/arch/x86/kernel/mce_amd_64.c
+++ b/arch/x86/kernel/mce_amd_64.c
@@ -472,7 +472,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 #ifdef CONFIG_SMP
         if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) {   /* symlink */
-                i = first_cpu(cpu_core_map[cpu]);
+                i = first_cpu(per_cpu(cpu_core_map, cpu));
 
                 /* first core not up yet */
                 if (cpu_data[i].cpu_core_id)
@@ -492,7 +492,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                 if (err)
                         goto out;
 
-                b->cpus = cpu_core_map[cpu];
+                b->cpus = per_cpu(cpu_core_map, cpu);
                 per_cpu(threshold_banks, cpu)[bank] = b;
                 goto out;
         }
@@ -509,7 +509,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 #ifndef CONFIG_SMP
         b->cpus = CPU_MASK_ALL;
 #else
-        b->cpus = cpu_core_map[cpu];
+        b->cpus = per_cpu(cpu_core_map, cpu);
 #endif
         err = kobject_register(&b->kobj);
         if (err)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 7352d4b377e6..6309b275cb9c 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -581,7 +581,7 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
  *
  * Kprobes not supported here. Set the probe on schedule instead.
  */
-__kprobes struct task_struct *
+struct task_struct *
 __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
         struct thread_struct *prev = &prev_p->thread,
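Dropping the __kprobes marker takes __switch_to out of the protected kprobes section, so
ordinary breakpoint probes can now land on it; the case that made it off-limits was return
probes, which the new kretprobe_blacklist above handles instead. For reference, the marker
is just a section attribute (definition as commonly found in this era's headers; verify
against include/linux/kprobes.h):

    /* functions tagged this way land in .kprobes.text, a section the
     * kprobes core refuses to instrument */
    #define __kprobes __attribute__((__section__(".kprobes.text")))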
diff --git a/arch/x86/kernel/ptrace_32.c b/arch/x86/kernel/ptrace_32.c
index 0cecd7513c97..8622b9cd3e38 100644
--- a/arch/x86/kernel/ptrace_32.c
+++ b/arch/x86/kernel/ptrace_32.c
@@ -524,11 +524,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                 ret = 0;
                 break;
 
-        case PTRACE_DETACH:
-                /* detach a process that was attached. */
-                ret = ptrace_detach(child, data);
-                break;
-
         case PTRACE_GETREGS: {  /* Get all gp regs from the child. */
                 if (!access_ok(VERIFY_WRITE, datap, FRAME_SIZE*sizeof(long))) {
                         ret = -EIO;
diff --git a/arch/x86/kernel/ptrace_64.c b/arch/x86/kernel/ptrace_64.c
index c0cac42df3b6..86321ee6da93 100644
--- a/arch/x86/kernel/ptrace_64.c
+++ b/arch/x86/kernel/ptrace_64.c
@@ -500,11 +500,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                 ret = 0;
                 break;
 
-        case PTRACE_DETACH:
-                /* detach a process that was attached. */
-                ret = ptrace_detach(child, data);
-                break;
-
         case PTRACE_GETREGS: {  /* Get all gp regs from the child. */
                 if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
                                sizeof(struct user_regs_struct))) {
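Both the 32-bit and 64-bit arch_ptrace() lose their PTRACE_DETACH cases: requests an
architecture does not handle fall through to the generic ptrace code, which now covers
detach for every arch. A hedged, abridged sketch of the generic side (the real function
is ptrace_request() in kernel/ptrace.c):

    int ptrace_request(struct task_struct *child, long request,
                       long addr, long data)
    {
            int ret = -EIO;

            switch (request) {
            case PTRACE_DETACH:      /* detach a process that was attached */
                    ret = ptrace_detach(child, data);
                    break;
            /* ... other generic requests ... */
            }

            return ret;
    }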
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index b7da90e79c78..85b5b6310acc 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -1070,7 +1070,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
         if (smp_num_siblings * c->x86_max_cores > 1) {
                 int cpu = c - cpu_data;
                 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-                seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
+                seq_printf(m, "siblings\t: %d\n",
+                           cpus_weight(per_cpu(cpu_core_map, cpu)));
                 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
                 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
         }
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index e4f61d1c6248..31fc08bd15ef 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -70,12 +70,12 @@ EXPORT_SYMBOL(smp_num_siblings);
 int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
 
 /* representing HT siblings of each logical CPU */
-cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
-cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_core_map);
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /* bitmap of online cpus */
 cpumask_t cpu_online_map __read_mostly;
@@ -300,7 +300,7 @@ cpumask_t cpu_coregroup_map(int cpu)
          * And for power savings, we return cpu_core_map
          */
         if (sched_mc_power_savings || sched_smt_power_savings)
-                return cpu_core_map[cpu];
+                return per_cpu(cpu_core_map, cpu);
         else
                 return c->llc_shared_map;
 }
@@ -319,22 +319,22 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                 for_each_cpu_mask(i, cpu_sibling_setup_map) {
                         if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
                             c[cpu].cpu_core_id == c[i].cpu_core_id) {
-                                cpu_set(i, cpu_sibling_map[cpu]);
-                                cpu_set(cpu, cpu_sibling_map[i]);
-                                cpu_set(i, cpu_core_map[cpu]);
-                                cpu_set(cpu, cpu_core_map[i]);
+                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+                                cpu_set(cpu, per_cpu(cpu_sibling_map, i));
+                                cpu_set(i, per_cpu(cpu_core_map, cpu));
+                                cpu_set(cpu, per_cpu(cpu_core_map, i));
                                 cpu_set(i, c[cpu].llc_shared_map);
                                 cpu_set(cpu, c[i].llc_shared_map);
                         }
                 }
         } else {
-                cpu_set(cpu, cpu_sibling_map[cpu]);
+                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
         }
 
         cpu_set(cpu, c[cpu].llc_shared_map);
 
         if (current_cpu_data.x86_max_cores == 1) {
-                cpu_core_map[cpu] = cpu_sibling_map[cpu];
+                per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
                 c[cpu].booted_cores = 1;
                 return;
         }
@@ -346,17 +346,17 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                         cpu_set(cpu, c[i].llc_shared_map);
                 }
                 if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
-                        cpu_set(i, cpu_core_map[cpu]);
-                        cpu_set(cpu, cpu_core_map[i]);
+                        cpu_set(i, per_cpu(cpu_core_map, cpu));
+                        cpu_set(cpu, per_cpu(cpu_core_map, i));
                         /*
                          *  Does this new cpu bringup a new core?
                          */
-                        if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
+                        if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
                                 /*
                                  * for each core in package, increment
                                  * the booted_cores for this new cpu
                                  */
-                                if (first_cpu(cpu_sibling_map[i]) == i)
+                                if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
                                         c[cpu].booted_cores++;
                                 /*
                                  * increment the core count for all
@@ -983,8 +983,8 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                 printk(KERN_NOTICE "Local APIC not detected."
                                    " Using dummy APIC emulation.\n");
                 map_cpu_to_logical_apicid();
-                cpu_set(0, cpu_sibling_map[0]);
-                cpu_set(0, cpu_core_map[0]);
+                cpu_set(0, per_cpu(cpu_sibling_map, 0));
+                cpu_set(0, per_cpu(cpu_core_map, 0));
                 return;
         }
 
@@ -1008,8 +1008,8 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                 printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
                 smpboot_clear_io_apic_irqs();
                 phys_cpu_present_map = physid_mask_of_physid(0);
-                cpu_set(0, cpu_sibling_map[0]);
-                cpu_set(0, cpu_core_map[0]);
+                cpu_set(0, per_cpu(cpu_sibling_map, 0));
+                cpu_set(0, per_cpu(cpu_core_map, 0));
                 return;
         }
 
@@ -1023,8 +1023,8 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                 printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
                 smpboot_clear_io_apic_irqs();
                 phys_cpu_present_map = physid_mask_of_physid(0);
-                cpu_set(0, cpu_sibling_map[0]);
-                cpu_set(0, cpu_core_map[0]);
+                cpu_set(0, per_cpu(cpu_sibling_map, 0));
+                cpu_set(0, per_cpu(cpu_core_map, 0));
                 return;
         }
 
@@ -1102,16 +1102,16 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
         Dprintk("Boot done.\n");
 
         /*
-         * construct cpu_sibling_map[], so that we can tell sibling CPUs
+         * construct cpu_sibling_map, so that we can tell sibling CPUs
          * efficiently.
          */
         for (cpu = 0; cpu < NR_CPUS; cpu++) {
-                cpus_clear(cpu_sibling_map[cpu]);
-                cpus_clear(cpu_core_map[cpu]);
+                cpus_clear(per_cpu(cpu_sibling_map, cpu));
+                cpus_clear(per_cpu(cpu_core_map, cpu));
         }
 
-        cpu_set(0, cpu_sibling_map[0]);
-        cpu_set(0, cpu_core_map[0]);
+        cpu_set(0, per_cpu(cpu_sibling_map, 0));
+        cpu_set(0, per_cpu(cpu_core_map, 0));
 
         smpboot_setup_io_apic();
 
@@ -1148,19 +1148,19 @@ void remove_siblinginfo(int cpu)
         int sibling;
         struct cpuinfo_x86 *c = cpu_data;
 
-        for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
-                cpu_clear(cpu, cpu_core_map[sibling]);
+        for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+                cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
                 /*
                  * last thread sibling in this cpu core going down
                  */
-                if (cpus_weight(cpu_sibling_map[cpu]) == 1)
+                if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
                         c[sibling].booted_cores--;
         }
 
-        for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
-                cpu_clear(cpu, cpu_sibling_map[sibling]);
-        cpus_clear(cpu_sibling_map[cpu]);
-        cpus_clear(cpu_core_map[cpu]);
+        for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+        cpus_clear(per_cpu(cpu_sibling_map, cpu));
+        cpus_clear(per_cpu(cpu_core_map, cpu));
         c[cpu].phys_proc_id = 0;
         c[cpu].cpu_core_id = 0;
         cpu_clear(cpu, cpu_sibling_setup_map);
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 720a7d1f8862..0faa0a0af272 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -91,12 +91,12 @@ EXPORT_SYMBOL(cpu_data);
 int smp_threads_ready;
 
 /* representing HT siblings of each logical CPU */
-cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
-cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_core_map);
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /*
  * Trampoline 80x86 program as an array.
@@ -243,7 +243,7 @@ cpumask_t cpu_coregroup_map(int cpu)
          * And for power savings, we return cpu_core_map
          */
         if (sched_mc_power_savings || sched_smt_power_savings)
-                return cpu_core_map[cpu];
+                return per_cpu(cpu_core_map, cpu);
         else
                 return c->llc_shared_map;
 }
@@ -262,22 +262,22 @@ static inline void set_cpu_sibling_map(int cpu)
                 for_each_cpu_mask(i, cpu_sibling_setup_map) {
                         if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
                             c[cpu].cpu_core_id == c[i].cpu_core_id) {
-                                cpu_set(i, cpu_sibling_map[cpu]);
-                                cpu_set(cpu, cpu_sibling_map[i]);
-                                cpu_set(i, cpu_core_map[cpu]);
-                                cpu_set(cpu, cpu_core_map[i]);
+                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+                                cpu_set(cpu, per_cpu(cpu_sibling_map, i));
+                                cpu_set(i, per_cpu(cpu_core_map, cpu));
+                                cpu_set(cpu, per_cpu(cpu_core_map, i));
                                 cpu_set(i, c[cpu].llc_shared_map);
                                 cpu_set(cpu, c[i].llc_shared_map);
                         }
                 }
         } else {
-                cpu_set(cpu, cpu_sibling_map[cpu]);
+                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
         }
 
         cpu_set(cpu, c[cpu].llc_shared_map);
 
         if (current_cpu_data.x86_max_cores == 1) {
-                cpu_core_map[cpu] = cpu_sibling_map[cpu];
+                per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
                 c[cpu].booted_cores = 1;
                 return;
         }
@@ -289,17 +289,17 @@ static inline void set_cpu_sibling_map(int cpu)
                         cpu_set(cpu, c[i].llc_shared_map);
                 }
                 if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
-                        cpu_set(i, cpu_core_map[cpu]);
-                        cpu_set(cpu, cpu_core_map[i]);
+                        cpu_set(i, per_cpu(cpu_core_map, cpu));
+                        cpu_set(cpu, per_cpu(cpu_core_map, i));
                         /*
                          *  Does this new cpu bringup a new core?
                          */
-                        if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
+                        if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
                                 /*
                                  * for each core in package, increment
                                  * the booted_cores for this new cpu
                                  */
-                                if (first_cpu(cpu_sibling_map[i]) == i)
+                                if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
                                         c[cpu].booted_cores++;
                                 /*
                                  * increment the core count for all
@@ -735,8 +735,8 @@ static __init void disable_smp(void)
                 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
         else
                 phys_cpu_present_map = physid_mask_of_physid(0);
-        cpu_set(0, cpu_sibling_map[0]);
-        cpu_set(0, cpu_core_map[0]);
+        cpu_set(0, per_cpu(cpu_sibling_map, 0));
+        cpu_set(0, per_cpu(cpu_core_map, 0));
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -971,19 +971,19 @@ static void remove_siblinginfo(int cpu)
         int sibling;
         struct cpuinfo_x86 *c = cpu_data;
 
-        for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
-                cpu_clear(cpu, cpu_core_map[sibling]);
+        for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+                cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
                 /*
                  * last thread sibling in this cpu core going down
                  */
-                if (cpus_weight(cpu_sibling_map[cpu]) == 1)
+                if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
                         c[sibling].booted_cores--;
         }
 
-        for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
-                cpu_clear(cpu, cpu_sibling_map[sibling]);
-        cpus_clear(cpu_sibling_map[cpu]);
-        cpus_clear(cpu_core_map[cpu]);
+        for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+        cpus_clear(per_cpu(cpu_sibling_map, cpu));
+        cpus_clear(per_cpu(cpu_core_map, cpu));
         c[cpu].phys_proc_id = 0;
         c[cpu].cpu_core_id = 0;
         cpu_clear(cpu, cpu_sibling_setup_map);
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
index fcb38e7f3543..c686ae20fd6b 100644
--- a/arch/x86/mm/fault_32.c
+++ b/arch/x86/mm/fault_32.c
@@ -25,6 +25,7 @@
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
 #include <linux/kdebug.h>
+#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/desc.h>
@@ -32,33 +33,27 @@
 
 extern void die(const char *,struct pt_regs *,long);
 
-static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-
-int register_page_fault_notifier(struct notifier_block *nb)
+#ifdef CONFIG_KPROBES
+static inline int notify_page_fault(struct pt_regs *regs)
 {
-        vmalloc_sync_all();
-        return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-}
-EXPORT_SYMBOL_GPL(register_page_fault_notifier);
+        int ret = 0;
+
+        /* kprobe_running() needs smp_processor_id() */
+        if (!user_mode_vm(regs)) {
+                preempt_disable();
+                if (kprobe_running() && kprobe_fault_handler(regs, 14))
+                        ret = 1;
+                preempt_enable();
+        }
 
-int unregister_page_fault_notifier(struct notifier_block *nb)
-{
-        return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+        return ret;
 }
-EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
-
-static inline int notify_page_fault(struct pt_regs *regs, long err)
+#else
+static inline int notify_page_fault(struct pt_regs *regs)
 {
-        struct die_args args = {
-                .regs = regs,
-                .str = "page fault",
-                .err = err,
-                .trapnr = 14,
-                .signr = SIGSEGV
-        };
-        return atomic_notifier_call_chain(&notify_page_fault_chain,
-                                          DIE_PAGE_FAULT, &args);
+        return 0;
 }
+#endif
 
 /*
  * Return EIP plus the CS segment base. The segment limit is also
@@ -331,7 +326,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
         if (unlikely(address >= TASK_SIZE)) {
                 if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
                         return;
-                if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+                if (notify_page_fault(regs))
                         return;
                 /*
                  * Don't take the mm semaphore here. If we fixup a prefetch
@@ -340,7 +335,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
                 goto bad_area_nosemaphore;
         }
 
-        if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+        if (notify_page_fault(regs))
                 return;
 
         /* It's safe to allow irq's after cr2 has been saved and the vmalloc
@@ -598,7 +593,7 @@ out_of_memory:
         }
         printk("VM: killing process %s\n", tsk->comm);
         if (error_code & 4)
-                do_exit(SIGKILL);
+                do_group_exit(SIGKILL);
         goto no_context;
 
 do_sigbus:
diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault_64.c
index 54816adb8e93..5e0e54906c48 100644
--- a/arch/x86/mm/fault_64.c
+++ b/arch/x86/mm/fault_64.c
@@ -25,6 +25,7 @@
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
 #include <linux/kdebug.h>
+#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/pgalloc.h>
@@ -40,34 +41,27 @@
 #define PF_RSVD (1<<3)
 #define PF_INSTR (1<<4)
 
-static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-
-/* Hook to register for page fault notifications */
-int register_page_fault_notifier(struct notifier_block *nb)
+#ifdef CONFIG_KPROBES
+static inline int notify_page_fault(struct pt_regs *regs)
 {
-        vmalloc_sync_all();
-        return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-}
-EXPORT_SYMBOL_GPL(register_page_fault_notifier);
+        int ret = 0;
+
+        /* kprobe_running() needs smp_processor_id() */
+        if (!user_mode(regs)) {
+                preempt_disable();
+                if (kprobe_running() && kprobe_fault_handler(regs, 14))
+                        ret = 1;
+                preempt_enable();
+        }
 
-int unregister_page_fault_notifier(struct notifier_block *nb)
-{
-        return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+        return ret;
 }
-EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
-
-static inline int notify_page_fault(struct pt_regs *regs, long err)
+#else
+static inline int notify_page_fault(struct pt_regs *regs)
 {
-        struct die_args args = {
-                .regs = regs,
-                .str = "page fault",
-                .err = err,
-                .trapnr = 14,
-                .signr = SIGSEGV
-        };
-        return atomic_notifier_call_chain(&notify_page_fault_chain,
-                                          DIE_PAGE_FAULT, &args);
+        return 0;
 }
+#endif
 
 /* Sometimes the CPU reports invalid exceptions on prefetch.
    Check that here and ignore.
@@ -345,7 +339,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
                         if (vmalloc_fault(address) >= 0)
                                 return;
                 }
-                if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+                if (notify_page_fault(regs))
                         return;
                 /*
                  * Don't take the mm semaphore here. If we fixup a prefetch
@@ -354,7 +348,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
                 goto bad_area_nosemaphore;
         }
 
-        if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+        if (notify_page_fault(regs))
                 return;
 
         if (likely(regs->eflags & X86_EFLAGS_IF))
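With both fault handlers converted, a page fault no longer walks an atomic notifier chain
on every occurrence: the kprobes hook is a direct inlined check that compiles down to
"return 0" on !CONFIG_KPROBES kernels. This is also why kprobe_fault_handler() loses its
static in kprobes_32.c earlier in this diff. From a probe author's point of view the entry
point is unchanged; a hedged, illustrative example of a kprobe whose fault_handler this
path ends up invoking (the target symbol and names are arbitrary examples):

    static int my_fault_handler(struct kprobe *p, struct pt_regs *regs,
                                int trapnr)
    {
            /* return 1 if the fault was handled, 0 to let it proceed */
            return 0;
    }

    static struct kprobe my_probe = {
            .symbol_name   = "do_fork",          /* arbitrary example target */
            .fault_handler = my_fault_handler,
    };
    /* registered with register_kprobe(&my_probe) */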
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 730a5b177b1f..dda4e83649a0 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -735,11 +735,6 @@ int arch_add_memory(int nid, u64 start, u64 size)
         return __add_pages(zone, start_pfn, nr_pages);
 }
 
-int remove_memory(u64 start, u64 size)
-{
-        return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(remove_memory);
 #endif
 
 struct kmem_cache *pmd_cache;
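The identical stub is removed from init_64.c below as well; the per-arch copies go away
because a common stub with the same -EINVAL semantics is provided by generic
memory-hotplug code at this point in the tree (presumably mm/memory_hotplug.c; a sketch
of what such a stub looks like, under that assumption):

    int remove_memory(u64 start, u64 size)
    {
            return -EINVAL;      /* hot-remove not implemented */
    }
    EXPORT_SYMBOL_GPL(remove_memory);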
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 458893b376f8..1e3862e41065 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -474,12 +474,6 @@ error:
 }
 EXPORT_SYMBOL_GPL(arch_add_memory);
 
-int remove_memory(u64 start, u64 size)
-{
-        return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(remove_memory);
-
 #if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
 int memory_add_physaddr_to_nid(u64 start)
 {
@@ -748,3 +742,48 @@ const char *arch_vma_name(struct vm_area_struct *vma)
                 return "[vsyscall]";
         return NULL;
 }
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+/*
+ * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
+ */
+int __meminit vmemmap_populate(struct page *start_page,
+                                        unsigned long size, int node)
+{
+        unsigned long addr = (unsigned long)start_page;
+        unsigned long end = (unsigned long)(start_page + size);
+        unsigned long next;
+        pgd_t *pgd;
+        pud_t *pud;
+        pmd_t *pmd;
+
+        for (; addr < end; addr = next) {
+                next = pmd_addr_end(addr, end);
+
+                pgd = vmemmap_pgd_populate(addr, node);
+                if (!pgd)
+                        return -ENOMEM;
+                pud = vmemmap_pud_populate(pgd, addr, node);
+                if (!pud)
+                        return -ENOMEM;
+
+                pmd = pmd_offset(pud, addr);
+                if (pmd_none(*pmd)) {
+                        pte_t entry;
+                        void *p = vmemmap_alloc_block(PMD_SIZE, node);
+                        if (!p)
+                                return -ENOMEM;
+
+                        entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
+                        mk_pte_huge(entry);
+                        set_pmd(pmd, __pmd(pte_val(entry)));
+
+                        printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
+                                addr, addr + PMD_SIZE - 1, p, node);
+                } else
+                        vmemmap_verify((pte_t *)pmd, node, addr, next);
+        }
+
+        return 0;
+}
+#endif
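The payoff of backing the vmemmap with 2MB PMD pages is that struct page lookups stop
going through per-section tables: with a virtually contiguous mem_map, pfn/page conversion
reduces to pointer arithmetic. Illustrative forms of the conversion macros under
CONFIG_SPARSEMEM_VMEMMAP (cf. the generic memory-model header; shown here as a sketch):

    /* vmemmap is the base of the virtually mapped struct page array */
    #define __pfn_to_page(pfn)   (vmemmap + (pfn))
    #define __page_to_pfn(page)  ((page) - vmemmap)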
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 47925927b12f..56b4757a1f47 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -379,7 +379,7 @@ static unsigned int get_stagger(void)
 {
 #ifdef CONFIG_SMP
         int cpu = smp_processor_id();
-        return (cpu != first_cpu(cpu_sibling_map[cpu]));
+        return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu)));
 #endif
         return 0;
 }
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 557b8e24706a..4fa33c27ccb6 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -147,8 +147,13 @@ void __init xen_smp_prepare_boot_cpu(void)
         make_lowmem_page_readwrite(&per_cpu__gdt_page);
 
         for (cpu = 0; cpu < NR_CPUS; cpu++) {
-                cpus_clear(cpu_sibling_map[cpu]);
-                cpus_clear(cpu_core_map[cpu]);
+                cpus_clear(per_cpu(cpu_sibling_map, cpu));
+                /*
+                 * cpu_core_map lives in a per cpu area that is cleared
+                 * when the per cpu array is allocated.
+                 *
+                 * cpus_clear(per_cpu(cpu_core_map, cpu));
+                 */
         }
 
         xen_setup_vcpu_info_placement();
153 158
154 xen_setup_vcpu_info_placement(); 159 xen_setup_vcpu_info_placement();
@@ -159,8 +164,13 @@ void __init xen_smp_prepare_cpus(unsigned int max_cpus)
         unsigned cpu;
 
         for (cpu = 0; cpu < NR_CPUS; cpu++) {
-                cpus_clear(cpu_sibling_map[cpu]);
-                cpus_clear(cpu_core_map[cpu]);
+                cpus_clear(per_cpu(cpu_sibling_map, cpu));
+                /*
+                 * cpu_core_map will be zeroed when the per
+                 * cpu area is allocated.
+                 *
+                 * cpus_clear(per_cpu(cpu_core_map, cpu));
+                 */
         }
 
         smp_store_cpu_info(0);