Diffstat (limited to 'arch/ia64')

 arch/ia64/Kconfig                  | 11
 arch/ia64/configs/tiger_defconfig  |  2
 arch/ia64/hp/sim/hpsim_irq.c       |  6
 arch/ia64/kernel/iosapic.c         | 24
 arch/ia64/kernel/irq.c             | 24
 arch/ia64/kernel/irq_ia64.c        |  4
 arch/ia64/kernel/irq_lsapic.c      | 10
 arch/ia64/kernel/mca.c             |  2
 arch/ia64/kernel/palinfo.c         |  6
 arch/ia64/kernel/perfmon.c         |  4
 arch/ia64/kernel/salinfo.c         |  6
 arch/ia64/kernel/smpboot.c         |  8
 arch/ia64/kernel/topology.c        | 32
 arch/ia64/mm/discontig.c           | 57
 arch/ia64/mm/init.c                |  5
 arch/ia64/pci/pci.c                |  2
 arch/ia64/sn/kernel/irq.c          |  6
 arch/ia64/sn/kernel/setup.c        | 12
 arch/ia64/sn/pci/tioca_provider.c  |  2

 19 files changed, 123 insertions, 100 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 18318749884b..b487e227a1f7 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -271,6 +271,9 @@ config HOTPLUG_CPU
           can be controlled through /sys/devices/system/cpu/cpu#.
           Say N if you want to disable CPU hotplug.
 
+config ARCH_ENABLE_MEMORY_HOTPLUG
+        def_bool y
+
 config SCHED_SMT
         bool "SMT scheduler support"
         depends on SMP
@@ -374,6 +377,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
         def_bool y
         depends on NEED_MULTIPLE_NODES
 
+config HAVE_ARCH_NODEDATA_EXTENSION
+        def_bool y
+        depends on NUMA
+
 config IA32_SUPPORT
         bool "Support for Linux/x86 binaries"
         help
@@ -485,6 +492,10 @@ config GENERIC_PENDING_IRQ
         depends on GENERIC_HARDIRQS && SMP
         default y
 
+config IRQ_PER_CPU
+        bool
+        default y
+
 source "arch/ia64/hp/sim/Kconfig"
 
 menu "Instrumentation Support"
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index 766bf4955432..9d1cffb57cde 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -114,7 +114,7 @@ CONFIG_IA64_CYCLONE=y
 CONFIG_IOSAPIC=y
 CONFIG_FORCE_MAX_ZONEORDER=17
 CONFIG_SMP=y
-CONFIG_NR_CPUS=4
+CONFIG_NR_CPUS=16
 CONFIG_HOTPLUG_CPU=y
 CONFIG_PERMIT_BSP_REMOVE=y
 CONFIG_FORCE_CPEI_RETARGET=y
diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c
index c0d25a2a3e9c..8145547bb52d 100644
--- a/arch/ia64/hp/sim/hpsim_irq.c
+++ b/arch/ia64/hp/sim/hpsim_irq.c
@@ -44,8 +44,8 @@ hpsim_irq_init (void)
         int i;
 
         for (i = 0; i < NR_IRQS; ++i) {
-                idesc = irq_descp(i);
-                if (idesc->handler == &no_irq_type)
-                        idesc->handler = &irq_type_hp_sim;
+                idesc = irq_desc + i;
+                if (idesc->chip == &no_irq_type)
+                        idesc->chip = &irq_type_hp_sim;
         }
 }
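
[Note] The hunk above shows the pattern repeated throughout this patch: the ia64-private irq_descp(i) helper is replaced by direct indexing of the generic irq_desc[] array, and the descriptor member formerly named handler is now chip, following the generic IRQ layer's struct irq_chip naming. A minimal sketch of the new idiom follows; it is illustrative only and not part of the patch (my_irq_chip is a made-up placeholder for a platform-defined hw_interrupt_type):

    #include <linux/irq.h>

    extern struct hw_interrupt_type my_irq_chip;    /* placeholder, platform-defined */

    static void example_irq_init(void)
    {
            int i;

            for (i = 0; i < NR_IRQS; ++i) {
                    irq_desc_t *idesc = irq_desc + i;       /* was: irq_descp(i) */

                    if (idesc->chip == &no_irq_type)        /* field was ->handler */
                            idesc->chip = &my_irq_chip;
            }
    }
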
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index d58c1c5c903a..efc7df4b0fd2 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -456,7 +456,7 @@ iosapic_startup_edge_irq (unsigned int irq)
 static void
 iosapic_ack_edge_irq (unsigned int irq)
 {
-        irq_desc_t *idesc = irq_descp(irq);
+        irq_desc_t *idesc = irq_desc + irq;
 
         move_native_irq(irq);
         /*
@@ -659,14 +659,14 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
         else
                 irq_type = &irq_type_iosapic_level;
 
-        idesc = irq_descp(vector);
-        if (idesc->handler != irq_type) {
-                if (idesc->handler != &no_irq_type)
+        idesc = irq_desc + vector;
+        if (idesc->chip != irq_type) {
+                if (idesc->chip != &no_irq_type)
                         printk(KERN_WARNING
                                "%s: changing vector %d from %s to %s\n",
                                __FUNCTION__, vector,
-                               idesc->handler->typename, irq_type->typename);
-                idesc->handler = irq_type;
+                               idesc->chip->typename, irq_type->typename);
+                idesc->chip = irq_type;
         }
         return 0;
 }
@@ -793,14 +793,14 @@ again:
                 return -ENOSPC;
         }
 
-        spin_lock_irqsave(&irq_descp(vector)->lock, flags);
+        spin_lock_irqsave(&irq_desc[vector].lock, flags);
         spin_lock(&iosapic_lock);
         {
                 if (gsi_to_vector(gsi) > 0) {
                         if (list_empty(&iosapic_intr_info[vector].rtes))
                                 free_irq_vector(vector);
                         spin_unlock(&iosapic_lock);
-                        spin_unlock_irqrestore(&irq_descp(vector)->lock,
+                        spin_unlock_irqrestore(&irq_desc[vector].lock,
                                                flags);
                         goto again;
                 }
@@ -810,7 +810,7 @@ again:
                                     polarity, trigger);
                 if (err < 0) {
                         spin_unlock(&iosapic_lock);
-                        spin_unlock_irqrestore(&irq_descp(vector)->lock,
+                        spin_unlock_irqrestore(&irq_desc[vector].lock,
                                                flags);
                         return err;
                 }
@@ -825,7 +825,7 @@ again:
                         set_rte(gsi, vector, dest, mask);
         }
         spin_unlock(&iosapic_lock);
-        spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
+        spin_unlock_irqrestore(&irq_desc[vector].lock, flags);
 
         printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
                gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
@@ -860,7 +860,7 @@ iosapic_unregister_intr (unsigned int gsi)
         }
         vector = irq_to_vector(irq);
 
-        idesc = irq_descp(irq);
+        idesc = irq_desc + irq;
         spin_lock_irqsave(&idesc->lock, flags);
         spin_lock(&iosapic_lock);
         {
@@ -903,7 +903,7 @@ iosapic_unregister_intr (unsigned int gsi)
         BUG_ON(iosapic_intr_info[vector].count);
 
         /* Clear the interrupt controller descriptor */
-        idesc->handler = &no_irq_type;
+        idesc->chip = &no_irq_type;
 
         /* Clear the interrupt information */
         memset(&iosapic_intr_info[vector], 0,
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 9c72ea3f6432..7852382de2fa 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v)
                         seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
                 }
 #endif
-                seq_printf(p, " %14s", irq_desc[i].handler->typename);
+                seq_printf(p, " %14s", irq_desc[i].chip->typename);
                 seq_printf(p, " %s", action->name);
 
                 for (action=action->next; action; action = action->next)
@@ -100,7 +100,7 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
         cpu_set(cpu_logical_id(hwid), mask);
 
         if (irq < NR_IRQS) {
-                irq_affinity[irq] = mask;
+                irq_desc[irq].affinity = mask;
                 irq_redir[irq] = (char) (redir & 0xff);
         }
 }
@@ -120,7 +120,7 @@ static void migrate_irqs(void)
         int irq, new_cpu;
 
         for (irq=0; irq < NR_IRQS; irq++) {
-                desc = irq_descp(irq);
+                desc = irq_desc + irq;
 
                 /*
                  * No handling for now.
@@ -131,7 +131,7 @@ static void migrate_irqs(void)
                 if (desc->status == IRQ_PER_CPU)
                         continue;
 
-                cpus_and(mask, irq_affinity[irq], cpu_online_map);
+                cpus_and(mask, irq_desc[irq].affinity, cpu_online_map);
                 if (any_online_cpu(mask) == NR_CPUS) {
                         /*
                          * Save it for phase 2 processing
@@ -144,15 +144,15 @@ static void migrate_irqs(void)
                 /*
                  * Al three are essential, currently WARN_ON.. maybe panic?
                  */
-                if (desc->handler && desc->handler->disable &&
-                    desc->handler->enable && desc->handler->set_affinity) {
-                        desc->handler->disable(irq);
-                        desc->handler->set_affinity(irq, mask);
-                        desc->handler->enable(irq);
+                if (desc->chip && desc->chip->disable &&
+                    desc->chip->enable && desc->chip->set_affinity) {
+                        desc->chip->disable(irq);
+                        desc->chip->set_affinity(irq, mask);
+                        desc->chip->enable(irq);
                 } else {
-                        WARN_ON((!(desc->handler) || !(desc->handler->disable) ||
-                                !(desc->handler->enable) ||
-                                !(desc->handler->set_affinity)));
+                        WARN_ON((!(desc->chip) || !(desc->chip->disable) ||
+                                !(desc->chip->enable) ||
+                                !(desc->chip->set_affinity)));
                 }
         }
 }
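
[Note] Two related cleanups meet in irq.c: the standalone irq_affinity[] array goes away because the affinity mask now lives in irq_desc[irq].affinity, and the chip operations are reached through desc->chip. Below is an illustrative condensation of the retarget sequence migrate_irqs() performs; locking and the phase-2 fallback are omitted, and retarget_one_irq() is a hypothetical helper, not a kernel function:

    static void retarget_one_irq(unsigned int irq)
    {
            irq_desc_t *desc = irq_desc + irq;
            cpumask_t mask;

            /* affinity is now a field of the descriptor, not irq_affinity[] */
            cpus_and(mask, irq_desc[irq].affinity, cpu_online_map);

            if (desc->chip && desc->chip->disable &&
                desc->chip->enable && desc->chip->set_affinity) {
                    desc->chip->disable(irq);
                    desc->chip->set_affinity(irq, mask);
                    desc->chip->enable(irq);
            }
    }
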
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index ef9a2b49307a..f5035304594e 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -249,9 +249,9 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action)
 
         for (irq = 0; irq < NR_IRQS; ++irq)
                 if (irq_to_vector(irq) == vec) {
-                        desc = irq_descp(irq);
+                        desc = irq_desc + irq;
                         desc->status |= IRQ_PER_CPU;
-                        desc->handler = &irq_type_ia64_lsapic;
+                        desc->chip = &irq_type_ia64_lsapic;
                         if (action)
                                 setup_irq(irq, action);
                 }
diff --git a/arch/ia64/kernel/irq_lsapic.c b/arch/ia64/kernel/irq_lsapic.c
index ea14e6a04409..1ab58b09f3d7 100644
--- a/arch/ia64/kernel/irq_lsapic.c
+++ b/arch/ia64/kernel/irq_lsapic.c
@@ -26,6 +26,13 @@ lsapic_noop (unsigned int irq)
         /* nuthing to do... */
 }
 
+static int lsapic_retrigger(unsigned int irq)
+{
+        ia64_resend_irq(irq);
+
+        return 1;
+}
+
 struct hw_interrupt_type irq_type_ia64_lsapic = {
         .typename = "LSAPIC",
         .startup = lsapic_noop_startup,
@@ -33,5 +40,6 @@ struct hw_interrupt_type irq_type_ia64_lsapic = {
         .enable = lsapic_noop,
         .disable = lsapic_noop,
         .ack = lsapic_noop,
-        .end = lsapic_noop
+        .end = lsapic_noop,
+        .retrigger = lsapic_retrigger,
 };
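
[Note] The new .retrigger hook lets the generic IRQ code replay a pending LSAPIC interrupt by resending its vector in hardware; ia64_resend_irq() is the same arch-level resend that replaces hw_resend_irq() in perfmon.c further down. The sketch below only illustrates the intent of the hook; the real caller is the genirq resend path, not this function:

    /* Illustrative only: replay a lost interrupt through the chip if it can. */
    static void resend_irq_example(unsigned int irq)
    {
            irq_desc_t *desc = irq_desc + irq;

            if (desc->chip && desc->chip->retrigger && desc->chip->retrigger(irq))
                    return;         /* hardware will re-deliver the vector */

            /* otherwise the core falls back to a software replay of the IRQ */
    }
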
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6a0880639bc9..d7dc5e63de63 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1788,7 +1788,7 @@ ia64_mca_late_init(void)
                         cpe_poll_enabled = 0;
                 for (irq = 0; irq < NR_IRQS; ++irq)
                         if (irq_to_vector(irq) == cpe_vector) {
-                                desc = irq_descp(irq);
+                                desc = irq_desc + irq;
                                 desc->status |= IRQ_PER_CPU;
                                 setup_irq(irq, &mca_cpe_irqaction);
                                 ia64_cpe_irq = irq;
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 859fb37ff49b..8a1208419138 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -959,7 +959,7 @@ remove_palinfo_proc_entries(unsigned int hcpu)
         }
 }
 
-static int palinfo_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
                                 unsigned long action,
                                 void *hcpu)
 {
@@ -978,7 +978,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
         return NOTIFY_OK;
 }
 
-static struct notifier_block palinfo_cpu_notifier =
+static struct notifier_block __cpuinitdata palinfo_cpu_notifier =
 {
         .notifier_call = palinfo_cpu_callback,
         .priority = 0,
@@ -998,7 +998,7 @@ palinfo_init(void)
         }
 
         /* Register for future delivery via notify registration */
-        register_cpu_notifier(&palinfo_cpu_notifier);
+        register_hotcpu_notifier(&palinfo_cpu_notifier);
 
         return 0;
 }
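
[Note] palinfo (and salinfo below) switch from register_cpu_notifier() to register_hotcpu_notifier(), which compiles to a no-op when CPU hotplug is disabled, so the surrounding #ifdef CONFIG_HOTPLUG_CPU guards can go away; the __cpuinit/__cpuinitdata annotations let the callback and notifier block be discarded in builds that do not need hotplug code. A minimal, hypothetical example of the same pattern:

    #include <linux/cpu.h>
    #include <linux/notifier.h>

    static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
                                              unsigned long action, void *hcpu)
    {
            unsigned int cpu = (unsigned long)hcpu;

            switch (action) {
            case CPU_ONLINE:
                    printk(KERN_DEBUG "cpu %u came online\n", cpu);
                    break;
            case CPU_DEAD:
                    printk(KERN_DEBUG "cpu %u went away\n", cpu);
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block __cpuinitdata example_cpu_notifier = {
            .notifier_call = example_cpu_callback,
    };

    static int __init example_init(void)
    {
            register_hotcpu_notifier(&example_cpu_notifier); /* no #ifdef needed */
            return 0;
    }
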
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 6d7bc8ff7b3a..a0055d3d695c 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -6165,7 +6165,7 @@ pfm_load_regs (struct task_struct *task)
                 /*
                  * will replay the PMU interrupt
                  */
-                if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR);
+                if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
 
                 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
         }
@@ -6305,7 +6305,7 @@ pfm_load_regs (struct task_struct *task)
                 /*
                  * will replay the PMU interrupt
                  */
-                if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR);
+                if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
 
                 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
         }
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 663a186ad194..9065f0f01ba3 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -572,7 +572,7 @@ static struct file_operations salinfo_data_fops = {
 };
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int
+static int __devinit
 salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
         unsigned int i, cpu = (unsigned long)hcpu;
@@ -673,9 +673,7 @@ salinfo_init(void)
         salinfo_timer.function = &salinfo_timeout;
         add_timer(&salinfo_timer);
 
-#ifdef CONFIG_HOTPLUG_CPU
-        register_cpu_notifier(&salinfo_cpu_notifier);
-#endif
+        register_hotcpu_notifier(&salinfo_cpu_notifier);
 
         return 0;
 }
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 44e9547878ac..5203df78f150 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -677,16 +677,16 @@ int migrate_platform_irqs(unsigned int cpu)
                 new_cpei_cpu = any_online_cpu(cpu_online_map);
                 mask = cpumask_of_cpu(new_cpei_cpu);
                 set_cpei_target_cpu(new_cpei_cpu);
-                desc = irq_descp(ia64_cpe_irq);
+                desc = irq_desc + ia64_cpe_irq;
                 /*
                  * Switch for now, immediatly, we need to do fake intr
                  * as other interrupts, but need to study CPEI behaviour with
                  * polling before making changes.
                  */
                 if (desc) {
-                        desc->handler->disable(ia64_cpe_irq);
-                        desc->handler->set_affinity(ia64_cpe_irq, mask);
-                        desc->handler->enable(ia64_cpe_irq);
+                        desc->chip->disable(ia64_cpe_irq);
+                        desc->chip->set_affinity(ia64_cpe_irq, mask);
+                        desc->chip->enable(ia64_cpe_irq);
                         printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu);
                 }
         }
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 879edb51d1e0..5511d9c6c701 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -26,19 +26,10 @@
 #include <asm/numa.h>
 #include <asm/cpu.h>
 
-#ifdef CONFIG_NUMA
-static struct node *sysfs_nodes;
-#endif
 static struct ia64_cpu *sysfs_cpus;
 
 int arch_register_cpu(int num)
 {
-        struct node *parent = NULL;
-
-#ifdef CONFIG_NUMA
-        parent = &sysfs_nodes[cpu_to_node(num)];
-#endif /* CONFIG_NUMA */
-
 #if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
         /*
          * If CPEI cannot be re-targetted, and this is
@@ -48,21 +39,14 @@ int arch_register_cpu(int num)
                 sysfs_cpus[num].cpu.no_control = 1;
 #endif
 
-        return register_cpu(&sysfs_cpus[num].cpu, num, parent);
+        return register_cpu(&sysfs_cpus[num].cpu, num);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 
 void arch_unregister_cpu(int num)
 {
-        struct node *parent = NULL;
-
-#ifdef CONFIG_NUMA
-        int node = cpu_to_node(num);
-        parent = &sysfs_nodes[node];
-#endif /* CONFIG_NUMA */
-
-        return unregister_cpu(&sysfs_cpus[num].cpu, parent);
+        return unregister_cpu(&sysfs_cpus[num].cpu);
 }
 EXPORT_SYMBOL(arch_register_cpu);
 EXPORT_SYMBOL(arch_unregister_cpu);
@@ -74,17 +58,11 @@ static int __init topology_init(void)
         int i, err = 0;
 
 #ifdef CONFIG_NUMA
-        sysfs_nodes = kzalloc(sizeof(struct node) * MAX_NUMNODES, GFP_KERNEL);
-        if (!sysfs_nodes) {
-                err = -ENOMEM;
-                goto out;
-        }
-
         /*
          * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
          */
         for_each_online_node(i) {
-                if ((err = register_node(&sysfs_nodes[i], i, 0)))
+                if ((err = register_one_node(i)))
                         goto out;
         }
 #endif
@@ -426,7 +404,7 @@ static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
  * When a cpu is hot-plugged, do a check and initiate
  * cache kobject if necessary
  */
-static int cache_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
                 unsigned long action, void *hcpu)
 {
         unsigned int cpu = (unsigned long)hcpu;
@@ -444,7 +422,7 @@ static int cache_cpu_callback(struct notifier_block *nfb,
         return NOTIFY_OK;
 }
 
-static struct notifier_block cache_cpu_notifier =
+static struct notifier_block __cpuinitdata cache_cpu_notifier =
 {
         .notifier_call = cache_cpu_callback
 };
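
[Note] register_cpu() no longer takes a parent node argument, and per-node sysfs objects are created by the generic register_one_node() instead of an arch-private sysfs_nodes[] array, so both arch_register_cpu() and arch_unregister_cpu() shrink. A condensed, illustrative sketch of the resulting init flow (error handling trimmed; this is not the full ia64 topology_init()):

    static int __init example_topology_init(void)
    {
            int i, err = 0;

    #ifdef CONFIG_NUMA
            for_each_online_node(i) {
                    err = register_one_node(i); /* replaces register_node(&sysfs_nodes[i], i, 0) */
                    if (err)
                            return err;
            }
    #endif
            for_each_present_cpu(i) {
                    err = arch_register_cpu(i); /* now ends in register_cpu(cpu, i) */
                    if (err)
                            return err;
            }
            return err;
    }
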
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index b6bcc9fa3603..525b082eb661 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -33,7 +33,6 @@
  */
 struct early_node_data {
         struct ia64_node_data *node_data;
-        pg_data_t *pgdat;
         unsigned long pernode_addr;
         unsigned long pernode_size;
         struct bootmem_data bootmem_data;
@@ -46,6 +45,8 @@ struct early_node_data {
 static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
 static nodemask_t memory_less_mask __initdata;
 
+static pg_data_t *pgdat_list[MAX_NUMNODES];
+
 /*
  * To prevent cache aliasing effects, align per-node structures so that they
  * start at addresses that are strided by node number.
@@ -99,7 +100,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
  * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
  * called yet.  Note that node 0 will also count all non-existent cpus.
  */
-static int __init early_nr_cpus_node(int node)
+static int __meminit early_nr_cpus_node(int node)
 {
         int cpu, n = 0;
 
@@ -114,7 +115,7 @@ static int __init early_nr_cpus_node(int node)
  * compute_pernodesize - compute size of pernode data
  * @node: the node id.
  */
-static unsigned long __init compute_pernodesize(int node)
+static unsigned long __meminit compute_pernodesize(int node)
 {
         unsigned long pernodesize = 0, cpus;
 
@@ -175,13 +176,13 @@ static void __init fill_pernode(int node, unsigned long pernode,
         pernode += PERCPU_PAGE_SIZE * cpus;
         pernode += node * L1_CACHE_BYTES;
 
-        mem_data[node].pgdat = __va(pernode);
+        pgdat_list[node] = __va(pernode);
         pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
         mem_data[node].node_data = __va(pernode);
         pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
 
-        mem_data[node].pgdat->bdata = bdp;
+        pgdat_list[node]->bdata = bdp;
         pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
         cpu_data = per_cpu_node_setup(cpu_data, node);
@@ -268,7 +269,7 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
 static int __init free_node_bootmem(unsigned long start, unsigned long len,
                                     int node)
 {
-        free_bootmem_node(mem_data[node].pgdat, start, len);
+        free_bootmem_node(pgdat_list[node], start, len);
 
         return 0;
 }
@@ -287,7 +288,7 @@ static void __init reserve_pernode_space(void)
         int node;
 
         for_each_online_node(node) {
-                pg_data_t *pdp = mem_data[node].pgdat;
+                pg_data_t *pdp = pgdat_list[node];
 
                 if (node_isset(node, memory_less_mask))
                         continue;
@@ -307,6 +308,17 @@
         }
 }
 
+static void __meminit scatter_node_data(void)
+{
+        pg_data_t **dst;
+        int node;
+
+        for_each_online_node(node) {
+                dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
+                memcpy(dst, pgdat_list, sizeof(pgdat_list));
+        }
+}
+
 /**
  * initialize_pernode_data - fixup per-cpu & per-node pointers
  *
@@ -317,17 +329,10 @@ static void __init reserve_pernode_space(void)
  */
 static void __init initialize_pernode_data(void)
 {
-        pg_data_t *pgdat_list[MAX_NUMNODES];
         int cpu, node;
 
-        for_each_online_node(node)
-                pgdat_list[node] = mem_data[node].pgdat;
+        scatter_node_data();
 
-        /* Copy the pg_data_t list to each node and init the node field */
-        for_each_online_node(node) {
-                memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
-                       sizeof(pgdat_list));
-        }
 #ifdef CONFIG_SMP
         /* Set the node_data pointer for each per-cpu struct */
         for (cpu = 0; cpu < NR_CPUS; cpu++) {
@@ -372,7 +377,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
         if (bestnode == -1)
                 bestnode = anynode;
 
-        ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat, pernodesize,
+        ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
                 PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 
         return ptr;
@@ -476,7 +481,7 @@ void __init find_memory(void)
                 pernodesize = mem_data[node].pernode_size;
                 map = pernode + pernodesize;
 
-                init_bootmem_node(mem_data[node].pgdat,
+                init_bootmem_node(pgdat_list[node],
                                   map>>PAGE_SHIFT,
                                   bdp->node_boot_start>>PAGE_SHIFT,
                                   bdp->node_low_pfn);
@@ -786,3 +791,21 @@ void __init paging_init(void)
 
         zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
+
+pg_data_t *arch_alloc_nodedata(int nid)
+{
+        unsigned long size = compute_pernodesize(nid);
+
+        return kzalloc(size, GFP_KERNEL);
+}
+
+void arch_free_nodedata(pg_data_t *pgdat)
+{
+        kfree(pgdat);
+}
+
+void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
+{
+        pgdat_list[update_node] = update_pgdat;
+        scatter_node_data();
+}
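
[Note] discontig.c hoists the boot-time pg_data_t pointers out of mem_data[] (which is __initdata) into a long-lived pgdat_list[] and factors the per-node pointer broadcast into scatter_node_data(), so the same machinery can run again after boot. The three new arch_*_nodedata() functions are the hooks that go with the HAVE_ARCH_NODEDATA_EXTENSION option added in the Kconfig hunk above: a node hot-add path in generic code can allocate, publish, and free an ia64 pg_data_t through them. The sketch below shows the intended call sequence only; hotadd_node_example() is an invented name, not the generic memory-hotplug code:

    static pg_data_t *hotadd_node_example(int nid)
    {
            pg_data_t *pgdat;

            pgdat = arch_alloc_nodedata(nid); /* ia64: kzalloc(compute_pernodesize(nid)) */
            if (!pgdat)
                    return NULL;

            /*
             * Publish the new pg_data_t; on ia64 this updates pgdat_list[]
             * and re-runs scatter_node_data() so every node's pg_data_ptrs[]
             * copy sees the new entry.
             */
            arch_refresh_nodedata(nid, pgdat);
            return pgdat;
    }
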
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 11f08001f8c2..38306e98f04b 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -652,7 +652,7 @@ void online_page(struct page *page)
         num_physpages++;
 }
 
-int add_memory(u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
 {
         pg_data_t *pgdat;
         struct zone *zone;
@@ -660,7 +660,7 @@ int add_memory(u64 start, u64 size)
         unsigned long nr_pages = size >> PAGE_SHIFT;
         int ret;
 
-        pgdat = NODE_DATA(0);
+        pgdat = NODE_DATA(nid);
 
         zone = pgdat->node_zones + ZONE_NORMAL;
         ret = __add_pages(zone, start_pfn, nr_pages);
@@ -671,7 +671,6 @@ int add_memory(u64 start, u64 size)
 
         return ret;
 }
-EXPORT_SYMBOL_GPL(add_memory);
 
 int remove_memory(u64 start, u64 size)
 {
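
[Note] The old ia64 add_memory() becomes the arch hook arch_add_memory() and gains a node id, so hot-added memory is attached to the right pgdat instead of node 0; the EXPORT_SYMBOL_GPL goes away because the exported add_memory() entry point now lives in common memory-hotplug code. Assuming the generic entry point takes (nid, start, size) in this kernel series, a caller would look roughly like this (probe_region_example() is hypothetical):

    static int probe_region_example(int nid, u64 start, u64 size)
    {
            /*
             * The generic layer validates the range and eventually calls the
             * arch_add_memory() defined above for the page-table/zone work.
             */
            return add_memory(nid, start, size);
    }
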
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 77375a55da31..5bef0e3603f2 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -568,7 +568,7 @@ pcibios_disable_device (struct pci_dev *dev)
 
 void
 pcibios_align_resource (void *data, struct resource *res,
-                        unsigned long size, unsigned long align)
+                        resource_size_t size, resource_size_t align)
 {
 }
 
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index dc8e2b696713..7bb6ad188ba3 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -27,7 +27,7 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
 int sn_force_interrupt_flag = 1;
 extern int sn_ioif_inited;
 struct list_head **sn_irq_lh;
-static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */
+static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
 
 u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
                   struct sn_irq_info *sn_irq_info,
@@ -225,8 +225,8 @@ void sn_irq_init(void)
         ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
 
         for (i = 0; i < NR_IRQS; i++) {
-                if (base_desc[i].handler == &no_irq_type) {
-                        base_desc[i].handler = &irq_type_sn;
+                if (base_desc[i].chip == &no_irq_type) {
+                        base_desc[i].chip = &irq_type_sn;
                 }
         }
 }
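
[Note] Replacing the SPIN_LOCK_UNLOCKED initializer with DEFINE_SPINLOCK() is the standard modernization: the macro declares and statically initializes the lock in one step and keeps working when spinlock debugging changes the initializer's shape. Usage of the lock is unchanged; a small self-contained example:

    #include <linux/spinlock.h>

    /* old: static spinlock_t example_lock = SPIN_LOCK_UNLOCKED; */
    static DEFINE_SPINLOCK(example_lock);

    static void example_critical_section(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&example_lock, flags);
            /* ... touch the shared state guarded by example_lock ... */
            spin_unlock_irqrestore(&example_lock, flags);
    }
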
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 93577abae36d..3bfccf354343 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -458,7 +458,7 @@ void __init sn_setup(char **cmdline_p)
          * support here so we don't have to listen to failed keyboard probe
          * messages.
          */
-        if (version <= 0x0209 && acpi_kbd_controller_present) {
+        if (is_shub1() && version <= 0x0209 && acpi_kbd_controller_present) {
                 printk(KERN_INFO "Disabling legacy keyboard support as prom "
                        "is too old and doesn't provide FADT\n");
                 acpi_kbd_controller_present = 0;
@@ -577,7 +577,8 @@ void __init sn_cpu_init(void)
         int i;
         static int wars_have_been_checked;
 
-        if (smp_processor_id() == 0 && IS_MEDUSA()) {
+        cpuid = smp_processor_id();
+        if (cpuid == 0 && IS_MEDUSA()) {
                 if (ia64_sn_is_fake_prom())
                         sn_prom_type = 2;
                 else
@@ -597,6 +598,12 @@ void __init sn_cpu_init(void)
         sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;
 
         /*
+         * Don't check status. The SAL call is not supported on all PROMs
+         * but a failure is harmless.
+         */
+        (void) ia64_sn_set_cpu_number(cpuid);
+
+        /*
          * The boot cpu makes this call again after platform initialization is
          * complete.
          */
@@ -607,7 +614,6 @@ void __init sn_cpu_init(void)
                 if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0)
                         break;
 
-        cpuid = smp_processor_id();
         cpuphyid = get_sapicid();
 
         if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 20de72791b97..e4aa839d0189 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -595,7 +595,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
 
         /* sanity check prom rev */
 
-        if (sn_sal_rev() < 0x0406) {
+        if (is_shub1() && sn_sal_rev() < 0x0406) {
                 printk
                     (KERN_ERR "%s: SGI prom rev 4.06 or greater required "
                      "for tioca support\n", __FUNCTION__);