diff options
author | Ingo Molnar <mingo@elte.hu> | 2009-08-29 03:30:41 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-08-29 03:31:47 -0400 |
commit | eebc57f73d42095b778e899f6aa90ad050c72655 (patch) | |
tree | 2ba80c75e9284093e6d7606dbb1b6a4bb752a2a5 /arch/x86/kernel/apic | |
parent | d3a247bfb2c26f5b67367d58af7ad8c2efbbc6c1 (diff) | |
parent | 2a4ab640d3c28c2952967e5f63ea495555bf2a5f (diff) |
Merge branch 'for-ingo' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-sfi-2.6 into x86/apic
Merge reason: the SFI (Simple Firmware Interface) feature in the ACPI
tree needs this cleanup, pull it into the APIC branch as
well so that there are no interactions.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/apic')
-rw-r--r-- | arch/x86/kernel/apic/es7000_32.c | 3 | ||||
-rw-r--r-- | arch/x86/kernel/apic/io_apic.c | 114 | ||||
-rw-r--r-- | arch/x86/kernel/apic/ipi.c | 3 | ||||
-rw-r--r-- | arch/x86/kernel/apic/numaq_32.c | 3 | ||||
-rw-r--r-- | arch/x86/kernel/apic/x2apic_cluster.c | 10 | ||||
-rw-r--r-- | arch/x86/kernel/apic/x2apic_phys.c | 10 | ||||
-rw-r--r-- | arch/x86/kernel/apic/x2apic_uv_x.c | 42 |
7 files changed, 128 insertions, 57 deletions
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 420f95da7bf6..89174f847b49 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c | |||
@@ -652,7 +652,8 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem, | |||
652 | return ret && es7000_apic_is_cluster(); | 652 | return ret && es7000_apic_is_cluster(); |
653 | } | 653 | } |
654 | 654 | ||
655 | struct apic apic_es7000_cluster = { | 655 | /* We've been warned by a false positive warning. Use __refdata to keep calm. */ |
656 | struct apic __refdata apic_es7000_cluster = { | ||
656 | 657 | ||
657 | .name = "es7000", | 658 | .name = "es7000", |
658 | .probe = probe_es7000, | 659 | .probe = probe_es7000, |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index d836b4d347e6..3c8f9e75d038 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -87,6 +87,9 @@ int nr_ioapic_registers[MAX_IO_APICS]; | |||
87 | struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; | 87 | struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; |
88 | int nr_ioapics; | 88 | int nr_ioapics; |
89 | 89 | ||
90 | /* IO APIC gsi routing info */ | ||
91 | struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS]; | ||
92 | |||
90 | /* MP IRQ source entries */ | 93 | /* MP IRQ source entries */ |
91 | struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; | 94 | struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; |
92 | 95 | ||
@@ -3736,6 +3739,9 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | |||
3736 | mmr_pnode = uv_blade_to_pnode(mmr_blade); | 3739 | mmr_pnode = uv_blade_to_pnode(mmr_blade); |
3737 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | 3740 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); |
3738 | 3741 | ||
3742 | if (cfg->move_in_progress) | ||
3743 | send_cleanup_vector(cfg); | ||
3744 | |||
3739 | return irq; | 3745 | return irq; |
3740 | } | 3746 | } |
3741 | 3747 | ||
@@ -3885,11 +3891,28 @@ int io_apic_set_pci_routing(struct device *dev, int irq, | |||
3885 | return __io_apic_set_pci_routing(dev, irq, irq_attr); | 3891 | return __io_apic_set_pci_routing(dev, irq, irq_attr); |
3886 | } | 3892 | } |
3887 | 3893 | ||
3888 | /* -------------------------------------------------------------------------- | 3894 | u8 __init io_apic_unique_id(u8 id) |
3889 | ACPI-based IOAPIC Configuration | 3895 | { |
3890 | -------------------------------------------------------------------------- */ | 3896 | #ifdef CONFIG_X86_32 |
3897 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && | ||
3898 | !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | ||
3899 | return io_apic_get_unique_id(nr_ioapics, id); | ||
3900 | else | ||
3901 | return id; | ||
3902 | #else | ||
3903 | int i; | ||
3904 | DECLARE_BITMAP(used, 256); | ||
3891 | 3905 | ||
3892 | #ifdef CONFIG_ACPI | 3906 | bitmap_zero(used, 256); |
3907 | for (i = 0; i < nr_ioapics; i++) { | ||
3908 | struct mpc_ioapic *ia = &mp_ioapics[i]; | ||
3909 | __set_bit(ia->apicid, used); | ||
3910 | } | ||
3911 | if (!test_bit(id, used)) | ||
3912 | return id; | ||
3913 | return find_first_zero_bit(used, 256); | ||
3914 | #endif | ||
3915 | } | ||
3893 | 3916 | ||
3894 | #ifdef CONFIG_X86_32 | 3917 | #ifdef CONFIG_X86_32 |
3895 | int __init io_apic_get_unique_id(int ioapic, int apic_id) | 3918 | int __init io_apic_get_unique_id(int ioapic, int apic_id) |
@@ -3998,8 +4021,6 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) | |||
3998 | return 0; | 4021 | return 0; |
3999 | } | 4022 | } |
4000 | 4023 | ||
4001 | #endif /* CONFIG_ACPI */ | ||
4002 | |||
4003 | /* | 4024 | /* |
4004 | * This function currently is only a helper for the i386 smp boot process where | 4025 | * This function currently is only a helper for the i386 smp boot process where |
4005 | * we need to reprogram the ioredtbls to cater for the cpus which have come online | 4026 | * we need to reprogram the ioredtbls to cater for the cpus which have come online |
@@ -4124,28 +4145,93 @@ fake_ioapic_page: | |||
4124 | } | 4145 | } |
4125 | } | 4146 | } |
4126 | 4147 | ||
4127 | static int __init ioapic_insert_resources(void) | 4148 | void __init ioapic_insert_resources(void) |
4128 | { | 4149 | { |
4129 | int i; | 4150 | int i; |
4130 | struct resource *r = ioapic_resources; | 4151 | struct resource *r = ioapic_resources; |
4131 | 4152 | ||
4132 | if (!r) { | 4153 | if (!r) { |
4133 | if (nr_ioapics > 0) { | 4154 | if (nr_ioapics > 0) |
4134 | printk(KERN_ERR | 4155 | printk(KERN_ERR |
4135 | "IO APIC resources couldn't be allocated.\n"); | 4156 | "IO APIC resources couldn't be allocated.\n"); |
4136 | return -1; | 4157 | return; |
4137 | } | ||
4138 | return 0; | ||
4139 | } | 4158 | } |
4140 | 4159 | ||
4141 | for (i = 0; i < nr_ioapics; i++) { | 4160 | for (i = 0; i < nr_ioapics; i++) { |
4142 | insert_resource(&iomem_resource, r); | 4161 | insert_resource(&iomem_resource, r); |
4143 | r++; | 4162 | r++; |
4144 | } | 4163 | } |
4164 | } | ||
4165 | |||
4166 | int mp_find_ioapic(int gsi) | ||
4167 | { | ||
4168 | int i = 0; | ||
4169 | |||
4170 | /* Find the IOAPIC that manages this GSI. */ | ||
4171 | for (i = 0; i < nr_ioapics; i++) { | ||
4172 | if ((gsi >= mp_gsi_routing[i].gsi_base) | ||
4173 | && (gsi <= mp_gsi_routing[i].gsi_end)) | ||
4174 | return i; | ||
4175 | } | ||
4145 | 4176 | ||
4177 | printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); | ||
4178 | return -1; | ||
4179 | } | ||
4180 | |||
4181 | int mp_find_ioapic_pin(int ioapic, int gsi) | ||
4182 | { | ||
4183 | if (WARN_ON(ioapic == -1)) | ||
4184 | return -1; | ||
4185 | if (WARN_ON(gsi > mp_gsi_routing[ioapic].gsi_end)) | ||
4186 | return -1; | ||
4187 | |||
4188 | return gsi - mp_gsi_routing[ioapic].gsi_base; | ||
4189 | } | ||
4190 | |||
4191 | static int bad_ioapic(unsigned long address) | ||
4192 | { | ||
4193 | if (nr_ioapics >= MAX_IO_APICS) { | ||
4194 | printk(KERN_WARNING "WARING: Max # of I/O APICs (%d) exceeded " | ||
4195 | "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics); | ||
4196 | return 1; | ||
4197 | } | ||
4198 | if (!address) { | ||
4199 | printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address" | ||
4200 | " found in table, skipping!\n"); | ||
4201 | return 1; | ||
4202 | } | ||
4146 | return 0; | 4203 | return 0; |
4147 | } | 4204 | } |
4148 | 4205 | ||
4149 | /* Insert the IO APIC resources after PCI initialization has occured to handle | 4206 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) |
4150 | * IO APICS that are mapped in on a BAR in PCI space. */ | 4207 | { |
4151 | late_initcall(ioapic_insert_resources); | 4208 | int idx = 0; |
4209 | |||
4210 | if (bad_ioapic(address)) | ||
4211 | return; | ||
4212 | |||
4213 | idx = nr_ioapics; | ||
4214 | |||
4215 | mp_ioapics[idx].type = MP_IOAPIC; | ||
4216 | mp_ioapics[idx].flags = MPC_APIC_USABLE; | ||
4217 | mp_ioapics[idx].apicaddr = address; | ||
4218 | |||
4219 | set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); | ||
4220 | mp_ioapics[idx].apicid = io_apic_unique_id(id); | ||
4221 | mp_ioapics[idx].apicver = io_apic_get_version(idx); | ||
4222 | |||
4223 | /* | ||
4224 | * Build basic GSI lookup table to facilitate gsi->io_apic lookups | ||
4225 | * and to prevent reprogramming of IOAPIC pins (PCI GSIs). | ||
4226 | */ | ||
4227 | mp_gsi_routing[idx].gsi_base = gsi_base; | ||
4228 | mp_gsi_routing[idx].gsi_end = gsi_base + | ||
4229 | io_apic_get_redir_entries(idx); | ||
4230 | |||
4231 | printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " | ||
4232 | "GSI %d-%d\n", idx, mp_ioapics[idx].apicid, | ||
4233 | mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr, | ||
4234 | mp_gsi_routing[idx].gsi_base, mp_gsi_routing[idx].gsi_end); | ||
4235 | |||
4236 | nr_ioapics++; | ||
4237 | } | ||
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index dbf5445727a9..6ef00ba4c886 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c | |||
@@ -106,6 +106,9 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector) | |||
106 | unsigned long mask = cpumask_bits(cpumask)[0]; | 106 | unsigned long mask = cpumask_bits(cpumask)[0]; |
107 | unsigned long flags; | 107 | unsigned long flags; |
108 | 108 | ||
109 | if (WARN_ONCE(!mask, "empty IPI mask")) | ||
110 | return; | ||
111 | |||
109 | local_irq_save(flags); | 112 | local_irq_save(flags); |
110 | WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); | 113 | WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); |
111 | __default_send_IPI_dest_field(mask, vector, apic->dest_logical); | 114 | __default_send_IPI_dest_field(mask, vector, apic->dest_logical); |
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index 533e59c6fc82..ca96e68f0d23 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c | |||
@@ -493,7 +493,8 @@ static void numaq_setup_portio_remap(void) | |||
493 | (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD); | 493 | (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD); |
494 | } | 494 | } |
495 | 495 | ||
496 | struct apic apic_numaq = { | 496 | /* Use __refdata to keep false positive warning calm. */ |
497 | struct apic __refdata apic_numaq = { | ||
497 | 498 | ||
498 | .name = "NUMAQ", | 499 | .name = "NUMAQ", |
499 | .probe = probe_numaq, | 500 | .probe = probe_numaq, |
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index 8e4cbb255c38..a5371ec36776 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c | |||
@@ -17,11 +17,13 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
17 | return x2apic_enabled(); | 17 | return x2apic_enabled(); |
18 | } | 18 | } |
19 | 19 | ||
20 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ | 20 | /* |
21 | 21 | * need to use more than cpu 0, because we need more vectors when | |
22 | * MSI-X are used. | ||
23 | */ | ||
22 | static const struct cpumask *x2apic_target_cpus(void) | 24 | static const struct cpumask *x2apic_target_cpus(void) |
23 | { | 25 | { |
24 | return cpumask_of(0); | 26 | return cpu_online_mask; |
25 | } | 27 | } |
26 | 28 | ||
27 | /* | 29 | /* |
@@ -170,7 +172,7 @@ static unsigned long set_apic_id(unsigned int id) | |||
170 | 172 | ||
171 | static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb) | 173 | static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb) |
172 | { | 174 | { |
173 | return current_cpu_data.initial_apicid >> index_msb; | 175 | return initial_apicid >> index_msb; |
174 | } | 176 | } |
175 | 177 | ||
176 | static void x2apic_send_IPI_self(int vector) | 178 | static void x2apic_send_IPI_self(int vector) |
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index a284359627e7..a8989aadc99a 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c | |||
@@ -27,11 +27,13 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
27 | return 0; | 27 | return 0; |
28 | } | 28 | } |
29 | 29 | ||
30 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ | 30 | /* |
31 | 31 | * need to use more than cpu 0, because we need more vectors when | |
32 | * MSI-X are used. | ||
33 | */ | ||
32 | static const struct cpumask *x2apic_target_cpus(void) | 34 | static const struct cpumask *x2apic_target_cpus(void) |
33 | { | 35 | { |
34 | return cpumask_of(0); | 36 | return cpu_online_mask; |
35 | } | 37 | } |
36 | 38 | ||
37 | static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) | 39 | static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) |
@@ -162,7 +164,7 @@ static unsigned long set_apic_id(unsigned int id) | |||
162 | 164 | ||
163 | static int x2apic_phys_pkg_id(int initial_apicid, int index_msb) | 165 | static int x2apic_phys_pkg_id(int initial_apicid, int index_msb) |
164 | { | 166 | { |
165 | return current_cpu_data.initial_apicid >> index_msb; | 167 | return initial_apicid >> index_msb; |
166 | } | 168 | } |
167 | 169 | ||
168 | static void x2apic_send_IPI_self(int vector) | 170 | static void x2apic_send_IPI_self(int vector) |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 096d19aea2f7..601159374e87 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -46,7 +46,7 @@ static int early_get_nodeid(void) | |||
46 | return node_id.s.node_id; | 46 | return node_id.s.node_id; |
47 | } | 47 | } |
48 | 48 | ||
49 | static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | 49 | static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) |
50 | { | 50 | { |
51 | if (!strcmp(oem_id, "SGI")) { | 51 | if (!strcmp(oem_id, "SGI")) { |
52 | if (!strcmp(oem_table_id, "UVL")) | 52 | if (!strcmp(oem_table_id, "UVL")) |
@@ -253,7 +253,7 @@ static void uv_send_IPI_self(int vector) | |||
253 | apic_write(APIC_SELF_IPI, vector); | 253 | apic_write(APIC_SELF_IPI, vector); |
254 | } | 254 | } |
255 | 255 | ||
256 | struct apic apic_x2apic_uv_x = { | 256 | struct apic __refdata apic_x2apic_uv_x = { |
257 | 257 | ||
258 | .name = "UV large system", | 258 | .name = "UV large system", |
259 | .probe = NULL, | 259 | .probe = NULL, |
@@ -261,7 +261,7 @@ struct apic apic_x2apic_uv_x = { | |||
261 | .apic_id_registered = uv_apic_id_registered, | 261 | .apic_id_registered = uv_apic_id_registered, |
262 | 262 | ||
263 | .irq_delivery_mode = dest_Fixed, | 263 | .irq_delivery_mode = dest_Fixed, |
264 | .irq_dest_mode = 1, /* logical */ | 264 | .irq_dest_mode = 0, /* physical */ |
265 | 265 | ||
266 | .target_cpus = uv_target_cpus, | 266 | .target_cpus = uv_target_cpus, |
267 | .disable_esr = 0, | 267 | .disable_esr = 0, |
@@ -362,12 +362,6 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) | |||
362 | BUG(); | 362 | BUG(); |
363 | } | 363 | } |
364 | 364 | ||
365 | static __init void map_low_mmrs(void) | ||
366 | { | ||
367 | init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE); | ||
368 | init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE); | ||
369 | } | ||
370 | |||
371 | enum map_type {map_wb, map_uc}; | 365 | enum map_type {map_wb, map_uc}; |
372 | 366 | ||
373 | static __init void map_high(char *id, unsigned long base, int shift, | 367 | static __init void map_high(char *id, unsigned long base, int shift, |
@@ -395,26 +389,6 @@ static __init void map_gru_high(int max_pnode) | |||
395 | map_high("GRU", gru.s.base, shift, max_pnode, map_wb); | 389 | map_high("GRU", gru.s.base, shift, max_pnode, map_wb); |
396 | } | 390 | } |
397 | 391 | ||
398 | static __init void map_config_high(int max_pnode) | ||
399 | { | ||
400 | union uvh_rh_gam_cfg_overlay_config_mmr_u cfg; | ||
401 | int shift = UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT; | ||
402 | |||
403 | cfg.v = uv_read_local_mmr(UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR); | ||
404 | if (cfg.s.enable) | ||
405 | map_high("CONFIG", cfg.s.base, shift, max_pnode, map_uc); | ||
406 | } | ||
407 | |||
408 | static __init void map_mmr_high(int max_pnode) | ||
409 | { | ||
410 | union uvh_rh_gam_mmr_overlay_config_mmr_u mmr; | ||
411 | int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT; | ||
412 | |||
413 | mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR); | ||
414 | if (mmr.s.enable) | ||
415 | map_high("MMR", mmr.s.base, shift, max_pnode, map_uc); | ||
416 | } | ||
417 | |||
418 | static __init void map_mmioh_high(int max_pnode) | 392 | static __init void map_mmioh_high(int max_pnode) |
419 | { | 393 | { |
420 | union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh; | 394 | union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh; |
@@ -566,8 +540,6 @@ void __init uv_system_init(void) | |||
566 | unsigned long mmr_base, present, paddr; | 540 | unsigned long mmr_base, present, paddr; |
567 | unsigned short pnode_mask; | 541 | unsigned short pnode_mask; |
568 | 542 | ||
569 | map_low_mmrs(); | ||
570 | |||
571 | m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); | 543 | m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); |
572 | m_val = m_n_config.s.m_skt; | 544 | m_val = m_n_config.s.m_skt; |
573 | n_val = m_n_config.s.n_skt; | 545 | n_val = m_n_config.s.n_skt; |
@@ -591,6 +563,8 @@ void __init uv_system_init(void) | |||
591 | bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); | 563 | bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); |
592 | uv_blade_info = kmalloc(bytes, GFP_KERNEL); | 564 | uv_blade_info = kmalloc(bytes, GFP_KERNEL); |
593 | BUG_ON(!uv_blade_info); | 565 | BUG_ON(!uv_blade_info); |
566 | for (blade = 0; blade < uv_num_possible_blades(); blade++) | ||
567 | uv_blade_info[blade].memory_nid = -1; | ||
594 | 568 | ||
595 | get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size); | 569 | get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size); |
596 | 570 | ||
@@ -629,6 +603,9 @@ void __init uv_system_init(void) | |||
629 | lcpu = uv_blade_info[blade].nr_possible_cpus; | 603 | lcpu = uv_blade_info[blade].nr_possible_cpus; |
630 | uv_blade_info[blade].nr_possible_cpus++; | 604 | uv_blade_info[blade].nr_possible_cpus++; |
631 | 605 | ||
606 | /* Any node on the blade, else will contain -1. */ | ||
607 | uv_blade_info[blade].memory_nid = nid; | ||
608 | |||
632 | uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base; | 609 | uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base; |
633 | uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size; | 610 | uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size; |
634 | uv_cpu_hub_info(cpu)->m_val = m_val; | 611 | uv_cpu_hub_info(cpu)->m_val = m_val; |
@@ -662,11 +639,10 @@ void __init uv_system_init(void) | |||
662 | pnode = (paddr >> m_val) & pnode_mask; | 639 | pnode = (paddr >> m_val) & pnode_mask; |
663 | blade = boot_pnode_to_blade(pnode); | 640 | blade = boot_pnode_to_blade(pnode); |
664 | uv_node_to_blade[nid] = blade; | 641 | uv_node_to_blade[nid] = blade; |
642 | max_pnode = max(pnode, max_pnode); | ||
665 | } | 643 | } |
666 | 644 | ||
667 | map_gru_high(max_pnode); | 645 | map_gru_high(max_pnode); |
668 | map_mmr_high(max_pnode); | ||
669 | map_config_high(max_pnode); | ||
670 | map_mmioh_high(max_pnode); | 646 | map_mmioh_high(max_pnode); |
671 | 647 | ||
672 | uv_cpu_init(); | 648 | uv_cpu_init(); |