author     Jeff Garzik <jgarzik@pobox.com>    2005-07-13 16:23:51 -0400
committer  Jeff Garzik <jgarzik@pobox.com>    2005-07-13 16:23:51 -0400
commit     327309e899662b482c58cf25f574513d38b5788c (patch)
tree       069de438aa0e92dd9b6ba28e6b207e2cd07151a5 /arch/ia64
parent     0c168775709faa74c1b87f1e61046e0c51ade7f3 (diff)
parent     c32511e2718618f0b53479eb36e07439aa363a74 (diff)
Merge upstream 2.6.13-rc3 into ieee80211 branch of netdev-2.6.
Diffstat (limited to 'arch/ia64')
35 files changed, 684 insertions, 1200 deletions
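The change that recurs across the ia64 files below is the rework of interrupt-vector allocation: `assign_irq_vector()` (formerly the `_nopanic` variant) now returns `-ENOSPC` when the device-vector pool is exhausted instead of panicking inside the allocator, so callers such as simeth, simserial and the IOSAPIC code check the return value and decide how to fail. A minimal user-space sketch of that caller-checks-the-return pattern follows; the pool size and function names are illustrative only, not the kernel API.

```c
/*
 * Toy model of the reworked vector allocation: the allocator reports
 * exhaustion with -ENOSPC and the caller decides what to do about it.
 * Everything here is illustrative; it is not the kernel implementation.
 */
#include <errno.h>
#include <stdio.h>

#define NUM_DEVICE_VECTORS 8            /* tiny pool for the demo */

static unsigned char vector_in_use[NUM_DEVICE_VECTORS];

static int assign_irq_vector_model(void)
{
	int pos;

	for (pos = 0; pos < NUM_DEVICE_VECTORS; pos++) {
		if (!vector_in_use[pos]) {
			vector_in_use[pos] = 1;
			return pos;     /* the allocated "vector" */
		}
	}
	return -ENOSPC;                 /* pool exhausted, caller decides */
}

int main(void)
{
	for (int i = 0; i < NUM_DEVICE_VECTORS + 1; i++) {
		int rc = assign_irq_vector_model();

		if (rc < 0) {
			/* a real caller would panic() or fail the probe */
			fprintf(stderr, "out of interrupt vectors (%d)\n", rc);
			return 1;
		}
		printf("allocated vector %d\n", rc);
	}
	return 0;
}
```

The same pattern appears verbatim in the simeth_probe1(), simrs_init() and IOSAPIC hunks below, where the caller now panics itself when the returned value is negative.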
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 01b78e7f992e..2e08942339ad 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -423,6 +425,8 @@ endmenu
 
 endif
 
+source "net/Kconfig"
+
 source "drivers/Kconfig"
 
 source "fs/Kconfig"
diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c
index ae84a1018a89..0639ec0ed015 100644
--- a/arch/ia64/hp/sim/simeth.c
+++ b/arch/ia64/hp/sim/simeth.c
@@ -191,7 +191,7 @@ simeth_probe1(void)
 	unsigned char mac_addr[ETH_ALEN];
 	struct simeth_local *local;
 	struct net_device *dev;
-	int fd, i, err;
+	int fd, i, err, rc;
 
 	/*
 	 * XXX Fix me
@@ -228,7 +228,9 @@ simeth_probe1(void)
 		return err;
 	}
 
-	dev->irq = assign_irq_vector(AUTO_ASSIGN);
+	if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0)
+		panic("%s: out of interrupt vectors!\n", __FUNCTION__);
+	dev->irq = rc;
 
 	/*
 	 * attach the interrupt in the simulator, this does enable interrupts
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 7a8ae0f4b387..7dcb8582ae0d 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -982,7 +982,7 @@ static struct tty_operations hp_ops = {
 static int __init
 simrs_init (void)
 {
-	int i;
+	int i, rc;
 	struct serial_state *state;
 
 	if (!ia64_platform_is("hpsim"))
@@ -1017,7 +1017,10 @@ simrs_init (void)
 		if (state->type == PORT_UNKNOWN) continue;
 
 		if (!state->irq) {
-			state->irq = assign_irq_vector(AUTO_ASSIGN);
+			if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0)
+				panic("%s: out of interrupt vectors!\n",
+					__FUNCTION__);
+			state->irq = rc;
 			ia64_ssc_connect_irq(KEYBOARD_INTR, state->irq);
 		}
 
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index b2e2f6509eb0..e1fb68ddec26 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_IA64_PALINFO) += palinfo.o
 obj-$(CONFIG_IOSAPIC) += iosapic.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_SMP) += smp.o smpboot.o domain.o
+obj-$(CONFIG_NUMA) += numa.o
 obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
 obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
 obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index cda06f88c66e..9609f243e5d0 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -11,6 +11,7 @@
  * Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
  * Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
  * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
+ * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -67,6 +68,11 @@ EXPORT_SYMBOL(pm_power_off);
 unsigned char acpi_kbd_controller_present = 1;
 unsigned char acpi_legacy_devices;
 
+static unsigned int __initdata acpi_madt_rev;
+
+unsigned int acpi_cpei_override;
+unsigned int acpi_cpei_phys_cpuid;
+
 #define MAX_SAPICS 256
 u16 ia64_acpiid_to_sapicid[MAX_SAPICS] =
 	{ [0 ... MAX_SAPICS - 1] = -1 };
@@ -265,10 +271,56 @@ acpi_parse_plat_int_src (
 		 (plintsrc->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
 
 	platform_intr_list[plintsrc->type] = vector;
+	if (acpi_madt_rev > 1) {
+		acpi_cpei_override = plintsrc->plint_flags.cpei_override_flag;
+	}
+
+	/*
+	 * Save the physical id, so we can check when its being removed
+	 */
+	acpi_cpei_phys_cpuid = ((plintsrc->id << 8) | (plintsrc->eid)) & 0xffff;
+
 	return 0;
 }
 
 
+unsigned int can_cpei_retarget(void)
+{
+	extern int cpe_vector;
+
+	/*
+	 * Only if CPEI is supported and the override flag
+	 * is present, otherwise return that its re-targettable
+	 * if we are in polling mode.
+	 */
+	if (cpe_vector > 0 && !acpi_cpei_override)
+		return 0;
+	else
+		return 1;
+}
+
+unsigned int is_cpu_cpei_target(unsigned int cpu)
+{
+	unsigned int logical_id;
+
+	logical_id = cpu_logical_id(acpi_cpei_phys_cpuid);
+
+	if (logical_id == cpu)
+		return 1;
+	else
+		return 0;
+}
+
+void set_cpei_target_cpu(unsigned int cpu)
+{
+	acpi_cpei_phys_cpuid = cpu_physical_id(cpu);
+}
+
+unsigned int get_cpei_target_cpu(void)
+{
+	return acpi_cpei_phys_cpuid;
+}
+
 static int __init
 acpi_parse_int_src_ovr (
 	acpi_table_entry_header *header, const unsigned long end)
@@ -326,6 +378,8 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size)
 
 	acpi_madt = (struct acpi_table_madt *) __va(phys_addr);
 
+	acpi_madt_rev = acpi_madt->header.revision;
+
 	/* remember the value for reference after free_initmem() */
 #ifdef CONFIG_ITANIUM
 	has_8259 = 1; /* Firmware on old Itanium systems is broken */
@@ -640,9 +694,11 @@ acpi_boot_init (void)
 		if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id())
 			node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu];
 	}
-	build_cpu_to_node_map();
 # endif
 #endif
+#ifdef CONFIG_ACPI_NUMA
+	build_cpu_to_node_map();
+#endif
 	/* Make boot-up look pretty */
 	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
 	return 0;
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 69f88d561d62..bb9a506deb78 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1249,7 +1249,7 @@ ENTRY(sys_rt_sigreturn)
 	stf.spill [r17]=f11
 	adds out0=16,sp		// out0 = &sigscratch
 	br.call.sptk.many rp=ia64_rt_sigreturn
-.ret19:	.restore sp 0
+.ret19:	.restore sp,0
 	adds sp=16,sp
 	;;
 	ld8 r9=[sp]		// load new ar.unat
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index c170be095ccd..7936b62f7a2e 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -489,8 +489,6 @@ static int iosapic_find_sharable_vector (unsigned long trigger, unsigned long po
 			}
 		}
 	}
-	if (vector < 0)
-		panic("%s: out of interrupt vectors!\n", __FUNCTION__);
 
 	return vector;
 }
@@ -506,6 +504,8 @@ iosapic_reassign_vector (int vector)
 
 	if (!list_empty(&iosapic_intr_info[vector].rtes)) {
 		new_vector = assign_irq_vector(AUTO_ASSIGN);
+		if (new_vector < 0)
+			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
 		printk(KERN_INFO "Reassigning vector %d to %d\n", vector, new_vector);
 		memcpy(&iosapic_intr_info[new_vector], &iosapic_intr_info[vector],
 		       sizeof(struct iosapic_intr_info));
@@ -734,9 +734,12 @@ again:
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 
 	/* If vector is running out, we try to find a sharable vector */
-	vector = assign_irq_vector_nopanic(AUTO_ASSIGN);
-	if (vector < 0)
+	vector = assign_irq_vector(AUTO_ASSIGN);
+	if (vector < 0) {
 		vector = iosapic_find_sharable_vector(trigger, polarity);
+		if (vector < 0)
+			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
+	}
 
 	spin_lock_irqsave(&irq_descp(vector)->lock, flags);
 	spin_lock(&iosapic_lock);
@@ -884,6 +887,8 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
 		break;
 	case ACPI_INTERRUPT_INIT:
 		vector = assign_irq_vector(AUTO_ASSIGN);
+		if (vector < 0)
+			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
 		delivery = IOSAPIC_INIT;
 		break;
 	case ACPI_INTERRUPT_CPEI:
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 4fe60c7a2e90..6c4d59fd0364 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -63,30 +63,19 @@ EXPORT_SYMBOL(isa_irq_to_vector_map);
 static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];
 
 int
-assign_irq_vector_nopanic (int irq)
+assign_irq_vector (int irq)
 {
 	int pos, vector;
 again:
 	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
 	vector = IA64_FIRST_DEVICE_VECTOR + pos;
 	if (vector > IA64_LAST_DEVICE_VECTOR)
-		return -1;
+		return -ENOSPC;
 	if (test_and_set_bit(pos, ia64_vector_mask))
 		goto again;
 	return vector;
 }
 
-int
-assign_irq_vector (int irq)
-{
-	int vector = assign_irq_vector_nopanic(irq);
-
-	if (vector < 0)
-		panic("assign_irq_vector: out of interrupt vectors!");
-
-	return vector;
-}
-
 void
 free_irq_vector (int vector)
 {
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 3aa3167edbec..884f5cd27d8a 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -713,7 +713,7 @@ static struct kprobe trampoline_p = {
 	.pre_handler = trampoline_probe_handler
 };
 
-int __init arch_init(void)
+int __init arch_init_kprobes(void)
 {
 	trampoline_p.addr =
 		(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip;
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 736e328b5e61..4ebbf3974381 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -271,7 +271,7 @@ ia64_mca_log_sal_error_record(int sal_info_type)
 
 #ifdef CONFIG_ACPI
 
-static int cpe_vector = -1;
+int cpe_vector = -1;
 
 static irqreturn_t
 ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
new file mode 100644
index 000000000000..a68ce6678092
--- /dev/null
+++ b/arch/ia64/kernel/numa.c
@@ -0,0 +1,57 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ia64 kernel NUMA specific stuff
+ *
+ * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ *	Jesse Barnes <jbarnes@sgi.com>
+ */
+#include <linux/config.h>
+#include <linux/topology.h>
+#include <linux/module.h>
+#include <asm/processor.h>
+#include <asm/smp.h>
+
+u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_to_node_map);
+
+cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
+
+/**
+ * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays
+ *
+ * Build cpu to node mapping and initialize the per node cpu masks using
+ * info from the node_cpuid array handed to us by ACPI.
+ */
+void __init build_cpu_to_node_map(void)
+{
+	int cpu, i, node;
+
+	for(node=0; node < MAX_NUMNODES; node++)
+		cpus_clear(node_to_cpu_mask[node]);
+
+	for(cpu = 0; cpu < NR_CPUS; ++cpu) {
+		node = -1;
+		for (i = 0; i < NR_CPUS; ++i)
+			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
+				node = node_cpuid[i].nid;
+				break;
+			}
+		cpu_to_node_map[cpu] = (node >= 0) ? node : 0;
+		if (node >= 0)
+			cpu_set(cpu, node_to_cpu_mask[node]);
+	}
+}
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 6407bff6bfd7..b8ebb8e427ef 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -37,7 +37,6 @@
 #include <linux/vfs.h>
 #include <linux/pagemap.h>
 #include <linux/mount.h>
-#include <linux/version.h>
 #include <linux/bitops.h>
 
 #include <asm/errno.h>
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 6e35bff05d59..e484910246ad 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -196,6 +196,7 @@ update_pal_halt_status(int status)
 void
 default_idle (void)
 {
+	local_irq_enable();
 	while (!need_resched())
 		if (can_do_pal_halt)
 			safe_halt();
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 2693e1522d7c..5c7c95737bbf 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -40,6 +40,8 @@
 #include <linux/serial_core.h>
 #include <linux/efi.h>
 #include <linux/initrd.h>
+#include <linux/platform.h>
+#include <linux/pm.h>
 
 #include <asm/ia32.h>
 #include <asm/machvec.h>
@@ -783,6 +785,7 @@ cpu_init (void)
 	/* size of physical stacked register partition plus 8 bytes: */
 	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
 	platform_cpu_init();
+	pm_idle = default_idle;
 }
 
 void
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index edd9f07860b2..b8a0a7d257a9 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -143,6 +143,7 @@ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
 
 		__copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16);
 		psr->mfh = 0;	/* drop signal handler's fph contents... */
+		preempt_disable();
 		if (psr->dfh)
 			ia64_drop_fpu(current);
 		else {
@@ -150,6 +151,7 @@ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
 			__ia64_load_fpu(current->thread.fph);
 			ia64_set_local_fpu_owner(current);
 		}
+		preempt_enable();
 	}
 	return err;
 }
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 623b0a546709..7d72c0d872b3 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -525,47 +525,6 @@ smp_build_cpu_map (void)
 	}
 }
 
-#ifdef CONFIG_NUMA
-
-/* on which node is each logical CPU (one cacheline even for 64 CPUs) */
-u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_to_node_map);
-/* which logical CPUs are on which nodes */
-cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
-
-/*
- * Build cpu to node mapping and initialize the per node cpu masks.
- */
-void __init
-build_cpu_to_node_map (void)
-{
-	int cpu, i, node;
-
-	for(node=0; node<MAX_NUMNODES; node++)
-		cpus_clear(node_to_cpu_mask[node]);
-	for(cpu = 0; cpu < NR_CPUS; ++cpu) {
-		/*
-		 * All Itanium NUMA platforms I know use ACPI, so maybe we
-		 * can drop this ifdef completely. [EF]
-		 */
-#ifdef CONFIG_ACPI_NUMA
-		node = -1;
-		for (i = 0; i < NR_CPUS; ++i)
-			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
-				node = node_cpuid[i].nid;
-				break;
-			}
-#else
-# error Fixme: Dunno how to build CPU-to-node map.
-#endif
-		cpu_to_node_map[cpu] = (node >= 0) ? node : 0;
-		if (node >= 0)
-			cpu_set(cpu, node_to_cpu_mask[node]);
-	}
-}
-
-#endif /* CONFIG_NUMA */
-
 /*
  * Cycle through the APs sending Wakeup IPIs to boot each.
  */
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index f1aafd4c05f9..d8030f3bd865 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -36,6 +36,13 @@ int arch_register_cpu(int num)
 	parent = &sysfs_nodes[cpu_to_node(num)];
 #endif /* CONFIG_NUMA */
 
+	/*
+	 * If CPEI cannot be re-targetted, and this is
+	 * CPEI target, then dont create the control file
+	 */
+	if (!can_cpei_retarget() && is_cpu_cpei_target(num))
+		sysfs_cpus[num].cpu.no_control = 1;
+
 	return register_cpu(&sysfs_cpus[num].cpu, num, parent);
 }
 
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index e7e520d90f03..4440c8343fa4 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -90,14 +90,16 @@ die (const char *str, struct pt_regs *regs, long err)
 		.lock_owner_depth = 0
 	};
 	static int die_counter;
+	int cpu = get_cpu();
 
-	if (die.lock_owner != smp_processor_id()) {
+	if (die.lock_owner != cpu) {
 		console_verbose();
 		spin_lock_irq(&die.lock);
-		die.lock_owner = smp_processor_id();
+		die.lock_owner = cpu;
 		die.lock_owner_depth = 0;
 		bust_spinlocks(1);
 	}
+	put_cpu();
 
 	if (++die.lock_owner_depth < 3) {
 		printk("%s[%d]: %s %ld [%d]\n",
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index f3fd528ead3b..b5c90e548195 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -44,150 +44,7 @@ struct early_node_data {
 };
 
 static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
-
-/**
- * reassign_cpu_only_nodes - called from find_memory to move CPU-only nodes to a memory node
- *
- * This function will move nodes with only CPUs (no memory)
- * to a node with memory which is at the minimum numa_slit distance.
- * Any reassigments will result in the compression of the nodes
- * and renumbering the nid values where appropriate.
- * The static declarations below are to avoid large stack size which
- * makes the code not re-entrant.
- */
-static void __init reassign_cpu_only_nodes(void)
-{
-	struct node_memblk_s *p;
-	int i, j, k, nnode, nid, cpu, cpunid, pxm;
-	u8 cslit, slit;
-	static DECLARE_BITMAP(nodes_with_mem, MAX_NUMNODES) __initdata;
-	static u8 numa_slit_fix[MAX_NUMNODES * MAX_NUMNODES] __initdata;
-	static int node_flip[MAX_NUMNODES] __initdata;
-	static int old_nid_map[NR_CPUS] __initdata;
-
-	for (nnode = 0, p = &node_memblk[0]; p < &node_memblk[num_node_memblks]; p++)
-		if (!test_bit(p->nid, (void *) nodes_with_mem)) {
-			set_bit(p->nid, (void *) nodes_with_mem);
-			nnode++;
-		}
-
-	/*
-	 * All nids with memory.
-	 */
-	if (nnode == num_online_nodes())
-		return;
-
-	/*
-	 * Change nids and attempt to migrate CPU-only nodes
-	 * to the best numa_slit (closest neighbor) possible.
-	 * For reassigned CPU nodes a nid can't be arrived at
-	 * until after this loop because the target nid's new
-	 * identity might not have been established yet. So
-	 * new nid values are fabricated above num_online_nodes() and
-	 * mapped back later to their true value.
-	 */
-	/* MCD - This code is a bit complicated, but may be unnecessary now.
-	 * We can now handle much more interesting node-numbering.
-	 * The old requirement that 0 <= nid <= numnodes <= MAX_NUMNODES
-	 * and that there be no holes in the numbering 0..numnodes
-	 * has become simply 0 <= nid <= MAX_NUMNODES.
-	 */
-	nid = 0;
-	for_each_online_node(i) {
-		if (test_bit(i, (void *) nodes_with_mem)) {
-			/*
-			 * Save original nid value for numa_slit
-			 * fixup and node_cpuid reassignments.
-			 */
-			node_flip[nid] = i;
-
-			if (i == nid) {
-				nid++;
-				continue;
-			}
-
-			for (p = &node_memblk[0]; p < &node_memblk[num_node_memblks]; p++)
-				if (p->nid == i)
-					p->nid = nid;
-
-			cpunid = nid;
-			nid++;
-		} else
-			cpunid = MAX_NUMNODES;
-
-		for (cpu = 0; cpu < NR_CPUS; cpu++)
-			if (node_cpuid[cpu].nid == i) {
-				/*
-				 * For nodes not being reassigned just
-				 * fix the cpu's nid and reverse pxm map
-				 */
-				if (cpunid < MAX_NUMNODES) {
-					pxm = nid_to_pxm_map[i];
-					pxm_to_nid_map[pxm] =
-						node_cpuid[cpu].nid = cpunid;
-					continue;
-				}
-
-				/*
-				 * For nodes being reassigned, find best node by
-				 * numa_slit information and then make a temporary
-				 * nid value based on current nid and num_online_nodes().
-				 */
-				slit = 0xff;
-				k = 2*num_online_nodes();
-				for_each_online_node(j) {
-					if (i == j)
-						continue;
-					else if (test_bit(j, (void *) nodes_with_mem)) {
-						cslit = numa_slit[i * num_online_nodes() + j];
-						if (cslit < slit) {
-							k = num_online_nodes() + j;
-							slit = cslit;
-						}
-					}
-				}
-
-				/* save old nid map so we can update the pxm */
-				old_nid_map[cpu] = node_cpuid[cpu].nid;
-				node_cpuid[cpu].nid = k;
-			}
-	}
-
-	/*
-	 * Fixup temporary nid values for CPU-only nodes.
-	 */
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
-		if (node_cpuid[cpu].nid == (2*num_online_nodes())) {
-			pxm = nid_to_pxm_map[old_nid_map[cpu]];
-			pxm_to_nid_map[pxm] = node_cpuid[cpu].nid = nnode - 1;
-		} else {
-			for (i = 0; i < nnode; i++) {
-				if (node_flip[i] != (node_cpuid[cpu].nid - num_online_nodes()))
-					continue;
-
-				pxm = nid_to_pxm_map[old_nid_map[cpu]];
-				pxm_to_nid_map[pxm] = node_cpuid[cpu].nid = i;
-				break;
-			}
-		}
-
-	/*
-	 * Fix numa_slit by compressing from larger
-	 * nid array to reduced nid array.
-	 */
-	for (i = 0; i < nnode; i++)
-		for (j = 0; j < nnode; j++)
-			numa_slit_fix[i * nnode + j] =
-				numa_slit[node_flip[i] * num_online_nodes() + node_flip[j]];
-
-	memcpy(numa_slit, numa_slit_fix, sizeof (numa_slit));
-
-	nodes_clear(node_online_map);
-	for (i = 0; i < nnode; i++)
-		node_set_online(i);
-
-	return;
-}
+static nodemask_t memory_less_mask __initdata;
 
 /*
  * To prevent cache aliasing effects, align per-node structures so that they
@@ -233,44 +90,101 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
 }
 
 /**
- * early_nr_phys_cpus_node - return number of physical cpus on a given node
+ * early_nr_cpus_node - return number of cpus on a given node
  * @node: node to check
  *
- * Count the number of physical cpus on @node. These are cpus that actually
- * exist. We can't use nr_cpus_node() yet because
+ * Count the number of cpus on @node. We can't use nr_cpus_node() yet because
  * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
- * called yet.
+ * called yet. Note that node 0 will also count all non-existent cpus.
  */
-static int early_nr_phys_cpus_node(int node)
+static int __init early_nr_cpus_node(int node)
 {
 	int cpu, n = 0;
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++)
 		if (node == node_cpuid[cpu].nid)
-			if ((cpu == 0) || node_cpuid[cpu].phys_id)
-				n++;
+			n++;
 
 	return n;
 }
 
+/**
+ * compute_pernodesize - compute size of pernode data
+ * @node: the node id.
+ */
+static unsigned long __init compute_pernodesize(int node)
+{
+	unsigned long pernodesize = 0, cpus;
+
+	cpus = early_nr_cpus_node(node);
+	pernodesize += PERCPU_PAGE_SIZE * cpus;
+	pernodesize += node * L1_CACHE_BYTES;
+	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
+	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
+	pernodesize = PAGE_ALIGN(pernodesize);
+	return pernodesize;
+}
 
 /**
- * early_nr_cpus_node - return number of cpus on a given node
- * @node: node to check
+ * per_cpu_node_setup - setup per-cpu areas on each node
+ * @cpu_data: per-cpu area on this node
+ * @node: node to setup
  *
- * Count the number of cpus on @node. We can't use nr_cpus_node() yet because
- * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
- * called yet. Note that node 0 will also count all non-existent cpus.
+ * Copy the static per-cpu data into the region we just set aside and then
+ * setup __per_cpu_offset for each CPU on this node. Return a pointer to
+ * the end of the area.
  */
-static int early_nr_cpus_node(int node)
+static void *per_cpu_node_setup(void *cpu_data, int node)
 {
-	int cpu, n = 0;
+#ifdef CONFIG_SMP
+	int cpu;
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
-		if (node == node_cpuid[cpu].nid)
-			n++;
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		if (node == node_cpuid[cpu].nid) {
+			memcpy(__va(cpu_data), __phys_per_cpu_start,
+			       __per_cpu_end - __per_cpu_start);
+			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
+				__per_cpu_start;
+			cpu_data += PERCPU_PAGE_SIZE;
+		}
+	}
+#endif
+	return cpu_data;
+}
 
-	return n;
+/**
+ * fill_pernode - initialize pernode data.
+ * @node: the node id.
+ * @pernode: physical address of pernode data
+ * @pernodesize: size of the pernode data
+ */
+static void __init fill_pernode(int node, unsigned long pernode,
+	unsigned long pernodesize)
+{
+	void *cpu_data;
+	int cpus = early_nr_cpus_node(node);
+	struct bootmem_data *bdp = &mem_data[node].bootmem_data;
+
+	mem_data[node].pernode_addr = pernode;
+	mem_data[node].pernode_size = pernodesize;
+	memset(__va(pernode), 0, pernodesize);
+
+	cpu_data = (void *)pernode;
+	pernode += PERCPU_PAGE_SIZE * cpus;
+	pernode += node * L1_CACHE_BYTES;
+
+	mem_data[node].pgdat = __va(pernode);
+	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
+
+	mem_data[node].node_data = __va(pernode);
+	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
+
+	mem_data[node].pgdat->bdata = bdp;
+	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
+
+	cpu_data = per_cpu_node_setup(cpu_data, node);
+
+	return;
 }
 
 /**
@@ -304,9 +218,8 @@ static int early_nr_cpus_node(int node)
 static int __init find_pernode_space(unsigned long start, unsigned long len,
 				     int node)
 {
-	unsigned long epfn, cpu, cpus, phys_cpus;
+	unsigned long epfn;
 	unsigned long pernodesize = 0, pernode, pages, mapsize;
-	void *cpu_data;
 	struct bootmem_data *bdp = &mem_data[node].bootmem_data;
 
 	epfn = (start + len) >> PAGE_SHIFT;
@@ -329,49 +242,12 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
 	 * Calculate total size needed, incl. what's necessary
 	 * for good alignment and alias prevention.
 	 */
-	cpus = early_nr_cpus_node(node);
-	phys_cpus = early_nr_phys_cpus_node(node);
-	pernodesize += PERCPU_PAGE_SIZE * cpus;
-	pernodesize += node * L1_CACHE_BYTES;
-	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
-	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
-	pernodesize = PAGE_ALIGN(pernodesize);
+	pernodesize = compute_pernodesize(node);
 	pernode = NODEDATA_ALIGN(start, node);
 
 	/* Is this range big enough for what we want to store here? */
-	if (start + len > (pernode + pernodesize + mapsize)) {
-		mem_data[node].pernode_addr = pernode;
-		mem_data[node].pernode_size = pernodesize;
-		memset(__va(pernode), 0, pernodesize);
-
-		cpu_data = (void *)pernode;
-		pernode += PERCPU_PAGE_SIZE * cpus;
-		pernode += node * L1_CACHE_BYTES;
-
-		mem_data[node].pgdat = __va(pernode);
-		pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
-
-		mem_data[node].node_data = __va(pernode);
-		pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
-
-		mem_data[node].pgdat->bdata = bdp;
-		pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
-
-		/*
-		 * Copy the static per-cpu data into the region we
-		 * just set aside and then setup __per_cpu_offset
-		 * for each CPU on this node.
-		 */
-		for (cpu = 0; cpu < NR_CPUS; cpu++) {
-			if (node == node_cpuid[cpu].nid) {
-				memcpy(__va(cpu_data), __phys_per_cpu_start,
-				       __per_cpu_end - __per_cpu_start);
-				__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
-					__per_cpu_start;
-				cpu_data += PERCPU_PAGE_SIZE;
-			}
-		}
-	}
+	if (start + len > (pernode + pernodesize + mapsize))
+		fill_pernode(node, pernode, pernodesize);
 
 	return 0;
 }
@@ -411,6 +287,9 @@ static void __init reserve_pernode_space(void)
 	for_each_online_node(node) {
 		pg_data_t *pdp = mem_data[node].pgdat;
 
+		if (node_isset(node, memory_less_mask))
+			continue;
+
 		bdp = pdp->bdata;
 
 		/* First the bootmem_map itself */
@@ -436,8 +315,8 @@ static void __init reserve_pernode_space(void)
  */
 static void __init initialize_pernode_data(void)
 {
-	int cpu, node;
 	pg_data_t *pgdat_list[MAX_NUMNODES];
+	int cpu, node;
 
 	for_each_online_node(node)
 		pgdat_list[node] = mem_data[node].pgdat;
@@ -447,12 +326,99 @@ static void __init initialize_pernode_data(void)
 		memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
 		       sizeof(pgdat_list));
 	}
-
+#ifdef CONFIG_SMP
 	/* Set the node_data pointer for each per-cpu struct */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		node = node_cpuid[cpu].nid;
 		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
 	}
+#else
+	{
+		struct cpuinfo_ia64 *cpu0_cpu_info;
+		cpu = 0;
+		node = node_cpuid[cpu].nid;
+		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
+			((char *)&per_cpu__cpu_info - __per_cpu_start));
+		cpu0_cpu_info->node_data = mem_data[node].node_data;
+	}
+#endif /* CONFIG_SMP */
+}
+
+/**
+ * memory_less_node_alloc - * attempt to allocate memory on the best NUMA slit
+ * node but fall back to any other node when __alloc_bootmem_node fails
+ * for best.
+ * @nid: node id
+ * @pernodesize: size of this node's pernode data
+ * @align: alignment to use for this node's pernode data
+ */
+static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize,
+	unsigned long align)
+{
+	void *ptr = NULL;
+	u8 best = 0xff;
+	int bestnode = -1, node;
+
+	for_each_online_node(node) {
+		if (node_isset(node, memory_less_mask))
+			continue;
+		else if (node_distance(nid, node) < best) {
+			best = node_distance(nid, node);
+			bestnode = node;
+		}
+	}
+
+	ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat,
+		pernodesize, align, __pa(MAX_DMA_ADDRESS));
+
+	if (!ptr)
+		panic("NO memory for memory less node\n");
+	return ptr;
+}
+
+/**
+ * pgdat_insert - insert the pgdat into global pgdat_list
+ * @pgdat: the pgdat for a node.
+ */
+static void __init pgdat_insert(pg_data_t *pgdat)
+{
+	pg_data_t *prev = NULL, *next;
+
+	for_each_pgdat(next)
+		if (pgdat->node_id < next->node_id)
+			break;
+		else
+			prev = next;
+
+	if (prev) {
+		prev->pgdat_next = pgdat;
+		pgdat->pgdat_next = next;
+	} else {
+		pgdat->pgdat_next = pgdat_list;
+		pgdat_list = pgdat;
+	}
+
+	return;
+}
+
+/**
+ * memory_less_nodes - allocate and initialize CPU only nodes pernode
+ * information.
+ */
+static void __init memory_less_nodes(void)
+{
+	unsigned long pernodesize;
+	void *pernode;
+	int node;
+
+	for_each_node_mask(node, memory_less_mask) {
+		pernodesize = compute_pernodesize(node);
+		pernode = memory_less_node_alloc(node, pernodesize,
+			(node) ? (node * PERCPU_PAGE_SIZE) : (1024*1024));
+		fill_pernode(node, __pa(pernode), pernodesize);
+	}
+
+	return;
 }
 
 /**
@@ -472,16 +438,19 @@ void __init find_memory(void)
 		node_set_online(0);
 	}
 
+	nodes_or(memory_less_mask, memory_less_mask, node_online_map);
 	min_low_pfn = -1;
 	max_low_pfn = 0;
 
-	if (num_online_nodes() > 1)
-		reassign_cpu_only_nodes();
-
 	/* These actually end up getting called by call_pernode_memory() */
 	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
 	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
 
+	for_each_online_node(node)
+		if (mem_data[node].bootmem_data.node_low_pfn) {
+			node_clear(node, memory_less_mask);
+			mem_data[node].min_pfn = ~0UL;
+		}
 	/*
 	 * Initialize the boot memory maps in reverse order since that's
 	 * what the bootmem allocator expects
@@ -492,17 +461,14 @@ void __init find_memory(void)
 
 		if (!node_online(node))
 			continue;
+		else if (node_isset(node, memory_less_mask))
+			continue;
 
 		bdp = &mem_data[node].bootmem_data;
 		pernode = mem_data[node].pernode_addr;
 		pernodesize = mem_data[node].pernode_size;
 		map = pernode + pernodesize;
 
-		/* Sanity check... */
-		if (!pernode)
-			panic("pernode space for node %d "
-			      "could not be allocated!", node);
-
 		init_bootmem_node(mem_data[node].pgdat,
 				  map>>PAGE_SHIFT,
 				  bdp->node_boot_start>>PAGE_SHIFT,
@@ -512,6 +478,7 @@ void __init find_memory(void)
 	efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);
 
 	reserve_pernode_space();
+	memory_less_nodes();
 	initialize_pernode_data();
 
 	max_pfn = max_low_pfn;
@@ -519,6 +486,7 @@ void __init find_memory(void)
 	find_initrd();
 }
 
+#ifdef CONFIG_SMP
 /**
  * per_cpu_init - setup per-cpu variables
  *
@@ -529,15 +497,15 @@ void *per_cpu_init(void)
 {
 	int cpu;
 
-	if (smp_processor_id() == 0) {
-		for (cpu = 0; cpu < NR_CPUS; cpu++) {
-			per_cpu(local_per_cpu_offset, cpu) =
-				__per_cpu_offset[cpu];
-		}
-	}
+	if (smp_processor_id() != 0)
+		return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++)
+		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
 
 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }
+#endif /* CONFIG_SMP */
 
 /**
  * show_mem - give short summary of memory stats
@@ -680,12 +648,13 @@ void __init paging_init(void)
 
 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
-	/* so min() will work in count_node_pages */
-	for_each_online_node(node)
-		mem_data[node].min_pfn = ~0UL;
-
 	efi_memmap_walk(filter_rsvd_memory, count_node_pages);
 
+	vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+	vmem_map = (struct page *) vmalloc_end;
+	efi_memmap_walk(create_mem_map_page_table, NULL);
+	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
+
 	for_each_online_node(node) {
 		memset(zones_size, 0, sizeof(zones_size));
 		memset(zholes_size, 0, sizeof(zholes_size));
@@ -719,15 +688,6 @@ void __init paging_init(void)
 				       mem_data[node].num_dma_physpages);
 		}
 
-		if (node == 0) {
-			vmalloc_end -=
-				PAGE_ALIGN(max_low_pfn * sizeof(struct page));
-			vmem_map = (struct page *) vmalloc_end;
-
-			efi_memmap_walk(create_mem_map_page_table, NULL);
-			printk("Virtual mem_map starts at 0x%p\n", vmem_map);
-		}
-
 		pfn_offset = mem_data[node].min_pfn;
 
 		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
@@ -735,5 +695,11 @@ void __init paging_init(void)
 			    pfn_offset, zholes_size);
 	}
 
+	/*
+	 * Make memory less nodes become a member of the known nodes.
+	 */
+	for_each_node_mask(node, memory_less_mask)
+		pgdat_insert(mem_data[node].pgdat);
+
 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 4eb2f52b87a1..65f9958db9f0 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -597,7 +597,8 @@ mem_init (void)
 	kclist_add(&kcore_kernel, _stext, _end - _stext);
 
 	for_each_pgdat(pgdat)
-		totalram_pages += free_all_bootmem_node(pgdat);
+		if (pgdat->bdata->node_bootmem_map)
+			totalram_pages += free_all_bootmem_node(pgdat);
 
 	reserved_pages = 0;
 	efi_memmap_walk(count_reserved_pages, &reserved_pages);
diff --git a/arch/ia64/sn/include/pci/pcibr_provider.h b/arch/ia64/sn/include/pci/pcibr_provider.h
deleted file mode 100644
index 1cd291d8badd..000000000000
--- a/arch/ia64/sn/include/pci/pcibr_provider.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
-#define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
-
-/* Workarounds */
-#define PV907516 (1 << 1) /* TIOCP: Don't write the write buffer flush reg */
-
-#define BUSTYPE_MASK 0x1
-
-/* Macros given a pcibus structure */
-#define IS_PCIX(ps) ((ps)->pbi_bridge_mode & BUSTYPE_MASK)
-#define IS_PCI_BRIDGE_ASIC(asic) (asic == PCIIO_ASIC_TYPE_PIC || \
-		asic == PCIIO_ASIC_TYPE_TIOCP)
-#define IS_PIC_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_PIC)
-
-
-/*
- * The different PCI Bridge types supported on the SGI Altix platforms
- */
-#define PCIBR_BRIDGETYPE_UNKNOWN -1
-#define PCIBR_BRIDGETYPE_PIC 2
-#define PCIBR_BRIDGETYPE_TIOCP 3
-
-/*
- * Bridge 64bit Direct Map Attributes
- */
-#define PCI64_ATTR_PREF (1ull << 59)
-#define PCI64_ATTR_PREC (1ull << 58)
-#define PCI64_ATTR_VIRTUAL (1ull << 57)
-#define PCI64_ATTR_BAR (1ull << 56)
-#define PCI64_ATTR_SWAP (1ull << 55)
-#define PCI64_ATTR_VIRTUAL1 (1ull << 54)
-
-#define PCI32_LOCAL_BASE 0
-#define PCI32_MAPPED_BASE 0x40000000
-#define PCI32_DIRECT_BASE 0x80000000
-
-#define IS_PCI32_MAPPED(x) ((uint64_t)(x) < PCI32_DIRECT_BASE && \
-		(uint64_t)(x) >= PCI32_MAPPED_BASE)
-#define IS_PCI32_DIRECT(x) ((uint64_t)(x) >= PCI32_MAPPED_BASE)
-
-
-/*
- * Bridge PMU Address Transaltion Entry Attibutes
- */
-#define PCI32_ATE_V (0x1 << 0)
-#define PCI32_ATE_CO (0x1 << 1)
-#define PCI32_ATE_PREC (0x1 << 2)
-#define PCI32_ATE_PREF (0x1 << 3)
-#define PCI32_ATE_BAR (0x1 << 4)
-#define PCI32_ATE_ADDR_SHFT 12
-
-#define MINIMAL_ATES_REQUIRED(addr, size) \
-	(IOPG(IOPGOFF(addr) + (size) - 1) == IOPG((size) - 1))
-
-#define MINIMAL_ATE_FLAG(addr, size) \
-	(MINIMAL_ATES_REQUIRED((uint64_t)addr, size) ? 1 : 0)
-
-/* bit 29 of the pci address is the SWAP bit */
-#define ATE_SWAPSHIFT 29
-#define ATE_SWAP_ON(x) ((x) |= (1 << ATE_SWAPSHIFT))
-#define ATE_SWAP_OFF(x) ((x) &= ~(1 << ATE_SWAPSHIFT))
-
-/*
- * I/O page size
- */
-#if PAGE_SIZE < 16384
-#define IOPFNSHIFT 12 /* 4K per mapped page */
-#else
-#define IOPFNSHIFT 14 /* 16K per mapped page */
-#endif
-
-#define IOPGSIZE (1 << IOPFNSHIFT)
-#define IOPG(x) ((x) >> IOPFNSHIFT)
-#define IOPGOFF(x) ((x) & (IOPGSIZE-1))
-
-#define PCIBR_DEV_SWAP_DIR (1ull << 19)
-#define PCIBR_CTRL_PAGE_SIZE (0x1 << 21)
-
-/*
- * PMU resources.
- */
-struct ate_resource{
-	uint64_t *ate;
-	uint64_t num_ate;
-	uint64_t lowest_free_index;
-};
-
-struct pcibus_info {
-	struct pcibus_bussoft pbi_buscommon; /* common header */
-	uint32_t pbi_moduleid;
-	short pbi_bridge_type;
-	short pbi_bridge_mode;
-
-	struct ate_resource pbi_int_ate_resource;
-	uint64_t pbi_int_ate_size;
-
-	uint64_t pbi_dir_xbase;
-	char pbi_hub_xid;
-
-	uint64_t pbi_devreg[8];
-	spinlock_t pbi_lock;
-
-	uint32_t pbi_valid_devices;
-	uint32_t pbi_enabled_devices;
-};
-
-/*
- * pcibus_info structure locking macros
- */
-inline static unsigned long
-pcibr_lock(struct pcibus_info *pcibus_info)
-{
-	unsigned long flag;
-	spin_lock_irqsave(&pcibus_info->pbi_lock, flag);
-	return(flag);
-}
-#define pcibr_unlock(pcibus_info, flag) spin_unlock_irqrestore(&pcibus_info->pbi_lock, flag)
-
-extern int pcibr_init_provider(void);
-extern void *pcibr_bus_fixup(struct pcibus_bussoft *);
-extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t);
-extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t);
-extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int);
-
-/*
- * prototypes for the bridge asic register access routines in pcibr_reg.c
- */
-extern void pcireg_control_bit_clr(struct pcibus_info *, uint64_t);
-extern void pcireg_control_bit_set(struct pcibus_info *, uint64_t);
-extern uint64_t pcireg_tflush_get(struct pcibus_info *);
-extern uint64_t pcireg_intr_status_get(struct pcibus_info *);
-extern void pcireg_intr_enable_bit_clr(struct pcibus_info *, uint64_t);
-extern void pcireg_intr_enable_bit_set(struct pcibus_info *, uint64_t);
-extern void pcireg_intr_addr_addr_set(struct pcibus_info *, int, uint64_t);
-extern void pcireg_force_intr_set(struct pcibus_info *, int);
-extern uint64_t pcireg_wrb_flush_get(struct pcibus_info *, int);
-extern void pcireg_int_ate_set(struct pcibus_info *, int, uint64_t);
-extern uint64_t * pcireg_int_ate_addr(struct pcibus_info *, int);
-extern void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info);
-extern void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info);
-extern int pcibr_ate_alloc(struct pcibus_info *, int);
-extern void pcibr_ate_free(struct pcibus_info *, int);
-extern void ate_write(struct pcibus_info *, int, int, uint64_t);
-#endif
diff --git a/arch/ia64/sn/include/pci/pic.h b/arch/ia64/sn/include/pci/pic.h deleted file mode 100644 index fd18acecb1e6..000000000000 --- a/arch/ia64/sn/include/pci/pic.h +++ /dev/null | |||
@@ -1,261 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | #ifndef _ASM_IA64_SN_PCI_PIC_H | ||
9 | #define _ASM_IA64_SN_PCI_PIC_H | ||
10 | |||
11 | /* | ||
12 | * PIC AS DEVICE ZERO | ||
13 | * ------------------ | ||
14 | * | ||
15 | * PIC handles PCI/X busses. PCI/X requires that the 'bridge' (i.e. PIC) | ||
16 | * be designated as 'device 0'. That is a departure from earlier SGI | ||
17 | * PCI bridges. Because of that we use config space 1 to access the | ||
18 | * config space of the first actual PCI device on the bus. | ||
19 | * Here's what the PIC manual says: | ||
20 | * | ||
21 | * The current PCI-X bus specification now defines that the parent | ||
22 | * host's bus bridge (PIC for example) must be device 0 on bus 0. PIC | ||
23 | * reduced the total number of devices from 8 to 4 and removed the | ||
24 | * device registers and windows, now only supporting devices 0,1,2, and | ||
25 | * 3. PIC did leave all 8 configuration space windows. The reason was | ||
26 | * there was nothing to gain by removing them. Herein lies the problem. | ||
27 | * The device numbering we use, 0 through 3, is unrelated to the device | ||
28 | * numbering which PCI-X requires in configuration space. In the past we | ||
29 | * correlated config space and our device space 0 <-> 0, 1 <-> 1, etc. | ||
30 | * PCI-X requires we start at 1, not 0, and currently the PX brick | ||
31 | * does associate our: | ||
32 | * | ||
33 | * device 0 with configuration space window 1, | ||
34 | * device 1 with configuration space window 2, | ||
35 | * device 2 with configuration space window 3, | ||
36 | * device 3 with configuration space window 4. | ||
37 | * | ||
38 | * The net effect is that all config space accesses are off-by-one in | ||
39 | * relation to other per-slot accesses on the PIC. | ||
40 | * Here is a table that shows some of that: | ||
41 | * | ||
42 | * Internal Slot# | ||
43 | * | | ||
44 | * | 0 1 2 3 | ||
45 | * ----------|--------------------------------------- | ||
46 | * config | 0x21000 0x22000 0x23000 0x24000 | ||
47 | * | | ||
48 | * even rrb | 0[0] n/a 1[0] n/a [] == implied even/odd | ||
49 | * | | ||
50 | * odd rrb | n/a 0[1] n/a 1[1] | ||
51 | * | | ||
52 | * int dev | 00 01 10 11 | ||
53 | * | | ||
54 | * ext slot# | 1 2 3 4 | ||
55 | * ----------|--------------------------------------- | ||
56 | */ | ||
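
Put another way, internal device n sits behind configuration space window n + 1 (base 0x21000 + n * 0x1000) and is labeled as external slot n + 1. The short user-space sketch below just re-derives the table rows; the constants come from the table above and the loop bound of four devices matches the text:

    /* Reproduce the PIC slot/config-window rows; illustration only. */
    #include <stdio.h>

    int main(void)
    {
            int slot;

            for (slot = 0; slot < 4; slot++) {
                    unsigned cfg_base = 0x21000 + slot * 0x1000; /* window n+1 */
                    printf("internal slot %d -> config 0x%05x, ext slot %d\n",
                           slot, cfg_base, slot + 1);
            }
            return 0;
    }
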
57 | |||
58 | #define PIC_ATE_TARGETID_SHFT 8 | ||
59 | #define PIC_HOST_INTR_ADDR 0x0000FFFFFFFFFFFFUL | ||
60 | #define PIC_PCI64_ATTR_TARG_SHFT 60 | ||
61 | |||
62 | |||
63 | /***************************************************************************** | ||
64 | *********************** PIC MMR structure mapping *************************** | ||
65 | *****************************************************************************/ | ||
66 | |||
67 | /* NOTE: PIC WAR. PV#854697. PIC does not allow writes just to [31:0] | ||
68 | * of a 64-bit register. When writing PIC registers, always write the | ||
69 | * entire 64 bits. | ||
70 | */ | ||
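
The practical consequence of this workaround is that even a change confined to bits [31:0] must be performed as a full 64-bit read-modify-write. The following is a minimal stand-alone sketch of that pattern (fake_mmr and mmr_set_low32() are invented names; a real driver would go through its own register accessors):

    /* 64-bit read-modify-write, as the PV#854697 note requires; sketch only. */
    #include <stdio.h>
    #include <stdint.h>

    static volatile uint64_t fake_mmr = 0x1234567800000000ULL;

    static void mmr_set_low32(volatile uint64_t *reg, uint32_t val)
    {
            uint64_t tmp = *reg;                    /* read the whole register */
            tmp = (tmp & 0xFFFFFFFF00000000ULL) | val;
            *reg = tmp;                             /* write back all 64 bits */
    }

    int main(void)
    {
            mmr_set_low32(&fake_mmr, 0xDEADBEEF);
            printf("0x%016llx\n", (unsigned long long)fake_mmr);
            return 0;
    }
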
71 | |||
72 | struct pic { | ||
73 | |||
74 | /* 0x000000-0x00FFFF -- Local Registers */ | ||
75 | |||
76 | /* 0x000000-0x000057 -- Standard Widget Configuration */ | ||
77 | uint64_t p_wid_id; /* 0x000000 */ | ||
78 | uint64_t p_wid_stat; /* 0x000008 */ | ||
79 | uint64_t p_wid_err_upper; /* 0x000010 */ | ||
80 | uint64_t p_wid_err_lower; /* 0x000018 */ | ||
81 | #define p_wid_err p_wid_err_lower | ||
82 | uint64_t p_wid_control; /* 0x000020 */ | ||
83 | uint64_t p_wid_req_timeout; /* 0x000028 */ | ||
84 | uint64_t p_wid_int_upper; /* 0x000030 */ | ||
85 | uint64_t p_wid_int_lower; /* 0x000038 */ | ||
86 | #define p_wid_int p_wid_int_lower | ||
87 | uint64_t p_wid_err_cmdword; /* 0x000040 */ | ||
88 | uint64_t p_wid_llp; /* 0x000048 */ | ||
89 | uint64_t p_wid_tflush; /* 0x000050 */ | ||
90 | |||
91 | /* 0x000058-0x00007F -- Bridge-specific Widget Configuration */ | ||
92 | uint64_t p_wid_aux_err; /* 0x000058 */ | ||
93 | uint64_t p_wid_resp_upper; /* 0x000060 */ | ||
94 | uint64_t p_wid_resp_lower; /* 0x000068 */ | ||
95 | #define p_wid_resp p_wid_resp_lower | ||
96 | uint64_t p_wid_tst_pin_ctrl; /* 0x000070 */ | ||
97 | uint64_t p_wid_addr_lkerr; /* 0x000078 */ | ||
98 | |||
99 | /* 0x000080-0x00008F -- PMU & MAP */ | ||
100 | uint64_t p_dir_map; /* 0x000080 */ | ||
101 | uint64_t _pad_000088; /* 0x000088 */ | ||
102 | |||
103 | /* 0x000090-0x00009F -- SSRAM */ | ||
104 | uint64_t p_map_fault; /* 0x000090 */ | ||
105 | uint64_t _pad_000098; /* 0x000098 */ | ||
106 | |||
107 | /* 0x0000A0-0x0000AF -- Arbitration */ | ||
108 | uint64_t p_arb; /* 0x0000A0 */ | ||
109 | uint64_t _pad_0000A8; /* 0x0000A8 */ | ||
110 | |||
111 | /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */ | ||
112 | uint64_t p_ate_parity_err; /* 0x0000B0 */ | ||
113 | uint64_t _pad_0000B8; /* 0x0000B8 */ | ||
114 | |||
115 | /* 0x0000C0-0x0000FF -- PCI/GIO */ | ||
116 | uint64_t p_bus_timeout; /* 0x0000C0 */ | ||
117 | uint64_t p_pci_cfg; /* 0x0000C8 */ | ||
118 | uint64_t p_pci_err_upper; /* 0x0000D0 */ | ||
119 | uint64_t p_pci_err_lower; /* 0x0000D8 */ | ||
120 | #define p_pci_err p_pci_err_lower | ||
121 | uint64_t _pad_0000E0[4]; /* 0x0000{E0..F8} */ | ||
122 | |||
123 | /* 0x000100-0x0001FF -- Interrupt */ | ||
124 | uint64_t p_int_status; /* 0x000100 */ | ||
125 | uint64_t p_int_enable; /* 0x000108 */ | ||
126 | uint64_t p_int_rst_stat; /* 0x000110 */ | ||
127 | uint64_t p_int_mode; /* 0x000118 */ | ||
128 | uint64_t p_int_device; /* 0x000120 */ | ||
129 | uint64_t p_int_host_err; /* 0x000128 */ | ||
130 | uint64_t p_int_addr[8]; /* 0x0001{30,,,68} */ | ||
131 | uint64_t p_err_int_view; /* 0x000170 */ | ||
132 | uint64_t p_mult_int; /* 0x000178 */ | ||
133 | uint64_t p_force_always[8]; /* 0x0001{80,,,B8} */ | ||
134 | uint64_t p_force_pin[8]; /* 0x0001{C0,,,F8} */ | ||
135 | |||
136 | /* 0x000200-0x000298 -- Device */ | ||
137 | uint64_t p_device[4]; /* 0x0002{00,,,18} */ | ||
138 | uint64_t _pad_000220[4]; /* 0x0002{20,,,38} */ | ||
139 | uint64_t p_wr_req_buf[4]; /* 0x0002{40,,,58} */ | ||
140 | uint64_t _pad_000260[4]; /* 0x0002{60,,,78} */ | ||
141 | uint64_t p_rrb_map[2]; /* 0x0002{80,,,88} */ | ||
142 | #define p_even_resp p_rrb_map[0] /* 0x000280 */ | ||
143 | #define p_odd_resp p_rrb_map[1] /* 0x000288 */ | ||
144 | uint64_t p_resp_status; /* 0x000290 */ | ||
145 | uint64_t p_resp_clear; /* 0x000298 */ | ||
146 | |||
147 | uint64_t _pad_0002A0[12]; /* 0x0002{A0..F8} */ | ||
148 | |||
149 | /* 0x000300-0x0003F8 -- Buffer Address Match Registers */ | ||
150 | struct { | ||
151 | uint64_t upper; /* 0x0003{00,,,F0} */ | ||
152 | uint64_t lower; /* 0x0003{08,,,F8} */ | ||
153 | } p_buf_addr_match[16]; | ||
154 | |||
155 | /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */ | ||
156 | struct { | ||
157 | uint64_t flush_w_touch; /* 0x000{400,,,5C0} */ | ||
158 | uint64_t flush_wo_touch; /* 0x000{408,,,5C8} */ | ||
159 | uint64_t inflight; /* 0x000{410,,,5D0} */ | ||
160 | uint64_t prefetch; /* 0x000{418,,,5D8} */ | ||
161 | uint64_t total_pci_retry; /* 0x000{420,,,5E0} */ | ||
162 | uint64_t max_pci_retry; /* 0x000{428,,,5E8} */ | ||
163 | uint64_t max_latency; /* 0x000{430,,,5F0} */ | ||
164 | uint64_t clear_all; /* 0x000{438,,,5F8} */ | ||
165 | } p_buf_count[8]; | ||
166 | |||
167 | |||
168 | /* 0x000600-0x0009FF -- PCI/X registers */ | ||
169 | uint64_t p_pcix_bus_err_addr; /* 0x000600 */ | ||
170 | uint64_t p_pcix_bus_err_attr; /* 0x000608 */ | ||
171 | uint64_t p_pcix_bus_err_data; /* 0x000610 */ | ||
172 | uint64_t p_pcix_pio_split_addr; /* 0x000618 */ | ||
173 | uint64_t p_pcix_pio_split_attr; /* 0x000620 */ | ||
174 | uint64_t p_pcix_dma_req_err_attr; /* 0x000628 */ | ||
175 | uint64_t p_pcix_dma_req_err_addr; /* 0x000630 */ | ||
176 | uint64_t p_pcix_timeout; /* 0x000638 */ | ||
177 | |||
178 | uint64_t _pad_000640[120]; /* 0x000{640,,,9F8} */ | ||
179 | |||
180 | /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */ | ||
181 | struct { | ||
182 | uint64_t p_buf_addr; /* 0x000{A00,,,AF0} */ | ||
183 | uint64_t p_buf_attr; /* 0X000{A08,,,AF8} */ | ||
184 | } p_pcix_read_buf_64[16]; | ||
185 | |||
186 | struct { | ||
187 | uint64_t p_buf_addr; /* 0x000{B00,,,BE0} */ | ||
188 | uint64_t p_buf_attr; /* 0x000{B08,,,BE8} */ | ||
189 | uint64_t p_buf_valid; /* 0x000{B10,,,BF0} */ | ||
190 | uint64_t __pad1; /* 0x000{B18,,,BF8} */ | ||
191 | } p_pcix_write_buf_64[8]; | ||
192 | |||
193 | /* End of Local Registers -- Start of Address Map space */ | ||
194 | |||
195 | char _pad_000c00[0x010000 - 0x000c00]; | ||
196 | |||
197 | /* 0x010000-0x011fff -- Internal ATE RAM (Auto Parity Generation) */ | ||
198 | uint64_t p_int_ate_ram[1024]; /* 0x010000-0x011fff */ | ||
199 | |||
200 | /* 0x012000-0x013fff -- Internal ATE RAM (Manual Parity Generation) */ | ||
201 | uint64_t p_int_ate_ram_mp[1024]; /* 0x012000-0x013fff */ | ||
202 | |||
203 | char _pad_014000[0x18000 - 0x014000]; | ||
204 | |||
205 | /* 0x18000-0x197F8 -- PIC Write Request Ram */ | ||
206 | uint64_t p_wr_req_lower[256]; /* 0x18000 - 0x187F8 */ | ||
207 | uint64_t p_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */ | ||
208 | uint64_t p_wr_req_parity[256]; /* 0x19000 - 0x197F8 */ | ||
209 | |||
210 | char _pad_019800[0x20000 - 0x019800]; | ||
211 | |||
212 | /* 0x020000-0x027FFF -- PCI Device Configuration Spaces */ | ||
213 | union { | ||
214 | uint8_t c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */ | ||
215 | uint16_t s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */ | ||
216 | uint32_t l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */ | ||
217 | uint64_t d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */ | ||
218 | union { | ||
219 | uint8_t c[0x100 / 1]; | ||
220 | uint16_t s[0x100 / 2]; | ||
221 | uint32_t l[0x100 / 4]; | ||
222 | uint64_t d[0x100 / 8]; | ||
223 | } f[8]; | ||
224 | } p_type0_cfg_dev[8]; /* 0x02{0000,,,7FFF} */ | ||
225 | |||
226 | /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */ | ||
227 | union { | ||
228 | uint8_t c[0x1000 / 1]; /* 0x028000-0x029000 */ | ||
229 | uint16_t s[0x1000 / 2]; /* 0x028000-0x029000 */ | ||
230 | uint32_t l[0x1000 / 4]; /* 0x028000-0x029000 */ | ||
231 | uint64_t d[0x1000 / 8]; /* 0x028000-0x029000 */ | ||
232 | union { | ||
233 | uint8_t c[0x100 / 1]; | ||
234 | uint16_t s[0x100 / 2]; | ||
235 | uint32_t l[0x100 / 4]; | ||
236 | uint64_t d[0x100 / 8]; | ||
237 | } f[8]; | ||
238 | } p_type1_cfg; /* 0x028000-0x029000 */ | ||
239 | |||
240 | char _pad_029000[0x030000-0x029000]; | ||
241 | |||
242 | /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */ | ||
243 | union { | ||
244 | uint8_t c[8 / 1]; | ||
245 | uint16_t s[8 / 2]; | ||
246 | uint32_t l[8 / 4]; | ||
247 | uint64_t d[8 / 8]; | ||
248 | } p_pci_iack; /* 0x030000-0x030007 */ | ||
249 | |||
250 | char _pad_030007[0x040000-0x030008]; | ||
251 | |||
252 | /* 0x040000-0x040007 -- PCIX Special Cycle */ | ||
253 | union { | ||
254 | uint8_t c[8 / 1]; | ||
255 | uint16_t s[8 / 2]; | ||
256 | uint32_t l[8 / 4]; | ||
257 | uint64_t d[8 / 8]; | ||
258 | } p_pcix_cycle; /* 0x040000-0x040007 */ | ||
259 | }; | ||
260 | |||
261 | #endif /* _ASM_IA64_SN_PCI_PIC_H */ | ||
diff --git a/arch/ia64/sn/include/pci/tiocp.h b/arch/ia64/sn/include/pci/tiocp.h deleted file mode 100644 index f07c83b2bf6e..000000000000 --- a/arch/ia64/sn/include/pci/tiocp.h +++ /dev/null | |||
@@ -1,256 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2003-2004 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | #ifndef _ASM_IA64_SN_PCI_TIOCP_H | ||
9 | #define _ASM_IA64_SN_PCI_TIOCP_H | ||
10 | |||
11 | #define TIOCP_HOST_INTR_ADDR 0x003FFFFFFFFFFFFFUL | ||
12 | #define TIOCP_PCI64_CMDTYPE_MEM (0x1ull << 60) | ||
13 | |||
14 | |||
15 | /***************************************************************************** | ||
16 | *********************** TIOCP MMR structure mapping *************************** | ||
17 | *****************************************************************************/ | ||
18 | |||
19 | struct tiocp{ | ||
20 | |||
21 | /* 0x000000-0x00FFFF -- Local Registers */ | ||
22 | |||
23 | /* 0x000000-0x000057 -- (Legacy Widget Space) Configuration */ | ||
24 | uint64_t cp_id; /* 0x000000 */ | ||
25 | uint64_t cp_stat; /* 0x000008 */ | ||
26 | uint64_t cp_err_upper; /* 0x000010 */ | ||
27 | uint64_t cp_err_lower; /* 0x000018 */ | ||
28 | #define cp_err cp_err_lower | ||
29 | uint64_t cp_control; /* 0x000020 */ | ||
30 | uint64_t cp_req_timeout; /* 0x000028 */ | ||
31 | uint64_t cp_intr_upper; /* 0x000030 */ | ||
32 | uint64_t cp_intr_lower; /* 0x000038 */ | ||
33 | #define cp_intr cp_intr_lower | ||
34 | uint64_t cp_err_cmdword; /* 0x000040 */ | ||
35 | uint64_t _pad_000048; /* 0x000048 */ | ||
36 | uint64_t cp_tflush; /* 0x000050 */ | ||
37 | |||
38 | /* 0x000058-0x00007F -- Bridge-specific Configuration */ | ||
39 | uint64_t cp_aux_err; /* 0x000058 */ | ||
40 | uint64_t cp_resp_upper; /* 0x000060 */ | ||
41 | uint64_t cp_resp_lower; /* 0x000068 */ | ||
42 | #define cp_resp cp_resp_lower | ||
43 | uint64_t cp_tst_pin_ctrl; /* 0x000070 */ | ||
44 | uint64_t cp_addr_lkerr; /* 0x000078 */ | ||
45 | |||
46 | /* 0x000080-0x00008F -- PMU & MAP */ | ||
47 | uint64_t cp_dir_map; /* 0x000080 */ | ||
48 | uint64_t _pad_000088; /* 0x000088 */ | ||
49 | |||
50 | /* 0x000090-0x00009F -- SSRAM */ | ||
51 | uint64_t cp_map_fault; /* 0x000090 */ | ||
52 | uint64_t _pad_000098; /* 0x000098 */ | ||
53 | |||
54 | /* 0x0000A0-0x0000AF -- Arbitration */ | ||
55 | uint64_t cp_arb; /* 0x0000A0 */ | ||
56 | uint64_t _pad_0000A8; /* 0x0000A8 */ | ||
57 | |||
58 | /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */ | ||
59 | uint64_t cp_ate_parity_err; /* 0x0000B0 */ | ||
60 | uint64_t _pad_0000B8; /* 0x0000B8 */ | ||
61 | |||
62 | /* 0x0000C0-0x0000FF -- PCI/GIO */ | ||
63 | uint64_t cp_bus_timeout; /* 0x0000C0 */ | ||
64 | uint64_t cp_pci_cfg; /* 0x0000C8 */ | ||
65 | uint64_t cp_pci_err_upper; /* 0x0000D0 */ | ||
66 | uint64_t cp_pci_err_lower; /* 0x0000D8 */ | ||
67 | #define cp_pci_err cp_pci_err_lower | ||
68 | uint64_t _pad_0000E0[4]; /* 0x0000{E0..F8} */ | ||
69 | |||
70 | /* 0x000100-0x0001FF -- Interrupt */ | ||
71 | uint64_t cp_int_status; /* 0x000100 */ | ||
72 | uint64_t cp_int_enable; /* 0x000108 */ | ||
73 | uint64_t cp_int_rst_stat; /* 0x000110 */ | ||
74 | uint64_t cp_int_mode; /* 0x000118 */ | ||
75 | uint64_t cp_int_device; /* 0x000120 */ | ||
76 | uint64_t cp_int_host_err; /* 0x000128 */ | ||
77 | uint64_t cp_int_addr[8]; /* 0x0001{30,,,68} */ | ||
78 | uint64_t cp_err_int_view; /* 0x000170 */ | ||
79 | uint64_t cp_mult_int; /* 0x000178 */ | ||
80 | uint64_t cp_force_always[8]; /* 0x0001{80,,,B8} */ | ||
81 | uint64_t cp_force_pin[8]; /* 0x0001{C0,,,F8} */ | ||
82 | |||
83 | /* 0x000200-0x000298 -- Device */ | ||
84 | uint64_t cp_device[4]; /* 0x0002{00,,,18} */ | ||
85 | uint64_t _pad_000220[4]; /* 0x0002{20,,,38} */ | ||
86 | uint64_t cp_wr_req_buf[4]; /* 0x0002{40,,,58} */ | ||
87 | uint64_t _pad_000260[4]; /* 0x0002{60,,,78} */ | ||
88 | uint64_t cp_rrb_map[2]; /* 0x0002{80,,,88} */ | ||
89 | #define cp_even_resp cp_rrb_map[0] /* 0x000280 */ | ||
90 | #define cp_odd_resp cp_rrb_map[1] /* 0x000288 */ | ||
91 | uint64_t cp_resp_status; /* 0x000290 */ | ||
92 | uint64_t cp_resp_clear; /* 0x000298 */ | ||
93 | |||
94 | uint64_t _pad_0002A0[12]; /* 0x0002{A0..F8} */ | ||
95 | |||
96 | /* 0x000300-0x0003F8 -- Buffer Address Match Registers */ | ||
97 | struct { | ||
98 | uint64_t upper; /* 0x0003{00,,,F0} */ | ||
99 | uint64_t lower; /* 0x0003{08,,,F8} */ | ||
100 | } cp_buf_addr_match[16]; | ||
101 | |||
102 | /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */ | ||
103 | struct { | ||
104 | uint64_t flush_w_touch; /* 0x000{400,,,5C0} */ | ||
105 | uint64_t flush_wo_touch; /* 0x000{408,,,5C8} */ | ||
106 | uint64_t inflight; /* 0x000{410,,,5D0} */ | ||
107 | uint64_t prefetch; /* 0x000{418,,,5D8} */ | ||
108 | uint64_t total_pci_retry; /* 0x000{420,,,5E0} */ | ||
109 | uint64_t max_pci_retry; /* 0x000{428,,,5E8} */ | ||
110 | uint64_t max_latency; /* 0x000{430,,,5F0} */ | ||
111 | uint64_t clear_all; /* 0x000{438,,,5F8} */ | ||
112 | } cp_buf_count[8]; | ||
113 | |||
114 | |||
115 | /* 0x000600-0x0009FF -- PCI/X registers */ | ||
116 | uint64_t cp_pcix_bus_err_addr; /* 0x000600 */ | ||
117 | uint64_t cp_pcix_bus_err_attr; /* 0x000608 */ | ||
118 | uint64_t cp_pcix_bus_err_data; /* 0x000610 */ | ||
119 | uint64_t cp_pcix_pio_split_addr; /* 0x000618 */ | ||
120 | uint64_t cp_pcix_pio_split_attr; /* 0x000620 */ | ||
121 | uint64_t cp_pcix_dma_req_err_attr; /* 0x000628 */ | ||
122 | uint64_t cp_pcix_dma_req_err_addr; /* 0x000630 */ | ||
123 | uint64_t cp_pcix_timeout; /* 0x000638 */ | ||
124 | |||
125 | uint64_t _pad_000640[24]; /* 0x000{640,,,6F8} */ | ||
126 | |||
127 | /* 0x000700-0x000737 -- Debug Registers */ | ||
128 | uint64_t cp_ct_debug_ctl; /* 0x000700 */ | ||
129 | uint64_t cp_br_debug_ctl; /* 0x000708 */ | ||
130 | uint64_t cp_mux3_debug_ctl; /* 0x000710 */ | ||
131 | uint64_t cp_mux4_debug_ctl; /* 0x000718 */ | ||
132 | uint64_t cp_mux5_debug_ctl; /* 0x000720 */ | ||
133 | uint64_t cp_mux6_debug_ctl; /* 0x000728 */ | ||
134 | uint64_t cp_mux7_debug_ctl; /* 0x000730 */ | ||
135 | |||
136 | uint64_t _pad_000738[89]; /* 0x000{738,,,9F8} */ | ||
137 | |||
138 | /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */ | ||
139 | struct { | ||
140 | uint64_t cp_buf_addr; /* 0x000{A00,,,AF0} */ | ||
141 | uint64_t cp_buf_attr; /* 0X000{A08,,,AF8} */ | ||
142 | } cp_pcix_read_buf_64[16]; | ||
143 | |||
144 | struct { | ||
145 | uint64_t cp_buf_addr; /* 0x000{B00,,,BE0} */ | ||
146 | uint64_t cp_buf_attr; /* 0x000{B08,,,BE8} */ | ||
147 | uint64_t cp_buf_valid; /* 0x000{B10,,,BF0} */ | ||
148 | uint64_t __pad1; /* 0x000{B18,,,BF8} */ | ||
149 | } cp_pcix_write_buf_64[8]; | ||
150 | |||
151 | /* End of Local Registers -- Start of Address Map space */ | ||
152 | |||
153 | char _pad_000c00[0x010000 - 0x000c00]; | ||
154 | |||
155 | /* 0x010000-0x011FF8 -- Internal ATE RAM (Auto Parity Generation) */ | ||
156 | uint64_t cp_int_ate_ram[1024]; /* 0x010000-0x011FF8 */ | ||
157 | |||
158 | char _pad_012000[0x14000 - 0x012000]; | ||
159 | |||
160 | /* 0x014000-0x015FF8 -- Internal ATE RAM (Manual Parity Generation) */ | ||
161 | uint64_t cp_int_ate_ram_mp[1024]; /* 0x014000-0x015FF8 */ | ||
162 | |||
163 | char _pad_016000[0x18000 - 0x016000]; | ||
164 | |||
165 | /* 0x18000-0x197F8 -- TIOCP Write Request Ram */ | ||
166 | uint64_t cp_wr_req_lower[256]; /* 0x18000 - 0x187F8 */ | ||
167 | uint64_t cp_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */ | ||
168 | uint64_t cp_wr_req_parity[256]; /* 0x19000 - 0x197F8 */ | ||
169 | |||
170 | char _pad_019800[0x1C000 - 0x019800]; | ||
171 | |||
172 | /* 0x1C000-0x1EFF8 -- TIOCP Read Response Ram */ | ||
173 | uint64_t cp_rd_resp_lower[512]; /* 0x1C000 - 0x1CFF8 */ | ||
174 | uint64_t cp_rd_resp_upper[512]; /* 0x1D000 - 0x1DFF8 */ | ||
175 | uint64_t cp_rd_resp_parity[512]; /* 0x1E000 - 0x1EFF8 */ | ||
176 | |||
177 | char _pad_01F000[0x20000 - 0x01F000]; | ||
178 | |||
179 | /* 0x020000-0x021FFF -- Host Device (CP) Configuration Space (not used) */ | ||
180 | char _pad_020000[0x021000 - 0x20000]; | ||
181 | |||
182 | /* 0x021000-0x027FFF -- PCI Device Configuration Spaces */ | ||
183 | union { | ||
184 | uint8_t c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */ | ||
185 | uint16_t s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */ | ||
186 | uint32_t l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */ | ||
187 | uint64_t d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */ | ||
188 | union { | ||
189 | uint8_t c[0x100 / 1]; | ||
190 | uint16_t s[0x100 / 2]; | ||
191 | uint32_t l[0x100 / 4]; | ||
192 | uint64_t d[0x100 / 8]; | ||
193 | } f[8]; | ||
194 | } cp_type0_cfg_dev[7]; /* 0x02{1000,,,7FFF} */ | ||
195 | |||
196 | /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */ | ||
197 | union { | ||
198 | uint8_t c[0x1000 / 1]; /* 0x028000-0x029000 */ | ||
199 | uint16_t s[0x1000 / 2]; /* 0x028000-0x029000 */ | ||
200 | uint32_t l[0x1000 / 4]; /* 0x028000-0x029000 */ | ||
201 | uint64_t d[0x1000 / 8]; /* 0x028000-0x029000 */ | ||
202 | union { | ||
203 | uint8_t c[0x100 / 1]; | ||
204 | uint16_t s[0x100 / 2]; | ||
205 | uint32_t l[0x100 / 4]; | ||
206 | uint64_t d[0x100 / 8]; | ||
207 | } f[8]; | ||
208 | } cp_type1_cfg; /* 0x028000-0x029000 */ | ||
209 | |||
210 | char _pad_029000[0x030000-0x029000]; | ||
211 | |||
212 | /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */ | ||
213 | union { | ||
214 | uint8_t c[8 / 1]; | ||
215 | uint16_t s[8 / 2]; | ||
216 | uint32_t l[8 / 4]; | ||
217 | uint64_t d[8 / 8]; | ||
218 | } cp_pci_iack; /* 0x030000-0x030007 */ | ||
219 | |||
220 | char _pad_030007[0x040000-0x030008]; | ||
221 | |||
222 | /* 0x040000-0x040007 -- PCIX Special Cycle */ | ||
223 | union { | ||
224 | uint8_t c[8 / 1]; | ||
225 | uint16_t s[8 / 2]; | ||
226 | uint32_t l[8 / 4]; | ||
227 | uint64_t d[8 / 8]; | ||
228 | } cp_pcix_cycle; /* 0x040000-0x040007 */ | ||
229 | |||
230 | char _pad_040007[0x200000-0x040008]; | ||
231 | |||
232 | /* 0x200000-0x7FFFFF -- PCI/GIO Device Spaces */ | ||
233 | union { | ||
234 | uint8_t c[0x100000 / 1]; | ||
235 | uint16_t s[0x100000 / 2]; | ||
236 | uint32_t l[0x100000 / 4]; | ||
237 | uint64_t d[0x100000 / 8]; | ||
238 | } cp_devio_raw[6]; /* 0x200000-0x7FFFFF */ | ||
239 | |||
240 | #define cp_devio(n) cp_devio_raw[((n)<2)?(n*2):(n+2)] | ||
241 | |||
242 | char _pad_800000[0xA00000-0x800000]; | ||
243 | |||
244 | /* 0xA00000-0xBFFFFF -- PCI/GIO Device Spaces w/flush */ | ||
245 | union { | ||
246 | uint8_t c[0x100000 / 1]; | ||
247 | uint16_t s[0x100000 / 2]; | ||
248 | uint32_t l[0x100000 / 4]; | ||
249 | uint64_t d[0x100000 / 8]; | ||
250 | } cp_devio_raw_flush[6]; /* 0xA00000-0xBFFFFF */ | ||
251 | |||
252 | #define cp_devio_flush(n) cp_devio_raw_flush[((n)<2)?(n*2):(n+2)] | ||
253 | |||
254 | }; | ||
255 | |||
256 | #endif /* _ASM_IA64_SN_PCI_TIOCP_H */ | ||
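
One detail worth calling out from the deleted header above: cp_devio(n) and cp_devio_flush(n) do not index the raw window arrays linearly. For the four logical devices, devices 0 and 1 select raw windows 0 and 2, while devices 2 and 3 select windows 4 and 5. The tiny user-space check below (DEVIO_INDEX is a made-up stand-in for the macro body) prints that mapping:

    #include <stdio.h>

    /* Stand-in for the cp_devio(n) index expression; illustration only. */
    #define DEVIO_INDEX(n)  (((n) < 2) ? (n) * 2 : (n) + 2)

    int main(void)
    {
            int n;

            for (n = 0; n < 4; n++)
                    printf("cp_devio(%d) -> cp_devio_raw[%d]\n",
                           n, DEVIO_INDEX(n));
            /* prints: 0 -> 0, 1 -> 2, 2 -> 4, 3 -> 5 */
            return 0;
    }
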
diff --git a/arch/ia64/sn/include/xtalk/hubdev.h b/arch/ia64/sn/include/xtalk/hubdev.h index 868e7ecae84b..580a1c0403a7 100644 --- a/arch/ia64/sn/include/xtalk/hubdev.h +++ b/arch/ia64/sn/include/xtalk/hubdev.h | |||
@@ -8,6 +8,8 @@ | |||
8 | #ifndef _ASM_IA64_SN_XTALK_HUBDEV_H | 8 | #ifndef _ASM_IA64_SN_XTALK_HUBDEV_H |
9 | #define _ASM_IA64_SN_XTALK_HUBDEV_H | 9 | #define _ASM_IA64_SN_XTALK_HUBDEV_H |
10 | 10 | ||
11 | #include "xtalk/xwidgetdev.h" | ||
12 | |||
11 | #define HUB_WIDGET_ID_MAX 0xf | 13 | #define HUB_WIDGET_ID_MAX 0xf |
12 | #define DEV_PER_WIDGET (2*2*8) | 14 | #define DEV_PER_WIDGET (2*2*8) |
13 | #define IIO_ITTE_WIDGET_BITS 4 /* size of widget field */ | 15 | #define IIO_ITTE_WIDGET_BITS 4 /* size of widget field */ |
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index 783eb4323847..a67f39e448cb 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c | |||
@@ -9,21 +9,28 @@ | |||
9 | #include <linux/bootmem.h> | 9 | #include <linux/bootmem.h> |
10 | #include <linux/nodemask.h> | 10 | #include <linux/nodemask.h> |
11 | #include <asm/sn/types.h> | 11 | #include <asm/sn/types.h> |
12 | #include <asm/sn/sn_sal.h> | ||
13 | #include <asm/sn/addrs.h> | 12 | #include <asm/sn/addrs.h> |
14 | #include <asm/sn/pcibus_provider_defs.h> | ||
15 | #include <asm/sn/pcidev.h> | ||
16 | #include "pci/pcibr_provider.h" | ||
17 | #include "xtalk/xwidgetdev.h" | ||
18 | #include <asm/sn/geo.h> | 13 | #include <asm/sn/geo.h> |
19 | #include "xtalk/hubdev.h" | ||
20 | #include <asm/sn/io.h> | 14 | #include <asm/sn/io.h> |
15 | #include <asm/sn/pcibr_provider.h> | ||
16 | #include <asm/sn/pcibus_provider_defs.h> | ||
17 | #include <asm/sn/pcidev.h> | ||
21 | #include <asm/sn/simulator.h> | 18 | #include <asm/sn/simulator.h> |
19 | #include <asm/sn/sn_sal.h> | ||
22 | #include <asm/sn/tioca_provider.h> | 20 | #include <asm/sn/tioca_provider.h> |
21 | #include "xtalk/hubdev.h" | ||
22 | #include "xtalk/xwidgetdev.h" | ||
23 | 23 | ||
24 | char master_baseio_wid; | ||
25 | nasid_t master_nasid = INVALID_NASID; /* Partition Master */ | 24 | nasid_t master_nasid = INVALID_NASID; /* Partition Master */ |
26 | 25 | ||
26 | static struct list_head sn_sysdata_list; | ||
27 | |||
28 | /* sysdata list struct */ | ||
29 | struct sysdata_el { | ||
30 | struct list_head entry; | ||
31 | void *sysdata; | ||
32 | }; | ||
33 | |||
27 | struct slab_info { | 34 | struct slab_info { |
28 | struct hubdev_info hubdev; | 35 | struct hubdev_info hubdev; |
29 | }; | 36 | }; |
@@ -138,23 +145,6 @@ sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev, | |||
138 | } | 145 | } |
139 | 146 | ||
140 | /* | 147 | /* |
141 | * sn_alloc_pci_sysdata() - This routine allocates a pci controller | ||
142 | * which is expected as the pci_dev and pci_bus sysdata by the Linux | ||
143 | * PCI infrastructure. | ||
144 | */ | ||
145 | static inline struct pci_controller *sn_alloc_pci_sysdata(void) | ||
146 | { | ||
147 | struct pci_controller *pci_sysdata; | ||
148 | |||
149 | pci_sysdata = kmalloc(sizeof(*pci_sysdata), GFP_KERNEL); | ||
150 | if (!pci_sysdata) | ||
151 | BUG(); | ||
152 | |||
153 | memset(pci_sysdata, 0, sizeof(*pci_sysdata)); | ||
154 | return pci_sysdata; | ||
155 | } | ||
156 | |||
157 | /* | ||
158 | * sn_fixup_ionodes() - This routine initializes the HUB data structure for | 148 | * sn_fixup_ionodes() - This routine initializes the HUB data structure for |
159 | * each node in the system. | 149 | * each node in the system. |
160 | */ | 150 | */ |
@@ -221,22 +211,34 @@ static void sn_fixup_ionodes(void) | |||
221 | 211 | ||
222 | } | 212 | } |
223 | 213 | ||
214 | void sn_pci_unfixup_slot(struct pci_dev *dev) | ||
215 | { | ||
216 | struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev; | ||
217 | |||
218 | sn_irq_unfixup(dev); | ||
219 | pci_dev_put(host_pci_dev); | ||
220 | pci_dev_put(dev); | ||
221 | } | ||
222 | |||
224 | /* | 223 | /* |
225 | * sn_pci_fixup_slot() - This routine sets up a slot's resources | 224 | * sn_pci_fixup_slot() - This routine sets up a slot's resources |
226 | * consistent with the Linux PCI abstraction layer. Resources acquired | 225 | * consistent with the Linux PCI abstraction layer. Resources acquired |
227 | * from our PCI provider include PIO maps to BAR space and interrupt | 226 | * from our PCI provider include PIO maps to BAR space and interrupt |
228 | * objects. | 227 | * objects. |
229 | */ | 228 | */ |
230 | static void sn_pci_fixup_slot(struct pci_dev *dev) | 229 | void sn_pci_fixup_slot(struct pci_dev *dev) |
231 | { | 230 | { |
232 | int idx; | 231 | int idx; |
233 | int segment = 0; | 232 | int segment = 0; |
234 | uint64_t size; | ||
235 | struct sn_irq_info *sn_irq_info; | ||
236 | struct pci_dev *host_pci_dev; | ||
237 | int status = 0; | 233 | int status = 0; |
238 | struct pcibus_bussoft *bs; | 234 | struct pcibus_bussoft *bs; |
235 | struct pci_bus *host_pci_bus; | ||
236 | struct pci_dev *host_pci_dev; | ||
237 | struct sn_irq_info *sn_irq_info; | ||
238 | unsigned long size; | ||
239 | unsigned int bus_no, devfn; | ||
239 | 240 | ||
241 | pci_dev_get(dev); /* for the sysdata pointer */ | ||
240 | dev->sysdata = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL); | 242 | dev->sysdata = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL); |
241 | if (SN_PCIDEV_INFO(dev) <= 0) | 243 | if (SN_PCIDEV_INFO(dev) <= 0) |
242 | BUG(); /* Cannot afford to run out of memory */ | 244 | BUG(); /* Cannot afford to run out of memory */ |
@@ -253,7 +255,7 @@ static void sn_pci_fixup_slot(struct pci_dev *dev) | |||
253 | (u64) __pa(SN_PCIDEV_INFO(dev)), | 255 | (u64) __pa(SN_PCIDEV_INFO(dev)), |
254 | (u64) __pa(sn_irq_info)); | 256 | (u64) __pa(sn_irq_info)); |
255 | if (status) | 257 | if (status) |
256 | BUG(); /* Cannot get platform pci device information information */ | 258 | BUG(); /* Cannot get platform pci device information */ |
257 | 259 | ||
258 | /* Copy over PIO Mapped Addresses */ | 260 | /* Copy over PIO Mapped Addresses */ |
259 | for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { | 261 | for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { |
@@ -275,15 +277,21 @@ static void sn_pci_fixup_slot(struct pci_dev *dev) | |||
275 | dev->resource[idx].parent = &iomem_resource; | 277 | dev->resource[idx].parent = &iomem_resource; |
276 | } | 278 | } |
277 | 279 | ||
278 | /* set up host bus linkages */ | 280 | /* |
279 | bs = SN_PCIBUS_BUSSOFT(dev->bus); | 281 | * Using the PROMs values for the PCI host bus, get the Linux |
280 | host_pci_dev = | 282 | * PCI host_pci_dev struct and set up host bus linkages |
281 | pci_find_slot(SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32, | 283 | */ |
282 | SN_PCIDEV_INFO(dev)-> | 284 | |
283 | pdi_slot_host_handle & 0xffffffff); | 285 | bus_no = SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32; |
286 | devfn = SN_PCIDEV_INFO(dev)->pdi_slot_host_handle & 0xffffffff; | ||
287 | host_pci_bus = pci_find_bus(pci_domain_nr(dev->bus), bus_no); | ||
288 | host_pci_dev = pci_get_slot(host_pci_bus, devfn); | ||
289 | |||
290 | SN_PCIDEV_INFO(dev)->host_pci_dev = host_pci_dev; | ||
284 | SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info = | 291 | SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info = |
285 | SN_PCIDEV_INFO(host_pci_dev); | 292 | SN_PCIDEV_INFO(host_pci_dev); |
286 | SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev; | 293 | SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev; |
294 | bs = SN_PCIBUS_BUSSOFT(dev->bus); | ||
287 | SN_PCIDEV_INFO(dev)->pdi_pcibus_info = bs; | 295 | SN_PCIDEV_INFO(dev)->pdi_pcibus_info = bs; |
288 | 296 | ||
289 | if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) { | 297 | if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) { |
@@ -297,6 +305,9 @@ static void sn_pci_fixup_slot(struct pci_dev *dev) | |||
297 | SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info; | 305 | SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info; |
298 | dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq; | 306 | dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq; |
299 | sn_irq_fixup(dev, sn_irq_info); | 307 | sn_irq_fixup(dev, sn_irq_info); |
308 | } else { | ||
309 | SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = NULL; | ||
310 | kfree(sn_irq_info); | ||
300 | } | 311 | } |
301 | } | 312 | } |
302 | 313 | ||
@@ -304,55 +315,57 @@ static void sn_pci_fixup_slot(struct pci_dev *dev) | |||
304 | * sn_pci_controller_fixup() - This routine sets up a bus's resources | 315 | * sn_pci_controller_fixup() - This routine sets up a bus's resources |
305 | * consistent with the Linux PCI abstraction layer. | 316 | * consistent with the Linux PCI abstraction layer. |
306 | */ | 317 | */ |
307 | static void sn_pci_controller_fixup(int segment, int busnum) | 318 | void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) |
308 | { | 319 | { |
309 | int status = 0; | 320 | int status = 0; |
310 | int nasid, cnode; | 321 | int nasid, cnode; |
311 | struct pci_bus *bus; | ||
312 | struct pci_controller *controller; | 322 | struct pci_controller *controller; |
313 | struct pcibus_bussoft *prom_bussoft_ptr; | 323 | struct pcibus_bussoft *prom_bussoft_ptr; |
314 | struct hubdev_info *hubdev_info; | 324 | struct hubdev_info *hubdev_info; |
315 | void *provider_soft; | 325 | void *provider_soft; |
316 | struct sn_pcibus_provider *provider; | 326 | struct sn_pcibus_provider *provider; |
317 | 327 | ||
318 | status = | 328 | status = sal_get_pcibus_info((u64) segment, (u64) busnum, |
319 | sal_get_pcibus_info((u64) segment, (u64) busnum, | 329 | (u64) ia64_tpa(&prom_bussoft_ptr)); |
320 | (u64) ia64_tpa(&prom_bussoft_ptr)); | 330 | if (status > 0) |
321 | if (status > 0) { | 331 | return; /* bus # does not exist */ |
322 | return; /* bus # does not exist */ | ||
323 | } | ||
324 | |||
325 | prom_bussoft_ptr = __va(prom_bussoft_ptr); | 332 | prom_bussoft_ptr = __va(prom_bussoft_ptr); |
326 | controller = sn_alloc_pci_sysdata(); | ||
327 | /* controller non-zero is BUG'd in sn_alloc_pci_sysdata */ | ||
328 | 333 | ||
329 | bus = pci_scan_bus(busnum, &pci_root_ops, controller); | 334 | controller = kcalloc(1, sizeof(struct pci_controller), GFP_KERNEL); |
335 | if (!controller) | ||
336 | BUG(); | ||
337 | |||
330 | if (bus == NULL) { | 338 | if (bus == NULL) { |
331 | return; /* error, or bus already scanned */ | 339 | bus = pci_scan_bus(busnum, &pci_root_ops, controller); |
340 | if (bus == NULL) | ||
341 | return; /* error, or bus already scanned */ | ||
342 | bus->sysdata = NULL; | ||
332 | } | 343 | } |
333 | 344 | ||
345 | if (bus->sysdata) | ||
346 | goto error_return; /* sysdata already alloc'd */ | ||
347 | |||
334 | /* | 348 | /* |
335 | * Per-provider fixup. Copies the contents from prom to local | 349 | * Per-provider fixup. Copies the contents from prom to local |
336 | * area and links SN_PCIBUS_BUSSOFT(). | 350 | * area and links SN_PCIBUS_BUSSOFT(). |
337 | */ | 351 | */ |
338 | 352 | ||
339 | if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) { | 353 | if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) |
340 | return; /* unsupported asic type */ | 354 | return; /* unsupported asic type */ |
341 | } | 355 | |
356 | if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB) | ||
357 | goto error_return; /* no further fixup necessary */ | ||
342 | 358 | ||
343 | provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type]; | 359 | provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type]; |
344 | if (provider == NULL) { | 360 | if (provider == NULL) |
345 | return; /* no provider registered for this asic */ | 361 | return; /* no provider registered for this asic */ |
346 | } | ||
347 | 362 | ||
348 | provider_soft = NULL; | 363 | provider_soft = NULL; |
349 | if (provider->bus_fixup) { | 364 | if (provider->bus_fixup) |
350 | provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr); | 365 | provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr); |
351 | } | ||
352 | 366 | ||
353 | if (provider_soft == NULL) { | 367 | if (provider_soft == NULL) |
354 | return; /* fixup failed or not applicable */ | 368 | return; /* fixup failed or not applicable */ |
355 | } | ||
356 | 369 | ||
357 | /* | 370 | /* |
358 | * Generic bus fixup goes here. Don't reference prom_bussoft_ptr | 371 | * Generic bus fixup goes here. Don't reference prom_bussoft_ptr |
@@ -361,12 +374,47 @@ static void sn_pci_controller_fixup(int segment, int busnum) | |||
361 | 374 | ||
362 | bus->sysdata = controller; | 375 | bus->sysdata = controller; |
363 | PCI_CONTROLLER(bus)->platform_data = provider_soft; | 376 | PCI_CONTROLLER(bus)->platform_data = provider_soft; |
364 | |||
365 | nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base); | 377 | nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base); |
366 | cnode = nasid_to_cnodeid(nasid); | 378 | cnode = nasid_to_cnodeid(nasid); |
367 | hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo); | 379 | hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo); |
368 | SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info = | 380 | SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info = |
369 | &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]); | 381 | &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]); |
382 | |||
383 | return; | ||
384 | |||
385 | error_return: | ||
386 | |||
387 | kfree(controller); | ||
388 | return; | ||
389 | } | ||
390 | |||
391 | void sn_bus_store_sysdata(struct pci_dev *dev) | ||
392 | { | ||
393 | struct sysdata_el *element; | ||
394 | |||
395 | element = kcalloc(1, sizeof(struct sysdata_el), GFP_KERNEL); | ||
396 | if (!element) { | ||
397 | dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__); | ||
398 | return; | ||
399 | } | ||
400 | element->sysdata = dev->sysdata; | ||
401 | list_add(&element->entry, &sn_sysdata_list); | ||
402 | } | ||
403 | |||
404 | void sn_bus_free_sysdata(void) | ||
405 | { | ||
406 | struct sysdata_el *element; | ||
407 | struct list_head *list; | ||
408 | |||
409 | sn_sysdata_free_start: | ||
410 | list_for_each(list, &sn_sysdata_list) { | ||
411 | element = list_entry(list, struct sysdata_el, entry); | ||
412 | list_del(&element->entry); | ||
413 | kfree(element->sysdata); | ||
414 | kfree(element); | ||
415 | goto sn_sysdata_free_start; | ||
416 | } | ||
417 | return; | ||
370 | } | 418 | } |
371 | 419 | ||
372 | /* | 420 | /* |
@@ -403,20 +451,17 @@ static int __init sn_pci_init(void) | |||
403 | */ | 451 | */ |
404 | ia64_max_iommu_merge_mask = ~PAGE_MASK; | 452 | ia64_max_iommu_merge_mask = ~PAGE_MASK; |
405 | sn_fixup_ionodes(); | 453 | sn_fixup_ionodes(); |
406 | sn_irq = kmalloc(sizeof(struct sn_irq_info *) * NR_IRQS, GFP_KERNEL); | 454 | sn_irq_lh_init(); |
407 | if (sn_irq <= 0) | 455 | INIT_LIST_HEAD(&sn_sysdata_list); |
408 | BUG(); /* Canno afford to run out of memory. */ | ||
409 | memset(sn_irq, 0, sizeof(struct sn_irq_info *) * NR_IRQS); | ||
410 | |||
411 | sn_init_cpei_timer(); | 456 | sn_init_cpei_timer(); |
412 | 457 | ||
413 | #ifdef CONFIG_PROC_FS | 458 | #ifdef CONFIG_PROC_FS |
414 | register_sn_procfs(); | 459 | register_sn_procfs(); |
415 | #endif | 460 | #endif |
416 | 461 | ||
417 | for (i = 0; i < PCI_BUSES_TO_SCAN; i++) { | 462 | /* busses are not known yet ... */ |
418 | sn_pci_controller_fixup(0, i); | 463 | for (i = 0; i < PCI_BUSES_TO_SCAN; i++) |
419 | } | 464 | sn_pci_controller_fixup(0, i, NULL); |
420 | 465 | ||
421 | /* | 466 | /* |
422 | * Generic Linux PCI Layer has created the pci_bus and pci_dev | 467 | * Generic Linux PCI Layer has created the pci_bus and pci_dev |
@@ -425,9 +470,8 @@ static int __init sn_pci_init(void) | |||
425 | */ | 470 | */ |
426 | 471 | ||
427 | while ((pci_dev = | 472 | while ((pci_dev = |
428 | pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) { | 473 | pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) |
429 | sn_pci_fixup_slot(pci_dev); | 474 | sn_pci_fixup_slot(pci_dev); |
430 | } | ||
431 | 475 | ||
432 | sn_ioif_inited = 1; /* sn I/O infrastructure now initialized */ | 476 | sn_ioif_inited = 1; /* sn I/O infrastructure now initialized */ |
433 | 477 | ||
@@ -469,3 +513,8 @@ cnodeid_get_geoid(cnodeid_t cnode) | |||
469 | } | 513 | } |
470 | 514 | ||
471 | subsys_initcall(sn_pci_init); | 515 | subsys_initcall(sn_pci_init); |
516 | EXPORT_SYMBOL(sn_pci_fixup_slot); | ||
517 | EXPORT_SYMBOL(sn_pci_unfixup_slot); | ||
518 | EXPORT_SYMBOL(sn_pci_controller_fixup); | ||
519 | EXPORT_SYMBOL(sn_bus_store_sysdata); | ||
520 | EXPORT_SYMBOL(sn_bus_free_sysdata); | ||
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index 0f4e8138658f..84d276a14ecb 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c | |||
@@ -9,13 +9,13 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/irq.h> | 11 | #include <linux/irq.h> |
12 | #include <asm/sn/intr.h> | 12 | #include <linux/spinlock.h> |
13 | #include <asm/sn/addrs.h> | 13 | #include <asm/sn/addrs.h> |
14 | #include <asm/sn/arch.h> | 14 | #include <asm/sn/arch.h> |
15 | #include "xtalk/xwidgetdev.h" | 15 | #include <asm/sn/intr.h> |
16 | #include <asm/sn/pcibr_provider.h> | ||
16 | #include <asm/sn/pcibus_provider_defs.h> | 17 | #include <asm/sn/pcibus_provider_defs.h> |
17 | #include <asm/sn/pcidev.h> | 18 | #include <asm/sn/pcidev.h> |
18 | #include "pci/pcibr_provider.h" | ||
19 | #include <asm/sn/shub_mmr.h> | 19 | #include <asm/sn/shub_mmr.h> |
20 | #include <asm/sn/sn_sal.h> | 20 | #include <asm/sn/sn_sal.h> |
21 | 21 | ||
@@ -25,7 +25,8 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info); | |||
25 | 25 | ||
26 | extern int sn_force_interrupt_flag; | 26 | extern int sn_force_interrupt_flag; |
27 | extern int sn_ioif_inited; | 27 | extern int sn_ioif_inited; |
28 | struct sn_irq_info **sn_irq; | 28 | static struct list_head **sn_irq_lh; |
29 | static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */ | ||
29 | 30 | ||
30 | static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget, | 31 | static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget, |
31 | u64 sn_irq_info, | 32 | u64 sn_irq_info, |
@@ -101,7 +102,7 @@ static void sn_end_irq(unsigned int irq) | |||
101 | nasid = get_nasid(); | 102 | nasid = get_nasid(); |
102 | event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR | 103 | event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR |
103 | (nasid, SH_EVENT_OCCURRED)); | 104 | (nasid, SH_EVENT_OCCURRED)); |
104 | /* If the UART bit is set here, we may have received an | 105 | /* If the UART bit is set here, we may have received an |
105 | * interrupt from the UART that the driver missed. To | 106 | * interrupt from the UART that the driver missed. To |
106 | * make sure, we IPI ourselves to force us to look again. | 107 | * make sure, we IPI ourselves to force us to look again. |
107 | */ | 108 | */ |
@@ -115,82 +116,84 @@ static void sn_end_irq(unsigned int irq) | |||
115 | force_interrupt(irq); | 116 | force_interrupt(irq); |
116 | } | 117 | } |
117 | 118 | ||
119 | static void sn_irq_info_free(struct rcu_head *head); | ||
120 | |||
118 | static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) | 121 | static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) |
119 | { | 122 | { |
120 | struct sn_irq_info *sn_irq_info = sn_irq[irq]; | 123 | struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; |
121 | struct sn_irq_info *tmp_sn_irq_info; | ||
122 | int cpuid, cpuphys; | 124 | int cpuid, cpuphys; |
123 | nasid_t t_nasid; /* nasid to target */ | ||
124 | int t_slice; /* slice to target */ | ||
125 | |||
126 | /* allocate a temp sn_irq_info struct to get new target info */ | ||
127 | tmp_sn_irq_info = kmalloc(sizeof(*tmp_sn_irq_info), GFP_KERNEL); | ||
128 | if (!tmp_sn_irq_info) | ||
129 | return; | ||
130 | 125 | ||
131 | cpuid = first_cpu(mask); | 126 | cpuid = first_cpu(mask); |
132 | cpuphys = cpu_physical_id(cpuid); | 127 | cpuphys = cpu_physical_id(cpuid); |
133 | t_nasid = cpuid_to_nasid(cpuid); | ||
134 | t_slice = cpuid_to_slice(cpuid); | ||
135 | 128 | ||
136 | while (sn_irq_info) { | 129 | list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, |
137 | int status; | 130 | sn_irq_lh[irq], list) { |
138 | int local_widget; | 131 | uint64_t bridge; |
139 | uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge; | 132 | int local_widget, status; |
140 | nasid_t local_nasid = NASID_GET(bridge); | 133 | nasid_t local_nasid; |
134 | struct sn_irq_info *new_irq_info; | ||
135 | |||
136 | new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC); | ||
137 | if (new_irq_info == NULL) | ||
138 | break; | ||
139 | memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info)); | ||
140 | |||
141 | bridge = (uint64_t) new_irq_info->irq_bridge; | ||
142 | if (!bridge) { | ||
143 | kfree(new_irq_info); | ||
144 | break; /* irq is not a device interrupt */ | ||
145 | } | ||
141 | 146 | ||
142 | if (!bridge) | 147 | local_nasid = NASID_GET(bridge); |
143 | break; /* irq is not a device interrupt */ | ||
144 | 148 | ||
145 | if (local_nasid & 1) | 149 | if (local_nasid & 1) |
146 | local_widget = TIO_SWIN_WIDGETNUM(bridge); | 150 | local_widget = TIO_SWIN_WIDGETNUM(bridge); |
147 | else | 151 | else |
148 | local_widget = SWIN_WIDGETNUM(bridge); | 152 | local_widget = SWIN_WIDGETNUM(bridge); |
149 | 153 | ||
150 | /* Free the old PROM sn_irq_info structure */ | 154 | /* Free the old PROM new_irq_info structure */ |
151 | sn_intr_free(local_nasid, local_widget, sn_irq_info); | 155 | sn_intr_free(local_nasid, local_widget, new_irq_info); |
156 | /* Update kernels new_irq_info with new target info */ | ||
157 | unregister_intr_pda(new_irq_info); | ||
152 | 158 | ||
153 | /* allocate a new PROM sn_irq_info struct */ | 159 | /* allocate a new PROM new_irq_info struct */ |
154 | status = sn_intr_alloc(local_nasid, local_widget, | 160 | status = sn_intr_alloc(local_nasid, local_widget, |
155 | __pa(tmp_sn_irq_info), irq, t_nasid, | 161 | __pa(new_irq_info), irq, |
156 | t_slice); | 162 | cpuid_to_nasid(cpuid), |
157 | 163 | cpuid_to_slice(cpuid)); | |
158 | if (status == 0) { | 164 | |
159 | /* Update kernels sn_irq_info with new target info */ | 165 | /* SAL call failed */ |
160 | unregister_intr_pda(sn_irq_info); | 166 | if (status) { |
161 | sn_irq_info->irq_cpuid = cpuid; | 167 | kfree(new_irq_info); |
162 | sn_irq_info->irq_nasid = t_nasid; | 168 | break; |
163 | sn_irq_info->irq_slice = t_slice; | 169 | } |
164 | sn_irq_info->irq_xtalkaddr = | 170 | |
165 | tmp_sn_irq_info->irq_xtalkaddr; | 171 | new_irq_info->irq_cpuid = cpuid; |
166 | sn_irq_info->irq_cookie = tmp_sn_irq_info->irq_cookie; | 172 | register_intr_pda(new_irq_info); |
167 | register_intr_pda(sn_irq_info); | 173 | |
168 | 174 | if (IS_PCI_BRIDGE_ASIC(new_irq_info->irq_bridge_type)) | |
169 | if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type)) { | 175 | pcibr_change_devices_irq(new_irq_info); |
170 | pcibr_change_devices_irq(sn_irq_info); | ||
171 | } | ||
172 | 176 | ||
173 | sn_irq_info = sn_irq_info->irq_next; | 177 | spin_lock(&sn_irq_info_lock); |
178 | list_replace_rcu(&sn_irq_info->list, &new_irq_info->list); | ||
179 | spin_unlock(&sn_irq_info_lock); | ||
180 | call_rcu(&sn_irq_info->rcu, sn_irq_info_free); | ||
174 | 181 | ||
175 | #ifdef CONFIG_SMP | 182 | #ifdef CONFIG_SMP |
176 | set_irq_affinity_info((irq & 0xff), cpuphys, 0); | 183 | set_irq_affinity_info((irq & 0xff), cpuphys, 0); |
177 | #endif | 184 | #endif |
178 | } else { | ||
179 | break; /* snp_affinity failed the intr_alloc */ | ||
180 | } | ||
181 | } | 185 | } |
182 | kfree(tmp_sn_irq_info); | ||
183 | } | 186 | } |
184 | 187 | ||
185 | struct hw_interrupt_type irq_type_sn = { | 188 | struct hw_interrupt_type irq_type_sn = { |
186 | "SN hub", | 189 | .typename = "SN hub", |
187 | sn_startup_irq, | 190 | .startup = sn_startup_irq, |
188 | sn_shutdown_irq, | 191 | .shutdown = sn_shutdown_irq, |
189 | sn_enable_irq, | 192 | .enable = sn_enable_irq, |
190 | sn_disable_irq, | 193 | .disable = sn_disable_irq, |
191 | sn_ack_irq, | 194 | .ack = sn_ack_irq, |
192 | sn_end_irq, | 195 | .end = sn_end_irq, |
193 | sn_set_affinity_irq | 196 | .set_affinity = sn_set_affinity_irq |
194 | }; | 197 | }; |
195 | 198 | ||
196 | unsigned int sn_local_vector_to_irq(u8 vector) | 199 | unsigned int sn_local_vector_to_irq(u8 vector) |
@@ -231,19 +234,18 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info) | |||
231 | struct sn_irq_info *tmp_irq_info; | 234 | struct sn_irq_info *tmp_irq_info; |
232 | int i, foundmatch; | 235 | int i, foundmatch; |
233 | 236 | ||
237 | rcu_read_lock(); | ||
234 | if (pdacpu(cpu)->sn_last_irq == irq) { | 238 | if (pdacpu(cpu)->sn_last_irq == irq) { |
235 | foundmatch = 0; | 239 | foundmatch = 0; |
236 | for (i = pdacpu(cpu)->sn_last_irq - 1; i; i--) { | 240 | for (i = pdacpu(cpu)->sn_last_irq - 1; |
237 | tmp_irq_info = sn_irq[i]; | 241 | i && !foundmatch; i--) { |
238 | while (tmp_irq_info) { | 242 | list_for_each_entry_rcu(tmp_irq_info, |
243 | sn_irq_lh[i], | ||
244 | list) { | ||
239 | if (tmp_irq_info->irq_cpuid == cpu) { | 245 | if (tmp_irq_info->irq_cpuid == cpu) { |
240 | foundmatch++; | 246 | foundmatch = 1; |
241 | break; | 247 | break; |
242 | } | 248 | } |
243 | tmp_irq_info = tmp_irq_info->irq_next; | ||
244 | } | ||
245 | if (foundmatch) { | ||
246 | break; | ||
247 | } | 249 | } |
248 | } | 250 | } |
249 | pdacpu(cpu)->sn_last_irq = i; | 251 | pdacpu(cpu)->sn_last_irq = i; |
@@ -251,60 +253,27 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info) | |||
251 | 253 | ||
252 | if (pdacpu(cpu)->sn_first_irq == irq) { | 254 | if (pdacpu(cpu)->sn_first_irq == irq) { |
253 | foundmatch = 0; | 255 | foundmatch = 0; |
254 | for (i = pdacpu(cpu)->sn_first_irq + 1; i < NR_IRQS; i++) { | 256 | for (i = pdacpu(cpu)->sn_first_irq + 1; |
255 | tmp_irq_info = sn_irq[i]; | 257 | i < NR_IRQS && !foundmatch; i++) { |
256 | while (tmp_irq_info) { | 258 | list_for_each_entry_rcu(tmp_irq_info, |
259 | sn_irq_lh[i], | ||
260 | list) { | ||
257 | if (tmp_irq_info->irq_cpuid == cpu) { | 261 | if (tmp_irq_info->irq_cpuid == cpu) { |
258 | foundmatch++; | 262 | foundmatch = 1; |
259 | break; | 263 | break; |
260 | } | 264 | } |
261 | tmp_irq_info = tmp_irq_info->irq_next; | ||
262 | } | ||
263 | if (foundmatch) { | ||
264 | break; | ||
265 | } | 265 | } |
266 | } | 266 | } |
267 | pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i); | 267 | pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i); |
268 | } | 268 | } |
269 | rcu_read_unlock(); | ||
269 | } | 270 | } |
270 | 271 | ||
271 | struct sn_irq_info *sn_irq_alloc(nasid_t local_nasid, int local_widget, int irq, | 272 | static void sn_irq_info_free(struct rcu_head *head) |
272 | nasid_t nasid, int slice) | ||
273 | { | 273 | { |
274 | struct sn_irq_info *sn_irq_info; | 274 | struct sn_irq_info *sn_irq_info; |
275 | int status; | ||
276 | |||
277 | sn_irq_info = kmalloc(sizeof(*sn_irq_info), GFP_KERNEL); | ||
278 | if (sn_irq_info == NULL) | ||
279 | return NULL; | ||
280 | |||
281 | memset(sn_irq_info, 0x0, sizeof(*sn_irq_info)); | ||
282 | |||
283 | status = | ||
284 | sn_intr_alloc(local_nasid, local_widget, __pa(sn_irq_info), irq, | ||
285 | nasid, slice); | ||
286 | |||
287 | if (status) { | ||
288 | kfree(sn_irq_info); | ||
289 | return NULL; | ||
290 | } else { | ||
291 | return sn_irq_info; | ||
292 | } | ||
293 | } | ||
294 | |||
295 | void sn_irq_free(struct sn_irq_info *sn_irq_info) | ||
296 | { | ||
297 | uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge; | ||
298 | nasid_t local_nasid = NASID_GET(bridge); | ||
299 | int local_widget; | ||
300 | |||
301 | if (local_nasid & 1) /* tio check */ | ||
302 | local_widget = TIO_SWIN_WIDGETNUM(bridge); | ||
303 | else | ||
304 | local_widget = SWIN_WIDGETNUM(bridge); | ||
305 | |||
306 | sn_intr_free(local_nasid, local_widget, sn_irq_info); | ||
307 | 275 | ||
276 | sn_irq_info = container_of(head, struct sn_irq_info, rcu); | ||
308 | kfree(sn_irq_info); | 277 | kfree(sn_irq_info); |
309 | } | 278 | } |
310 | 279 | ||
@@ -314,30 +283,54 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info) | |||
314 | int slice = sn_irq_info->irq_slice; | 283 | int slice = sn_irq_info->irq_slice; |
315 | int cpu = nasid_slice_to_cpuid(nasid, slice); | 284 | int cpu = nasid_slice_to_cpuid(nasid, slice); |
316 | 285 | ||
286 | pci_dev_get(pci_dev); | ||
317 | sn_irq_info->irq_cpuid = cpu; | 287 | sn_irq_info->irq_cpuid = cpu; |
318 | sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev); | 288 | sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev); |
319 | 289 | ||
320 | /* link it into the sn_irq[irq] list */ | 290 | /* link it into the sn_irq[irq] list */ |
321 | sn_irq_info->irq_next = sn_irq[sn_irq_info->irq_irq]; | 291 | spin_lock(&sn_irq_info_lock); |
322 | sn_irq[sn_irq_info->irq_irq] = sn_irq_info; | 292 | list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]); |
293 | spin_unlock(&sn_irq_info_lock); | ||
323 | 294 | ||
324 | (void)register_intr_pda(sn_irq_info); | 295 | (void)register_intr_pda(sn_irq_info); |
325 | } | 296 | } |
326 | 297 | ||
298 | void sn_irq_unfixup(struct pci_dev *pci_dev) | ||
299 | { | ||
300 | struct sn_irq_info *sn_irq_info; | ||
301 | |||
302 | /* Only cleanup IRQ stuff if this device has a host bus context */ | ||
303 | if (!SN_PCIDEV_BUSSOFT(pci_dev)) | ||
304 | return; | ||
305 | |||
306 | sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info; | ||
307 | if (!sn_irq_info || !sn_irq_info->irq_irq) { | ||
308 | kfree(sn_irq_info); | ||
309 | return; | ||
310 | } | ||
311 | |||
312 | unregister_intr_pda(sn_irq_info); | ||
313 | spin_lock(&sn_irq_info_lock); | ||
314 | list_del_rcu(&sn_irq_info->list); | ||
315 | spin_unlock(&sn_irq_info_lock); | ||
316 | call_rcu(&sn_irq_info->rcu, sn_irq_info_free); | ||
317 | pci_dev_put(pci_dev); | ||
318 | } | ||
319 | |||
327 | static void force_interrupt(int irq) | 320 | static void force_interrupt(int irq) |
328 | { | 321 | { |
329 | struct sn_irq_info *sn_irq_info; | 322 | struct sn_irq_info *sn_irq_info; |
330 | 323 | ||
331 | if (!sn_ioif_inited) | 324 | if (!sn_ioif_inited) |
332 | return; | 325 | return; |
333 | sn_irq_info = sn_irq[irq]; | 326 | |
334 | while (sn_irq_info) { | 327 | rcu_read_lock(); |
328 | list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list) { | ||
335 | if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) && | 329 | if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) && |
336 | (sn_irq_info->irq_bridge != NULL)) { | 330 | (sn_irq_info->irq_bridge != NULL)) |
337 | pcibr_force_interrupt(sn_irq_info); | 331 | pcibr_force_interrupt(sn_irq_info); |
338 | } | ||
339 | sn_irq_info = sn_irq_info->irq_next; | ||
340 | } | 332 | } |
333 | rcu_read_unlock(); | ||
341 | } | 334 | } |
342 | 335 | ||
343 | /* | 336 | /* |
@@ -402,19 +395,41 @@ static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info) | |||
402 | 395 | ||
403 | void sn_lb_int_war_check(void) | 396 | void sn_lb_int_war_check(void) |
404 | { | 397 | { |
398 | struct sn_irq_info *sn_irq_info; | ||
405 | int i; | 399 | int i; |
406 | 400 | ||
407 | if (!sn_ioif_inited || pda->sn_first_irq == 0) | 401 | if (!sn_ioif_inited || pda->sn_first_irq == 0) |
408 | return; | 402 | return; |
403 | |||
404 | rcu_read_lock(); | ||
409 | for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) { | 405 | for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) { |
410 | struct sn_irq_info *sn_irq_info = sn_irq[i]; | 406 | list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) { |
411 | while (sn_irq_info) { | 407 | /* |
412 | /* Only call for PCI bridges that are fully initialized. */ | 408 | * Only call for PCI bridges that are fully |
409 | * initialized. | ||
410 | */ | ||
413 | if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) && | 411 | if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) && |
414 | (sn_irq_info->irq_bridge != NULL)) { | 412 | (sn_irq_info->irq_bridge != NULL)) |
415 | sn_check_intr(i, sn_irq_info); | 413 | sn_check_intr(i, sn_irq_info); |
416 | } | ||
417 | sn_irq_info = sn_irq_info->irq_next; | ||
418 | } | 414 | } |
419 | } | 415 | } |
416 | rcu_read_unlock(); | ||
417 | } | ||
418 | |||
419 | void sn_irq_lh_init(void) | ||
420 | { | ||
421 | int i; | ||
422 | |||
423 | sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL); | ||
424 | if (!sn_irq_lh) | ||
425 | panic("SN PCI INIT: Failed to allocate memory for PCI init\n"); | ||
426 | |||
427 | for (i = 0; i < NR_IRQS; i++) { | ||
428 | sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL); | ||
429 | if (!sn_irq_lh[i]) | ||
430 | panic("SN PCI INIT: Failed IRQ memory allocation\n"); | ||
431 | |||
432 | INIT_LIST_HEAD(sn_irq_lh[i]); | ||
433 | } | ||
434 | |||
420 | } | 435 | } |
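
The irq.c rework replaces the old irq_next chains with per-IRQ list heads that readers walk under rcu_read_lock() while writers serialize on sn_irq_info_lock and defer frees through call_rcu(). The condensed sketch below shows that reader/writer split as it would appear inside irq.c; example_walk_irq() and example_retire() are illustrative names, not functions added by this patch:

    /* Reader: walk the per-IRQ list without taking the spinlock. */
    static void example_walk_irq(int irq)
    {
            struct sn_irq_info *info;

            rcu_read_lock();                        /* readers never block writers */
            list_for_each_entry_rcu(info, sn_irq_lh[irq], list) {
                    /* inspect info; no sleeping inside the RCU read section */
            }
            rcu_read_unlock();
    }

    /* Writer: swap an entry under the spinlock, free it once readers drain. */
    static void example_retire(struct sn_irq_info *old_info,
                               struct sn_irq_info *new_info)
    {
            spin_lock(&sn_irq_info_lock);
            list_replace_rcu(&old_info->list, &new_info->list);
            spin_unlock(&sn_irq_info_lock);
            call_rcu(&old_info->rcu, sn_irq_info_free);
    }
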
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index 22e10d282c7f..7c7fe441d623 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c | |||
@@ -270,7 +270,7 @@ void __init sn_setup(char **cmdline_p) | |||
270 | { | 270 | { |
271 | long status, ticks_per_sec, drift; | 271 | long status, ticks_per_sec, drift; |
272 | int pxm; | 272 | int pxm; |
273 | int major = sn_sal_rev_major(), minor = sn_sal_rev_minor(); | 273 | u32 version = sn_sal_rev(); |
274 | extern void sn_cpu_init(void); | 274 | extern void sn_cpu_init(void); |
275 | 275 | ||
276 | ia64_sn_plat_set_error_handling_features(); | 276 | ia64_sn_plat_set_error_handling_features(); |
@@ -308,22 +308,21 @@ void __init sn_setup(char **cmdline_p) | |||
308 | * support here so we don't have to listen to failed keyboard probe | 308 | * support here so we don't have to listen to failed keyboard probe |
309 | * messages. | 309 | * messages. |
310 | */ | 310 | */ |
311 | if ((major < 2 || (major == 2 && minor <= 9)) && | 311 | if (version <= 0x0209 && acpi_kbd_controller_present) { |
312 | acpi_kbd_controller_present) { | ||
313 | printk(KERN_INFO "Disabling legacy keyboard support as prom " | 312 | printk(KERN_INFO "Disabling legacy keyboard support as prom " |
314 | "is too old and doesn't provide FADT\n"); | 313 | "is too old and doesn't provide FADT\n"); |
315 | acpi_kbd_controller_present = 0; | 314 | acpi_kbd_controller_present = 0; |
316 | } | 315 | } |
317 | 316 | ||
318 | printk("SGI SAL version %x.%02x\n", major, minor); | 317 | printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF); |
319 | 318 | ||
320 | /* | 319 | /* |
321 | * Confirm the SAL we're running on is recent enough... | 320 | * Confirm the SAL we're running on is recent enough... |
322 | */ | 321 | */ |
323 | if ((major < SN_SAL_MIN_MAJOR) || (major == SN_SAL_MIN_MAJOR && | 322 | if (version < SN_SAL_MIN_VERSION) { |
324 | minor < SN_SAL_MIN_MINOR)) { | ||
325 | printk(KERN_ERR "This kernel needs SGI SAL version >= " | 323 | printk(KERN_ERR "This kernel needs SGI SAL version >= " |
326 | "%x.%02x\n", SN_SAL_MIN_MAJOR, SN_SAL_MIN_MINOR); | 324 | "%x.%02x\n", SN_SAL_MIN_VERSION >> 8, |
325 | SN_SAL_MIN_VERSION & 0x00FF); | ||
327 | panic("PROM version too old\n"); | 326 | panic("PROM version too old\n"); |
328 | } | 327 | } |
329 | 328 | ||
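setup.c now tests one packed revision word instead of separate major/minor values: sn_sal_rev() appears to carry the major number in the high byte and the minor in the low byte, so the old "2.09 or older" check collapses to version <= 0x0209. A stand-alone user-space illustration of that encoding; pack_rev() is a hypothetical helper, not part of the kernel.

#include <stdio.h>
#include <stdint.h>

static uint32_t pack_rev(unsigned major, unsigned minor)
{
	return (major << 8) | (minor & 0xff);	/* e.g. 2.09 -> 0x0209 */
}

int main(void)
{
	uint32_t v = pack_rev(2, 9);

	/* the old "major < 2 || (major == 2 && minor <= 9)" test
	 * becomes a single unsigned compare on the packed word */
	printf("SAL %x.%02x old-prom=%d\n",
	       (unsigned)(v >> 8), (unsigned)(v & 0xff), v <= 0x0209);
	return 0;
}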
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c index 8716f4d5314b..254fe15c064b 100644 --- a/arch/ia64/sn/kernel/tiocx.c +++ b/arch/ia64/sn/kernel/tiocx.c | |||
@@ -8,12 +8,12 @@ | |||
8 | 8 | ||
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/version.h> | ||
12 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
13 | #include <linux/spinlock.h> | 12 | #include <linux/spinlock.h> |
14 | #include <linux/proc_fs.h> | 13 | #include <linux/proc_fs.h> |
15 | #include <linux/device.h> | 14 | #include <linux/device.h> |
16 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | #include <asm/system.h> | ||
17 | #include <asm/uaccess.h> | 17 | #include <asm/uaccess.h> |
18 | #include <asm/sn/sn_sal.h> | 18 | #include <asm/sn/sn_sal.h> |
19 | #include <asm/sn/addrs.h> | 19 | #include <asm/sn/addrs.h> |
@@ -481,6 +481,9 @@ static int __init tiocx_init(void) | |||
481 | cnodeid_t cnodeid; | 481 | cnodeid_t cnodeid; |
482 | int found_tiocx_device = 0; | 482 | int found_tiocx_device = 0; |
483 | 483 | ||
484 | if (!ia64_platform_is("sn2")) | ||
485 | return -ENODEV; | ||
486 | |||
484 | bus_register(&tiocx_bus_type); | 487 | bus_register(&tiocx_bus_type); |
485 | 488 | ||
486 | for (cnodeid = 0; cnodeid < MAX_COMPACT_NODES; cnodeid++) { | 489 | for (cnodeid = 0; cnodeid < MAX_COMPACT_NODES; cnodeid++) { |
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c index 177ddb748ebe..d580adcad927 100644 --- a/arch/ia64/sn/kernel/xpc_main.c +++ b/arch/ia64/sn/kernel/xpc_main.c | |||
@@ -53,6 +53,7 @@ | |||
53 | #include <linux/cache.h> | 53 | #include <linux/cache.h> |
54 | #include <linux/interrupt.h> | 54 | #include <linux/interrupt.h> |
55 | #include <linux/slab.h> | 55 | #include <linux/slab.h> |
56 | #include <linux/delay.h> | ||
56 | #include <asm/sn/intr.h> | 57 | #include <asm/sn/intr.h> |
57 | #include <asm/sn/sn_sal.h> | 58 | #include <asm/sn/sn_sal.h> |
58 | #include <asm/uaccess.h> | 59 | #include <asm/uaccess.h> |
@@ -308,8 +309,7 @@ xpc_make_first_contact(struct xpc_partition *part) | |||
308 | "partition %d\n", XPC_PARTID(part)); | 309 | "partition %d\n", XPC_PARTID(part)); |
309 | 310 | ||
310 | /* wait a 1/4 of a second or so */ | 311 | /* wait a 1/4 of a second or so */ |
311 | set_current_state(TASK_INTERRUPTIBLE); | 312 | msleep_interruptible(250); |
312 | (void) schedule_timeout(0.25 * HZ); | ||
313 | 313 | ||
314 | if (part->act_state == XPC_P_DEACTIVATING) { | 314 | if (part->act_state == XPC_P_DEACTIVATING) { |
315 | return part->reason; | 315 | return part->reason; |
@@ -841,9 +841,7 @@ xpc_do_exit(void) | |||
841 | down(&xpc_discovery_exited); | 841 | down(&xpc_discovery_exited); |
842 | 842 | ||
843 | 843 | ||
844 | set_current_state(TASK_INTERRUPTIBLE); | 844 | msleep_interruptible(300); |
845 | schedule_timeout(0.3 * HZ); | ||
846 | set_current_state(TASK_RUNNING); | ||
847 | 845 | ||
848 | 846 | ||
849 | /* wait for all partitions to become inactive */ | 847 | /* wait for all partitions to become inactive */ |
@@ -860,12 +858,8 @@ xpc_do_exit(void) | |||
860 | } | 858 | } |
861 | } | 859 | } |
862 | 860 | ||
863 | if (active_part_count) { | 861 | if (active_part_count) |
864 | set_current_state(TASK_INTERRUPTIBLE); | 862 | msleep_interruptible(300); |
865 | schedule_timeout(0.3 * HZ); | ||
866 | set_current_state(TASK_RUNNING); | ||
867 | } | ||
868 | |||
869 | } while (active_part_count > 0); | 863 | } while (active_part_count > 0); |
870 | 864 | ||
871 | 865 | ||
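The xpc_main.c hunks swap the open-coded set_current_state()/schedule_timeout() sleeps, written with constants such as 0.25 * HZ, for msleep_interruptible() from the newly included <linux/delay.h>. A before/after sketch of the pattern, with illustrative function names:

#include <linux/delay.h>
#include <linux/sched.h>

static void quarter_second_sleep_old(void)
{
	/* jiffies-based and HZ-dependent */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(HZ / 4);
}

static void quarter_second_sleep_new(void)
{
	/* milliseconds, HZ-independent, returns early on a pending signal */
	msleep_interruptible(250);
}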
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 5da9bdbde7cb..a2f7a88aefbb 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c | |||
@@ -11,9 +11,10 @@ | |||
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <asm/dma.h> | 13 | #include <asm/dma.h> |
14 | #include <asm/sn/sn_sal.h> | 14 | #include <asm/sn/pcibr_provider.h> |
15 | #include <asm/sn/pcibus_provider_defs.h> | 15 | #include <asm/sn/pcibus_provider_defs.h> |
16 | #include <asm/sn/pcidev.h> | 16 | #include <asm/sn/pcidev.h> |
17 | #include <asm/sn/sn_sal.h> | ||
17 | 18 | ||
18 | #define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) | 19 | #define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) |
19 | #define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG)) | 20 | #define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG)) |
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c index 0e47bce85f2d..d1647b863e61 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c | |||
@@ -8,9 +8,9 @@ | |||
8 | 8 | ||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <asm/sn/sn_sal.h> | 10 | #include <asm/sn/sn_sal.h> |
11 | #include <asm/sn/pcibr_provider.h> | ||
11 | #include <asm/sn/pcibus_provider_defs.h> | 12 | #include <asm/sn/pcibus_provider_defs.h> |
12 | #include <asm/sn/pcidev.h> | 13 | #include <asm/sn/pcidev.h> |
13 | #include "pci/pcibr_provider.h" | ||
14 | 14 | ||
15 | int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */ | 15 | int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */ |
16 | 16 | ||
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c index 64af2b2c1787..b058dc2a0b9d 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c | |||
@@ -8,18 +8,17 @@ | |||
8 | 8 | ||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/pci.h> | 10 | #include <linux/pci.h> |
11 | #include <asm/sn/sn_sal.h> | 11 | #include <asm/sn/addrs.h> |
12 | #include <asm/sn/geo.h> | 12 | #include <asm/sn/geo.h> |
13 | #include "xtalk/xwidgetdev.h" | 13 | #include <asm/sn/pcibr_provider.h> |
14 | #include "xtalk/hubdev.h" | ||
15 | #include <asm/sn/pcibus_provider_defs.h> | 14 | #include <asm/sn/pcibus_provider_defs.h> |
16 | #include <asm/sn/pcidev.h> | 15 | #include <asm/sn/pcidev.h> |
17 | #include "pci/tiocp.h" | 16 | #include <asm/sn/pic.h> |
18 | #include "pci/pic.h" | 17 | #include <asm/sn/sn_sal.h> |
19 | #include "pci/pcibr_provider.h" | 18 | #include <asm/sn/tiocp.h> |
20 | #include "pci/tiocp.h" | ||
21 | #include "tio.h" | 19 | #include "tio.h" |
22 | #include <asm/sn/addrs.h> | 20 | #include "xtalk/xwidgetdev.h" |
21 | #include "xtalk/hubdev.h" | ||
23 | 22 | ||
24 | extern int sn_ioif_inited; | 23 | extern int sn_ioif_inited; |
25 | 24 | ||
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c index 3893999d23d8..9813da56d311 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c | |||
@@ -6,18 +6,51 @@ | |||
6 | * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved. | 6 | * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/types.h> | ||
10 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
10 | #include <linux/types.h> | ||
11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
12 | #include <asm/sn/sn_sal.h> | 12 | #include <asm/sn/addrs.h> |
13 | #include "xtalk/xwidgetdev.h" | ||
14 | #include <asm/sn/geo.h> | 13 | #include <asm/sn/geo.h> |
15 | #include "xtalk/hubdev.h" | 14 | #include <asm/sn/pcibr_provider.h> |
16 | #include <asm/sn/pcibus_provider_defs.h> | 15 | #include <asm/sn/pcibus_provider_defs.h> |
17 | #include <asm/sn/pcidev.h> | 16 | #include <asm/sn/pcidev.h> |
18 | #include "pci/pcibr_provider.h" | 17 | #include <asm/sn/sn_sal.h> |
19 | #include <asm/sn/addrs.h> | 18 | #include "xtalk/xwidgetdev.h" |
19 | #include "xtalk/hubdev.h" | ||
20 | |||
21 | int | ||
22 | sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp) | ||
23 | { | ||
24 | struct ia64_sal_retval ret_stuff; | ||
25 | uint64_t busnum; | ||
26 | |||
27 | ret_stuff.status = 0; | ||
28 | ret_stuff.v0 = 0; | ||
20 | 29 | ||
30 | busnum = soft->pbi_buscommon.bs_persist_busnum; | ||
31 | SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, (u64) busnum, | ||
32 | (u64) device, (u64) resp, 0, 0, 0, 0); | ||
33 | |||
34 | return (int)ret_stuff.v0; | ||
35 | } | ||
36 | |||
37 | int | ||
38 | sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action, | ||
39 | void *resp) | ||
40 | { | ||
41 | struct ia64_sal_retval ret_stuff; | ||
42 | uint64_t busnum; | ||
43 | |||
44 | ret_stuff.status = 0; | ||
45 | ret_stuff.v0 = 0; | ||
46 | |||
47 | busnum = soft->pbi_buscommon.bs_persist_busnum; | ||
48 | SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_DISABLE, | ||
49 | (u64) busnum, (u64) device, (u64) action, | ||
50 | (u64) resp, 0, 0, 0); | ||
51 | |||
52 | return (int)ret_stuff.v0; | ||
53 | } | ||
21 | 54 | ||
22 | static int sal_pcibr_error_interrupt(struct pcibus_info *soft) | 55 | static int sal_pcibr_error_interrupt(struct pcibus_info *soft) |
23 | { | 56 | { |
@@ -188,3 +221,6 @@ pcibr_init_provider(void) | |||
188 | 221 | ||
189 | return 0; | 222 | return 0; |
190 | } | 223 | } |
224 | |||
225 | EXPORT_SYMBOL_GPL(sal_pcibr_slot_enable); | ||
226 | EXPORT_SYMBOL_GPL(sal_pcibr_slot_disable); | ||
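sal_pcibr_slot_enable() and sal_pcibr_slot_disable() are exported GPL-only, presumably for a separate SN PCI hotplug consumer. The sketch below shows one hypothetical caller; the size of the response buffer and the meaning of the returned value are assumptions, not taken from this patch.

#include <linux/kernel.h>
#include <asm/sn/pcibr_provider.h>

static int demo_enable_slot(struct pcibus_info *soft, int device)
{
	u64 resp[2] = { 0, 0 };	/* opaque SAL response area; size assumed */

	/* returns SAL's v0 result; its exact semantics are not shown here */
	return sal_pcibr_slot_enable(soft, device, resp);
}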
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_reg.c b/arch/ia64/sn/pci/pcibr/pcibr_reg.c index 865c11c3b50a..21426d02fbe6 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_reg.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_reg.c | |||
@@ -6,13 +6,13 @@ | |||
6 | * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved. | 6 | * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/types.h> | ||
10 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
10 | #include <linux/types.h> | ||
11 | #include <asm/sn/pcibr_provider.h> | ||
11 | #include <asm/sn/pcibus_provider_defs.h> | 12 | #include <asm/sn/pcibus_provider_defs.h> |
12 | #include <asm/sn/pcidev.h> | 13 | #include <asm/sn/pcidev.h> |
13 | #include "pci/tiocp.h" | 14 | #include <asm/sn/pic.h> |
14 | #include "pci/pic.h" | 15 | #include <asm/sn/tiocp.h> |
15 | #include "pci/pcibr_provider.h" | ||
16 | 16 | ||
17 | union br_ptr { | 17 | union br_ptr { |
18 | struct tiocp tio; | 18 | struct tiocp tio; |
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index 05aa8c2fe9bb..51cc4e63092c 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c | |||
@@ -589,8 +589,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft) | |||
589 | 589 | ||
590 | /* sanity check prom rev */ | 590 | /* sanity check prom rev */ |
591 | 591 | ||
592 | if (sn_sal_rev_major() < 4 || | 592 | if (sn_sal_rev() < 0x0406) { |
593 | (sn_sal_rev_major() == 4 && sn_sal_rev_minor() < 6)) { | ||
594 | printk | 593 | printk |
595 | (KERN_ERR "%s: SGI prom rev 4.06 or greater required " | 594 | (KERN_ERR "%s: SGI prom rev 4.06 or greater required " |
596 | "for tioca support\n", __FUNCTION__); | 595 | "for tioca support\n", __FUNCTION__); |