31 files changed, 1255 insertions, 544 deletions
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index b2e2f6509eb0..e1fb68ddec26 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_IA64_PALINFO) += palinfo.o
| 17 | obj-$(CONFIG_IOSAPIC) += iosapic.o | 17 | obj-$(CONFIG_IOSAPIC) += iosapic.o |
| 18 | obj-$(CONFIG_MODULES) += module.o | 18 | obj-$(CONFIG_MODULES) += module.o |
| 19 | obj-$(CONFIG_SMP) += smp.o smpboot.o domain.o | 19 | obj-$(CONFIG_SMP) += smp.o smpboot.o domain.o |
| 20 | obj-$(CONFIG_NUMA) += numa.o | ||
| 20 | obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o | 21 | obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o |
| 21 | obj-$(CONFIG_IA64_CYCLONE) += cyclone.o | 22 | obj-$(CONFIG_IA64_CYCLONE) += cyclone.o |
| 22 | obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o | 23 | obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o |
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index cda06f88c66e..542256e98e60 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -640,9 +640,11 @@ acpi_boot_init (void)
| 640 | if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id()) | 640 | if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id()) |
| 641 | node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu]; | 641 | node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu]; |
| 642 | } | 642 | } |
| 643 | build_cpu_to_node_map(); | ||
| 644 | # endif | 643 | # endif |
| 645 | #endif | 644 | #endif |
| 645 | #ifdef CONFIG_ACPI_NUMA | ||
| 646 | build_cpu_to_node_map(); | ||
| 647 | #endif | ||
| 646 | /* Make boot-up look pretty */ | 648 | /* Make boot-up look pretty */ |
| 647 | printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus); | 649 | printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus); |
| 648 | return 0; | 650 | return 0; |
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
new file mode 100644
index 000000000000..a68ce6678092
--- /dev/null
+++ b/arch/ia64/kernel/numa.c
@@ -0,0 +1,57 @@
| 1 | /* | ||
| 2 | * This program is free software; you can redistribute it and/or modify | ||
| 3 | * it under the terms of the GNU General Public License as published by | ||
| 4 | * the Free Software Foundation; either version 2 of the License, or | ||
| 5 | * (at your option) any later version. | ||
| 6 | * | ||
| 7 | * This program is distributed in the hope that it will be useful, | ||
| 8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 10 | * GNU General Public License for more details. | ||
| 11 | * | ||
| 12 | * You should have received a copy of the GNU General Public License | ||
| 13 | * along with this program; if not, write to the Free Software | ||
| 14 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 15 | * | ||
| 16 | * ia64 kernel NUMA specific stuff | ||
| 17 | * | ||
| 18 | * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de> | ||
| 19 | * Copyright (C) 2004 Silicon Graphics, Inc. | ||
| 20 | * Jesse Barnes <jbarnes@sgi.com> | ||
| 21 | */ | ||
| 22 | #include <linux/config.h> | ||
| 23 | #include <linux/topology.h> | ||
| 24 | #include <linux/module.h> | ||
| 25 | #include <asm/processor.h> | ||
| 26 | #include <asm/smp.h> | ||
| 27 | |||
| 28 | u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned; | ||
| 29 | EXPORT_SYMBOL(cpu_to_node_map); | ||
| 30 | |||
| 31 | cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; | ||
| 32 | |||
| 33 | /** | ||
| 34 | * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays | ||
| 35 | * | ||
| 36 | * Build cpu to node mapping and initialize the per node cpu masks using | ||
| 37 | * info from the node_cpuid array handed to us by ACPI. | ||
| 38 | */ | ||
| 39 | void __init build_cpu_to_node_map(void) | ||
| 40 | { | ||
| 41 | int cpu, i, node; | ||
| 42 | |||
| 43 | for(node=0; node < MAX_NUMNODES; node++) | ||
| 44 | cpus_clear(node_to_cpu_mask[node]); | ||
| 45 | |||
| 46 | for(cpu = 0; cpu < NR_CPUS; ++cpu) { | ||
| 47 | node = -1; | ||
| 48 | for (i = 0; i < NR_CPUS; ++i) | ||
| 49 | if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) { | ||
| 50 | node = node_cpuid[i].nid; | ||
| 51 | break; | ||
| 52 | } | ||
| 53 | cpu_to_node_map[cpu] = (node >= 0) ? node : 0; | ||
| 54 | if (node >= 0) | ||
| 55 | cpu_set(cpu, node_to_cpu_mask[node]); | ||
| 56 | } | ||
| 57 | } | ||
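(Editorial aside, not part of the patch.) The mapping that the new arch/ia64/kernel/numa.c centralizes is easy to see in isolation: each logical CPU is looked up by physical id in the ACPI-provided node_cpuid[] table and falls back to node 0 when no entry matches. The standalone sketch below mirrors that loop; the table contents, NR_CPUS and MAX_NUMNODES values are invented for illustration, and the logical CPU number stands in for cpu_physical_id().

#include <stdio.h>

#define NR_CPUS      8
#define MAX_NUMNODES 4

struct node_cpuid_s { int phys_id; int nid; };

/* Stand-in for the ACPI-built node_cpuid[] table: four CPUs spread
 * over two nodes; the remaining slots are unused (phys_id -1). */
static struct node_cpuid_s node_cpuid[NR_CPUS] = {
        { 0, 0 }, { 1, 0 }, { 2, 1 }, { 3, 1 },
        { -1, 0 }, { -1, 0 }, { -1, 0 }, { -1, 0 },
};

static int cpu_to_node_map[NR_CPUS];

int main(void)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                int node = -1;

                /* Same search as build_cpu_to_node_map(). */
                for (int i = 0; i < NR_CPUS; i++)
                        if (cpu == node_cpuid[i].phys_id) {
                                node = node_cpuid[i].nid;
                                break;
                        }
                cpu_to_node_map[cpu] = (node >= 0) ? node : 0;
                printf("cpu %d -> node %d\n", cpu, cpu_to_node_map[cpu]);
        }
        return 0;
}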
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index edd9f07860b2..b8a0a7d257a9 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -143,6 +143,7 @@ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
| 143 | 143 | ||
| 144 | __copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16); | 144 | __copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16); |
| 145 | psr->mfh = 0; /* drop signal handler's fph contents... */ | 145 | psr->mfh = 0; /* drop signal handler's fph contents... */ |
| 146 | preempt_disable(); | ||
| 146 | if (psr->dfh) | 147 | if (psr->dfh) |
| 147 | ia64_drop_fpu(current); | 148 | ia64_drop_fpu(current); |
| 148 | else { | 149 | else { |
@@ -150,6 +151,7 @@ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
| 150 | __ia64_load_fpu(current->thread.fph); | 151 | __ia64_load_fpu(current->thread.fph); |
| 151 | ia64_set_local_fpu_owner(current); | 152 | ia64_set_local_fpu_owner(current); |
| 152 | } | 153 | } |
| 154 | preempt_enable(); | ||
| 153 | } | 155 | } |
| 154 | return err; | 156 | return err; |
| 155 | } | 157 | } |
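(Editorial aside, not part of the patch.) The signal.c hunk applies the usual rule that a check-then-act sequence on per-CPU state, here the per-processor FPU ownership, must not be interrupted by preemption, or the task could migrate to another CPU between the check and the action. A minimal kernel-style sketch of that shape follows; only preempt_disable()/preempt_enable() are real APIs, the helpers are hypothetical stand-ins for the psr->dfh test and the ia64_drop_fpu()/__ia64_load_fpu() calls.

#include <linux/preempt.h>

/* Hypothetical helpers, named only for illustration. */
extern int this_cpu_owns_state(void);
extern void drop_state_on_this_cpu(void);
extern void reload_state_on_this_cpu(void);

static void restore_private_state(void)
{
        preempt_disable();              /* no migration from here...   */
        if (this_cpu_owns_state())
                drop_state_on_this_cpu();
        else
                reload_state_on_this_cpu();
        preempt_enable();               /* ...to here                  */
}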
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 623b0a546709..7d72c0d872b3 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -525,47 +525,6 @@ smp_build_cpu_map (void)
| 525 | } | 525 | } |
| 526 | } | 526 | } |
| 527 | 527 | ||
| 528 | #ifdef CONFIG_NUMA | ||
| 529 | |||
| 530 | /* on which node is each logical CPU (one cacheline even for 64 CPUs) */ | ||
| 531 | u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned; | ||
| 532 | EXPORT_SYMBOL(cpu_to_node_map); | ||
| 533 | /* which logical CPUs are on which nodes */ | ||
| 534 | cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; | ||
| 535 | |||
| 536 | /* | ||
| 537 | * Build cpu to node mapping and initialize the per node cpu masks. | ||
| 538 | */ | ||
| 539 | void __init | ||
| 540 | build_cpu_to_node_map (void) | ||
| 541 | { | ||
| 542 | int cpu, i, node; | ||
| 543 | |||
| 544 | for(node=0; node<MAX_NUMNODES; node++) | ||
| 545 | cpus_clear(node_to_cpu_mask[node]); | ||
| 546 | for(cpu = 0; cpu < NR_CPUS; ++cpu) { | ||
| 547 | /* | ||
| 548 | * All Itanium NUMA platforms I know use ACPI, so maybe we | ||
| 549 | * can drop this ifdef completely. [EF] | ||
| 550 | */ | ||
| 551 | #ifdef CONFIG_ACPI_NUMA | ||
| 552 | node = -1; | ||
| 553 | for (i = 0; i < NR_CPUS; ++i) | ||
| 554 | if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) { | ||
| 555 | node = node_cpuid[i].nid; | ||
| 556 | break; | ||
| 557 | } | ||
| 558 | #else | ||
| 559 | # error Fixme: Dunno how to build CPU-to-node map. | ||
| 560 | #endif | ||
| 561 | cpu_to_node_map[cpu] = (node >= 0) ? node : 0; | ||
| 562 | if (node >= 0) | ||
| 563 | cpu_set(cpu, node_to_cpu_mask[node]); | ||
| 564 | } | ||
| 565 | } | ||
| 566 | |||
| 567 | #endif /* CONFIG_NUMA */ | ||
| 568 | |||
| 569 | /* | 528 | /* |
| 570 | * Cycle through the APs sending Wakeup IPIs to boot each. | 529 | * Cycle through the APs sending Wakeup IPIs to boot each. |
| 571 | */ | 530 | */ |
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index e7e520d90f03..4440c8343fa4 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -90,14 +90,16 @@ die (const char *str, struct pt_regs *regs, long err)
| 90 | .lock_owner_depth = 0 | 90 | .lock_owner_depth = 0 |
| 91 | }; | 91 | }; |
| 92 | static int die_counter; | 92 | static int die_counter; |
| 93 | int cpu = get_cpu(); | ||
| 93 | 94 | ||
| 94 | if (die.lock_owner != smp_processor_id()) { | 95 | if (die.lock_owner != cpu) { |
| 95 | console_verbose(); | 96 | console_verbose(); |
| 96 | spin_lock_irq(&die.lock); | 97 | spin_lock_irq(&die.lock); |
| 97 | die.lock_owner = smp_processor_id(); | 98 | die.lock_owner = cpu; |
| 98 | die.lock_owner_depth = 0; | 99 | die.lock_owner_depth = 0; |
| 99 | bust_spinlocks(1); | 100 | bust_spinlocks(1); |
| 100 | } | 101 | } |
| 102 | put_cpu(); | ||
| 101 | 103 | ||
| 102 | if (++die.lock_owner_depth < 3) { | 104 | if (++die.lock_owner_depth < 3) { |
| 103 | printk("%s[%d]: %s %ld [%d]\n", | 105 | printk("%s[%d]: %s %ld [%d]\n", |
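(Editorial aside.) The traps.c change replaces repeated smp_processor_id() calls, whose result is only stable while preemption is disabled, with the get_cpu()/put_cpu() pair, which pins the task for exactly the window in which the CPU number is used. A sketch of the idiom, with a hypothetical consumer:

#include <linux/smp.h>

extern void use_stable_cpu_number(int cpu);     /* hypothetical */

static void example(void)
{
        int cpu = get_cpu();    /* disables preemption, returns CPU id */

        use_stable_cpu_number(cpu);
        put_cpu();              /* re-enables preemption */
}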
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index f3fd528ead3b..b5c90e548195 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -44,150 +44,7 @@ struct early_node_data {
| 44 | }; | 44 | }; |
| 45 | 45 | ||
| 46 | static struct early_node_data mem_data[MAX_NUMNODES] __initdata; | 46 | static struct early_node_data mem_data[MAX_NUMNODES] __initdata; |
| 47 | 47 | static nodemask_t memory_less_mask __initdata; | |
| 48 | /** | ||
| 49 | * reassign_cpu_only_nodes - called from find_memory to move CPU-only nodes to a memory node | ||
| 50 | * | ||
| 51 | * This function will move nodes with only CPUs (no memory) | ||
| 52 | * to a node with memory which is at the minimum numa_slit distance. | ||
| 53 | * Any reassigments will result in the compression of the nodes | ||
| 54 | * and renumbering the nid values where appropriate. | ||
| 55 | * The static declarations below are to avoid large stack size which | ||
| 56 | * makes the code not re-entrant. | ||
| 57 | */ | ||
| 58 | static void __init reassign_cpu_only_nodes(void) | ||
| 59 | { | ||
| 60 | struct node_memblk_s *p; | ||
| 61 | int i, j, k, nnode, nid, cpu, cpunid, pxm; | ||
| 62 | u8 cslit, slit; | ||
| 63 | static DECLARE_BITMAP(nodes_with_mem, MAX_NUMNODES) __initdata; | ||
| 64 | static u8 numa_slit_fix[MAX_NUMNODES * MAX_NUMNODES] __initdata; | ||
| 65 | static int node_flip[MAX_NUMNODES] __initdata; | ||
| 66 | static int old_nid_map[NR_CPUS] __initdata; | ||
| 67 | |||
| 68 | for (nnode = 0, p = &node_memblk[0]; p < &node_memblk[num_node_memblks]; p++) | ||
| 69 | if (!test_bit(p->nid, (void *) nodes_with_mem)) { | ||
| 70 | set_bit(p->nid, (void *) nodes_with_mem); | ||
| 71 | nnode++; | ||
| 72 | } | ||
| 73 | |||
| 74 | /* | ||
| 75 | * All nids with memory. | ||
| 76 | */ | ||
| 77 | if (nnode == num_online_nodes()) | ||
| 78 | return; | ||
| 79 | |||
| 80 | /* | ||
| 81 | * Change nids and attempt to migrate CPU-only nodes | ||
| 82 | * to the best numa_slit (closest neighbor) possible. | ||
| 83 | * For reassigned CPU nodes a nid can't be arrived at | ||
| 84 | * until after this loop because the target nid's new | ||
| 85 | * identity might not have been established yet. So | ||
| 86 | * new nid values are fabricated above num_online_nodes() and | ||
| 87 | * mapped back later to their true value. | ||
| 88 | */ | ||
| 89 | /* MCD - This code is a bit complicated, but may be unnecessary now. | ||
| 90 | * We can now handle much more interesting node-numbering. | ||
| 91 | * The old requirement that 0 <= nid <= numnodes <= MAX_NUMNODES | ||
| 92 | * and that there be no holes in the numbering 0..numnodes | ||
| 93 | * has become simply 0 <= nid <= MAX_NUMNODES. | ||
| 94 | */ | ||
| 95 | nid = 0; | ||
| 96 | for_each_online_node(i) { | ||
| 97 | if (test_bit(i, (void *) nodes_with_mem)) { | ||
| 98 | /* | ||
| 99 | * Save original nid value for numa_slit | ||
| 100 | * fixup and node_cpuid reassignments. | ||
| 101 | */ | ||
| 102 | node_flip[nid] = i; | ||
| 103 | |||
| 104 | if (i == nid) { | ||
| 105 | nid++; | ||
| 106 | continue; | ||
| 107 | } | ||
| 108 | |||
| 109 | for (p = &node_memblk[0]; p < &node_memblk[num_node_memblks]; p++) | ||
| 110 | if (p->nid == i) | ||
| 111 | p->nid = nid; | ||
| 112 | |||
| 113 | cpunid = nid; | ||
| 114 | nid++; | ||
| 115 | } else | ||
| 116 | cpunid = MAX_NUMNODES; | ||
| 117 | |||
| 118 | for (cpu = 0; cpu < NR_CPUS; cpu++) | ||
| 119 | if (node_cpuid[cpu].nid == i) { | ||
| 120 | /* | ||
| 121 | * For nodes not being reassigned just | ||
| 122 | * fix the cpu's nid and reverse pxm map | ||
| 123 | */ | ||
| 124 | if (cpunid < MAX_NUMNODES) { | ||
| 125 | pxm = nid_to_pxm_map[i]; | ||
| 126 | pxm_to_nid_map[pxm] = | ||
| 127 | node_cpuid[cpu].nid = cpunid; | ||
| 128 | continue; | ||
| 129 | } | ||
| 130 | |||
| 131 | /* | ||
| 132 | * For nodes being reassigned, find best node by | ||
| 133 | * numa_slit information and then make a temporary | ||
| 134 | * nid value based on current nid and num_online_nodes(). | ||
| 135 | */ | ||
| 136 | slit = 0xff; | ||
| 137 | k = 2*num_online_nodes(); | ||
| 138 | for_each_online_node(j) { | ||
| 139 | if (i == j) | ||
| 140 | continue; | ||
| 141 | else if (test_bit(j, (void *) nodes_with_mem)) { | ||
| 142 | cslit = numa_slit[i * num_online_nodes() + j]; | ||
| 143 | if (cslit < slit) { | ||
| 144 | k = num_online_nodes() + j; | ||
| 145 | slit = cslit; | ||
| 146 | } | ||
| 147 | } | ||
| 148 | } | ||
| 149 | |||
| 150 | /* save old nid map so we can update the pxm */ | ||
| 151 | old_nid_map[cpu] = node_cpuid[cpu].nid; | ||
| 152 | node_cpuid[cpu].nid = k; | ||
| 153 | } | ||
| 154 | } | ||
| 155 | |||
| 156 | /* | ||
| 157 | * Fixup temporary nid values for CPU-only nodes. | ||
| 158 | */ | ||
| 159 | for (cpu = 0; cpu < NR_CPUS; cpu++) | ||
| 160 | if (node_cpuid[cpu].nid == (2*num_online_nodes())) { | ||
| 161 | pxm = nid_to_pxm_map[old_nid_map[cpu]]; | ||
| 162 | pxm_to_nid_map[pxm] = node_cpuid[cpu].nid = nnode - 1; | ||
| 163 | } else { | ||
| 164 | for (i = 0; i < nnode; i++) { | ||
| 165 | if (node_flip[i] != (node_cpuid[cpu].nid - num_online_nodes())) | ||
| 166 | continue; | ||
| 167 | |||
| 168 | pxm = nid_to_pxm_map[old_nid_map[cpu]]; | ||
| 169 | pxm_to_nid_map[pxm] = node_cpuid[cpu].nid = i; | ||
| 170 | break; | ||
| 171 | } | ||
| 172 | } | ||
| 173 | |||
| 174 | /* | ||
| 175 | * Fix numa_slit by compressing from larger | ||
| 176 | * nid array to reduced nid array. | ||
| 177 | */ | ||
| 178 | for (i = 0; i < nnode; i++) | ||
| 179 | for (j = 0; j < nnode; j++) | ||
| 180 | numa_slit_fix[i * nnode + j] = | ||
| 181 | numa_slit[node_flip[i] * num_online_nodes() + node_flip[j]]; | ||
| 182 | |||
| 183 | memcpy(numa_slit, numa_slit_fix, sizeof (numa_slit)); | ||
| 184 | |||
| 185 | nodes_clear(node_online_map); | ||
| 186 | for (i = 0; i < nnode; i++) | ||
| 187 | node_set_online(i); | ||
| 188 | |||
| 189 | return; | ||
| 190 | } | ||
| 191 | 48 | ||
| 192 | /* | 49 | /* |
| 193 | * To prevent cache aliasing effects, align per-node structures so that they | 50 | * To prevent cache aliasing effects, align per-node structures so that they |
@@ -233,44 +90,101 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
| 233 | } | 90 | } |
| 234 | 91 | ||
| 235 | /** | 92 | /** |
| 236 | * early_nr_phys_cpus_node - return number of physical cpus on a given node | 93 | * early_nr_cpus_node - return number of cpus on a given node |
| 237 | * @node: node to check | 94 | * @node: node to check |
| 238 | * | 95 | * |
| 239 | * Count the number of physical cpus on @node. These are cpus that actually | 96 | * Count the number of cpus on @node. We can't use nr_cpus_node() yet because |
| 240 | * exist. We can't use nr_cpus_node() yet because | ||
| 241 | * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been | 97 | * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been |
| 242 | * called yet. | 98 | * called yet. Note that node 0 will also count all non-existent cpus. |
| 243 | */ | 99 | */ |
| 244 | static int early_nr_phys_cpus_node(int node) | 100 | static int __init early_nr_cpus_node(int node) |
| 245 | { | 101 | { |
| 246 | int cpu, n = 0; | 102 | int cpu, n = 0; |
| 247 | 103 | ||
| 248 | for (cpu = 0; cpu < NR_CPUS; cpu++) | 104 | for (cpu = 0; cpu < NR_CPUS; cpu++) |
| 249 | if (node == node_cpuid[cpu].nid) | 105 | if (node == node_cpuid[cpu].nid) |
| 250 | if ((cpu == 0) || node_cpuid[cpu].phys_id) | 106 | n++; |
| 251 | n++; | ||
| 252 | 107 | ||
| 253 | return n; | 108 | return n; |
| 254 | } | 109 | } |
| 255 | 110 | ||
| 111 | /** | ||
| 112 | * compute_pernodesize - compute size of pernode data | ||
| 113 | * @node: the node id. | ||
| 114 | */ | ||
| 115 | static unsigned long __init compute_pernodesize(int node) | ||
| 116 | { | ||
| 117 | unsigned long pernodesize = 0, cpus; | ||
| 118 | |||
| 119 | cpus = early_nr_cpus_node(node); | ||
| 120 | pernodesize += PERCPU_PAGE_SIZE * cpus; | ||
| 121 | pernodesize += node * L1_CACHE_BYTES; | ||
| 122 | pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t)); | ||
| 123 | pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data)); | ||
| 124 | pernodesize = PAGE_ALIGN(pernodesize); | ||
| 125 | return pernodesize; | ||
| 126 | } | ||
| 256 | 127 | ||
| 257 | /** | 128 | /** |
| 258 | * early_nr_cpus_node - return number of cpus on a given node | 129 | * per_cpu_node_setup - setup per-cpu areas on each node |
| 259 | * @node: node to check | 130 | * @cpu_data: per-cpu area on this node |
| 131 | * @node: node to setup | ||
| 260 | * | 132 | * |
| 261 | * Count the number of cpus on @node. We can't use nr_cpus_node() yet because | 133 | * Copy the static per-cpu data into the region we just set aside and then |
| 262 | * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been | 134 | * setup __per_cpu_offset for each CPU on this node. Return a pointer to |
| 263 | * called yet. Note that node 0 will also count all non-existent cpus. | 135 | * the end of the area. |
| 264 | */ | 136 | */ |
| 265 | static int early_nr_cpus_node(int node) | 137 | static void *per_cpu_node_setup(void *cpu_data, int node) |
| 266 | { | 138 | { |
| 267 | int cpu, n = 0; | 139 | #ifdef CONFIG_SMP |
| 140 | int cpu; | ||
| 268 | 141 | ||
| 269 | for (cpu = 0; cpu < NR_CPUS; cpu++) | 142 | for (cpu = 0; cpu < NR_CPUS; cpu++) { |
| 270 | if (node == node_cpuid[cpu].nid) | 143 | if (node == node_cpuid[cpu].nid) { |
| 271 | n++; | 144 | memcpy(__va(cpu_data), __phys_per_cpu_start, |
| 145 | __per_cpu_end - __per_cpu_start); | ||
| 146 | __per_cpu_offset[cpu] = (char*)__va(cpu_data) - | ||
| 147 | __per_cpu_start; | ||
| 148 | cpu_data += PERCPU_PAGE_SIZE; | ||
| 149 | } | ||
| 150 | } | ||
| 151 | #endif | ||
| 152 | return cpu_data; | ||
| 153 | } | ||
| 272 | 154 | ||
| 273 | return n; | 155 | /** |
| 156 | * fill_pernode - initialize pernode data. | ||
| 157 | * @node: the node id. | ||
| 158 | * @pernode: physical address of pernode data | ||
| 159 | * @pernodesize: size of the pernode data | ||
| 160 | */ | ||
| 161 | static void __init fill_pernode(int node, unsigned long pernode, | ||
| 162 | unsigned long pernodesize) | ||
| 163 | { | ||
| 164 | void *cpu_data; | ||
| 165 | int cpus = early_nr_cpus_node(node); | ||
| 166 | struct bootmem_data *bdp = &mem_data[node].bootmem_data; | ||
| 167 | |||
| 168 | mem_data[node].pernode_addr = pernode; | ||
| 169 | mem_data[node].pernode_size = pernodesize; | ||
| 170 | memset(__va(pernode), 0, pernodesize); | ||
| 171 | |||
| 172 | cpu_data = (void *)pernode; | ||
| 173 | pernode += PERCPU_PAGE_SIZE * cpus; | ||
| 174 | pernode += node * L1_CACHE_BYTES; | ||
| 175 | |||
| 176 | mem_data[node].pgdat = __va(pernode); | ||
| 177 | pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); | ||
| 178 | |||
| 179 | mem_data[node].node_data = __va(pernode); | ||
| 180 | pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data)); | ||
| 181 | |||
| 182 | mem_data[node].pgdat->bdata = bdp; | ||
| 183 | pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); | ||
| 184 | |||
| 185 | cpu_data = per_cpu_node_setup(cpu_data, node); | ||
| 186 | |||
| 187 | return; | ||
| 274 | } | 188 | } |
| 275 | 189 | ||
| 276 | /** | 190 | /** |
@@ -304,9 +218,8 @@ static int early_nr_cpus_node(int node)
| 304 | static int __init find_pernode_space(unsigned long start, unsigned long len, | 218 | static int __init find_pernode_space(unsigned long start, unsigned long len, |
| 305 | int node) | 219 | int node) |
| 306 | { | 220 | { |
| 307 | unsigned long epfn, cpu, cpus, phys_cpus; | 221 | unsigned long epfn; |
| 308 | unsigned long pernodesize = 0, pernode, pages, mapsize; | 222 | unsigned long pernodesize = 0, pernode, pages, mapsize; |
| 309 | void *cpu_data; | ||
| 310 | struct bootmem_data *bdp = &mem_data[node].bootmem_data; | 223 | struct bootmem_data *bdp = &mem_data[node].bootmem_data; |
| 311 | 224 | ||
| 312 | epfn = (start + len) >> PAGE_SHIFT; | 225 | epfn = (start + len) >> PAGE_SHIFT; |
@@ -329,49 +242,12 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
| 329 | * Calculate total size needed, incl. what's necessary | 242 | * Calculate total size needed, incl. what's necessary |
| 330 | * for good alignment and alias prevention. | 243 | * for good alignment and alias prevention. |
| 331 | */ | 244 | */ |
| 332 | cpus = early_nr_cpus_node(node); | 245 | pernodesize = compute_pernodesize(node); |
| 333 | phys_cpus = early_nr_phys_cpus_node(node); | ||
| 334 | pernodesize += PERCPU_PAGE_SIZE * cpus; | ||
| 335 | pernodesize += node * L1_CACHE_BYTES; | ||
| 336 | pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t)); | ||
| 337 | pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data)); | ||
| 338 | pernodesize = PAGE_ALIGN(pernodesize); | ||
| 339 | pernode = NODEDATA_ALIGN(start, node); | 246 | pernode = NODEDATA_ALIGN(start, node); |
| 340 | 247 | ||
| 341 | /* Is this range big enough for what we want to store here? */ | 248 | /* Is this range big enough for what we want to store here? */ |
| 342 | if (start + len > (pernode + pernodesize + mapsize)) { | 249 | if (start + len > (pernode + pernodesize + mapsize)) |
| 343 | mem_data[node].pernode_addr = pernode; | 250 | fill_pernode(node, pernode, pernodesize); |
| 344 | mem_data[node].pernode_size = pernodesize; | ||
| 345 | memset(__va(pernode), 0, pernodesize); | ||
| 346 | |||
| 347 | cpu_data = (void *)pernode; | ||
| 348 | pernode += PERCPU_PAGE_SIZE * cpus; | ||
| 349 | pernode += node * L1_CACHE_BYTES; | ||
| 350 | |||
| 351 | mem_data[node].pgdat = __va(pernode); | ||
| 352 | pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); | ||
| 353 | |||
| 354 | mem_data[node].node_data = __va(pernode); | ||
| 355 | pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data)); | ||
| 356 | |||
| 357 | mem_data[node].pgdat->bdata = bdp; | ||
| 358 | pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); | ||
| 359 | |||
| 360 | /* | ||
| 361 | * Copy the static per-cpu data into the region we | ||
| 362 | * just set aside and then setup __per_cpu_offset | ||
| 363 | * for each CPU on this node. | ||
| 364 | */ | ||
| 365 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
| 366 | if (node == node_cpuid[cpu].nid) { | ||
| 367 | memcpy(__va(cpu_data), __phys_per_cpu_start, | ||
| 368 | __per_cpu_end - __per_cpu_start); | ||
| 369 | __per_cpu_offset[cpu] = (char*)__va(cpu_data) - | ||
| 370 | __per_cpu_start; | ||
| 371 | cpu_data += PERCPU_PAGE_SIZE; | ||
| 372 | } | ||
| 373 | } | ||
| 374 | } | ||
| 375 | 251 | ||
| 376 | return 0; | 252 | return 0; |
| 377 | } | 253 | } |
@@ -411,6 +287,9 @@ static void __init reserve_pernode_space(void)
| 411 | for_each_online_node(node) { | 287 | for_each_online_node(node) { |
| 412 | pg_data_t *pdp = mem_data[node].pgdat; | 288 | pg_data_t *pdp = mem_data[node].pgdat; |
| 413 | 289 | ||
| 290 | if (node_isset(node, memory_less_mask)) | ||
| 291 | continue; | ||
| 292 | |||
| 414 | bdp = pdp->bdata; | 293 | bdp = pdp->bdata; |
| 415 | 294 | ||
| 416 | /* First the bootmem_map itself */ | 295 | /* First the bootmem_map itself */ |
@@ -436,8 +315,8 @@ static void __init reserve_pernode_space(void)
| 436 | */ | 315 | */ |
| 437 | static void __init initialize_pernode_data(void) | 316 | static void __init initialize_pernode_data(void) |
| 438 | { | 317 | { |
| 439 | int cpu, node; | ||
| 440 | pg_data_t *pgdat_list[MAX_NUMNODES]; | 318 | pg_data_t *pgdat_list[MAX_NUMNODES]; |
| 319 | int cpu, node; | ||
| 441 | 320 | ||
| 442 | for_each_online_node(node) | 321 | for_each_online_node(node) |
| 443 | pgdat_list[node] = mem_data[node].pgdat; | 322 | pgdat_list[node] = mem_data[node].pgdat; |
@@ -447,12 +326,99 @@ static void __init initialize_pernode_data(void)
| 447 | memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list, | 326 | memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list, |
| 448 | sizeof(pgdat_list)); | 327 | sizeof(pgdat_list)); |
| 449 | } | 328 | } |
| 450 | 329 | #ifdef CONFIG_SMP | |
| 451 | /* Set the node_data pointer for each per-cpu struct */ | 330 | /* Set the node_data pointer for each per-cpu struct */ |
| 452 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 331 | for (cpu = 0; cpu < NR_CPUS; cpu++) { |
| 453 | node = node_cpuid[cpu].nid; | 332 | node = node_cpuid[cpu].nid; |
| 454 | per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data; | 333 | per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data; |
| 455 | } | 334 | } |
| 335 | #else | ||
| 336 | { | ||
| 337 | struct cpuinfo_ia64 *cpu0_cpu_info; | ||
| 338 | cpu = 0; | ||
| 339 | node = node_cpuid[cpu].nid; | ||
| 340 | cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start + | ||
| 341 | ((char *)&per_cpu__cpu_info - __per_cpu_start)); | ||
| 342 | cpu0_cpu_info->node_data = mem_data[node].node_data; | ||
| 343 | } | ||
| 344 | #endif /* CONFIG_SMP */ | ||
| 345 | } | ||
| 346 | |||
| 347 | /** | ||
| 348 | * memory_less_node_alloc - * attempt to allocate memory on the best NUMA slit | ||
| 349 | * node but fall back to any other node when __alloc_bootmem_node fails | ||
| 350 | * for best. | ||
| 351 | * @nid: node id | ||
| 352 | * @pernodesize: size of this node's pernode data | ||
| 353 | * @align: alignment to use for this node's pernode data | ||
| 354 | */ | ||
| 355 | static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize, | ||
| 356 | unsigned long align) | ||
| 357 | { | ||
| 358 | void *ptr = NULL; | ||
| 359 | u8 best = 0xff; | ||
| 360 | int bestnode = -1, node; | ||
| 361 | |||
| 362 | for_each_online_node(node) { | ||
| 363 | if (node_isset(node, memory_less_mask)) | ||
| 364 | continue; | ||
| 365 | else if (node_distance(nid, node) < best) { | ||
| 366 | best = node_distance(nid, node); | ||
| 367 | bestnode = node; | ||
| 368 | } | ||
| 369 | } | ||
| 370 | |||
| 371 | ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat, | ||
| 372 | pernodesize, align, __pa(MAX_DMA_ADDRESS)); | ||
| 373 | |||
| 374 | if (!ptr) | ||
| 375 | panic("NO memory for memory less node\n"); | ||
| 376 | return ptr; | ||
| 377 | } | ||
| 378 | |||
| 379 | /** | ||
| 380 | * pgdat_insert - insert the pgdat into global pgdat_list | ||
| 381 | * @pgdat: the pgdat for a node. | ||
| 382 | */ | ||
| 383 | static void __init pgdat_insert(pg_data_t *pgdat) | ||
| 384 | { | ||
| 385 | pg_data_t *prev = NULL, *next; | ||
| 386 | |||
| 387 | for_each_pgdat(next) | ||
| 388 | if (pgdat->node_id < next->node_id) | ||
| 389 | break; | ||
| 390 | else | ||
| 391 | prev = next; | ||
| 392 | |||
| 393 | if (prev) { | ||
| 394 | prev->pgdat_next = pgdat; | ||
| 395 | pgdat->pgdat_next = next; | ||
| 396 | } else { | ||
| 397 | pgdat->pgdat_next = pgdat_list; | ||
| 398 | pgdat_list = pgdat; | ||
| 399 | } | ||
| 400 | |||
| 401 | return; | ||
| 402 | } | ||
| 403 | |||
| 404 | /** | ||
| 405 | * memory_less_nodes - allocate and initialize CPU only nodes pernode | ||
| 406 | * information. | ||
| 407 | */ | ||
| 408 | static void __init memory_less_nodes(void) | ||
| 409 | { | ||
| 410 | unsigned long pernodesize; | ||
| 411 | void *pernode; | ||
| 412 | int node; | ||
| 413 | |||
| 414 | for_each_node_mask(node, memory_less_mask) { | ||
| 415 | pernodesize = compute_pernodesize(node); | ||
| 416 | pernode = memory_less_node_alloc(node, pernodesize, | ||
| 417 | (node) ? (node * PERCPU_PAGE_SIZE) : (1024*1024)); | ||
| 418 | fill_pernode(node, __pa(pernode), pernodesize); | ||
| 419 | } | ||
| 420 | |||
| 421 | return; | ||
| 456 | } | 422 | } |
| 457 | 423 | ||
| 458 | /** | 424 | /** |
@@ -472,16 +438,19 @@ void __init find_memory(void)
| 472 | node_set_online(0); | 438 | node_set_online(0); |
| 473 | } | 439 | } |
| 474 | 440 | ||
| 441 | nodes_or(memory_less_mask, memory_less_mask, node_online_map); | ||
| 475 | min_low_pfn = -1; | 442 | min_low_pfn = -1; |
| 476 | max_low_pfn = 0; | 443 | max_low_pfn = 0; |
| 477 | 444 | ||
| 478 | if (num_online_nodes() > 1) | ||
| 479 | reassign_cpu_only_nodes(); | ||
| 480 | |||
| 481 | /* These actually end up getting called by call_pernode_memory() */ | 445 | /* These actually end up getting called by call_pernode_memory() */ |
| 482 | efi_memmap_walk(filter_rsvd_memory, build_node_maps); | 446 | efi_memmap_walk(filter_rsvd_memory, build_node_maps); |
| 483 | efi_memmap_walk(filter_rsvd_memory, find_pernode_space); | 447 | efi_memmap_walk(filter_rsvd_memory, find_pernode_space); |
| 484 | 448 | ||
| 449 | for_each_online_node(node) | ||
| 450 | if (mem_data[node].bootmem_data.node_low_pfn) { | ||
| 451 | node_clear(node, memory_less_mask); | ||
| 452 | mem_data[node].min_pfn = ~0UL; | ||
| 453 | } | ||
| 485 | /* | 454 | /* |
| 486 | * Initialize the boot memory maps in reverse order since that's | 455 | * Initialize the boot memory maps in reverse order since that's |
| 487 | * what the bootmem allocator expects | 456 | * what the bootmem allocator expects |
@@ -492,17 +461,14 @@ void __init find_memory(void)
| 492 | 461 | ||
| 493 | if (!node_online(node)) | 462 | if (!node_online(node)) |
| 494 | continue; | 463 | continue; |
| 464 | else if (node_isset(node, memory_less_mask)) | ||
| 465 | continue; | ||
| 495 | 466 | ||
| 496 | bdp = &mem_data[node].bootmem_data; | 467 | bdp = &mem_data[node].bootmem_data; |
| 497 | pernode = mem_data[node].pernode_addr; | 468 | pernode = mem_data[node].pernode_addr; |
| 498 | pernodesize = mem_data[node].pernode_size; | 469 | pernodesize = mem_data[node].pernode_size; |
| 499 | map = pernode + pernodesize; | 470 | map = pernode + pernodesize; |
| 500 | 471 | ||
| 501 | /* Sanity check... */ | ||
| 502 | if (!pernode) | ||
| 503 | panic("pernode space for node %d " | ||
| 504 | "could not be allocated!", node); | ||
| 505 | |||
| 506 | init_bootmem_node(mem_data[node].pgdat, | 472 | init_bootmem_node(mem_data[node].pgdat, |
| 507 | map>>PAGE_SHIFT, | 473 | map>>PAGE_SHIFT, |
| 508 | bdp->node_boot_start>>PAGE_SHIFT, | 474 | bdp->node_boot_start>>PAGE_SHIFT, |
@@ -512,6 +478,7 @@ void __init find_memory(void)
| 512 | efi_memmap_walk(filter_rsvd_memory, free_node_bootmem); | 478 | efi_memmap_walk(filter_rsvd_memory, free_node_bootmem); |
| 513 | 479 | ||
| 514 | reserve_pernode_space(); | 480 | reserve_pernode_space(); |
| 481 | memory_less_nodes(); | ||
| 515 | initialize_pernode_data(); | 482 | initialize_pernode_data(); |
| 516 | 483 | ||
| 517 | max_pfn = max_low_pfn; | 484 | max_pfn = max_low_pfn; |
@@ -519,6 +486,7 @@ void __init find_memory(void)
| 519 | find_initrd(); | 486 | find_initrd(); |
| 520 | } | 487 | } |
| 521 | 488 | ||
| 489 | #ifdef CONFIG_SMP | ||
| 522 | /** | 490 | /** |
| 523 | * per_cpu_init - setup per-cpu variables | 491 | * per_cpu_init - setup per-cpu variables |
| 524 | * | 492 | * |
@@ -529,15 +497,15 @@ void *per_cpu_init(void)
| 529 | { | 497 | { |
| 530 | int cpu; | 498 | int cpu; |
| 531 | 499 | ||
| 532 | if (smp_processor_id() == 0) { | 500 | if (smp_processor_id() != 0) |
| 533 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 501 | return __per_cpu_start + __per_cpu_offset[smp_processor_id()]; |
| 534 | per_cpu(local_per_cpu_offset, cpu) = | 502 | |
| 535 | __per_cpu_offset[cpu]; | 503 | for (cpu = 0; cpu < NR_CPUS; cpu++) |
| 536 | } | 504 | per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu]; |
| 537 | } | ||
| 538 | 505 | ||
| 539 | return __per_cpu_start + __per_cpu_offset[smp_processor_id()]; | 506 | return __per_cpu_start + __per_cpu_offset[smp_processor_id()]; |
| 540 | } | 507 | } |
| 508 | #endif /* CONFIG_SMP */ | ||
| 541 | 509 | ||
| 542 | /** | 510 | /** |
| 543 | * show_mem - give short summary of memory stats | 511 | * show_mem - give short summary of memory stats |
@@ -680,12 +648,13 @@ void __init paging_init(void)
| 680 | 648 | ||
| 681 | max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; | 649 | max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; |
| 682 | 650 | ||
| 683 | /* so min() will work in count_node_pages */ | ||
| 684 | for_each_online_node(node) | ||
| 685 | mem_data[node].min_pfn = ~0UL; | ||
| 686 | |||
| 687 | efi_memmap_walk(filter_rsvd_memory, count_node_pages); | 651 | efi_memmap_walk(filter_rsvd_memory, count_node_pages); |
| 688 | 652 | ||
| 653 | vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page)); | ||
| 654 | vmem_map = (struct page *) vmalloc_end; | ||
| 655 | efi_memmap_walk(create_mem_map_page_table, NULL); | ||
| 656 | printk("Virtual mem_map starts at 0x%p\n", vmem_map); | ||
| 657 | |||
| 689 | for_each_online_node(node) { | 658 | for_each_online_node(node) { |
| 690 | memset(zones_size, 0, sizeof(zones_size)); | 659 | memset(zones_size, 0, sizeof(zones_size)); |
| 691 | memset(zholes_size, 0, sizeof(zholes_size)); | 660 | memset(zholes_size, 0, sizeof(zholes_size)); |
@@ -719,15 +688,6 @@ void __init paging_init(void)
| 719 | mem_data[node].num_dma_physpages); | 688 | mem_data[node].num_dma_physpages); |
| 720 | } | 689 | } |
| 721 | 690 | ||
| 722 | if (node == 0) { | ||
| 723 | vmalloc_end -= | ||
| 724 | PAGE_ALIGN(max_low_pfn * sizeof(struct page)); | ||
| 725 | vmem_map = (struct page *) vmalloc_end; | ||
| 726 | |||
| 727 | efi_memmap_walk(create_mem_map_page_table, NULL); | ||
| 728 | printk("Virtual mem_map starts at 0x%p\n", vmem_map); | ||
| 729 | } | ||
| 730 | |||
| 731 | pfn_offset = mem_data[node].min_pfn; | 691 | pfn_offset = mem_data[node].min_pfn; |
| 732 | 692 | ||
| 733 | NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset; | 693 | NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset; |
@@ -735,5 +695,11 @@ void __init paging_init(void)
| 735 | pfn_offset, zholes_size); | 695 | pfn_offset, zholes_size); |
| 736 | } | 696 | } |
| 737 | 697 | ||
| 698 | /* | ||
| 699 | * Make memory less nodes become a member of the known nodes. | ||
| 700 | */ | ||
| 701 | for_each_node_mask(node, memory_less_mask) | ||
| 702 | pgdat_insert(mem_data[node].pgdat); | ||
| 703 | |||
| 738 | zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); | 704 | zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); |
| 739 | } | 705 | } |
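(Editorial aside, not part of the patch.) The core of the memory-less-node support above is the neighbour selection in memory_less_node_alloc(): a node with CPUs but no memory gets its pernode area allocated on the memory-bearing node at the smallest SLIT distance. The standalone sketch below mirrors that loop; the distance table and the memory_less[] flags are invented for illustration and stand in for node_distance() and memory_less_mask.

#include <stdio.h>

#define MAX_NUMNODES 4

/* Stand-in for the ACPI SLIT: distance[i][j], 10 == local. */
static int distance[MAX_NUMNODES][MAX_NUMNODES] = {
        { 10, 20, 40, 60 },
        { 20, 10, 20, 40 },
        { 40, 20, 10, 20 },
        { 60, 40, 20, 10 },
};

/* 1 = CPU-only node (no memory), 0 = node has memory. */
static int memory_less[MAX_NUMNODES] = { 0, 1, 0, 1 };

/* Mirror of the selection loop in memory_less_node_alloc(): pick the
 * memory-bearing node at minimum distance from 'nid'. */
static int best_memory_node(int nid)
{
        int best = 0xff, bestnode = -1;

        for (int node = 0; node < MAX_NUMNODES; node++) {
                if (memory_less[node])
                        continue;
                if (distance[nid][node] < best) {
                        best = distance[nid][node];
                        bestnode = node;
                }
        }
        return bestnode;
}

int main(void)
{
        for (int nid = 0; nid < MAX_NUMNODES; nid++)
                if (memory_less[nid])
                        printf("node %d borrows pernode space from node %d\n",
                               nid, best_memory_node(nid));
        return 0;
}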
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 4eb2f52b87a1..65f9958db9f0 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -597,7 +597,8 @@ mem_init (void)
| 597 | kclist_add(&kcore_kernel, _stext, _end - _stext); | 597 | kclist_add(&kcore_kernel, _stext, _end - _stext); |
| 598 | 598 | ||
| 599 | for_each_pgdat(pgdat) | 599 | for_each_pgdat(pgdat) |
| 600 | totalram_pages += free_all_bootmem_node(pgdat); | 600 | if (pgdat->bdata->node_bootmem_map) |
| 601 | totalram_pages += free_all_bootmem_node(pgdat); | ||
| 601 | 602 | ||
| 602 | reserved_pages = 0; | 603 | reserved_pages = 0; |
| 603 | efi_memmap_walk(count_reserved_pages, &reserved_pages); | 604 | efi_memmap_walk(count_reserved_pages, &reserved_pages); |
diff --git a/arch/ia64/sn/include/xtalk/hubdev.h b/arch/ia64/sn/include/xtalk/hubdev.h
index 868e7ecae84b..580a1c0403a7 100644
--- a/arch/ia64/sn/include/xtalk/hubdev.h
+++ b/arch/ia64/sn/include/xtalk/hubdev.h
@@ -8,6 +8,8 @@
| 8 | #ifndef _ASM_IA64_SN_XTALK_HUBDEV_H | 8 | #ifndef _ASM_IA64_SN_XTALK_HUBDEV_H |
| 9 | #define _ASM_IA64_SN_XTALK_HUBDEV_H | 9 | #define _ASM_IA64_SN_XTALK_HUBDEV_H |
| 10 | 10 | ||
| 11 | #include "xtalk/xwidgetdev.h" | ||
| 12 | |||
| 11 | #define HUB_WIDGET_ID_MAX 0xf | 13 | #define HUB_WIDGET_ID_MAX 0xf |
| 12 | #define DEV_PER_WIDGET (2*2*8) | 14 | #define DEV_PER_WIDGET (2*2*8) |
| 13 | #define IIO_ITTE_WIDGET_BITS 4 /* size of widget field */ | 15 | #define IIO_ITTE_WIDGET_BITS 4 /* size of widget field */ |
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 783eb4323847..a67f39e448cb 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -9,21 +9,28 @@
| 9 | #include <linux/bootmem.h> | 9 | #include <linux/bootmem.h> |
| 10 | #include <linux/nodemask.h> | 10 | #include <linux/nodemask.h> |
| 11 | #include <asm/sn/types.h> | 11 | #include <asm/sn/types.h> |
| 12 | #include <asm/sn/sn_sal.h> | ||
| 13 | #include <asm/sn/addrs.h> | 12 | #include <asm/sn/addrs.h> |
| 14 | #include <asm/sn/pcibus_provider_defs.h> | ||
| 15 | #include <asm/sn/pcidev.h> | ||
| 16 | #include "pci/pcibr_provider.h" | ||
| 17 | #include "xtalk/xwidgetdev.h" | ||
| 18 | #include <asm/sn/geo.h> | 13 | #include <asm/sn/geo.h> |
| 19 | #include "xtalk/hubdev.h" | ||
| 20 | #include <asm/sn/io.h> | 14 | #include <asm/sn/io.h> |
| 15 | #include <asm/sn/pcibr_provider.h> | ||
| 16 | #include <asm/sn/pcibus_provider_defs.h> | ||
| 17 | #include <asm/sn/pcidev.h> | ||
| 21 | #include <asm/sn/simulator.h> | 18 | #include <asm/sn/simulator.h> |
| 19 | #include <asm/sn/sn_sal.h> | ||
| 22 | #include <asm/sn/tioca_provider.h> | 20 | #include <asm/sn/tioca_provider.h> |
| 21 | #include "xtalk/hubdev.h" | ||
| 22 | #include "xtalk/xwidgetdev.h" | ||
| 23 | 23 | ||
| 24 | char master_baseio_wid; | ||
| 25 | nasid_t master_nasid = INVALID_NASID; /* Partition Master */ | 24 | nasid_t master_nasid = INVALID_NASID; /* Partition Master */ |
| 26 | 25 | ||
| 26 | static struct list_head sn_sysdata_list; | ||
| 27 | |||
| 28 | /* sysdata list struct */ | ||
| 29 | struct sysdata_el { | ||
| 30 | struct list_head entry; | ||
| 31 | void *sysdata; | ||
| 32 | }; | ||
| 33 | |||
| 27 | struct slab_info { | 34 | struct slab_info { |
| 28 | struct hubdev_info hubdev; | 35 | struct hubdev_info hubdev; |
| 29 | }; | 36 | }; |
@@ -138,23 +145,6 @@ sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
| 138 | } | 145 | } |
| 139 | 146 | ||
| 140 | /* | 147 | /* |
| 141 | * sn_alloc_pci_sysdata() - This routine allocates a pci controller | ||
| 142 | * which is expected as the pci_dev and pci_bus sysdata by the Linux | ||
| 143 | * PCI infrastructure. | ||
| 144 | */ | ||
| 145 | static inline struct pci_controller *sn_alloc_pci_sysdata(void) | ||
| 146 | { | ||
| 147 | struct pci_controller *pci_sysdata; | ||
| 148 | |||
| 149 | pci_sysdata = kmalloc(sizeof(*pci_sysdata), GFP_KERNEL); | ||
| 150 | if (!pci_sysdata) | ||
| 151 | BUG(); | ||
| 152 | |||
| 153 | memset(pci_sysdata, 0, sizeof(*pci_sysdata)); | ||
| 154 | return pci_sysdata; | ||
| 155 | } | ||
| 156 | |||
| 157 | /* | ||
| 158 | * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for | 148 | * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for |
| 159 | * each node in the system. | 149 | * each node in the system. |
| 160 | */ | 150 | */ |
@@ -221,22 +211,34 @@ static void sn_fixup_ionodes(void)
| 221 | 211 | ||
| 222 | } | 212 | } |
| 223 | 213 | ||
| 214 | void sn_pci_unfixup_slot(struct pci_dev *dev) | ||
| 215 | { | ||
| 216 | struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev; | ||
| 217 | |||
| 218 | sn_irq_unfixup(dev); | ||
| 219 | pci_dev_put(host_pci_dev); | ||
| 220 | pci_dev_put(dev); | ||
| 221 | } | ||
| 222 | |||
| 224 | /* | 223 | /* |
| 225 | * sn_pci_fixup_slot() - This routine sets up a slot's resources | 224 | * sn_pci_fixup_slot() - This routine sets up a slot's resources |
| 226 | * consistent with the Linux PCI abstraction layer. Resources acquired | 225 | * consistent with the Linux PCI abstraction layer. Resources acquired |
| 227 | * from our PCI provider include PIO maps to BAR space and interrupt | 226 | * from our PCI provider include PIO maps to BAR space and interrupt |
| 228 | * objects. | 227 | * objects. |
| 229 | */ | 228 | */ |
| 230 | static void sn_pci_fixup_slot(struct pci_dev *dev) | 229 | void sn_pci_fixup_slot(struct pci_dev *dev) |
| 231 | { | 230 | { |
| 232 | int idx; | 231 | int idx; |
| 233 | int segment = 0; | 232 | int segment = 0; |
| 234 | uint64_t size; | ||
| 235 | struct sn_irq_info *sn_irq_info; | ||
| 236 | struct pci_dev *host_pci_dev; | ||
| 237 | int status = 0; | 233 | int status = 0; |
| 238 | struct pcibus_bussoft *bs; | 234 | struct pcibus_bussoft *bs; |
| 235 | struct pci_bus *host_pci_bus; | ||
| 236 | struct pci_dev *host_pci_dev; | ||
| 237 | struct sn_irq_info *sn_irq_info; | ||
| 238 | unsigned long size; | ||
| 239 | unsigned int bus_no, devfn; | ||
| 239 | 240 | ||
| 241 | pci_dev_get(dev); /* for the sysdata pointer */ | ||
| 240 | dev->sysdata = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL); | 242 | dev->sysdata = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL); |
| 241 | if (SN_PCIDEV_INFO(dev) <= 0) | 243 | if (SN_PCIDEV_INFO(dev) <= 0) |
| 242 | BUG(); /* Cannot afford to run out of memory */ | 244 | BUG(); /* Cannot afford to run out of memory */ |
@@ -253,7 +255,7 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
| 253 | (u64) __pa(SN_PCIDEV_INFO(dev)), | 255 | (u64) __pa(SN_PCIDEV_INFO(dev)), |
| 254 | (u64) __pa(sn_irq_info)); | 256 | (u64) __pa(sn_irq_info)); |
| 255 | if (status) | 257 | if (status) |
| 256 | BUG(); /* Cannot get platform pci device information information */ | 258 | BUG(); /* Cannot get platform pci device information */ |
| 257 | 259 | ||
| 258 | /* Copy over PIO Mapped Addresses */ | 260 | /* Copy over PIO Mapped Addresses */ |
| 259 | for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { | 261 | for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { |
@@ -275,15 +277,21 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
| 275 | dev->resource[idx].parent = &iomem_resource; | 277 | dev->resource[idx].parent = &iomem_resource; |
| 276 | } | 278 | } |
| 277 | 279 | ||
| 278 | /* set up host bus linkages */ | 280 | /* |
| 279 | bs = SN_PCIBUS_BUSSOFT(dev->bus); | 281 | * Using the PROMs values for the PCI host bus, get the Linux |
| 280 | host_pci_dev = | 282 | * PCI host_pci_dev struct and set up host bus linkages |
| 281 | pci_find_slot(SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32, | 283 | */ |
| 282 | SN_PCIDEV_INFO(dev)-> | 284 | |
| 283 | pdi_slot_host_handle & 0xffffffff); | 285 | bus_no = SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32; |
| 286 | devfn = SN_PCIDEV_INFO(dev)->pdi_slot_host_handle & 0xffffffff; | ||
| 287 | host_pci_bus = pci_find_bus(pci_domain_nr(dev->bus), bus_no); | ||
| 288 | host_pci_dev = pci_get_slot(host_pci_bus, devfn); | ||
| 289 | |||
| 290 | SN_PCIDEV_INFO(dev)->host_pci_dev = host_pci_dev; | ||
| 284 | SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info = | 291 | SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info = |
| 285 | SN_PCIDEV_INFO(host_pci_dev); | 292 | SN_PCIDEV_INFO(host_pci_dev); |
| 286 | SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev; | 293 | SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev; |
| 294 | bs = SN_PCIBUS_BUSSOFT(dev->bus); | ||
| 287 | SN_PCIDEV_INFO(dev)->pdi_pcibus_info = bs; | 295 | SN_PCIDEV_INFO(dev)->pdi_pcibus_info = bs; |
| 288 | 296 | ||
| 289 | if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) { | 297 | if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) { |
@@ -297,6 +305,9 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
| 297 | SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info; | 305 | SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info; |
| 298 | dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq; | 306 | dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq; |
| 299 | sn_irq_fixup(dev, sn_irq_info); | 307 | sn_irq_fixup(dev, sn_irq_info); |
| 308 | } else { | ||
| 309 | SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = NULL; | ||
| 310 | kfree(sn_irq_info); | ||
| 300 | } | 311 | } |
| 301 | } | 312 | } |
| 302 | 313 | ||
@@ -304,55 +315,57 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
| 304 | * sn_pci_controller_fixup() - This routine sets up a bus's resources | 315 | * sn_pci_controller_fixup() - This routine sets up a bus's resources |
| 305 | * consistent with the Linux PCI abstraction layer. | 316 | * consistent with the Linux PCI abstraction layer. |
| 306 | */ | 317 | */ |
| 307 | static void sn_pci_controller_fixup(int segment, int busnum) | 318 | void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) |
| 308 | { | 319 | { |
| 309 | int status = 0; | 320 | int status = 0; |
| 310 | int nasid, cnode; | 321 | int nasid, cnode; |
| 311 | struct pci_bus *bus; | ||
| 312 | struct pci_controller *controller; | 322 | struct pci_controller *controller; |
| 313 | struct pcibus_bussoft *prom_bussoft_ptr; | 323 | struct pcibus_bussoft *prom_bussoft_ptr; |
| 314 | struct hubdev_info *hubdev_info; | 324 | struct hubdev_info *hubdev_info; |
| 315 | void *provider_soft; | 325 | void *provider_soft; |
| 316 | struct sn_pcibus_provider *provider; | 326 | struct sn_pcibus_provider *provider; |
| 317 | 327 | ||
| 318 | status = | 328 | status = sal_get_pcibus_info((u64) segment, (u64) busnum, |
| 319 | sal_get_pcibus_info((u64) segment, (u64) busnum, | 329 | (u64) ia64_tpa(&prom_bussoft_ptr)); |
| 320 | (u64) ia64_tpa(&prom_bussoft_ptr)); | 330 | if (status > 0) |
| 321 | if (status > 0) { | 331 | return; /*bus # does not exist */ |
| 322 | return; /* bus # does not exist */ | ||
| 323 | } | ||
| 324 | |||
| 325 | prom_bussoft_ptr = __va(prom_bussoft_ptr); | 332 | prom_bussoft_ptr = __va(prom_bussoft_ptr); |
| 326 | controller = sn_alloc_pci_sysdata(); | ||
| 327 | /* controller non-zero is BUG'd in sn_alloc_pci_sysdata */ | ||
| 328 | 333 | ||
| 329 | bus = pci_scan_bus(busnum, &pci_root_ops, controller); | 334 | controller = kcalloc(1,sizeof(struct pci_controller), GFP_KERNEL); |
| 335 | if (!controller) | ||
| 336 | BUG(); | ||
| 337 | |||
| 330 | if (bus == NULL) { | 338 | if (bus == NULL) { |
| 331 | return; /* error, or bus already scanned */ | 339 | bus = pci_scan_bus(busnum, &pci_root_ops, controller); |
| 340 | if (bus == NULL) | ||
| 341 | return; /* error, or bus already scanned */ | ||
| 342 | bus->sysdata = NULL; | ||
| 332 | } | 343 | } |
| 333 | 344 | ||
| 345 | if (bus->sysdata) | ||
| 346 | goto error_return; /* sysdata already alloc'd */ | ||
| 347 | |||
| 334 | /* | 348 | /* |
| 335 | * Per-provider fixup. Copies the contents from prom to local | 349 | * Per-provider fixup. Copies the contents from prom to local |
| 336 | * area and links SN_PCIBUS_BUSSOFT(). | 350 | * area and links SN_PCIBUS_BUSSOFT(). |
| 337 | */ | 351 | */ |
| 338 | 352 | ||
| 339 | if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) { | 353 | if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) |
| 340 | return; /* unsupported asic type */ | 354 | return; /* unsupported asic type */ |
| 341 | } | 355 | |
| 356 | if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB) | ||
| 357 | goto error_return; /* no further fixup necessary */ | ||
| 342 | 358 | ||
| 343 | provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type]; | 359 | provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type]; |
| 344 | if (provider == NULL) { | 360 | if (provider == NULL) |
| 345 | return; /* no provider registerd for this asic */ | 361 | return; /* no provider registerd for this asic */ |
| 346 | } | ||
| 347 | 362 | ||
| 348 | provider_soft = NULL; | 363 | provider_soft = NULL; |
| 349 | if (provider->bus_fixup) { | 364 | if (provider->bus_fixup) |
| 350 | provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr); | 365 | provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr); |
| 351 | } | ||
| 352 | 366 | ||
| 353 | if (provider_soft == NULL) { | 367 | if (provider_soft == NULL) |
| 354 | return; /* fixup failed or not applicable */ | 368 | return; /* fixup failed or not applicable */ |
| 355 | } | ||
| 356 | 369 | ||
| 357 | /* | 370 | /* |
| 358 | * Generic bus fixup goes here. Don't reference prom_bussoft_ptr | 371 | * Generic bus fixup goes here. Don't reference prom_bussoft_ptr |
@@ -361,12 +374,47 @@ static void sn_pci_controller_fixup(int segment, int busnum)
| 361 | 374 | ||
| 362 | bus->sysdata = controller; | 375 | bus->sysdata = controller; |
| 363 | PCI_CONTROLLER(bus)->platform_data = provider_soft; | 376 | PCI_CONTROLLER(bus)->platform_data = provider_soft; |
| 364 | |||
| 365 | nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base); | 377 | nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base); |
| 366 | cnode = nasid_to_cnodeid(nasid); | 378 | cnode = nasid_to_cnodeid(nasid); |
| 367 | hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo); | 379 | hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo); |
| 368 | SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info = | 380 | SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info = |
| 369 | &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]); | 381 | &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]); |
| 382 | |||
| 383 | return; | ||
| 384 | |||
| 385 | error_return: | ||
| 386 | |||
| 387 | kfree(controller); | ||
| 388 | return; | ||
| 389 | } | ||
| 390 | |||
| 391 | void sn_bus_store_sysdata(struct pci_dev *dev) | ||
| 392 | { | ||
| 393 | struct sysdata_el *element; | ||
| 394 | |||
| 395 | element = kcalloc(1, sizeof(struct sysdata_el), GFP_KERNEL); | ||
| 396 | if (!element) { | ||
| 397 | dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__); | ||
| 398 | return; | ||
| 399 | } | ||
| 400 | element->sysdata = dev->sysdata; | ||
| 401 | list_add(&element->entry, &sn_sysdata_list); | ||
| 402 | } | ||
| 403 | |||
| 404 | void sn_bus_free_sysdata(void) | ||
| 405 | { | ||
| 406 | struct sysdata_el *element; | ||
| 407 | struct list_head *list; | ||
| 408 | |||
| 409 | sn_sysdata_free_start: | ||
| 410 | list_for_each(list, &sn_sysdata_list) { | ||
| 411 | element = list_entry(list, struct sysdata_el, entry); | ||
| 412 | list_del(&element->entry); | ||
| 413 | kfree(element->sysdata); | ||
| 414 | kfree(element); | ||
| 415 | goto sn_sysdata_free_start; | ||
| 416 | } | ||
| 417 | return; | ||
| 370 | } | 418 | } |
| 371 | 419 | ||
| 372 | /* | 420 | /* |
@@ -403,20 +451,17 @@ static int __init sn_pci_init(void)
| 403 | */ | 451 | */ |
| 404 | ia64_max_iommu_merge_mask = ~PAGE_MASK; | 452 | ia64_max_iommu_merge_mask = ~PAGE_MASK; |
| 405 | sn_fixup_ionodes(); | 453 | sn_fixup_ionodes(); |
| 406 | sn_irq = kmalloc(sizeof(struct sn_irq_info *) * NR_IRQS, GFP_KERNEL); | 454 | sn_irq_lh_init(); |
| 407 | if (sn_irq <= 0) | 455 | INIT_LIST_HEAD(&sn_sysdata_list); |
| 408 | BUG(); /* Canno afford to run out of memory. */ | ||
| 409 | memset(sn_irq, 0, sizeof(struct sn_irq_info *) * NR_IRQS); | ||
| 410 | |||
| 411 | sn_init_cpei_timer(); | 456 | sn_init_cpei_timer(); |
| 412 | 457 | ||
| 413 | #ifdef CONFIG_PROC_FS | 458 | #ifdef CONFIG_PROC_FS |
| 414 | register_sn_procfs(); | 459 | register_sn_procfs(); |
| 415 | #endif | 460 | #endif |
| 416 | 461 | ||
| 417 | for (i = 0; i < PCI_BUSES_TO_SCAN; i++) { | 462 | /* busses are not known yet ... */ |
| 418 | sn_pci_controller_fixup(0, i); | 463 | for (i = 0; i < PCI_BUSES_TO_SCAN; i++) |
| 419 | } | 464 | sn_pci_controller_fixup(0, i, NULL); |
| 420 | 465 | ||
| 421 | /* | 466 | /* |
| 422 | * Generic Linux PCI Layer has created the pci_bus and pci_dev | 467 | * Generic Linux PCI Layer has created the pci_bus and pci_dev |
@@ -425,9 +470,8 @@ static int __init sn_pci_init(void)
| 425 | */ | 470 | */ |
| 426 | 471 | ||
| 427 | while ((pci_dev = | 472 | while ((pci_dev = |
| 428 | pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) { | 473 | pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) |
| 429 | sn_pci_fixup_slot(pci_dev); | 474 | sn_pci_fixup_slot(pci_dev); |
| 430 | } | ||
| 431 | 475 | ||
| 432 | sn_ioif_inited = 1; /* sn I/O infrastructure now initialized */ | 476 | sn_ioif_inited = 1; /* sn I/O infrastructure now initialized */ |
| 433 | 477 | ||
@@ -469,3 +513,8 @@ cnodeid_get_geoid(cnodeid_t cnode)
| 469 | } | 513 | } |
| 470 | 514 | ||
| 471 | subsys_initcall(sn_pci_init); | 515 | subsys_initcall(sn_pci_init); |
| 516 | EXPORT_SYMBOL(sn_pci_fixup_slot); | ||
| 517 | EXPORT_SYMBOL(sn_pci_unfixup_slot); | ||
| 518 | EXPORT_SYMBOL(sn_pci_controller_fixup); | ||
| 519 | EXPORT_SYMBOL(sn_bus_store_sysdata); | ||
| 520 | EXPORT_SYMBOL(sn_bus_free_sysdata); | ||
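(Editorial aside.) One detail of the io_init.c rework worth spelling out: pci_find_slot() is replaced by pci_find_bus() plus pci_get_slot(), and pci_get_slot() returns the device with its reference count raised, which is why the new sn_pci_unfixup_slot() path drops references with pci_dev_put(). A minimal sketch of that pairing, using only stock PCI core calls:

#include <linux/pci.h>

/* Look up a device by (domain, bus number, devfn) and return it with a
 * reference held; the caller must balance this with pci_dev_put(). */
static struct pci_dev *lookup_referenced_dev(int domain, int busnr,
                                             unsigned int devfn)
{
        struct pci_bus *bus = pci_find_bus(domain, busnr);  /* no ref taken */

        if (!bus)
                return NULL;
        return pci_get_slot(bus, devfn);                    /* ref taken */
}

/* Teardown side: pci_dev_put(dev) once the pointer is no longer used. */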
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 0f4e8138658f..84d276a14ecb 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -9,13 +9,13 @@
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #include <linux/irq.h> | 11 | #include <linux/irq.h> |
| 12 | #include <asm/sn/intr.h> | 12 | #include <linux/spinlock.h> |
| 13 | #include <asm/sn/addrs.h> | 13 | #include <asm/sn/addrs.h> |
| 14 | #include <asm/sn/arch.h> | 14 | #include <asm/sn/arch.h> |
| 15 | #include "xtalk/xwidgetdev.h" | 15 | #include <asm/sn/intr.h> |
| 16 | #include <asm/sn/pcibr_provider.h> | ||
| 16 | #include <asm/sn/pcibus_provider_defs.h> | 17 | #include <asm/sn/pcibus_provider_defs.h> |
| 17 | #include <asm/sn/pcidev.h> | 18 | #include <asm/sn/pcidev.h> |
| 18 | #include "pci/pcibr_provider.h" | ||
| 19 | #include <asm/sn/shub_mmr.h> | 19 | #include <asm/sn/shub_mmr.h> |
| 20 | #include <asm/sn/sn_sal.h> | 20 | #include <asm/sn/sn_sal.h> |
| 21 | 21 | ||
@@ -25,7 +25,8 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
| 25 | 25 | ||
| 26 | extern int sn_force_interrupt_flag; | 26 | extern int sn_force_interrupt_flag; |
| 27 | extern int sn_ioif_inited; | 27 | extern int sn_ioif_inited; |
| 28 | struct sn_irq_info **sn_irq; | 28 | static struct list_head **sn_irq_lh; |
| 29 | static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */ | ||
| 29 | 30 | ||
| 30 | static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget, | 31 | static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget, |
| 31 | u64 sn_irq_info, | 32 | u64 sn_irq_info, |
@@ -101,7 +102,7 @@ static void sn_end_irq(unsigned int irq)
| 101 | nasid = get_nasid(); | 102 | nasid = get_nasid(); |
| 102 | event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR | 103 | event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR |
| 103 | (nasid, SH_EVENT_OCCURRED)); | 104 | (nasid, SH_EVENT_OCCURRED)); |
| 104 | /* If the UART bit is set here, we may have received an | 105 | /* If the UART bit is set here, we may have received an |
| 105 | * interrupt from the UART that the driver missed. To | 106 | * interrupt from the UART that the driver missed. To |
| 106 | * make sure, we IPI ourselves to force us to look again. | 107 | * make sure, we IPI ourselves to force us to look again. |
| 107 | */ | 108 | */ |
@@ -115,82 +116,84 @@ static void sn_end_irq(unsigned int irq)
| 115 | force_interrupt(irq); | 116 | force_interrupt(irq); |
| 116 | } | 117 | } |
| 117 | 118 | ||
| 119 | static void sn_irq_info_free(struct rcu_head *head); | ||
| 120 | |||
| 118 | static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) | 121 | static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) |
| 119 | { | 122 | { |
| 120 | struct sn_irq_info *sn_irq_info = sn_irq[irq]; | 123 | struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; |
| 121 | struct sn_irq_info *tmp_sn_irq_info; | ||
| 122 | int cpuid, cpuphys; | 124 | int cpuid, cpuphys; |
| 123 | nasid_t t_nasid; /* nasid to target */ | ||
| 124 | int t_slice; /* slice to target */ | ||
| 125 | |||
| 126 | /* allocate a temp sn_irq_info struct to get new target info */ | ||
| 127 | tmp_sn_irq_info = kmalloc(sizeof(*tmp_sn_irq_info), GFP_KERNEL); | ||
| 128 | if (!tmp_sn_irq_info) | ||
| 129 | return; | ||
| 130 | 125 | ||
| 131 | cpuid = first_cpu(mask); | 126 | cpuid = first_cpu(mask); |
| 132 | cpuphys = cpu_physical_id(cpuid); | 127 | cpuphys = cpu_physical_id(cpuid); |
| 133 | t_nasid = cpuid_to_nasid(cpuid); | ||
| 134 | t_slice = cpuid_to_slice(cpuid); | ||
| 135 | 128 | ||
| 136 | while (sn_irq_info) { | 129 | list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, |
| 137 | int status; | 130 | sn_irq_lh[irq], list) { |
| 138 | int local_widget; | 131 | uint64_t bridge; |
| 139 | uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge; | 132 | int local_widget, status; |
| 140 | nasid_t local_nasid = NASID_GET(bridge); | 133 | nasid_t local_nasid; |
| 134 | struct sn_irq_info *new_irq_info; | ||
| 135 | |||
| 136 | new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC); | ||
| 137 | if (new_irq_info == NULL) | ||
| 138 | break; | ||
| 139 | memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info)); | ||
| 140 | |||
| 141 | bridge = (uint64_t) new_irq_info->irq_bridge; | ||
| 142 | if (!bridge) { | ||
| 143 | kfree(new_irq_info); | ||
| 144 | break; /* irq is not a device interrupt */ | ||
| 145 | } | ||
| 141 | 146 | ||
| 142 | if (!bridge) | 147 | local_nasid = NASID_GET(bridge); |
| 143 | break; /* irq is not a device interrupt */ | ||
| 144 | 148 | ||
| 145 | if (local_nasid & 1) | 149 | if (local_nasid & 1) |
| 146 | local_widget = TIO_SWIN_WIDGETNUM(bridge); | 150 | local_widget = TIO_SWIN_WIDGETNUM(bridge); |
| 147 | else | 151 | else |
| 148 | local_widget = SWIN_WIDGETNUM(bridge); | 152 | local_widget = SWIN_WIDGETNUM(bridge); |
| 149 | 153 | ||
| 150 | /* Free the old PROM sn_irq_info structure */ | 154 | /* Free the old PROM new_irq_info structure */ |
| 151 | sn_intr_free(local_nasid, local_widget, sn_irq_info); | 155 | sn_intr_free(local_nasid, local_widget, new_irq_info); |
| 156 | /* Update kernels new_irq_info with new target info */ | ||
| 157 | unregister_intr_pda(new_irq_info); | ||
| 152 | 158 | ||
| 153 | /* allocate a new PROM sn_irq_info struct */ | 159 | /* allocate a new PROM new_irq_info struct */ |
| 154 | status = sn_intr_alloc(local_nasid, local_widget, | 160 | status = sn_intr_alloc(local_nasid, local_widget, |
| 155 | __pa(tmp_sn_irq_info), irq, t_nasid, | 161 | __pa(new_irq_info), irq, |
| 156 | t_slice); | 162 | cpuid_to_nasid(cpuid), |
| 157 | 163 | cpuid_to_slice(cpuid)); | |
| 158 | if (status == 0) { | 164 | |
| 159 | /* Update kernels sn_irq_info with new target info */ | 165 | /* SAL call failed */ |
| 160 | unregister_intr_pda(sn_irq_info); | 166 | if (status) { |
| 161 | sn_irq_info->irq_cpuid = cpuid; | 167 | kfree(new_irq_info); |
| 162 | sn_irq_info->irq_nasid = t_nasid; | 168 | break; |
| 163 | sn_irq_info->irq_slice = t_slice; | 169 | } |
| 164 | sn_irq_info->irq_xtalkaddr = | 170 | |
| 165 | tmp_sn_irq_info->irq_xtalkaddr; | 171 | new_irq_info->irq_cpuid = cpuid; |
| 166 | sn_irq_info->irq_cookie = tmp_sn_irq_info->irq_cookie; | 172 | register_intr_pda(new_irq_info); |
| 167 | register_intr_pda(sn_irq_info); | 173 | |
| 168 | 174 | if (IS_PCI_BRIDGE_ASIC(new_irq_info->irq_bridge_type)) | |
| 169 | if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type)) { | 175 | pcibr_change_devices_irq(new_irq_info); |
| 170 | pcibr_change_devices_irq(sn_irq_info); | ||
| 171 | } | ||
| 172 | 176 | ||
| 173 | sn_irq_info = sn_irq_info->irq_next; | 177 | spin_lock(&sn_irq_info_lock); |
| 178 | list_replace_rcu(&sn_irq_info->list, &new_irq_info->list); | ||
| 179 | spin_unlock(&sn_irq_info_lock); | ||
| 180 | call_rcu(&sn_irq_info->rcu, sn_irq_info_free); | ||
| 174 | 181 | ||
| 175 | #ifdef CONFIG_SMP | 182 | #ifdef CONFIG_SMP |
| 176 | set_irq_affinity_info((irq & 0xff), cpuphys, 0); | 183 | set_irq_affinity_info((irq & 0xff), cpuphys, 0); |
| 177 | #endif | 184 | #endif |
| 178 | } else { | ||
| 179 | break; /* snp_affinity failed the intr_alloc */ | ||
| 180 | } | ||
| 181 | } | 185 | } |
| 182 | kfree(tmp_sn_irq_info); | ||
| 183 | } | 186 | } |
| 184 | 187 | ||
| 185 | struct hw_interrupt_type irq_type_sn = { | 188 | struct hw_interrupt_type irq_type_sn = { |
| 186 | "SN hub", | 189 | .typename = "SN hub", |
| 187 | sn_startup_irq, | 190 | .startup = sn_startup_irq, |
| 188 | sn_shutdown_irq, | 191 | .shutdown = sn_shutdown_irq, |
| 189 | sn_enable_irq, | 192 | .enable = sn_enable_irq, |
| 190 | sn_disable_irq, | 193 | .disable = sn_disable_irq, |
| 191 | sn_ack_irq, | 194 | .ack = sn_ack_irq, |
| 192 | sn_end_irq, | 195 | .end = sn_end_irq, |
| 193 | sn_set_affinity_irq | 196 | .set_affinity = sn_set_affinity_irq |
| 194 | }; | 197 | }; |
| 195 | 198 | ||
| 196 | unsigned int sn_local_vector_to_irq(u8 vector) | 199 | unsigned int sn_local_vector_to_irq(u8 vector) |
| @@ -231,19 +234,18 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info) | |||
| 231 | struct sn_irq_info *tmp_irq_info; | 234 | struct sn_irq_info *tmp_irq_info; |
| 232 | int i, foundmatch; | 235 | int i, foundmatch; |
| 233 | 236 | ||
| 237 | rcu_read_lock(); | ||
| 234 | if (pdacpu(cpu)->sn_last_irq == irq) { | 238 | if (pdacpu(cpu)->sn_last_irq == irq) { |
| 235 | foundmatch = 0; | 239 | foundmatch = 0; |
| 236 | for (i = pdacpu(cpu)->sn_last_irq - 1; i; i--) { | 240 | for (i = pdacpu(cpu)->sn_last_irq - 1; |
| 237 | tmp_irq_info = sn_irq[i]; | 241 | i && !foundmatch; i--) { |
| 238 | while (tmp_irq_info) { | 242 | list_for_each_entry_rcu(tmp_irq_info, |
| 243 | sn_irq_lh[i], | ||
| 244 | list) { | ||
| 239 | if (tmp_irq_info->irq_cpuid == cpu) { | 245 | if (tmp_irq_info->irq_cpuid == cpu) { |
| 240 | foundmatch++; | 246 | foundmatch = 1; |
| 241 | break; | 247 | break; |
| 242 | } | 248 | } |
| 243 | tmp_irq_info = tmp_irq_info->irq_next; | ||
| 244 | } | ||
| 245 | if (foundmatch) { | ||
| 246 | break; | ||
| 247 | } | 249 | } |
| 248 | } | 250 | } |
| 249 | pdacpu(cpu)->sn_last_irq = i; | 251 | pdacpu(cpu)->sn_last_irq = i; |
| @@ -251,60 +253,27 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info) | |||
| 251 | 253 | ||
| 252 | if (pdacpu(cpu)->sn_first_irq == irq) { | 254 | if (pdacpu(cpu)->sn_first_irq == irq) { |
| 253 | foundmatch = 0; | 255 | foundmatch = 0; |
| 254 | for (i = pdacpu(cpu)->sn_first_irq + 1; i < NR_IRQS; i++) { | 256 | for (i = pdacpu(cpu)->sn_first_irq + 1; |
| 255 | tmp_irq_info = sn_irq[i]; | 257 | i < NR_IRQS && !foundmatch; i++) { |
| 256 | while (tmp_irq_info) { | 258 | list_for_each_entry_rcu(tmp_irq_info, |
| 259 | sn_irq_lh[i], | ||
| 260 | list) { | ||
| 257 | if (tmp_irq_info->irq_cpuid == cpu) { | 261 | if (tmp_irq_info->irq_cpuid == cpu) { |
| 258 | foundmatch++; | 262 | foundmatch = 1; |
| 259 | break; | 263 | break; |
| 260 | } | 264 | } |
| 261 | tmp_irq_info = tmp_irq_info->irq_next; | ||
| 262 | } | ||
| 263 | if (foundmatch) { | ||
| 264 | break; | ||
| 265 | } | 265 | } |
| 266 | } | 266 | } |
| 267 | pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i); | 267 | pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i); |
| 268 | } | 268 | } |
| 269 | rcu_read_unlock(); | ||
| 269 | } | 270 | } |
| 270 | 271 | ||
| 271 | struct sn_irq_info *sn_irq_alloc(nasid_t local_nasid, int local_widget, int irq, | 272 | static void sn_irq_info_free(struct rcu_head *head) |
| 272 | nasid_t nasid, int slice) | ||
| 273 | { | 273 | { |
| 274 | struct sn_irq_info *sn_irq_info; | 274 | struct sn_irq_info *sn_irq_info; |
| 275 | int status; | ||
| 276 | |||
| 277 | sn_irq_info = kmalloc(sizeof(*sn_irq_info), GFP_KERNEL); | ||
| 278 | if (sn_irq_info == NULL) | ||
| 279 | return NULL; | ||
| 280 | |||
| 281 | memset(sn_irq_info, 0x0, sizeof(*sn_irq_info)); | ||
| 282 | |||
| 283 | status = | ||
| 284 | sn_intr_alloc(local_nasid, local_widget, __pa(sn_irq_info), irq, | ||
| 285 | nasid, slice); | ||
| 286 | |||
| 287 | if (status) { | ||
| 288 | kfree(sn_irq_info); | ||
| 289 | return NULL; | ||
| 290 | } else { | ||
| 291 | return sn_irq_info; | ||
| 292 | } | ||
| 293 | } | ||
| 294 | |||
| 295 | void sn_irq_free(struct sn_irq_info *sn_irq_info) | ||
| 296 | { | ||
| 297 | uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge; | ||
| 298 | nasid_t local_nasid = NASID_GET(bridge); | ||
| 299 | int local_widget; | ||
| 300 | |||
| 301 | if (local_nasid & 1) /* tio check */ | ||
| 302 | local_widget = TIO_SWIN_WIDGETNUM(bridge); | ||
| 303 | else | ||
| 304 | local_widget = SWIN_WIDGETNUM(bridge); | ||
| 305 | |||
| 306 | sn_intr_free(local_nasid, local_widget, sn_irq_info); | ||
| 307 | 275 | ||
| 276 | sn_irq_info = container_of(head, struct sn_irq_info, rcu); | ||
| 308 | kfree(sn_irq_info); | 277 | kfree(sn_irq_info); |
| 309 | } | 278 | } |
| 310 | 279 | ||
| @@ -314,30 +283,54 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info) | |||
| 314 | int slice = sn_irq_info->irq_slice; | 283 | int slice = sn_irq_info->irq_slice; |
| 315 | int cpu = nasid_slice_to_cpuid(nasid, slice); | 284 | int cpu = nasid_slice_to_cpuid(nasid, slice); |
| 316 | 285 | ||
| 286 | pci_dev_get(pci_dev); | ||
| 317 | sn_irq_info->irq_cpuid = cpu; | 287 | sn_irq_info->irq_cpuid = cpu; |
| 318 | sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev); | 288 | sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev); |
| 319 | 289 | ||
| 320 | /* link it into the sn_irq[irq] list */ | 290 | /* link it into the sn_irq[irq] list */ |
| 321 | sn_irq_info->irq_next = sn_irq[sn_irq_info->irq_irq]; | 291 | spin_lock(&sn_irq_info_lock); |
| 322 | sn_irq[sn_irq_info->irq_irq] = sn_irq_info; | 292 | list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]); |
| 293 | spin_unlock(&sn_irq_info_lock); | ||
| 323 | 294 | ||
| 324 | (void)register_intr_pda(sn_irq_info); | 295 | (void)register_intr_pda(sn_irq_info); |
| 325 | } | 296 | } |
| 326 | 297 | ||
| 298 | void sn_irq_unfixup(struct pci_dev *pci_dev) | ||
| 299 | { | ||
| 300 | struct sn_irq_info *sn_irq_info; | ||
| 301 | |||
| 302 | /* Only cleanup IRQ stuff if this device has a host bus context */ | ||
| 303 | if (!SN_PCIDEV_BUSSOFT(pci_dev)) | ||
| 304 | return; | ||
| 305 | |||
| 306 | sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info; | ||
| 307 | if (!sn_irq_info || !sn_irq_info->irq_irq) { | ||
| 308 | kfree(sn_irq_info); | ||
| 309 | return; | ||
| 310 | } | ||
| 311 | |||
| 312 | unregister_intr_pda(sn_irq_info); | ||
| 313 | spin_lock(&sn_irq_info_lock); | ||
| 314 | list_del_rcu(&sn_irq_info->list); | ||
| 315 | spin_unlock(&sn_irq_info_lock); | ||
| 316 | call_rcu(&sn_irq_info->rcu, sn_irq_info_free); | ||
| 317 | pci_dev_put(pci_dev); | ||
| 318 | } | ||
| 319 | |||
| 327 | static void force_interrupt(int irq) | 320 | static void force_interrupt(int irq) |
| 328 | { | 321 | { |
| 329 | struct sn_irq_info *sn_irq_info; | 322 | struct sn_irq_info *sn_irq_info; |
| 330 | 323 | ||
| 331 | if (!sn_ioif_inited) | 324 | if (!sn_ioif_inited) |
| 332 | return; | 325 | return; |
| 333 | sn_irq_info = sn_irq[irq]; | 326 | |
| 334 | while (sn_irq_info) { | 327 | rcu_read_lock(); |
| 328 | list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list) { | ||
| 335 | if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) && | 329 | if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) && |
| 336 | (sn_irq_info->irq_bridge != NULL)) { | 330 | (sn_irq_info->irq_bridge != NULL)) |
| 337 | pcibr_force_interrupt(sn_irq_info); | 331 | pcibr_force_interrupt(sn_irq_info); |
| 338 | } | ||
| 339 | sn_irq_info = sn_irq_info->irq_next; | ||
| 340 | } | 332 | } |
| 333 | rcu_read_unlock(); | ||
| 341 | } | 334 | } |
| 342 | 335 | ||
| 343 | /* | 336 | /* |
| @@ -402,19 +395,41 @@ static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info) | |||
| 402 | 395 | ||
| 403 | void sn_lb_int_war_check(void) | 396 | void sn_lb_int_war_check(void) |
| 404 | { | 397 | { |
| 398 | struct sn_irq_info *sn_irq_info; | ||
| 405 | int i; | 399 | int i; |
| 406 | 400 | ||
| 407 | if (!sn_ioif_inited || pda->sn_first_irq == 0) | 401 | if (!sn_ioif_inited || pda->sn_first_irq == 0) |
| 408 | return; | 402 | return; |
| 403 | |||
| 404 | rcu_read_lock(); | ||
| 409 | for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) { | 405 | for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) { |
| 410 | struct sn_irq_info *sn_irq_info = sn_irq[i]; | 406 | list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) { |
| 411 | while (sn_irq_info) { | 407 | /* |
| 412 | /* Only call for PCI bridges that are fully initialized. */ | 408 | * Only call for PCI bridges that are fully |
| 409 | * initialized. | ||
| 410 | */ | ||
| 413 | if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) && | 411 | if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) && |
| 414 | (sn_irq_info->irq_bridge != NULL)) { | 412 | (sn_irq_info->irq_bridge != NULL)) |
| 415 | sn_check_intr(i, sn_irq_info); | 413 | sn_check_intr(i, sn_irq_info); |
| 416 | } | ||
| 417 | sn_irq_info = sn_irq_info->irq_next; | ||
| 418 | } | 414 | } |
| 419 | } | 415 | } |
| 416 | rcu_read_unlock(); | ||
| 417 | } | ||
| 418 | |||
| 419 | void sn_irq_lh_init(void) | ||
| 420 | { | ||
| 421 | int i; | ||
| 422 | |||
| 423 | sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL); | ||
| 424 | if (!sn_irq_lh) | ||
| 425 | panic("SN PCI INIT: Failed to allocate memory for PCI init\n"); | ||
| 426 | |||
| 427 | for (i = 0; i < NR_IRQS; i++) { | ||
| 428 | sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL); | ||
| 429 | if (!sn_irq_lh[i]) | ||
| 430 | panic("SN PCI INIT: Failed IRQ memory allocation\n"); | ||
| 431 | |||
| 432 | INIT_LIST_HEAD(sn_irq_lh[i]); | ||
| 433 | } | ||
| 434 | |||
| 420 | } | 435 | } |
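The irq.c changes above convert the per-IRQ interrupt data from a hand-rolled chain walked via irq_next into RCU-protected lists: sn_irq_lh[] holds one list head per IRQ, writers serialize on sn_irq_info_lock and use list_add_rcu()/list_replace_rcu()/list_del_rcu(), and old entries are only freed from the sn_irq_info_free() RCU callback once pre-existing readers have drained. A minimal write-side sketch of that pattern, with hypothetical names standing in for the SN-specific structures, could look like:

	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct item {
		int value;
		struct list_head list;	/* linked into an RCU-protected list */
		struct rcu_head rcu;	/* handle for the deferred free */
	};

	static LIST_HEAD(item_list);
	static spinlock_t item_lock = SPIN_LOCK_UNLOCKED;	/* writers only */

	static void item_free(struct rcu_head *head)
	{
		/* runs only after every reader that could still see the entry */
		kfree(container_of(head, struct item, rcu));
	}

	static void item_add(struct item *new)
	{
		spin_lock(&item_lock);
		list_add_rcu(&new->list, &item_list);	/* as sn_irq_fixup() does */
		spin_unlock(&item_lock);
	}

	static void item_replace(struct item *old, struct item *new)
	{
		spin_lock(&item_lock);
		list_replace_rcu(&old->list, &new->list);	/* as sn_set_affinity_irq() does */
		spin_unlock(&item_lock);
		call_rcu(&old->rcu, item_free);	/* defer kfree of the old entry */
	}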
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index 22e10d282c7f..7c7fe441d623 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c | |||
| @@ -270,7 +270,7 @@ void __init sn_setup(char **cmdline_p) | |||
| 270 | { | 270 | { |
| 271 | long status, ticks_per_sec, drift; | 271 | long status, ticks_per_sec, drift; |
| 272 | int pxm; | 272 | int pxm; |
| 273 | int major = sn_sal_rev_major(), minor = sn_sal_rev_minor(); | 273 | u32 version = sn_sal_rev(); |
| 274 | extern void sn_cpu_init(void); | 274 | extern void sn_cpu_init(void); |
| 275 | 275 | ||
| 276 | ia64_sn_plat_set_error_handling_features(); | 276 | ia64_sn_plat_set_error_handling_features(); |
| @@ -308,22 +308,21 @@ void __init sn_setup(char **cmdline_p) | |||
| 308 | * support here so we don't have to listen to failed keyboard probe | 308 | * support here so we don't have to listen to failed keyboard probe |
| 309 | * messages. | 309 | * messages. |
| 310 | */ | 310 | */ |
| 311 | if ((major < 2 || (major == 2 && minor <= 9)) && | 311 | if (version <= 0x0209 && acpi_kbd_controller_present) { |
| 312 | acpi_kbd_controller_present) { | ||
| 313 | printk(KERN_INFO "Disabling legacy keyboard support as prom " | 312 | printk(KERN_INFO "Disabling legacy keyboard support as prom " |
| 314 | "is too old and doesn't provide FADT\n"); | 313 | "is too old and doesn't provide FADT\n"); |
| 315 | acpi_kbd_controller_present = 0; | 314 | acpi_kbd_controller_present = 0; |
| 316 | } | 315 | } |
| 317 | 316 | ||
| 318 | printk("SGI SAL version %x.%02x\n", major, minor); | 317 | printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF); |
| 319 | 318 | ||
| 320 | /* | 319 | /* |
| 321 | * Confirm the SAL we're running on is recent enough... | 320 | * Confirm the SAL we're running on is recent enough... |
| 322 | */ | 321 | */ |
| 323 | if ((major < SN_SAL_MIN_MAJOR) || (major == SN_SAL_MIN_MAJOR && | 322 | if (version < SN_SAL_MIN_VERSION) { |
| 324 | minor < SN_SAL_MIN_MINOR)) { | ||
| 325 | printk(KERN_ERR "This kernel needs SGI SAL version >= " | 323 | printk(KERN_ERR "This kernel needs SGI SAL version >= " |
| 326 | "%x.%02x\n", SN_SAL_MIN_MAJOR, SN_SAL_MIN_MINOR); | 324 | "%x.%02x\n", SN_SAL_MIN_VERSION >> 8, |
| 325 | SN_SAL_MIN_VERSION & 0x00FF); | ||
| 327 | panic("PROM version too old\n"); | 326 | panic("PROM version too old\n"); |
| 328 | } | 327 | } |
| 329 | 328 | ||
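The setup.c hunk relies on sn_sal_rev() now returning the PROM revision as a single packed u32 rather than separate major/minor calls, so version gates reduce to one integer comparison against constants such as SN_SAL_MIN_VERSION. A small sketch of the packing this implies (assumption: major in bits 15..8, minor in bits 7..0, matching the "%x.%02x" printk above; the helper name is hypothetical):

	static inline u32 pack_sal_rev(u8 major, u8 minor)
	{
		/* e.g. pack_sal_rev(2, 0x09) == 0x0209 ("2.09"),
		 *      pack_sal_rev(4, 0x06) == 0x0406 ("4.06") */
		return ((u32)major << 8) | minor;
	}

Unpacking is version >> 8 and version & 0x00FF, exactly as the printk and the SN_SAL_MIN_VERSION check use it.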
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c index 8716f4d5314b..c1cbcd1a1398 100644 --- a/arch/ia64/sn/kernel/tiocx.c +++ b/arch/ia64/sn/kernel/tiocx.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/proc_fs.h> | 14 | #include <linux/proc_fs.h> |
| 15 | #include <linux/device.h> | 15 | #include <linux/device.h> |
| 16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
| 17 | #include <asm/system.h> | ||
| 17 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
| 18 | #include <asm/sn/sn_sal.h> | 19 | #include <asm/sn/sn_sal.h> |
| 19 | #include <asm/sn/addrs.h> | 20 | #include <asm/sn/addrs.h> |
| @@ -481,6 +482,9 @@ static int __init tiocx_init(void) | |||
| 481 | cnodeid_t cnodeid; | 482 | cnodeid_t cnodeid; |
| 482 | int found_tiocx_device = 0; | 483 | int found_tiocx_device = 0; |
| 483 | 484 | ||
| 485 | if (!ia64_platform_is("sn2")) | ||
| 486 | return -ENODEV; | ||
| 487 | |||
| 484 | bus_register(&tiocx_bus_type); | 488 | bus_register(&tiocx_bus_type); |
| 485 | 489 | ||
| 486 | for (cnodeid = 0; cnodeid < MAX_COMPACT_NODES; cnodeid++) { | 490 | for (cnodeid = 0; cnodeid < MAX_COMPACT_NODES; cnodeid++) { |
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 5da9bdbde7cb..a2f7a88aefbb 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c | |||
| @@ -11,9 +11,10 @@ | |||
| 11 | 11 | ||
| 12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
| 13 | #include <asm/dma.h> | 13 | #include <asm/dma.h> |
| 14 | #include <asm/sn/sn_sal.h> | 14 | #include <asm/sn/pcibr_provider.h> |
| 15 | #include <asm/sn/pcibus_provider_defs.h> | 15 | #include <asm/sn/pcibus_provider_defs.h> |
| 16 | #include <asm/sn/pcidev.h> | 16 | #include <asm/sn/pcidev.h> |
| 17 | #include <asm/sn/sn_sal.h> | ||
| 17 | 18 | ||
| 18 | #define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) | 19 | #define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) |
| 19 | #define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG)) | 20 | #define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG)) |
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c index 0e47bce85f2d..d1647b863e61 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c | |||
| @@ -8,9 +8,9 @@ | |||
| 8 | 8 | ||
| 9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
| 10 | #include <asm/sn/sn_sal.h> | 10 | #include <asm/sn/sn_sal.h> |
| 11 | #include <asm/sn/pcibr_provider.h> | ||
| 11 | #include <asm/sn/pcibus_provider_defs.h> | 12 | #include <asm/sn/pcibus_provider_defs.h> |
| 12 | #include <asm/sn/pcidev.h> | 13 | #include <asm/sn/pcidev.h> |
| 13 | #include "pci/pcibr_provider.h" | ||
| 14 | 14 | ||
| 15 | int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */ | 15 | int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */ |
| 16 | 16 | ||
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c index 64af2b2c1787..b058dc2a0b9d 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c | |||
| @@ -8,18 +8,17 @@ | |||
| 8 | 8 | ||
| 9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
| 10 | #include <linux/pci.h> | 10 | #include <linux/pci.h> |
| 11 | #include <asm/sn/sn_sal.h> | 11 | #include <asm/sn/addrs.h> |
| 12 | #include <asm/sn/geo.h> | 12 | #include <asm/sn/geo.h> |
| 13 | #include "xtalk/xwidgetdev.h" | 13 | #include <asm/sn/pcibr_provider.h> |
| 14 | #include "xtalk/hubdev.h" | ||
| 15 | #include <asm/sn/pcibus_provider_defs.h> | 14 | #include <asm/sn/pcibus_provider_defs.h> |
| 16 | #include <asm/sn/pcidev.h> | 15 | #include <asm/sn/pcidev.h> |
| 17 | #include "pci/tiocp.h" | 16 | #include <asm/sn/pic.h> |
| 18 | #include "pci/pic.h" | 17 | #include <asm/sn/sn_sal.h> |
| 19 | #include "pci/pcibr_provider.h" | 18 | #include <asm/sn/tiocp.h> |
| 20 | #include "pci/tiocp.h" | ||
| 21 | #include "tio.h" | 19 | #include "tio.h" |
| 22 | #include <asm/sn/addrs.h> | 20 | #include "xtalk/xwidgetdev.h" |
| 21 | #include "xtalk/hubdev.h" | ||
| 23 | 22 | ||
| 24 | extern int sn_ioif_inited; | 23 | extern int sn_ioif_inited; |
| 25 | 24 | ||
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c index 3893999d23d8..9813da56d311 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c | |||
| @@ -6,18 +6,51 @@ | |||
| 6 | * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved. | 6 | * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved. |
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #include <linux/types.h> | ||
| 10 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
| 10 | #include <linux/types.h> | ||
| 11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
| 12 | #include <asm/sn/sn_sal.h> | 12 | #include <asm/sn/addrs.h> |
| 13 | #include "xtalk/xwidgetdev.h" | ||
| 14 | #include <asm/sn/geo.h> | 13 | #include <asm/sn/geo.h> |
| 15 | #include "xtalk/hubdev.h" | 14 | #include <asm/sn/pcibr_provider.h> |
| 16 | #include <asm/sn/pcibus_provider_defs.h> | 15 | #include <asm/sn/pcibus_provider_defs.h> |
| 17 | #include <asm/sn/pcidev.h> | 16 | #include <asm/sn/pcidev.h> |
| 18 | #include "pci/pcibr_provider.h" | 17 | #include <asm/sn/sn_sal.h> |
| 19 | #include <asm/sn/addrs.h> | 18 | #include "xtalk/xwidgetdev.h" |
| 19 | #include "xtalk/hubdev.h" | ||
| 20 | |||
| 21 | int | ||
| 22 | sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp) | ||
| 23 | { | ||
| 24 | struct ia64_sal_retval ret_stuff; | ||
| 25 | uint64_t busnum; | ||
| 26 | |||
| 27 | ret_stuff.status = 0; | ||
| 28 | ret_stuff.v0 = 0; | ||
| 20 | 29 | ||
| 30 | busnum = soft->pbi_buscommon.bs_persist_busnum; | ||
| 31 | SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, (u64) busnum, | ||
| 32 | (u64) device, (u64) resp, 0, 0, 0, 0); | ||
| 33 | |||
| 34 | return (int)ret_stuff.v0; | ||
| 35 | } | ||
| 36 | |||
| 37 | int | ||
| 38 | sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action, | ||
| 39 | void *resp) | ||
| 40 | { | ||
| 41 | struct ia64_sal_retval ret_stuff; | ||
| 42 | uint64_t busnum; | ||
| 43 | |||
| 44 | ret_stuff.status = 0; | ||
| 45 | ret_stuff.v0 = 0; | ||
| 46 | |||
| 47 | busnum = soft->pbi_buscommon.bs_persist_busnum; | ||
| 48 | SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_DISABLE, | ||
| 49 | (u64) busnum, (u64) device, (u64) action, | ||
| 50 | (u64) resp, 0, 0, 0); | ||
| 51 | |||
| 52 | return (int)ret_stuff.v0; | ||
| 53 | } | ||
| 21 | 54 | ||
| 22 | static int sal_pcibr_error_interrupt(struct pcibus_info *soft) | 55 | static int sal_pcibr_error_interrupt(struct pcibus_info *soft) |
| 23 | { | 56 | { |
| @@ -188,3 +221,6 @@ pcibr_init_provider(void) | |||
| 188 | 221 | ||
| 189 | return 0; | 222 | return 0; |
| 190 | } | 223 | } |
| 224 | |||
| 225 | EXPORT_SYMBOL_GPL(sal_pcibr_slot_enable); | ||
| 226 | EXPORT_SYMBOL_GPL(sal_pcibr_slot_disable); | ||
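The new sal_pcibr_slot_enable()/sal_pcibr_slot_disable() wrappers follow the usual SN SAL calling convention: fill an ia64_sal_retval through SAL_CALL_NOLOCK and hand the PROM-level result back via v0 (the sub-errors such as PCI_SLOT_ALREADY_UP or PCI_L1_ERR that the hotplug driver below interprets), ignoring the SAL status word. A hedged sketch of that generic shape, with a hypothetical function number and arguments:

	static int sal_wrapper_sketch(u64 sal_func, u64 arg0, u64 arg1, u64 arg2)
	{
		struct ia64_sal_retval ret_stuff;

		ret_stuff.status = 0;
		ret_stuff.v0 = 0;
		/* eight argument slots; unused ones are passed as zero */
		SAL_CALL_NOLOCK(ret_stuff, sal_func, arg0, arg1, arg2, 0, 0, 0, 0);
		return (int)ret_stuff.v0;	/* PROM result, not SAL status */
	}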
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_reg.c b/arch/ia64/sn/pci/pcibr/pcibr_reg.c index 865c11c3b50a..21426d02fbe6 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_reg.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_reg.c | |||
| @@ -6,13 +6,13 @@ | |||
| 6 | * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved. | 6 | * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved. |
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #include <linux/types.h> | ||
| 10 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
| 10 | #include <linux/types.h> | ||
| 11 | #include <asm/sn/pcibr_provider.h> | ||
| 11 | #include <asm/sn/pcibus_provider_defs.h> | 12 | #include <asm/sn/pcibus_provider_defs.h> |
| 12 | #include <asm/sn/pcidev.h> | 13 | #include <asm/sn/pcidev.h> |
| 13 | #include "pci/tiocp.h" | 14 | #include <asm/sn/pic.h> |
| 14 | #include "pci/pic.h" | 15 | #include <asm/sn/tiocp.h> |
| 15 | #include "pci/pcibr_provider.h" | ||
| 16 | 16 | ||
| 17 | union br_ptr { | 17 | union br_ptr { |
| 18 | struct tiocp tio; | 18 | struct tiocp tio; |
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index 05aa8c2fe9bb..51cc4e63092c 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c | |||
| @@ -589,8 +589,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft) | |||
| 589 | 589 | ||
| 590 | /* sanity check prom rev */ | 590 | /* sanity check prom rev */ |
| 591 | 591 | ||
| 592 | if (sn_sal_rev_major() < 4 || | 592 | if (sn_sal_rev() < 0x0406) { |
| 593 | (sn_sal_rev_major() == 4 && sn_sal_rev_minor() < 6)) { | ||
| 594 | printk | 593 | printk |
| 595 | (KERN_ERR "%s: SGI prom rev 4.06 or greater required " | 594 | (KERN_ERR "%s: SGI prom rev 4.06 or greater required " |
| 596 | "for tioca support\n", __FUNCTION__); | 595 | "for tioca support\n", __FUNCTION__); |
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig index 1a4d4ca2a4dc..9c4a39ee89b5 100644 --- a/drivers/pci/hotplug/Kconfig +++ b/drivers/pci/hotplug/Kconfig | |||
| @@ -187,9 +187,10 @@ config HOTPLUG_PCI_RPA_DLPAR | |||
| 187 | 187 | ||
| 188 | config HOTPLUG_PCI_SGI | 188 | config HOTPLUG_PCI_SGI |
| 189 | tristate "SGI PCI Hotplug Support" | 189 | tristate "SGI PCI Hotplug Support" |
| 190 | depends on HOTPLUG_PCI && IA64_SGI_SN2 | 190 | depends on HOTPLUG_PCI && (IA64_SGI_SN2 || IA64_GENERIC) |
| 191 | help | 191 | help |
| 192 | Say Y here if you have an SGI IA64 Altix system. | 192 | Say Y here if you want to use the SGI Altix Hotplug |
| 193 | Driver for PCI devices. | ||
| 193 | 194 | ||
| 194 | When in doubt, say N. | 195 | When in doubt, say N. |
| 195 | 196 | ||
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile index 3e632ff8c717..31a307004b94 100644 --- a/drivers/pci/hotplug/Makefile +++ b/drivers/pci/hotplug/Makefile | |||
| @@ -14,6 +14,7 @@ obj-$(CONFIG_HOTPLUG_PCI_PCIE) += pciehp.o | |||
| 14 | obj-$(CONFIG_HOTPLUG_PCI_SHPC) += shpchp.o | 14 | obj-$(CONFIG_HOTPLUG_PCI_SHPC) += shpchp.o |
| 15 | obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o | 15 | obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o |
| 16 | obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o | 16 | obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o |
| 17 | obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o | ||
| 17 | 18 | ||
| 18 | pci_hotplug-objs := pci_hotplug_core.o | 19 | pci_hotplug-objs := pci_hotplug_core.o |
| 19 | 20 | ||
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c new file mode 100644 index 000000000000..323041fd41dc --- /dev/null +++ b/drivers/pci/hotplug/sgi_hotplug.c | |||
| @@ -0,0 +1,611 @@ | |||
| 1 | /* | ||
| 2 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 3 | * License. See the file "COPYING" in the main directory of this archive | ||
| 4 | * for more details. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2005 Silicon Graphics, Inc. All rights reserved. | ||
| 7 | * | ||
| 8 | * This work was based on the 2.4/2.6 kernel development by Dick Reigner. | ||
| 9 | * Work to add BIOS PROM support was completed by Mike Habeck. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/init.h> | ||
| 13 | #include <linux/kernel.h> | ||
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/pci.h> | ||
| 16 | #include <linux/proc_fs.h> | ||
| 17 | #include <linux/types.h> | ||
| 18 | |||
| 19 | #include <asm/sn/addrs.h> | ||
| 20 | #include <asm/sn/l1.h> | ||
| 21 | #include <asm/sn/module.h> | ||
| 22 | #include <asm/sn/pcibr_provider.h> | ||
| 23 | #include <asm/sn/pcibus_provider_defs.h> | ||
| 24 | #include <asm/sn/pcidev.h> | ||
| 25 | #include <asm/sn/sn_sal.h> | ||
| 26 | #include <asm/sn/types.h> | ||
| 27 | |||
| 28 | #include "../pci.h" | ||
| 29 | #include "pci_hotplug.h" | ||
| 30 | |||
| 31 | MODULE_LICENSE("GPL"); | ||
| 32 | MODULE_AUTHOR("SGI (prarit@sgi.com, dickie@sgi.com, habeck@sgi.com)"); | ||
| 33 | MODULE_DESCRIPTION("SGI Altix Hot Plug PCI Controller Driver"); | ||
| 34 | |||
| 35 | #define PCIIO_ASIC_TYPE_TIOCA 4 | ||
| 36 | #define PCI_SLOT_ALREADY_UP 2 /* slot already up */ | ||
| 37 | #define PCI_SLOT_ALREADY_DOWN 3 /* slot already down */ | ||
| 38 | #define PCI_L1_ERR 7 /* L1 console command error */ | ||
| 39 | #define PCI_EMPTY_33MHZ 15 /* empty 33 MHz bus */ | ||
| 40 | #define PCI_L1_QSIZE 128 /* our L1 message buffer size */ | ||
| 41 | #define SN_MAX_HP_SLOTS 32 /* max number of hotplug slots */ | ||
| 42 | #define SGI_HOTPLUG_PROM_REV 0x0420 /* Min. required PROM version */ | ||
| 43 | |||
| 44 | /* internal list head */ | ||
| 45 | static struct list_head sn_hp_list; | ||
| 46 | |||
| 47 | /* hotplug_slot struct's private pointer */ | ||
| 48 | struct slot { | ||
| 49 | int device_num; | ||
| 50 | struct pci_bus *pci_bus; | ||
| 51 | /* this struct for glue internal only */ | ||
| 52 | struct hotplug_slot *hotplug_slot; | ||
| 53 | struct list_head hp_list; | ||
| 54 | }; | ||
| 55 | |||
| 56 | struct pcibr_slot_enable_resp { | ||
| 57 | int resp_sub_errno; | ||
| 58 | char resp_l1_msg[PCI_L1_QSIZE + 1]; | ||
| 59 | }; | ||
| 60 | |||
| 61 | struct pcibr_slot_disable_resp { | ||
| 62 | int resp_sub_errno; | ||
| 63 | char resp_l1_msg[PCI_L1_QSIZE + 1]; | ||
| 64 | }; | ||
| 65 | |||
| 66 | enum sn_pci_req_e { | ||
| 67 | PCI_REQ_SLOT_ELIGIBLE, | ||
| 68 | PCI_REQ_SLOT_DISABLE | ||
| 69 | }; | ||
| 70 | |||
| 71 | static int enable_slot(struct hotplug_slot *slot); | ||
| 72 | static int disable_slot(struct hotplug_slot *slot); | ||
| 73 | static int get_power_status(struct hotplug_slot *slot, u8 *value); | ||
| 74 | |||
| 75 | static struct hotplug_slot_ops sn_hotplug_slot_ops = { | ||
| 76 | .owner = THIS_MODULE, | ||
| 77 | .enable_slot = enable_slot, | ||
| 78 | .disable_slot = disable_slot, | ||
| 79 | .get_power_status = get_power_status, | ||
| 80 | }; | ||
| 81 | |||
| 82 | static DECLARE_MUTEX(sn_hotplug_sem); | ||
| 83 | |||
| 84 | static int sn_pci_slot_valid(struct pci_bus *pci_bus, int device) | ||
| 85 | { | ||
| 86 | struct pcibus_info *pcibus_info; | ||
| 87 | int bricktype; | ||
| 88 | int bus_num; | ||
| 89 | |||
| 90 | pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus); | ||
| 91 | |||
| 92 | /* Check to see if this is a valid slot on 'pci_bus' */ | ||
| 93 | if (!(pcibus_info->pbi_valid_devices & (1 << device))) | ||
| 94 | return -EPERM; | ||
| 95 | |||
| 96 | bricktype = MODULE_GET_BTYPE(pcibus_info->pbi_moduleid); | ||
| 97 | bus_num = pcibus_info->pbi_buscommon.bs_persist_busnum & 0xf; | ||
| 98 | |||
| 99 | /* Do not allow hotplug operations on base I/O cards */ | ||
| 100 | if ((bricktype == L1_BRICKTYPE_IX || bricktype == L1_BRICKTYPE_IA) && | ||
| 101 | (bus_num == 1 && device != 1)) | ||
| 102 | return -EPERM; | ||
| 103 | |||
| 104 | return 1; | ||
| 105 | } | ||
| 106 | |||
| 107 | static int sn_pci_bus_valid(struct pci_bus *pci_bus) | ||
| 108 | { | ||
| 109 | struct pcibus_info *pcibus_info; | ||
| 110 | int asic_type; | ||
| 111 | int bricktype; | ||
| 112 | |||
| 113 | pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus); | ||
| 114 | |||
| 115 | /* Don't register slots hanging off the TIOCA bus */ | ||
| 116 | asic_type = pcibus_info->pbi_buscommon.bs_asic_type; | ||
| 117 | if (asic_type == PCIIO_ASIC_TYPE_TIOCA) | ||
| 118 | return -EPERM; | ||
| 119 | |||
| 120 | /* Only register slots in I/O Bricks that support hotplug */ | ||
| 121 | bricktype = MODULE_GET_BTYPE(pcibus_info->pbi_moduleid); | ||
| 122 | switch (bricktype) { | ||
| 123 | case L1_BRICKTYPE_IX: | ||
| 124 | case L1_BRICKTYPE_PX: | ||
| 125 | case L1_BRICKTYPE_IA: | ||
| 126 | case L1_BRICKTYPE_PA: | ||
| 127 | return 1; | ||
| 128 | break; | ||
| 129 | default: | ||
| 130 | return -EPERM; | ||
| 131 | break; | ||
| 132 | } | ||
| 133 | |||
| 134 | return -EIO; | ||
| 135 | } | ||
| 136 | |||
| 137 | static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot, | ||
| 138 | struct pci_bus *pci_bus, int device) | ||
| 139 | { | ||
| 140 | struct pcibus_info *pcibus_info; | ||
| 141 | struct slot *slot; | ||
| 142 | |||
| 143 | pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus); | ||
| 144 | |||
| 145 | bss_hotplug_slot->private = kcalloc(1, sizeof(struct slot), | ||
| 146 | GFP_KERNEL); | ||
| 147 | if (!bss_hotplug_slot->private) | ||
| 148 | return -ENOMEM; | ||
| 149 | slot = (struct slot *)bss_hotplug_slot->private; | ||
| 150 | |||
| 151 | bss_hotplug_slot->name = kmalloc(33, GFP_KERNEL); | ||
| 152 | if (!bss_hotplug_slot->name) { | ||
| 153 | kfree(bss_hotplug_slot->private); | ||
| 154 | return -ENOMEM; | ||
| 155 | } | ||
| 156 | |||
| 157 | slot->device_num = device; | ||
| 158 | slot->pci_bus = pci_bus; | ||
| 159 | |||
| 160 | sprintf(bss_hotplug_slot->name, "module_%c%c%c%c%.2d_b_%d_s_%d", | ||
| 161 | '0'+RACK_GET_CLASS(MODULE_GET_RACK(pcibus_info->pbi_moduleid)), | ||
| 162 | '0'+RACK_GET_GROUP(MODULE_GET_RACK(pcibus_info->pbi_moduleid)), | ||
| 163 | '0'+RACK_GET_NUM(MODULE_GET_RACK(pcibus_info->pbi_moduleid)), | ||
| 164 | MODULE_GET_BTCHAR(pcibus_info->pbi_moduleid), | ||
| 165 | MODULE_GET_BPOS(pcibus_info->pbi_moduleid), | ||
| 166 | ((int)pcibus_info->pbi_buscommon.bs_persist_busnum) & 0xf, | ||
| 167 | device + 1); | ||
| 168 | |||
| 169 | slot->hotplug_slot = bss_hotplug_slot; | ||
| 170 | list_add(&slot->hp_list, &sn_hp_list); | ||
| 171 | |||
| 172 | return 0; | ||
| 173 | } | ||
| 174 | |||
| 175 | static struct hotplug_slot * sn_hp_destroy(void) | ||
| 176 | { | ||
| 177 | struct slot *slot; | ||
| 178 | struct list_head *list; | ||
| 179 | struct hotplug_slot *bss_hotplug_slot = NULL; | ||
| 180 | |||
| 181 | list_for_each(list, &sn_hp_list) { | ||
| 182 | slot = list_entry(list, struct slot, hp_list); | ||
| 183 | bss_hotplug_slot = slot->hotplug_slot; | ||
| 184 | list_del(&((struct slot *)bss_hotplug_slot->private)-> | ||
| 185 | hp_list); | ||
| 186 | break; | ||
| 187 | } | ||
| 188 | return bss_hotplug_slot; | ||
| 189 | } | ||
| 190 | |||
| 191 | static void sn_bus_alloc_data(struct pci_dev *dev) | ||
| 192 | { | ||
| 193 | struct list_head *node; | ||
| 194 | struct pci_bus *subordinate_bus; | ||
| 195 | struct pci_dev *child; | ||
| 196 | |||
| 197 | sn_pci_fixup_slot(dev); | ||
| 198 | |||
| 199 | /* Recursively sets up the sn_irq_info structs */ | ||
| 200 | if (dev->subordinate) { | ||
| 201 | subordinate_bus = dev->subordinate; | ||
| 202 | list_for_each(node, &subordinate_bus->devices) { | ||
| 203 | child = list_entry(node, struct pci_dev, bus_list); | ||
| 204 | sn_bus_alloc_data(child); | ||
| 205 | } | ||
| 206 | } | ||
| 207 | } | ||
| 208 | |||
| 209 | static void sn_bus_free_data(struct pci_dev *dev) | ||
| 210 | { | ||
| 211 | struct list_head *node; | ||
| 212 | struct pci_bus *subordinate_bus; | ||
| 213 | struct pci_dev *child; | ||
| 214 | |||
| 215 | /* Recursively clean up sn_irq_info structs */ | ||
| 216 | if (dev->subordinate) { | ||
| 217 | subordinate_bus = dev->subordinate; | ||
| 218 | list_for_each(node, &subordinate_bus->devices) { | ||
| 219 | child = list_entry(node, struct pci_dev, bus_list); | ||
| 220 | sn_bus_free_data(child); | ||
| 221 | } | ||
| 222 | } | ||
| 223 | sn_pci_unfixup_slot(dev); | ||
| 224 | } | ||
| 225 | |||
| 226 | static u8 sn_power_status_get(struct hotplug_slot *bss_hotplug_slot) | ||
| 227 | { | ||
| 228 | struct slot *slot = (struct slot *)bss_hotplug_slot->private; | ||
| 229 | struct pcibus_info *pcibus_info; | ||
| 230 | u8 retval; | ||
| 231 | |||
| 232 | pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus); | ||
| 233 | retval = pcibus_info->pbi_enabled_devices & (1 << slot->device_num); | ||
| 234 | |||
| 235 | return retval ? 1 : 0; | ||
| 236 | } | ||
| 237 | |||
| 238 | static void sn_slot_mark_enable(struct hotplug_slot *bss_hotplug_slot, | ||
| 239 | int device_num) | ||
| 240 | { | ||
| 241 | struct slot *slot = (struct slot *)bss_hotplug_slot->private; | ||
| 242 | struct pcibus_info *pcibus_info; | ||
| 243 | |||
| 244 | pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus); | ||
| 245 | pcibus_info->pbi_enabled_devices |= (1 << device_num); | ||
| 246 | } | ||
| 247 | |||
| 248 | static void sn_slot_mark_disable(struct hotplug_slot *bss_hotplug_slot, | ||
| 249 | int device_num) | ||
| 250 | { | ||
| 251 | struct slot *slot = (struct slot *)bss_hotplug_slot->private; | ||
| 252 | struct pcibus_info *pcibus_info; | ||
| 253 | |||
| 254 | pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus); | ||
| 255 | pcibus_info->pbi_enabled_devices &= ~(1 << device_num); | ||
| 256 | } | ||
| 257 | |||
| 258 | static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot, | ||
| 259 | int device_num) | ||
| 260 | { | ||
| 261 | struct slot *slot = (struct slot *)bss_hotplug_slot->private; | ||
| 262 | struct pcibus_info *pcibus_info; | ||
| 263 | struct pcibr_slot_enable_resp resp; | ||
| 264 | int rc; | ||
| 265 | |||
| 266 | pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus); | ||
| 267 | |||
| 268 | /* | ||
| 269 | * Power-on and initialize the slot in the SN | ||
| 270 | * PCI infrastructure. | ||
| 271 | */ | ||
| 272 | rc = sal_pcibr_slot_enable(pcibus_info, device_num, &resp); | ||
| 273 | |||
| 274 | if (rc == PCI_SLOT_ALREADY_UP) { | ||
| 275 | dev_dbg(slot->pci_bus->self, "is already active\n"); | ||
| 276 | return -EPERM; | ||
| 277 | } | ||
| 278 | |||
| 279 | if (rc == PCI_L1_ERR) { | ||
| 280 | dev_dbg(slot->pci_bus->self, | ||
| 281 | "L1 failure %d with message: %s", | ||
| 282 | resp.resp_sub_errno, resp.resp_l1_msg); | ||
| 283 | return -EPERM; | ||
| 284 | } | ||
| 285 | |||
| 286 | if (rc) { | ||
| 287 | dev_dbg(slot->pci_bus->self, | ||
| 288 | "insert failed with error %d sub-error %d\n", | ||
| 289 | rc, resp.resp_sub_errno); | ||
| 290 | return -EIO; | ||
| 291 | } | ||
| 292 | |||
| 293 | sn_slot_mark_enable(bss_hotplug_slot, device_num); | ||
| 294 | |||
| 295 | return 0; | ||
| 296 | } | ||
| 297 | |||
| 298 | static int sn_slot_disable(struct hotplug_slot *bss_hotplug_slot, | ||
| 299 | int device_num, int action) | ||
| 300 | { | ||
| 301 | struct slot *slot = (struct slot *)bss_hotplug_slot->private; | ||
| 302 | struct pcibus_info *pcibus_info; | ||
| 303 | struct pcibr_slot_disable_resp resp; | ||
| 304 | int rc; | ||
| 305 | |||
| 306 | pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus); | ||
| 307 | |||
| 308 | rc = sal_pcibr_slot_disable(pcibus_info, device_num, action, &resp); | ||
| 309 | |||
| 310 | if (action == PCI_REQ_SLOT_ELIGIBLE && rc == PCI_SLOT_ALREADY_DOWN) { | ||
| 311 | dev_dbg(slot->pci_bus->self, "Slot %s already inactive\n"); | ||
| 312 | return -ENODEV; | ||
| 313 | } | ||
| 314 | |||
| 315 | if (action == PCI_REQ_SLOT_ELIGIBLE && rc == PCI_EMPTY_33MHZ) { | ||
| 316 | dev_dbg(slot->pci_bus->self, | ||
| 317 | "Cannot remove last 33MHz card\n"); | ||
| 318 | return -EPERM; | ||
| 319 | } | ||
| 320 | |||
| 321 | if (action == PCI_REQ_SLOT_ELIGIBLE && rc == PCI_L1_ERR) { | ||
| 322 | dev_dbg(slot->pci_bus->self, | ||
| 323 | "L1 failure %d with message \n%s\n", | ||
| 324 | resp.resp_sub_errno, resp.resp_l1_msg); | ||
| 325 | return -EPERM; | ||
| 326 | } | ||
| 327 | |||
| 328 | if (action == PCI_REQ_SLOT_ELIGIBLE && rc) { | ||
| 329 | dev_dbg(slot->pci_bus->self, | ||
| 330 | "remove failed with error %d sub-error %d\n", | ||
| 331 | rc, resp.resp_sub_errno); | ||
| 332 | return -EIO; | ||
| 333 | } | ||
| 334 | |||
| 335 | if (action == PCI_REQ_SLOT_ELIGIBLE && !rc) | ||
| 336 | return 0; | ||
| 337 | |||
| 338 | if (action == PCI_REQ_SLOT_DISABLE && !rc) { | ||
| 339 | sn_slot_mark_disable(bss_hotplug_slot, device_num); | ||
| 340 | dev_dbg(slot->pci_bus->self, "remove successful\n"); | ||
| 341 | return 0; | ||
| 342 | } | ||
| 343 | |||
| 344 | if (action == PCI_REQ_SLOT_DISABLE && rc) { | ||
| 345 | dev_dbg(slot->pci_bus->self,"remove failed rc = %d\n", rc); | ||
| 346 | return rc; | ||
| 347 | } | ||
| 348 | |||
| 349 | return rc; | ||
| 350 | } | ||
| 351 | |||
| 352 | static int enable_slot(struct hotplug_slot *bss_hotplug_slot) | ||
| 353 | { | ||
| 354 | struct slot *slot = (struct slot *)bss_hotplug_slot->private; | ||
| 355 | struct pci_bus *new_bus = NULL; | ||
| 356 | struct pci_dev *dev; | ||
| 357 | int func, num_funcs; | ||
| 358 | int new_ppb = 0; | ||
| 359 | int rc; | ||
| 360 | |||
| 361 | /* Serialize the Linux PCI infrastructure */ | ||
| 362 | down(&sn_hotplug_sem); | ||
| 363 | |||
| 364 | /* | ||
| 365 | * Power-on and initialize the slot in the SN | ||
| 366 | * PCI infrastructure. | ||
| 367 | */ | ||
| 368 | rc = sn_slot_enable(bss_hotplug_slot, slot->device_num); | ||
| 369 | if (rc) { | ||
| 370 | up(&sn_hotplug_sem); | ||
| 371 | return rc; | ||
| 372 | } | ||
| 373 | |||
| 374 | num_funcs = pci_scan_slot(slot->pci_bus, PCI_DEVFN(slot->device_num+1, | ||
| 375 | PCI_FUNC(0))); | ||
| 376 | if (!num_funcs) { | ||
| 377 | dev_dbg(slot->pci_bus->self, "no device in slot\n"); | ||
| 378 | up(&sn_hotplug_sem); | ||
| 379 | return -ENODEV; | ||
| 380 | } | ||
| 381 | |||
| 382 | sn_pci_controller_fixup(pci_domain_nr(slot->pci_bus), | ||
| 383 | slot->pci_bus->number, | ||
| 384 | slot->pci_bus); | ||
| 385 | /* | ||
| 386 | * Map SN resources for all functions on the card | ||
| 387 | * to the Linux PCI interface and tell the drivers | ||
| 388 | * about them. | ||
| 389 | */ | ||
| 390 | for (func = 0; func < num_funcs; func++) { | ||
| 391 | dev = pci_get_slot(slot->pci_bus, | ||
| 392 | PCI_DEVFN(slot->device_num + 1, | ||
| 393 | PCI_FUNC(func))); | ||
| 394 | |||
| 395 | |||
| 396 | if (dev) { | ||
| 397 | if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { | ||
| 398 | unsigned char sec_bus; | ||
| 399 | pci_read_config_byte(dev, PCI_SECONDARY_BUS, | ||
| 400 | &sec_bus); | ||
| 401 | new_bus = pci_add_new_bus(dev->bus, dev, | ||
| 402 | sec_bus); | ||
| 403 | pci_scan_child_bus(new_bus); | ||
| 404 | sn_pci_controller_fixup(pci_domain_nr(new_bus), | ||
| 405 | new_bus->number, | ||
| 406 | new_bus); | ||
| 407 | new_ppb = 1; | ||
| 408 | } | ||
| 409 | sn_bus_alloc_data(dev); | ||
| 410 | pci_dev_put(dev); | ||
| 411 | } | ||
| 412 | } | ||
| 413 | |||
| 414 | /* Call the driver for the new device */ | ||
| 415 | pci_bus_add_devices(slot->pci_bus); | ||
| 416 | /* Call the drivers for the new devices subordinate to PPB */ | ||
| 417 | if (new_ppb) | ||
| 418 | pci_bus_add_devices(new_bus); | ||
| 419 | |||
| 420 | up(&sn_hotplug_sem); | ||
| 421 | |||
| 422 | if (rc == 0) | ||
| 423 | dev_dbg(slot->pci_bus->self, | ||
| 424 | "insert operation successful\n"); | ||
| 425 | else | ||
| 426 | dev_dbg(slot->pci_bus->self, | ||
| 427 | "insert operation failed rc = %d\n", rc); | ||
| 428 | |||
| 429 | return rc; | ||
| 430 | } | ||
| 431 | |||
| 432 | static int disable_slot(struct hotplug_slot *bss_hotplug_slot) | ||
| 433 | { | ||
| 434 | struct slot *slot = (struct slot *)bss_hotplug_slot->private; | ||
| 435 | struct pci_dev *dev; | ||
| 436 | int func; | ||
| 437 | int rc; | ||
| 438 | |||
| 439 | /* Acquire update access to the bus */ | ||
| 440 | down(&sn_hotplug_sem); | ||
| 441 | |||
| 442 | /* is it okay to bring this slot down? */ | ||
| 443 | rc = sn_slot_disable(bss_hotplug_slot, slot->device_num, | ||
| 444 | PCI_REQ_SLOT_ELIGIBLE); | ||
| 445 | if (rc) | ||
| 446 | goto leaving; | ||
| 447 | |||
| 448 | /* Free the SN resources assigned to the Linux device.*/ | ||
| 449 | for (func = 0; func < 8; func++) { | ||
| 450 | dev = pci_get_slot(slot->pci_bus, | ||
| 451 | PCI_DEVFN(slot->device_num+1, | ||
| 452 | PCI_FUNC(func))); | ||
| 453 | if (dev) { | ||
| 454 | /* | ||
| 455 | * Some drivers may use dma accesses during the | ||
| 456 | * driver remove function. We release the sysdata | ||
| 457 | * areas after the driver remove functions have | ||
| 458 | * been called. | ||
| 459 | */ | ||
| 460 | sn_bus_store_sysdata(dev); | ||
| 461 | sn_bus_free_data(dev); | ||
| 462 | pci_remove_bus_device(dev); | ||
| 463 | pci_dev_put(dev); | ||
| 464 | } | ||
| 465 | } | ||
| 466 | |||
| 467 | /* free the collected sysdata pointers */ | ||
| 468 | sn_bus_free_sysdata(); | ||
| 469 | |||
| 470 | /* Deactivate slot */ | ||
| 471 | rc = sn_slot_disable(bss_hotplug_slot, slot->device_num, | ||
| 472 | PCI_REQ_SLOT_DISABLE); | ||
| 473 | leaving: | ||
| 474 | /* Release the bus lock */ | ||
| 475 | up(&sn_hotplug_sem); | ||
| 476 | |||
| 477 | return rc; | ||
| 478 | } | ||
| 479 | |||
| 480 | static int get_power_status(struct hotplug_slot *bss_hotplug_slot, u8 *value) | ||
| 481 | { | ||
| 482 | down(&sn_hotplug_sem); | ||
| 483 | *value = sn_power_status_get(bss_hotplug_slot); | ||
| 484 | up(&sn_hotplug_sem); | ||
| 485 | return 0; | ||
| 486 | } | ||
| 487 | |||
| 488 | static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot) | ||
| 489 | { | ||
| 490 | kfree(bss_hotplug_slot->info); | ||
| 491 | kfree(bss_hotplug_slot->name); | ||
| 492 | kfree(bss_hotplug_slot->private); | ||
| 493 | kfree(bss_hotplug_slot); | ||
| 494 | } | ||
| 495 | |||
| 496 | static int sn_hotplug_slot_register(struct pci_bus *pci_bus) | ||
| 497 | { | ||
| 498 | int device; | ||
| 499 | struct hotplug_slot *bss_hotplug_slot; | ||
| 500 | int rc = 0; | ||
| 501 | |||
| 502 | /* | ||
| 503 | * Currently only four devices are supported, | ||
| 504 | * in the future there may be more -- up to 32. | ||
| 505 | */ | ||
| 506 | |||
| 507 | for (device = 0; device < SN_MAX_HP_SLOTS ; device++) { | ||
| 508 | if (sn_pci_slot_valid(pci_bus, device) != 1) | ||
| 509 | continue; | ||
| 510 | |||
| 511 | bss_hotplug_slot = kcalloc(1,sizeof(struct hotplug_slot), | ||
| 512 | GFP_KERNEL); | ||
| 513 | if (!bss_hotplug_slot) { | ||
| 514 | rc = -ENOMEM; | ||
| 515 | goto alloc_err; | ||
| 516 | } | ||
| 517 | |||
| 518 | bss_hotplug_slot->info = | ||
| 519 | kcalloc(1,sizeof(struct hotplug_slot_info), | ||
| 520 | GFP_KERNEL); | ||
| 521 | if (!bss_hotplug_slot->info) { | ||
| 522 | rc = -ENOMEM; | ||
| 523 | goto alloc_err; | ||
| 524 | } | ||
| 525 | |||
| 526 | if (sn_hp_slot_private_alloc(bss_hotplug_slot, | ||
| 527 | pci_bus, device)) { | ||
| 528 | rc = -ENOMEM; | ||
| 529 | goto alloc_err; | ||
| 530 | } | ||
| 531 | |||
| 532 | bss_hotplug_slot->ops = &sn_hotplug_slot_ops; | ||
| 533 | bss_hotplug_slot->release = &sn_release_slot; | ||
| 534 | |||
| 535 | rc = pci_hp_register(bss_hotplug_slot); | ||
| 536 | if (rc) | ||
| 537 | goto register_err; | ||
| 538 | } | ||
| 539 | dev_dbg(pci_bus->self, "Registered bus with hotplug\n"); | ||
| 540 | return rc; | ||
| 541 | |||
| 542 | register_err: | ||
| 543 | dev_dbg(pci_bus->self, "bus failed to register with err = %d\n", | ||
| 544 | rc); | ||
| 545 | |||
| 546 | alloc_err: | ||
| 547 | if (rc == -ENOMEM) | ||
| 548 | dev_dbg(pci_bus->self, "Memory allocation error\n"); | ||
| 549 | |||
| 550 | /* destroy THIS element */ | ||
| 551 | if (bss_hotplug_slot) | ||
| 552 | sn_release_slot(bss_hotplug_slot); | ||
| 553 | |||
| 554 | /* destroy anything else on the list */ | ||
| 555 | while ((bss_hotplug_slot = sn_hp_destroy())) | ||
| 556 | pci_hp_deregister(bss_hotplug_slot); | ||
| 557 | |||
| 558 | return rc; | ||
| 559 | } | ||
| 560 | |||
| 561 | static int sn_pci_hotplug_init(void) | ||
| 562 | { | ||
| 563 | struct pci_bus *pci_bus = NULL; | ||
| 564 | int rc; | ||
| 565 | int registered = 0; | ||
| 566 | |||
| 567 | INIT_LIST_HEAD(&sn_hp_list); | ||
| 568 | |||
| 569 | if (sn_sal_rev() < SGI_HOTPLUG_PROM_REV) { | ||
| 570 | printk(KERN_ERR "%s: PROM version must be greater than 4.05\n", | ||
| 571 | __FUNCTION__); | ||
| 572 | return -EPERM; | ||
| 573 | } | ||
| 574 | |||
| 575 | while ((pci_bus = pci_find_next_bus(pci_bus))) { | ||
| 576 | if (!pci_bus->sysdata) | ||
| 577 | continue; | ||
| 578 | |||
| 579 | rc = sn_pci_bus_valid(pci_bus); | ||
| 580 | if (rc != 1) { | ||
| 581 | dev_dbg(pci_bus->self, "not a valid hotplug bus\n"); | ||
| 582 | continue; | ||
| 583 | } | ||
| 584 | dev_dbg(pci_bus->self, "valid hotplug bus\n"); | ||
| 585 | |||
| 586 | rc = sn_hotplug_slot_register(pci_bus); | ||
| 587 | if (!rc) | ||
| 588 | registered = 1; | ||
| 589 | else { | ||
| 590 | registered = 0; | ||
| 591 | break; | ||
| 592 | } | ||
| 593 | } | ||
| 594 | |||
| 595 | return registered == 1 ? 0 : -ENODEV; | ||
| 596 | } | ||
| 597 | |||
| 598 | static void sn_pci_hotplug_exit(void) | ||
| 599 | { | ||
| 600 | struct hotplug_slot *bss_hotplug_slot; | ||
| 601 | |||
| 602 | while ((bss_hotplug_slot = sn_hp_destroy())) { | ||
| 603 | pci_hp_deregister(bss_hotplug_slot); | ||
| 604 | } | ||
| 605 | |||
| 606 | if (!list_empty(&sn_hp_list)) | ||
| 607 | printk(KERN_ERR "%s: internal list is not empty\n", __FILE__); | ||
| 608 | } | ||
| 609 | |||
| 610 | module_init(sn_pci_hotplug_init); | ||
| 611 | module_exit(sn_pci_hotplug_exit); | ||
diff --git a/drivers/pci/search.c b/drivers/pci/search.c index a90a533eba0f..05fa91a31c62 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c | |||
| @@ -379,6 +379,7 @@ exit: | |||
| 379 | EXPORT_SYMBOL(pci_dev_present); | 379 | EXPORT_SYMBOL(pci_dev_present); |
| 380 | 380 | ||
| 381 | EXPORT_SYMBOL(pci_find_bus); | 381 | EXPORT_SYMBOL(pci_find_bus); |
| 382 | EXPORT_SYMBOL(pci_find_next_bus); | ||
| 382 | EXPORT_SYMBOL(pci_find_device); | 383 | EXPORT_SYMBOL(pci_find_device); |
| 383 | EXPORT_SYMBOL(pci_find_device_reverse); | 384 | EXPORT_SYMBOL(pci_find_device_reverse); |
| 384 | EXPORT_SYMBOL(pci_find_slot); | 385 | EXPORT_SYMBOL(pci_find_slot); |
diff --git a/include/asm-ia64/sn/arch.h b/include/asm-ia64/sn/arch.h index 635fdce854a8..ab827d298569 100644 --- a/include/asm-ia64/sn/arch.h +++ b/include/asm-ia64/sn/arch.h | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #ifndef _ASM_IA64_SN_ARCH_H | 11 | #ifndef _ASM_IA64_SN_ARCH_H |
| 12 | #define _ASM_IA64_SN_ARCH_H | 12 | #define _ASM_IA64_SN_ARCH_H |
| 13 | 13 | ||
| 14 | #include <linux/numa.h> | ||
| 14 | #include <asm/types.h> | 15 | #include <asm/types.h> |
| 15 | #include <asm/percpu.h> | 16 | #include <asm/percpu.h> |
| 16 | #include <asm/sn/types.h> | 17 | #include <asm/sn/types.h> |
diff --git a/include/asm-ia64/sn/intr.h b/include/asm-ia64/sn/intr.h index e51471fb0867..e190dd4213d5 100644 --- a/include/asm-ia64/sn/intr.h +++ b/include/asm-ia64/sn/intr.h | |||
| @@ -9,6 +9,8 @@ | |||
| 9 | #ifndef _ASM_IA64_SN_INTR_H | 9 | #ifndef _ASM_IA64_SN_INTR_H |
| 10 | #define _ASM_IA64_SN_INTR_H | 10 | #define _ASM_IA64_SN_INTR_H |
| 11 | 11 | ||
| 12 | #include <linux/rcupdate.h> | ||
| 13 | |||
| 12 | #define SGI_UART_VECTOR (0xe9) | 14 | #define SGI_UART_VECTOR (0xe9) |
| 13 | #define SGI_PCIBR_ERROR (0x33) | 15 | #define SGI_PCIBR_ERROR (0x33) |
| 14 | 16 | ||
| @@ -33,7 +35,7 @@ | |||
| 33 | 35 | ||
| 34 | // The SN PROM irq struct | 36 | // The SN PROM irq struct |
| 35 | struct sn_irq_info { | 37 | struct sn_irq_info { |
| 36 | struct sn_irq_info *irq_next; /* sharing irq list */ | 38 | struct sn_irq_info *irq_next; /* deprecated DO NOT USE */ |
| 37 | short irq_nasid; /* Nasid IRQ is assigned to */ | 39 | short irq_nasid; /* Nasid IRQ is assigned to */ |
| 38 | int irq_slice; /* slice IRQ is assigned to */ | 40 | int irq_slice; /* slice IRQ is assigned to */ |
| 39 | int irq_cpuid; /* kernel logical cpuid */ | 41 | int irq_cpuid; /* kernel logical cpuid */ |
| @@ -47,6 +49,8 @@ struct sn_irq_info { | |||
| 47 | int irq_cookie; /* unique cookie */ | 49 | int irq_cookie; /* unique cookie */ |
| 48 | int irq_flags; /* flags */ | 50 | int irq_flags; /* flags */ |
| 49 | int irq_share_cnt; /* num devices sharing IRQ */ | 51 | int irq_share_cnt; /* num devices sharing IRQ */ |
| 52 | struct list_head list; /* list of sn_irq_info structs */ | ||
| 53 | struct rcu_head rcu; /* rcu callback list */ | ||
| 50 | }; | 54 | }; |
| 51 | 55 | ||
| 52 | extern void sn_send_IPI_phys(int, long, int, int); | 56 | extern void sn_send_IPI_phys(int, long, int, int); |
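With irq_next marked deprecated, code that walks shared-IRQ entries is expected to go through the new list/rcu fields instead: take rcu_read_lock(), iterate with list_for_each_entry_rcu(), and drop the lock before doing anything that might sleep, as force_interrupt() and sn_lb_int_war_check() above now do. A read-side sketch of that pattern (sn_irq_lh is static to irq.c, so this applies only within that file):

	struct sn_irq_info *info;

	rcu_read_lock();
	list_for_each_entry_rcu(info, sn_irq_lh[irq], list) {
		/* inspect info here; do not cache the pointer past the unlock */
	}
	rcu_read_unlock();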
diff --git a/arch/ia64/sn/include/pci/pcibr_provider.h b/include/asm-ia64/sn/pcibr_provider.h index 1cd291d8badd..f9b8d2164007 100644 --- a/arch/ia64/sn/include/pci/pcibr_provider.h +++ b/include/asm-ia64/sn/pcibr_provider.h | |||
| @@ -8,6 +8,9 @@ | |||
| 8 | #ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H | 8 | #ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H |
| 9 | #define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H | 9 | #define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H |
| 10 | 10 | ||
| 11 | #include <asm/sn/intr.h> | ||
| 12 | #include <asm/sn/pcibus_provider_defs.h> | ||
| 13 | |||
| 11 | /* Workarounds */ | 14 | /* Workarounds */ |
| 12 | #define PV907516 (1 << 1) /* TIOCP: Don't write the write buffer flush reg */ | 15 | #define PV907516 (1 << 1) /* TIOCP: Don't write the write buffer flush reg */ |
| 13 | 16 | ||
| @@ -20,7 +23,7 @@ | |||
| 20 | #define IS_PIC_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_PIC) | 23 | #define IS_PIC_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_PIC) |
| 21 | 24 | ||
| 22 | 25 | ||
| 23 | /* | 26 | /* |
| 24 | * The different PCI Bridge types supported on the SGI Altix platforms | 27 | * The different PCI Bridge types supported on the SGI Altix platforms |
| 25 | */ | 28 | */ |
| 26 | #define PCIBR_BRIDGETYPE_UNKNOWN -1 | 29 | #define PCIBR_BRIDGETYPE_UNKNOWN -1 |
| @@ -100,15 +103,16 @@ struct pcibus_info { | |||
| 100 | 103 | ||
| 101 | struct ate_resource pbi_int_ate_resource; | 104 | struct ate_resource pbi_int_ate_resource; |
| 102 | uint64_t pbi_int_ate_size; | 105 | uint64_t pbi_int_ate_size; |
| 103 | 106 | ||
| 104 | uint64_t pbi_dir_xbase; | 107 | uint64_t pbi_dir_xbase; |
| 105 | char pbi_hub_xid; | 108 | char pbi_hub_xid; |
| 106 | 109 | ||
| 107 | uint64_t pbi_devreg[8]; | 110 | uint64_t pbi_devreg[8]; |
| 108 | spinlock_t pbi_lock; | ||
| 109 | 111 | ||
| 110 | uint32_t pbi_valid_devices; | 112 | uint32_t pbi_valid_devices; |
| 111 | uint32_t pbi_enabled_devices; | 113 | uint32_t pbi_enabled_devices; |
| 114 | |||
| 115 | spinlock_t pbi_lock; | ||
| 112 | }; | 116 | }; |
| 113 | 117 | ||
| 114 | /* | 118 | /* |
| @@ -148,4 +152,8 @@ extern void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info); | |||
| 148 | extern int pcibr_ate_alloc(struct pcibus_info *, int); | 152 | extern int pcibr_ate_alloc(struct pcibus_info *, int); |
| 149 | extern void pcibr_ate_free(struct pcibus_info *, int); | 153 | extern void pcibr_ate_free(struct pcibus_info *, int); |
| 150 | extern void ate_write(struct pcibus_info *, int, int, uint64_t); | 154 | extern void ate_write(struct pcibus_info *, int, int, uint64_t); |
| 155 | extern int sal_pcibr_slot_enable(struct pcibus_info *soft, int device, | ||
| 156 | void *resp); | ||
| 157 | extern int sal_pcibr_slot_disable(struct pcibus_info *soft, int device, | ||
| 158 | int action, void *resp); | ||
| 151 | #endif | 159 | #endif |
diff --git a/include/asm-ia64/sn/pcidev.h b/include/asm-ia64/sn/pcidev.h index ed4031d80811..49711d00ad04 100644 --- a/include/asm-ia64/sn/pcidev.h +++ b/include/asm-ia64/sn/pcidev.h | |||
| @@ -10,11 +10,11 @@ | |||
| 10 | 10 | ||
| 11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
| 12 | 12 | ||
| 13 | extern struct sn_irq_info **sn_irq; | ||
| 14 | |||
| 15 | #define SN_PCIDEV_INFO(pci_dev) \ | 13 | #define SN_PCIDEV_INFO(pci_dev) \ |
| 16 | ((struct pcidev_info *)(pci_dev)->sysdata) | 14 | ((struct pcidev_info *)(pci_dev)->sysdata) |
| 17 | 15 | ||
| 16 | #define SN_PCIBUS_BUSSOFT_INFO(pci_bus) \ | ||
| 17 | (struct pcibus_info *)((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data)) | ||
| 18 | /* | 18 | /* |
| 19 | * Given a pci_bus, return the sn pcibus_bussoft struct. Note that | 19 | * Given a pci_bus, return the sn pcibus_bussoft struct. Note that |
| 20 | * this only works for root busses, not for busses represented by PPB's. | 20 | * this only works for root busses, not for busses represented by PPB's. |
| @@ -23,6 +23,8 @@ extern struct sn_irq_info **sn_irq; | |||
| 23 | #define SN_PCIBUS_BUSSOFT(pci_bus) \ | 23 | #define SN_PCIBUS_BUSSOFT(pci_bus) \ |
| 24 | ((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data)) | 24 | ((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data)) |
| 25 | 25 | ||
| 26 | #define SN_PCIBUS_BUSSOFT_INFO(pci_bus) \ | ||
| 27 | (struct pcibus_info *)((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data)) | ||
| 26 | /* | 28 | /* |
| 27 | * Given a struct pci_dev, return the sn pcibus_bussoft struct. Note | 29 | * Given a struct pci_dev, return the sn pcibus_bussoft struct. Note |
| 28 | * that this is not equivalent to SN_PCIBUS_BUSSOFT(pci_dev->bus) due | 30 | * that this is not equivalent to SN_PCIBUS_BUSSOFT(pci_dev->bus) due |
| @@ -50,9 +52,17 @@ struct pcidev_info { | |||
| 50 | 52 | ||
| 51 | struct sn_irq_info *pdi_sn_irq_info; | 53 | struct sn_irq_info *pdi_sn_irq_info; |
| 52 | struct sn_pcibus_provider *pdi_provider; /* sn pci ops */ | 54 | struct sn_pcibus_provider *pdi_provider; /* sn pci ops */ |
| 55 | struct pci_dev *host_pci_dev; /* host bus link */ | ||
| 53 | }; | 56 | }; |
| 54 | 57 | ||
| 55 | extern void sn_irq_fixup(struct pci_dev *pci_dev, | 58 | extern void sn_irq_fixup(struct pci_dev *pci_dev, |
| 56 | struct sn_irq_info *sn_irq_info); | 59 | struct sn_irq_info *sn_irq_info); |
| 57 | 60 | extern void sn_irq_unfixup(struct pci_dev *pci_dev); | |
| 61 | extern void sn_pci_controller_fixup(int segment, int busnum, | ||
| 62 | struct pci_bus *bus); | ||
| 63 | extern void sn_bus_store_sysdata(struct pci_dev *dev); | ||
| 64 | extern void sn_bus_free_sysdata(void); | ||
| 65 | extern void sn_pci_fixup_slot(struct pci_dev *dev); | ||
| 66 | extern void sn_pci_unfixup_slot(struct pci_dev *dev); | ||
| 67 | extern void sn_irq_lh_init(void); | ||
| 58 | #endif /* _ASM_IA64_SN_PCI_PCIDEV_H */ | 68 | #endif /* _ASM_IA64_SN_PCI_PCIDEV_H */ |
diff --git a/arch/ia64/sn/include/pci/pic.h b/include/asm-ia64/sn/pic.h index fd18acecb1e6..0de82e6b0893 100644 --- a/arch/ia64/sn/include/pci/pic.h +++ b/include/asm-ia64/sn/pic.h | |||
| @@ -15,7 +15,7 @@ | |||
| 15 | * PIC handles PCI/X busses. PCI/X requires that the 'bridge' (i.e. PIC) | 15 | * PIC handles PCI/X busses. PCI/X requires that the 'bridge' (i.e. PIC) |
| 16 | * be designated as 'device 0'. That is a departure from earlier SGI | 16 | * be designated as 'device 0'. That is a departure from earlier SGI |
| 17 | * PCI bridges. Because of that we use config space 1 to access the | 17 | * PCI bridges. Because of that we use config space 1 to access the |
| 18 | * config space of the first actual PCI device on the bus. | 18 | * config space of the first actual PCI device on the bus. |
| 19 | * Here's what the PIC manual says: | 19 | * Here's what the PIC manual says: |
| 20 | * | 20 | * |
| 21 | * The current PCI-X bus specification now defines that the parent | 21 | * The current PCI-X bus specification now defines that the parent |
| @@ -29,14 +29,14 @@ | |||
| 29 | * correlated config space and our device space 0 <-> 0, 1 <-> 1, etc. | 29 | * correlated config space and our device space 0 <-> 0, 1 <-> 1, etc. |
| 30 | * PCI-X requires we start at 1, not 0, and currently the PX brick | 30 | * PCI-X requires we start at 1, not 0, and currently the PX brick |
| 31 | * does associate our: | 31 | * does associate our: |
| 32 | * | 32 | * |
| 33 | * device 0 with configuration space window 1, | 33 | * device 0 with configuration space window 1, |
| 34 | * device 1 with configuration space window 2, | 34 | * device 1 with configuration space window 2, |
| 35 | * device 2 with configuration space window 3, | 35 | * device 2 with configuration space window 3, |
| 36 | * device 3 with configuration space window 4. | 36 | * device 3 with configuration space window 4. |
| 37 | * | 37 | * |
| 38 | * The net effect is that all config space accesses are off-by-one with | 38 | * The net effect is that all config space accesses are off-by-one with |
| 39 | * relation to other per-slot accesses on the PIC. | 39 | * relation to other per-slot accesses on the PIC. |
| 40 | * Here is a table that shows some of that: | 40 | * Here is a table that shows some of that: |
| 41 | * | 41 | * |
| 42 | * Internal Slot# | 42 | * Internal Slot# |
| @@ -65,7 +65,7 @@ | |||
| 65 | *****************************************************************************/ | 65 | *****************************************************************************/ |
| 66 | 66 | ||
| 67 | /* NOTE: PIC WAR. PV#854697. PIC does not allow writes just to [31:0] | 67 | /* NOTE: PIC WAR. PV#854697. PIC does not allow writes just to [31:0] |
| 68 | * of a 64-bit register. When writing PIC registers, always write the | 68 | * of a 64-bit register. When writing PIC registers, always write the |
| 69 | * entire 64 bits. | 69 | * entire 64 bits. |
| 70 | */ | 70 | */ |
| 71 | 71 | ||
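The PV#854697 note above is easy to violate from C, where a 32-bit store to half of a register looks harmless. A minimal sketch of the read-modify-write discipline it asks for; the helper name is hypothetical, and real code goes through the mapped struct pic fields.

	/* Sketch: PIC registers must always be written as a full 64-bit value. */
	static void example_pic_set_bits(volatile uint64_t *pic_reg, uint64_t bits)
	{
		uint64_t val;

		val = *pic_reg;		/* read the whole 64-bit register */
		val |= bits;		/* modify only a local copy */
		*pic_reg = val;		/* write all 64 bits back in one store */
	}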
| @@ -164,7 +164,7 @@ struct pic { | |||
| 164 | uint64_t clear_all; /* 0x000{438,,,5F8} */ | 164 | uint64_t clear_all; /* 0x000{438,,,5F8} */ |
| 165 | } p_buf_count[8]; | 165 | } p_buf_count[8]; |
| 166 | 166 | ||
| 167 | 167 | ||
| 168 | /* 0x000600-0x0009FF -- PCI/X registers */ | 168 | /* 0x000600-0x0009FF -- PCI/X registers */ |
| 169 | uint64_t p_pcix_bus_err_addr; /* 0x000600 */ | 169 | uint64_t p_pcix_bus_err_addr; /* 0x000600 */ |
| 170 | uint64_t p_pcix_bus_err_attr; /* 0x000608 */ | 170 | uint64_t p_pcix_bus_err_attr; /* 0x000608 */ |
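The configuration space discussion near the top of this header reduces to a fixed offset: PIC device N is reached through configuration space window N+1. As a hypothetical one-line helper (not in the patch):

	/* Hypothetical: map a PIC PCI-X device number to its config space window. */
	static inline int example_pic_config_window(int device)
	{
		return device + 1;	/* device 0 <-> window 1, 1 <-> 2, ... */
	}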
diff --git a/include/asm-ia64/sn/sn_cpuid.h b/include/asm-ia64/sn/sn_cpuid.h index 20b300187669..d2c1d34dcce4 100644 --- a/include/asm-ia64/sn/sn_cpuid.h +++ b/include/asm-ia64/sn/sn_cpuid.h | |||
| @@ -81,11 +81,6 @@ | |||
| 81 | * | 81 | * |
| 82 | */ | 82 | */ |
| 83 | 83 | ||
| 84 | #ifndef CONFIG_SMP | ||
| 85 | #define cpu_physical_id(cpuid) ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff) | ||
| 86 | #endif | ||
| 87 | |||
| 88 | |||
| 89 | #define get_node_number(addr) NASID_GET(addr) | 84 | #define get_node_number(addr) NASID_GET(addr) |
| 90 | 85 | ||
| 91 | /* | 86 | /* |
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h index 1455375d2ce4..27976d223186 100644 --- a/include/asm-ia64/sn/sn_sal.h +++ b/include/asm-ia64/sn/sn_sal.h | |||
| @@ -134,43 +134,28 @@ | |||
| 134 | 134 | ||
| 135 | #define SN_SAL_FAKE_PROM 0x02009999 | 135 | #define SN_SAL_FAKE_PROM 0x02009999 |
| 136 | 136 | ||
| 137 | |||
| 138 | /** | 137 | /** |
| 139 | * sn_sal_rev_major - get the major SGI SAL revision number | 138 | * sn_sal_revision - get the SGI SAL revision number |
| 140 | * | 139 | * |
| 141 | * The SGI PROM stores its version in sal_[ab]_rev_(major|minor). | 140 | * The SGI PROM stores its version in the sal_[ab]_rev_(major|minor) fields. |
| 142 | * This routine simply extracts the major value from the | 141 | * This routine simply extracts the major and minor values and |
| 143 | * @ia64_sal_systab structure constructed by ia64_sal_init(). | 142 | * presents them in a u32 format. |
| 144 | */ | 143 | * |
| 145 | static inline int | 144 | * For example, version 4.05 would be represented as 0x0405. |
| 146 | sn_sal_rev_major(void) | 145 | */ |
| 146 | static inline u32 | ||
| 147 | sn_sal_rev(void) | ||
| 147 | { | 148 | { |
| 148 | struct ia64_sal_systab *systab = efi.sal_systab; | 149 | struct ia64_sal_systab *systab = efi.sal_systab; |
| 149 | 150 | ||
| 150 | return (int)systab->sal_b_rev_major; | 151 | return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor); |
| 151 | } | ||
| 152 | |||
| 153 | /** | ||
| 154 | * sn_sal_rev_minor - get the minor SGI SAL revision number | ||
| 155 | * | ||
| 156 | * The SGI PROM stores its version in sal_[ab]_rev_(major|minor). | ||
| 157 | * This routine simply extracts the minor value from the | ||
| 158 | * @ia64_sal_systab structure constructed by ia64_sal_init(). | ||
| 159 | */ | ||
| 160 | static inline int | ||
| 161 | sn_sal_rev_minor(void) | ||
| 162 | { | ||
| 163 | struct ia64_sal_systab *systab = efi.sal_systab; | ||
| 164 | |||
| 165 | return (int)systab->sal_b_rev_minor; | ||
| 166 | } | 152 | } |
| 167 | 153 | ||
| 168 | /* | 154 | /* |
| 169 | * Specify the minimum PROM revision required for this kernel. | 155 | * Specify the minimum PROM revision required for this kernel. |
| 170 | * Note that they're stored in hex format... | 156 | * Note that they're stored in hex format... |
| 171 | */ | 157 | */ |
| 172 | #define SN_SAL_MIN_MAJOR 0x4 /* SN2 kernels need at least PROM 4.0 */ | 158 | #define SN_SAL_MIN_VERSION 0x0404 |
| 173 | #define SN_SAL_MIN_MINOR 0x0 | ||
| 174 | 159 | ||
| 175 | /* | 160 | /* |
| 176 | * Returns the master console nasid, if the call fails, return an illegal | 161 | * Returns the master console nasid, if the call fails, return an illegal |
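Folding the two old accessors into sn_sal_rev() turns the PROM version gate into a single compare against the packed 0xMMmm value (4.04 packs to 0x0404, 4.05 to 0x0405). A sketch of the check this enables; the function name and message are illustrative, not taken from the patch.

	/* Sketch: reject PROMs older than the packed minimum version. */
	static int example_check_sn_sal_rev(void)
	{
		if (sn_sal_rev() < SN_SAL_MIN_VERSION) {
			printk(KERN_ERR "SGI PROM too old: need %x.%02x or later\n",
			       SN_SAL_MIN_VERSION >> 8, SN_SAL_MIN_VERSION & 0xff);
			return -1;
		}
		return 0;
	}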
diff --git a/arch/ia64/sn/include/pci/tiocp.h b/include/asm-ia64/sn/tiocp.h index f07c83b2bf6e..5f2489c9d2dd 100644 --- a/arch/ia64/sn/include/pci/tiocp.h +++ b/include/asm-ia64/sn/tiocp.h | |||
| @@ -111,7 +111,7 @@ struct tiocp{ | |||
| 111 | uint64_t clear_all; /* 0x000{438,,,5F8} */ | 111 | uint64_t clear_all; /* 0x000{438,,,5F8} */ |
| 112 | } cp_buf_count[8]; | 112 | } cp_buf_count[8]; |
| 113 | 113 | ||
| 114 | 114 | ||
| 115 | /* 0x000600-0x0009FF -- PCI/X registers */ | 115 | /* 0x000600-0x0009FF -- PCI/X registers */ |
| 116 | uint64_t cp_pcix_bus_err_addr; /* 0x000600 */ | 116 | uint64_t cp_pcix_bus_err_addr; /* 0x000600 */ |
| 117 | uint64_t cp_pcix_bus_err_attr; /* 0x000608 */ | 117 | uint64_t cp_pcix_bus_err_attr; /* 0x000608 */ |
