author	Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>	2007-07-17 08:22:33 -0400
committer	Tony Luck <tony.luck@intel.com>	2007-07-17 12:52:13 -0400
commit	4994be1b3fe9120c88022ff5c0c33f6312b17adb (patch)
tree	b4d32c77681029d2b5dfd94b0eb5a09be0ccae9e
parent	e1b30a392835e92581db09a4e8b4b2ad53a0c370 (diff)

[IA64] Add support for vector domain

Add fundamental support for multiple vector domains. Even with this
patch there still exists only one vector domain; IRQ migration across
domains is not yet supported.

Signed-off-by: Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com>
Signed-off-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
-rw-r--r--	arch/ia64/kernel/iosapic.c	13
-rw-r--r--	arch/ia64/kernel/irq_ia64.c	120
-rw-r--r--	arch/ia64/kernel/msi_ia64.c	9
-rw-r--r--	include/asm-ia64/hw_irq.h	4
-rw-r--r--	include/asm-ia64/irq.h	9
5 files changed, 113 insertions(+), 42 deletions(-)
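
[Editor's note] For orientation before the diff: the idea behind a vector
domain is that a vector number only has to be unique within the set of CPUs
that share it, so disjoint domains can reuse the same vector. The standalone
sketch below models that bookkeeping with plain unsigned long bitmasks
instead of cpumask_t; NUM_VECTORS, bind_vector() and the mask values are
illustrative stand-ins, not kernel names. With this patch there is only one
domain (all CPUs), so the search degenerates to the old global allocator.

#include <stdio.h>

#define NUM_VECTORS 8

typedef unsigned long cpumask;            /* bit N = CPU N; stand-in for cpumask_t */

static cpumask vector_table[NUM_VECTORS]; /* CPUs on which each vector is in use */

/* Lowest vector free on every CPU of 'domain': its table entry must not
 * overlap the domain (mirrors the cpus_and() test in the patch). */
static int find_unassigned_vector(cpumask domain)
{
        int pos;

        for (pos = 0; pos < NUM_VECTORS; pos++)
                if (!(vector_table[pos] & domain))
                        return pos;
        return -1;
}

static void bind_vector(int pos, cpumask domain)
{
        vector_table[pos] |= domain;      /* the cpus_or() in __bind_irq_vector() */
}

int main(void)
{
        cpumask dom_a = 0x3;              /* CPUs 0-1 */
        cpumask dom_b = 0xc;              /* CPUs 2-3 */

        int v1 = find_unassigned_vector(dom_a);
        bind_vector(v1, dom_a);
        int v2 = find_unassigned_vector(dom_b);
        bind_vector(v2, dom_b);

        /* Disjoint domains can share a vector number: */
        printf("domain A -> vector %d, domain B -> vector %d\n", v1, v2);
        return 0;
}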
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index cf27cfb4d165..e647254c2707 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -354,6 +354,8 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 
 	irq &= (~IA64_IRQ_REDIRECTED);
 
+	/* IRQ migration across domain is not supported yet */
+	cpus_and(mask, mask, irq_to_domain(irq));
 	if (cpus_empty(mask))
 		return;
 
@@ -663,6 +665,7 @@ get_target_cpu (unsigned int gsi, int irq)
 #ifdef CONFIG_SMP
 	static int cpu = -1;
 	extern int cpe_vector;
+	cpumask_t domain = irq_to_domain(irq);
 
 	/*
 	 * In case of vector shared by multiple RTEs, all RTEs that
@@ -701,7 +704,7 @@ get_target_cpu (unsigned int gsi, int irq)
 			goto skip_numa_setup;
 
 		cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
-
+		cpus_and(cpu_mask, cpu_mask, domain);
 		for_each_cpu_mask(numa_cpu, cpu_mask) {
 			if (!cpu_online(numa_cpu))
 				cpu_clear(numa_cpu, cpu_mask);
@@ -731,7 +734,7 @@ skip_numa_setup:
 	do {
 		if (++cpu >= NR_CPUS)
 			cpu = 0;
-	} while (!cpu_online(cpu));
+	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
 
 	return cpu_physical_id(cpu);
 #else  /* CONFIG_SMP */
@@ -900,7 +903,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
 	switch (int_type) {
 	      case ACPI_INTERRUPT_PMI:
 		irq = vector = iosapic_vector;
-		bind_irq_vector(irq, vector);
+		bind_irq_vector(irq, vector, CPU_MASK_ALL);
 		/*
 		 * since PMI vector is alloc'd by FW(ACPI) not by kernel,
 		 * we need to make sure the vector is available
@@ -917,7 +920,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
 		break;
 	      case ACPI_INTERRUPT_CPEI:
 		irq = vector = IA64_CPE_VECTOR;
-		BUG_ON(bind_irq_vector(irq, vector));
+		BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
 		delivery = IOSAPIC_LOWEST_PRIORITY;
 		mask = 1;
 		break;
@@ -953,7 +956,7 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
 	unsigned int dest = cpu_physical_id(smp_processor_id());
 
 	irq = vector = isa_irq_to_vector(isa_irq);
-	BUG_ON(bind_irq_vector(irq, vector));
+	BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
 	register_intr(gsi, irq, IOSAPIC_LOWEST_PRIORITY, polarity, trigger);
 
 	DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n",
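
[Editor's note] The net effect of the iosapic.c changes above: every
CPU-selection path is intersected with the IRQ's domain, and the round-robin
fallback in get_target_cpu() skips CPUs outside it. A minimal standalone
model of that constrained round-robin follows; online[], next_target_cpu()
and the plain-bitmask domain are made-up stand-ins for cpu_online(),
get_target_cpu() and cpumask_t.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* CPUs 0, 1 and 3 online; CPU 2 offline. */
static const bool online[NR_CPUS] = { true, true, false, true };

/* Advance a static cursor until it lands on a CPU that is both online
 * and inside the IRQ's vector domain. The caller must guarantee the
 * domain contains an online CPU, exactly as the kernel does by checking
 * the intersection at bind time. */
static int next_target_cpu(unsigned long domain)
{
        static int cpu = -1;

        do {
                if (++cpu >= NR_CPUS)
                        cpu = 0;
        } while (!online[cpu] || !(domain & (1UL << cpu)));
        return cpu;
}

int main(void)
{
        unsigned long domain = 0xb;     /* CPUs 0, 1, 3 */
        int i;

        for (i = 0; i < 4; i++)         /* prints CPUs 0, 1, 3, 0 */
                printf("next IRQ target: CPU %d\n", next_target_cpu(domain));
        return 0;
}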
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 072427c2c3f6..a3667631ed80 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -60,6 +60,8 @@ int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
 void __iomem *ipi_base_addr = ((void __iomem *)
 			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
 
+static cpumask_t vector_allocation_domain(int cpu);
+
 /*
  * Legacy IRQ to IA-64 vector translation table.
  */
@@ -73,13 +75,20 @@ EXPORT_SYMBOL(isa_irq_to_vector_map);
 DEFINE_SPINLOCK(vector_lock);
 
 struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
-	[0 ... NR_IRQS - 1] = { .vector = IRQ_VECTOR_UNASSIGNED }
+	[0 ... NR_IRQS - 1] = {
+		.vector = IRQ_VECTOR_UNASSIGNED,
+		.domain = CPU_MASK_NONE
+	}
 };
 
 DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
 	[0 ... IA64_NUM_VECTORS - 1] = IA64_SPURIOUS_INT_VECTOR
 };
 
+static cpumask_t vector_table[IA64_MAX_DEVICE_VECTORS] = {
+	[0 ... IA64_MAX_DEVICE_VECTORS - 1] = CPU_MASK_NONE
+};
+
 static int irq_status[NR_IRQS] = {
 	[0 ... NR_IRQS -1] = IRQ_UNUSED
 };
@@ -111,39 +120,54 @@ static inline int find_unassigned_irq(void)
 	return -ENOSPC;
 }
 
-static inline int find_unassigned_vector(void)
+static inline int find_unassigned_vector(cpumask_t domain)
 {
-	int vector;
+	cpumask_t mask;
+	int pos;
+
+	cpus_and(mask, domain, cpu_online_map);
+	if (cpus_empty(mask))
+		return -EINVAL;
 
-	for (vector = IA64_FIRST_DEVICE_VECTOR;
-	     vector <= IA64_LAST_DEVICE_VECTOR; vector++)
-		if (__get_cpu_var(vector_irq[vector]) == IA64_SPURIOUS_INT_VECTOR)
-			return vector;
+	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
+		cpus_and(mask, domain, vector_table[pos]);
+		if (!cpus_empty(mask))
+			continue;
+		return IA64_FIRST_DEVICE_VECTOR + pos;
+	}
 	return -ENOSPC;
 }
 
-static int __bind_irq_vector(int irq, int vector)
+static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
 {
-	int cpu;
+	cpumask_t mask;
+	int cpu, pos;
+	struct irq_cfg *cfg = &irq_cfg[irq];
 
-	if (irq_to_vector(irq) == vector)
+	cpus_and(mask, domain, cpu_online_map);
+	if (cpus_empty(mask))
+		return -EINVAL;
+	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
 		return 0;
-	if (irq_to_vector(irq) != IRQ_VECTOR_UNASSIGNED)
+	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
 		return -EBUSY;
-	for_each_online_cpu(cpu)
+	for_each_cpu_mask(cpu, mask)
 		per_cpu(vector_irq, cpu)[vector] = irq;
-	irq_cfg[irq].vector = vector;
+	cfg->vector = vector;
+	cfg->domain = domain;
 	irq_status[irq] = IRQ_USED;
+	pos = vector - IA64_FIRST_DEVICE_VECTOR;
+	cpus_or(vector_table[pos], vector_table[pos], domain);
 	return 0;
 }
 
-int bind_irq_vector(int irq, int vector)
+int bind_irq_vector(int irq, int vector, cpumask_t domain)
 {
 	unsigned long flags;
 	int ret;
 
 	spin_lock_irqsave(&vector_lock, flags);
-	ret = __bind_irq_vector(irq, vector);
+	ret = __bind_irq_vector(irq, vector, domain);
 	spin_unlock_irqrestore(&vector_lock, flags);
 	return ret;
 }
@@ -151,16 +175,24 @@ int bind_irq_vector(int irq, int vector)
 static void clear_irq_vector(int irq)
 {
 	unsigned long flags;
-	int vector, cpu;
+	int vector, cpu, pos;
+	cpumask_t mask;
+	cpumask_t domain;
+	struct irq_cfg *cfg = &irq_cfg[irq];
 
 	spin_lock_irqsave(&vector_lock, flags);
 	BUG_ON((unsigned)irq >= NR_IRQS);
-	BUG_ON(irq_cfg[irq].vector == IRQ_VECTOR_UNASSIGNED);
-	vector = irq_cfg[irq].vector;
-	for_each_online_cpu(cpu)
+	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
+	vector = cfg->vector;
+	domain = cfg->domain;
+	cpus_and(mask, cfg->domain, cpu_online_map);
+	for_each_cpu_mask(cpu, mask)
 		per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
-	irq_cfg[irq].vector = IRQ_VECTOR_UNASSIGNED;
+	cfg->vector = IRQ_VECTOR_UNASSIGNED;
+	cfg->domain = CPU_MASK_NONE;
 	irq_status[irq] = IRQ_UNUSED;
+	pos = vector - IA64_FIRST_DEVICE_VECTOR;
+	cpus_andnot(vector_table[pos], vector_table[pos], domain);
 	spin_unlock_irqrestore(&vector_lock, flags);
 }
 
@@ -168,18 +200,26 @@ int
 assign_irq_vector (int irq)
 {
 	unsigned long flags;
-	int vector = -ENOSPC;
+	int vector, cpu;
+	cpumask_t domain;
+
+	vector = -ENOSPC;
 
+	spin_lock_irqsave(&vector_lock, flags);
 	if (irq < 0) {
 		goto out;
 	}
-	spin_lock_irqsave(&vector_lock, flags);
-	vector = find_unassigned_vector();
+	for_each_online_cpu(cpu) {
+		domain = vector_allocation_domain(cpu);
+		vector = find_unassigned_vector(domain);
+		if (vector >= 0)
+			break;
+	}
 	if (vector < 0)
 		goto out;
-	BUG_ON(__bind_irq_vector(irq, vector));
-	spin_unlock_irqrestore(&vector_lock, flags);
+	BUG_ON(__bind_irq_vector(irq, vector, domain));
  out:
+	spin_unlock_irqrestore(&vector_lock, flags);
 	return vector;
 }
 
@@ -198,7 +238,7 @@ reserve_irq_vector (int vector)
 	if (vector < IA64_FIRST_DEVICE_VECTOR ||
 	    vector > IA64_LAST_DEVICE_VECTOR)
 		return -EINVAL;
-	return !!bind_irq_vector(vector, vector);
+	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
 }
 
 /*
@@ -214,11 +254,19 @@ void __setup_vector_irq(int cpu)
 		per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
 	/* Mark the inuse vectors */
 	for (irq = 0; irq < NR_IRQS; ++irq) {
-		if ((vector = irq_to_vector(irq)) != IRQ_VECTOR_UNASSIGNED)
-			per_cpu(vector_irq, cpu)[vector] = irq;
+		if (!cpu_isset(cpu, irq_cfg[irq].domain))
+			continue;
+		vector = irq_to_vector(irq);
+		per_cpu(vector_irq, cpu)[vector] = irq;
 	}
 }
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	return CPU_MASK_ALL;
+}
+
+
 void destroy_and_reserve_irq(unsigned int irq)
 {
 	dynamic_irq_cleanup(irq);
@@ -233,17 +281,23 @@ void destroy_and_reserve_irq(unsigned int irq)
 int create_irq(void)
 {
 	unsigned long flags;
-	int irq, vector;
+	int irq, vector, cpu;
+	cpumask_t domain;
 
-	irq = -ENOSPC;
+	irq = vector = -ENOSPC;
 	spin_lock_irqsave(&vector_lock, flags);
-	vector = find_unassigned_vector();
+	for_each_online_cpu(cpu) {
+		domain = vector_allocation_domain(cpu);
+		vector = find_unassigned_vector(domain);
+		if (vector >= 0)
+			break;
+	}
 	if (vector < 0)
 		goto out;
 	irq = find_unassigned_irq();
 	if (irq < 0)
 		goto out;
-	BUG_ON(__bind_irq_vector(irq, vector));
+	BUG_ON(__bind_irq_vector(irq, vector, domain));
  out:
 	spin_unlock_irqrestore(&vector_lock, flags);
 	if (irq >= 0)
@@ -434,7 +488,7 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action)
 	unsigned int irq;
 
 	irq = vec;
-	BUG_ON(bind_irq_vector(irq, vec));
+	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
 	desc = irq_desc + irq;
 	desc->status |= IRQ_PER_CPU;
 	desc->chip = &irq_type_ia64_lsapic;
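
[Editor's note] assign_irq_vector() and create_irq() now share the same
allocation pattern: walk the online CPUs, ask vector_allocation_domain()
for each CPU's domain, and take the first domain that still has a free
vector. Since the placeholder domain is CPU_MASK_ALL, the first iteration
already decides; the loop only pays off once domains differ per CPU. A
simplified standalone model of that loop (the stub names and return
values are illustrative, not kernel code):

#include <stdio.h>

#define NR_CPUS 4

/* Placeholder matching this patch: one domain spanning every CPU. */
static unsigned long vector_allocation_domain(int cpu)
{
        (void)cpu;
        return ~0UL;                    /* CPU_MASK_ALL */
}

/* Stub standing in for the real search over vector_table[]. */
static int find_unassigned_vector(unsigned long domain)
{
        return domain ? 32 : -1;
}

int main(void)
{
        unsigned long domain = 0;
        int vector = -1;
        int cpu;

        /* Mirrors the for_each_online_cpu() loop in assign_irq_vector()
         * and create_irq(): take the first domain with a free vector. */
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                domain = vector_allocation_domain(cpu);
                vector = find_unassigned_vector(domain);
                if (vector >= 0)
                        break;
        }
        printf("allocated vector %d in domain 0x%lx\n", vector, domain);
        return 0;
}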
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index c81080df70df..1d22670cc88b 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -52,6 +52,11 @@ static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
 	struct msi_msg msg;
 	u32 addr;
 
+	/* IRQ migration across domain is not supported yet */
+	cpus_and(cpu_mask, cpu_mask, irq_to_domain(irq));
+	if (cpus_empty(cpu_mask))
+		return;
+
 	read_msi_msg(irq, &msg);
 
 	addr = msg.address_lo;
@@ -69,13 +74,15 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 	struct msi_msg msg;
 	unsigned long dest_phys_id;
 	int irq, vector;
+	cpumask_t mask;
 
 	irq = create_irq();
 	if (irq < 0)
 		return irq;
 
 	set_irq_msi(irq, desc);
-	dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map));
+	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
+	dest_phys_id = cpu_physical_id(first_cpu(mask));
 	vector = irq_to_vector(irq);
 
 	msg.address_hi = 0;
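
[Editor's note] The MSI side enforces the same restriction as
iosapic_set_affinity(): an affinity request is first clipped to the IRQ's
domain and silently ignored if the intersection is empty, and the initial
MSI destination is the first online CPU inside the domain. A rough
standalone model (plain bitmasks; first_cpu() approximated with GCC's
__builtin_ctzl, so this sketch assumes GCC or Clang):

#include <stdio.h>

/* first_cpu() modeled as count-trailing-zeros on a non-empty mask. */
static int first_cpu(unsigned long mask)
{
        return mask ? __builtin_ctzl(mask) : -1;
}

/* Intersect the requested affinity with the IRQ's domain first; if
 * nothing survives, refuse, since cross-domain migration is not
 * supported yet. Otherwise target the first online CPU in the result. */
static int msi_target(unsigned long requested, unsigned long domain,
                      unsigned long online)
{
        unsigned long mask = requested & domain;    /* cpus_and() */

        if (!mask)
                return -1;
        return first_cpu(mask & online);
}

int main(void)
{
        unsigned long domain = 0x3;     /* IRQ bound to CPUs 0-1 */
        unsigned long online = 0xf;

        printf("request CPUs 0-3 -> CPU %d\n", msi_target(0xf, domain, online));
        printf("request CPUs 2-3 -> %d (refused)\n", msi_target(0xc, domain, online));
        return 0;
}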
diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h
index 4eff7ff2da82..97ea3900d74a 100644
--- a/include/asm-ia64/hw_irq.h
+++ b/include/asm-ia64/hw_irq.h
@@ -92,14 +92,16 @@ extern __u8 isa_irq_to_vector_map[16];
 
 struct irq_cfg {
 	ia64_vector vector;
+	cpumask_t domain;
 };
 extern spinlock_t vector_lock;
 extern struct irq_cfg irq_cfg[NR_IRQS];
+#define irq_to_domain(x)	irq_cfg[(x)].domain
 DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq);
 
 extern struct hw_interrupt_type irq_type_ia64_lsapic;	/* CPU-internal interrupt controller */
 
-extern int bind_irq_vector(int irq, int vector);
+extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
 extern int assign_irq_vector (int irq);	/* allocate a free vector */
 extern void free_irq_vector (int vector);
 extern int reserve_irq_vector (int vector);
diff --git a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h
index 67221615e317..35b360b82e43 100644
--- a/include/asm-ia64/irq.h
+++ b/include/asm-ia64/irq.h
@@ -14,8 +14,13 @@
 #include <linux/types.h>
 #include <linux/cpumask.h>
 
-#define NR_IRQS		256
-#define NR_IRQ_VECTORS	NR_IRQS
+#define NR_VECTORS	256
+
+#if (NR_VECTORS + 32 * NR_CPUS) < 1024
+#define NR_IRQS		(NR_VECTORS + 32 * NR_CPUS)
+#else
+#define NR_IRQS		1024
+#endif
 
 static __inline__ int
 irq_canonicalize (int irq)
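
[Editor's note] The NR_IRQS change decouples IRQ numbers from the 256
hardware vectors: once a vector can be reused across domains, one vector no
longer maps to exactly one IRQ, so the IRQ number space grows with the CPU
count. A quick worked check of the formula (for example, 16 CPUs give
256 + 32*16 = 768, and the 1024 cap kicks in from 24 CPUs upward):

#include <stdio.h>

#define NR_VECTORS 256

/* The new sizing rule from include/asm-ia64/irq.h: room for 32 extra
 * IRQ numbers per CPU on top of the 256 vectors, capped at 1024. */
static int nr_irqs(int nr_cpus)
{
        int n = NR_VECTORS + 32 * nr_cpus;

        return n < 1024 ? n : 1024;
}

int main(void)
{
        int cpus;

        /* Prints 384, 512, 768, then the 1024 cap for 32 CPUs. */
        for (cpus = 4; cpus <= 32; cpus *= 2)
                printf("NR_CPUS=%2d -> NR_IRQS=%d\n", cpus, nr_irqs(cpus));
        return 0;
}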