author     Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>  2007-07-17 08:22:33 -0400
committer  Tony Luck <tony.luck@intel.com>  2007-07-17 12:52:13 -0400
commit     4994be1b3fe9120c88022ff5c0c33f6312b17adb (patch)
tree       b4d32c77681029d2b5dfd94b0eb5a09be0ccae9e /arch/ia64/kernel/irq_ia64.c
parent     e1b30a392835e92581db09a4e8b4b2ad53a0c370 (diff)
[IA64] Add support for vector domain
Add fundamental support for multiple vector domains. There still exists
only one vector domain even with this patch. IRQ migration across
domains is not yet supported by this patch.

Signed-off-by: Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com>
Signed-off-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
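In short: each allocated vector now records, in vector_table[], the set of
CPUs (its domain) on which it is live, and find_unassigned_vector() only
hands out a vector whose recorded domains are disjoint from the requested
one, so the same vector number can eventually serve different interrupts on
non-overlapping CPU sets. Below is a minimal user-space sketch of that
invariant, not kernel code: uint64_t masks stand in for cpumask_t, NVEC and
bind_vector() are invented names, and the online-CPU filtering is omitted.

/* Sketch only: illustrates the vector-domain bookkeeping described above.
 * Plain uint64_t masks stand in for cpumask_t; all names are invented. */
#include <stdint.h>
#include <stdio.h>

#define NVEC 4                        /* stand-in for IA64_NUM_DEVICE_VECTORS */

static uint64_t vector_table[NVEC];   /* CPUs already using each vector */

/* A vector is free for 'domain' iff no CPU in 'domain' already uses it
 * (the cpus_and() + cpus_empty() test in the patch). */
static int find_unassigned_vector(uint64_t domain)
{
	int pos;

	for (pos = 0; pos < NVEC; pos++)
		if (!(vector_table[pos] & domain))
			return pos;
	return -1;                    /* -ENOSPC in the kernel */
}

/* Record the binding (the cpus_or() step in __bind_irq_vector()). */
static void bind_vector(int pos, uint64_t domain)
{
	vector_table[pos] |= domain;
}

int main(void)
{
	uint64_t cpus01 = 0x3, cpus23 = 0xc;   /* CPUs {0,1} and {2,3} */
	int a, b, c;

	a = find_unassigned_vector(cpus01);    /* 0 */
	bind_vector(a, cpus01);
	b = find_unassigned_vector(cpus23);    /* 0 again: domains are disjoint */
	bind_vector(b, cpus23);
	c = find_unassigned_vector(cpus01);    /* 1: vector 0 now overlaps {0,1} */
	printf("a=%d b=%d c=%d\n", a, b, c);
	return 0;
}

Note that with this patch vector_allocation_domain() always returns
CPU_MASK_ALL, so every request uses the full mask and no vector sharing
actually occurs yet; the disjoint-domain case above only arises once
smaller per-CPU domains are introduced.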
Diffstat (limited to 'arch/ia64/kernel/irq_ia64.c')
-rw-r--r--  arch/ia64/kernel/irq_ia64.c  120
1 file changed, 87 insertions(+), 33 deletions(-)
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 072427c2c3f6..a3667631ed80 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -60,6 +60,8 @@ int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
 void __iomem *ipi_base_addr = ((void __iomem *)
 			(__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
 
+static cpumask_t vector_allocation_domain(int cpu);
+
 /*
  * Legacy IRQ to IA-64 vector translation table.
  */
@@ -73,13 +75,20 @@ EXPORT_SYMBOL(isa_irq_to_vector_map);
 DEFINE_SPINLOCK(vector_lock);
 
 struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
-	[0 ... NR_IRQS - 1] = { .vector = IRQ_VECTOR_UNASSIGNED }
+	[0 ... NR_IRQS - 1] = {
+		.vector = IRQ_VECTOR_UNASSIGNED,
+		.domain = CPU_MASK_NONE
+	}
 };
 
 DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
 	[0 ... IA64_NUM_VECTORS - 1] = IA64_SPURIOUS_INT_VECTOR
 };
 
+static cpumask_t vector_table[IA64_MAX_DEVICE_VECTORS] = {
+	[0 ... IA64_MAX_DEVICE_VECTORS - 1] = CPU_MASK_NONE
+};
+
 static int irq_status[NR_IRQS] = {
 	[0 ... NR_IRQS -1] = IRQ_UNUSED
 };
@@ -111,39 +120,54 @@ static inline int find_unassigned_irq(void)
 	return -ENOSPC;
 }
 
-static inline int find_unassigned_vector(void)
+static inline int find_unassigned_vector(cpumask_t domain)
 {
-	int vector;
+	cpumask_t mask;
+	int pos;
+
+	cpus_and(mask, domain, cpu_online_map);
+	if (cpus_empty(mask))
+		return -EINVAL;
 
-	for (vector = IA64_FIRST_DEVICE_VECTOR;
-	     vector <= IA64_LAST_DEVICE_VECTOR; vector++)
-		if (__get_cpu_var(vector_irq[vector]) == IA64_SPURIOUS_INT_VECTOR)
-			return vector;
+	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
+		cpus_and(mask, domain, vector_table[pos]);
+		if (!cpus_empty(mask))
+			continue;
+		return IA64_FIRST_DEVICE_VECTOR + pos;
+	}
 	return -ENOSPC;
 }
 
-static int __bind_irq_vector(int irq, int vector)
+static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
 {
-	int cpu;
+	cpumask_t mask;
+	int cpu, pos;
+	struct irq_cfg *cfg = &irq_cfg[irq];
 
-	if (irq_to_vector(irq) == vector)
+	cpus_and(mask, domain, cpu_online_map);
+	if (cpus_empty(mask))
+		return -EINVAL;
+	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
 		return 0;
-	if (irq_to_vector(irq) != IRQ_VECTOR_UNASSIGNED)
+	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
 		return -EBUSY;
-	for_each_online_cpu(cpu)
+	for_each_cpu_mask(cpu, mask)
 		per_cpu(vector_irq, cpu)[vector] = irq;
-	irq_cfg[irq].vector = vector;
+	cfg->vector = vector;
+	cfg->domain = domain;
 	irq_status[irq] = IRQ_USED;
+	pos = vector - IA64_FIRST_DEVICE_VECTOR;
+	cpus_or(vector_table[pos], vector_table[pos], domain);
 	return 0;
 }
 
-int bind_irq_vector(int irq, int vector)
+int bind_irq_vector(int irq, int vector, cpumask_t domain)
 {
 	unsigned long flags;
 	int ret;
 
 	spin_lock_irqsave(&vector_lock, flags);
-	ret = __bind_irq_vector(irq, vector);
+	ret = __bind_irq_vector(irq, vector, domain);
 	spin_unlock_irqrestore(&vector_lock, flags);
 	return ret;
 }
@@ -151,16 +175,24 @@ int bind_irq_vector(int irq, int vector)
 static void clear_irq_vector(int irq)
 {
 	unsigned long flags;
-	int vector, cpu;
+	int vector, cpu, pos;
+	cpumask_t mask;
+	cpumask_t domain;
+	struct irq_cfg *cfg = &irq_cfg[irq];
 
 	spin_lock_irqsave(&vector_lock, flags);
 	BUG_ON((unsigned)irq >= NR_IRQS);
-	BUG_ON(irq_cfg[irq].vector == IRQ_VECTOR_UNASSIGNED);
-	vector = irq_cfg[irq].vector;
-	for_each_online_cpu(cpu)
+	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
+	vector = cfg->vector;
+	domain = cfg->domain;
+	cpus_and(mask, cfg->domain, cpu_online_map);
+	for_each_cpu_mask(cpu, mask)
 		per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
-	irq_cfg[irq].vector = IRQ_VECTOR_UNASSIGNED;
+	cfg->vector = IRQ_VECTOR_UNASSIGNED;
+	cfg->domain = CPU_MASK_NONE;
 	irq_status[irq] = IRQ_UNUSED;
+	pos = vector - IA64_FIRST_DEVICE_VECTOR;
+	cpus_andnot(vector_table[pos], vector_table[pos], domain);
 	spin_unlock_irqrestore(&vector_lock, flags);
 }
 
@@ -168,18 +200,26 @@ int
 assign_irq_vector (int irq)
 {
 	unsigned long flags;
-	int vector = -ENOSPC;
+	int vector, cpu;
+	cpumask_t domain;
+
+	vector = -ENOSPC;
 
+	spin_lock_irqsave(&vector_lock, flags);
 	if (irq < 0) {
 		goto out;
 	}
-	spin_lock_irqsave(&vector_lock, flags);
-	vector = find_unassigned_vector();
+	for_each_online_cpu(cpu) {
+		domain = vector_allocation_domain(cpu);
+		vector = find_unassigned_vector(domain);
+		if (vector >= 0)
+			break;
+	}
 	if (vector < 0)
 		goto out;
-	BUG_ON(__bind_irq_vector(irq, vector));
-	spin_unlock_irqrestore(&vector_lock, flags);
+	BUG_ON(__bind_irq_vector(irq, vector, domain));
  out:
+	spin_unlock_irqrestore(&vector_lock, flags);
 	return vector;
 }
 
@@ -198,7 +238,7 @@ reserve_irq_vector (int vector)
 	if (vector < IA64_FIRST_DEVICE_VECTOR ||
 	    vector > IA64_LAST_DEVICE_VECTOR)
 		return -EINVAL;
-	return !!bind_irq_vector(vector, vector);
+	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
 }
 
 /*
@@ -214,11 +254,19 @@ void __setup_vector_irq(int cpu)
 		per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
 	/* Mark the inuse vectors */
 	for (irq = 0; irq < NR_IRQS; ++irq) {
-		if ((vector = irq_to_vector(irq)) != IRQ_VECTOR_UNASSIGNED)
-			per_cpu(vector_irq, cpu)[vector] = irq;
+		if (!cpu_isset(cpu, irq_cfg[irq].domain))
+			continue;
+		vector = irq_to_vector(irq);
+		per_cpu(vector_irq, cpu)[vector] = irq;
 	}
 }
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	return CPU_MASK_ALL;
+}
+
+
 void destroy_and_reserve_irq(unsigned int irq)
 {
 	dynamic_irq_cleanup(irq);
@@ -233,17 +281,23 @@ void destroy_and_reserve_irq(unsigned int irq)
 int create_irq(void)
 {
 	unsigned long flags;
-	int irq, vector;
+	int irq, vector, cpu;
+	cpumask_t domain;
 
-	irq = -ENOSPC;
+	irq = vector = -ENOSPC;
 	spin_lock_irqsave(&vector_lock, flags);
-	vector = find_unassigned_vector();
+	for_each_online_cpu(cpu) {
+		domain = vector_allocation_domain(cpu);
+		vector = find_unassigned_vector(domain);
+		if (vector >= 0)
+			break;
+	}
 	if (vector < 0)
 		goto out;
 	irq = find_unassigned_irq();
 	if (irq < 0)
 		goto out;
-	BUG_ON(__bind_irq_vector(irq, vector));
+	BUG_ON(__bind_irq_vector(irq, vector, domain));
  out:
 	spin_unlock_irqrestore(&vector_lock, flags);
 	if (irq >= 0)
@@ -434,7 +488,7 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action)
 	unsigned int irq;
 
 	irq = vec;
-	BUG_ON(bind_irq_vector(irq, vec));
+	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
 	desc = irq_desc + irq;
 	desc->status |= IRQ_PER_CPU;
 	desc->chip = &irq_type_ia64_lsapic;