-rw-r--r--	arch/x86/pci/xen.c	|  41
-rw-r--r--	drivers/xen/events.c	| 439
-rw-r--r--	include/xen/events.h	|  24
3 files changed, 300 insertions(+), 204 deletions(-)
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 8c4085a95ef1..e37b407a0ee8 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -50,7 +50,7 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
 		name = "ioapic-level";
 	}
 
-	irq = xen_map_pirq_gsi(map_irq.pirq, gsi, shareable, name);
+	irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name);
 
 	printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
 
@@ -237,6 +237,7 @@ static int xen_pcifront_enable_irq(struct pci_dev *dev)
 {
 	int rc;
 	int share = 1;
+	int pirq;
 	u8 gsi;
 
 	rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
@@ -246,13 +247,21 @@ static int xen_pcifront_enable_irq(struct pci_dev *dev)
 		return rc;
 	}
 
+	rc = xen_allocate_pirq_gsi(gsi);
+	if (rc < 0) {
+		dev_warn(&dev->dev, "Xen PCI: failed to allocate a PIRQ for GSI%d: %d\n",
+			 gsi, rc);
+		return rc;
+	}
+	pirq = rc;
+
 	if (gsi < NR_IRQS_LEGACY)
 		share = 0;
 
-	rc = xen_allocate_pirq(gsi, share, "pcifront");
+	rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
 	if (rc < 0) {
-		dev_warn(&dev->dev, "Xen PCI: failed to register GSI%d: %d\n",
-			 gsi, rc);
+		dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
+			 gsi, pirq, rc);
 		return rc;
 	}
 
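The pcifront hunk above illustrates the new two-step convention this patch introduces: first reserve a Xen PIRQ for the GSI, then bind that PIRQ to a Linux IRQ. A minimal sketch of the resulting call sequence (error handling condensed; gsi and share are set up as in the surrounding function):

	int pirq, irq;

	pirq = xen_allocate_pirq_gsi(gsi);	/* step 1: reserve a PIRQ number */
	if (pirq < 0)
		return pirq;

	irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");	/* step 2: bind PIRQ -> IRQ */
	if (irq < 0)
		return irq;

Splitting allocation from binding keeps the GSI path symmetrical with the MSI path, which already has its own allocate/bind pair (xen_allocate_pirq_msi() / xen_bind_pirq_msi_to_irq()).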
@@ -309,7 +318,7 @@ int __init pci_xen_hvm_init(void)
 #ifdef CONFIG_XEN_DOM0
 static int xen_register_pirq(u32 gsi, int triggering)
 {
-	int rc, irq;
+	int rc, pirq, irq = -1;
 	struct physdev_map_pirq map_irq;
 	int shareable = 0;
 	char *name;
@@ -325,17 +334,20 @@ static int xen_register_pirq(u32 gsi, int triggering)
 		name = "ioapic-level";
 	}
 
-	irq = xen_allocate_pirq(gsi, shareable, name);
-
-	printk(KERN_DEBUG "xen: --> irq=%d\n", irq);
-
+	pirq = xen_allocate_pirq_gsi(gsi);
+	if (pirq < 0)
+		goto out;
+
+	irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
 	if (irq < 0)
 		goto out;
 
+	printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d\n", pirq, irq);
+
 	map_irq.domid = DOMID_SELF;
 	map_irq.type = MAP_PIRQ_TYPE_GSI;
 	map_irq.index = gsi;
-	map_irq.pirq = irq;
+	map_irq.pirq = pirq;
 
 	rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
 	if (rc) {
@@ -422,13 +434,18 @@ static int __init pci_xen_initial_domain(void)
 
 void __init xen_setup_pirqs(void)
 {
-	int irq;
+	int pirq, irq;
 
 	pci_xen_initial_domain();
 
 	if (0 == nr_ioapics) {
-		for (irq = 0; irq < NR_IRQS_LEGACY; irq++)
-			xen_allocate_pirq(irq, 0, "xt-pic");
+		for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
+			pirq = xen_allocate_pirq_gsi(irq);
+			if (WARN(pirq < 0,
+				 "Could not allocate PIRQ for legacy interrupt\n"))
+				break;
+			irq = xen_bind_pirq_gsi_to_irq(irq, pirq, 0, "xt-pic");
+		}
 		return;
 	}
 
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 65f5068afd84..02b5a9c05cfa 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -56,6 +56,8 @@
  */
 static DEFINE_SPINLOCK(irq_mapping_update_lock);
 
+static LIST_HEAD(xen_irq_list_head);
+
 /* IRQ <-> VIRQ mapping. */
 static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
 
@@ -85,7 +87,9 @@ enum xen_irq_type {
  */
 struct irq_info
 {
+	struct list_head list;
 	enum xen_irq_type type;	/* type */
+	unsigned irq;
 	unsigned short evtchn;	/* event channel */
 	unsigned short cpu;	/* cpu bound */
 
@@ -103,23 +107,10 @@ struct irq_info
 #define PIRQ_NEEDS_EOI	(1 << 0)
 #define PIRQ_SHAREABLE	(1 << 1)
 
-static struct irq_info *irq_info;
-static int *pirq_to_irq;
-
 static int *evtchn_to_irq;
-struct cpu_evtchn_s {
-	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
-};
 
-static __initdata struct cpu_evtchn_s init_evtchn_mask = {
-	.bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
-};
-static struct cpu_evtchn_s __refdata *cpu_evtchn_mask_p = &init_evtchn_mask;
-
-static inline unsigned long *cpu_evtchn_mask(int cpu)
-{
-	return cpu_evtchn_mask_p[cpu].bits;
-}
+static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
+		      cpu_evtchn_mask);
 
 /* Xen will never allocate port zero for any purpose. */
 #define VALID_EVTCHN(chn)	((chn) != 0)
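Converting the per-CPU event masks from one kcalloc'd array of struct cpu_evtchn_s to DEFINE_PER_CPU removes both the boot-time allocation and the cpu_evtchn_mask() helper; users now reach the bitmap through the generic per-CPU accessors. Roughly, the access pattern changes as follows (a sketch contrasting the two idioms, not a hunk from this patch):

	/* before: indirect through the flat array */
	unsigned long *old_mask = cpu_evtchn_mask(cpu);		/* == cpu_evtchn_mask_p[cpu].bits */

	/* after: the per-CPU machinery does the addressing */
	unsigned long *new_mask = per_cpu(cpu_evtchn_mask, cpu);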
@@ -128,46 +119,86 @@ static struct irq_chip xen_dynamic_chip;
 static struct irq_chip xen_percpu_chip;
 static struct irq_chip xen_pirq_chip;
 
-/* Constructor for packed IRQ information. */
-static struct irq_info mk_unbound_info(void)
+/* Get info for IRQ */
+static struct irq_info *info_for_irq(unsigned irq)
+{
+	return get_irq_data(irq);
+}
+
+/* Constructors for packed IRQ information. */
+static void xen_irq_info_common_init(struct irq_info *info,
+				     unsigned irq,
+				     enum xen_irq_type type,
+				     unsigned short evtchn,
+				     unsigned short cpu)
 {
-	return (struct irq_info) { .type = IRQT_UNBOUND };
+
+	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
+
+	info->type = type;
+	info->irq = irq;
+	info->evtchn = evtchn;
+	info->cpu = cpu;
+
+	evtchn_to_irq[evtchn] = irq;
 }
 
-static struct irq_info mk_evtchn_info(unsigned short evtchn)
+static void xen_irq_info_evtchn_init(unsigned irq,
+				     unsigned short evtchn)
 {
-	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
-			.cpu = 0 };
+	struct irq_info *info = info_for_irq(irq);
+
+	xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
 }
 
-static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
+static void xen_irq_info_ipi_init(unsigned cpu,
+				  unsigned irq,
+				  unsigned short evtchn,
+				  enum ipi_vector ipi)
 {
-	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
-			.cpu = 0, .u.ipi = ipi };
+	struct irq_info *info = info_for_irq(irq);
+
+	xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);
+
+	info->u.ipi = ipi;
+
+	per_cpu(ipi_to_irq, cpu)[ipi] = irq;
 }
 
-static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
+static void xen_irq_info_virq_init(unsigned cpu,
+				   unsigned irq,
+				   unsigned short evtchn,
+				   unsigned short virq)
 {
-	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
-			.cpu = 0, .u.virq = virq };
+	struct irq_info *info = info_for_irq(irq);
+
+	xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);
+
+	info->u.virq = virq;
+
+	per_cpu(virq_to_irq, cpu)[virq] = irq;
 }
 
-static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
-				    unsigned short gsi, unsigned short vector)
+static void xen_irq_info_pirq_init(unsigned irq,
+				   unsigned short evtchn,
+				   unsigned short pirq,
+				   unsigned short gsi,
+				   unsigned short vector,
+				   unsigned char flags)
 {
-	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
-			.cpu = 0,
-			.u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
+	struct irq_info *info = info_for_irq(irq);
+
+	xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);
+
+	info->u.pirq.pirq = pirq;
+	info->u.pirq.gsi = gsi;
+	info->u.pirq.vector = vector;
+	info->u.pirq.flags = flags;
 }
 
 /*
  * Accessors for packed IRQ information.
  */
-static struct irq_info *info_for_irq(unsigned irq)
-{
-	return &irq_info[irq];
-}
-
 static unsigned int evtchn_from_irq(unsigned irq)
 {
 	if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
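Where the old mk_*_info() constructors built a struct irq_info by value for the caller to copy into the global irq_info[] array, the new xen_irq_info_*_init() helpers fill in the per-IRQ structure in place and also take over maintaining the reverse maps (evtchn_to_irq and the per-CPU virq_to_irq/ipi_to_irq tables). The effect at a call site, condensed from the bind_virq_to_irq() hunk later in this patch:

	/* before: three separate updates at every binding site */
	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_virq_info(evtchn, virq);
	per_cpu(virq_to_irq, cpu)[virq] = irq;

	/* after: one call keeps all the bookkeeping together */
	xen_irq_info_virq_init(cpu, irq, evtchn, virq);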
@@ -212,26 +243,6 @@ static unsigned pirq_from_irq(unsigned irq)
 	return info->u.pirq.pirq;
 }
 
-static unsigned gsi_from_irq(unsigned irq)
-{
-	struct irq_info *info = info_for_irq(irq);
-
-	BUG_ON(info == NULL);
-	BUG_ON(info->type != IRQT_PIRQ);
-
-	return info->u.pirq.gsi;
-}
-
-static unsigned vector_from_irq(unsigned irq)
-{
-	struct irq_info *info = info_for_irq(irq);
-
-	BUG_ON(info == NULL);
-	BUG_ON(info->type != IRQT_PIRQ);
-
-	return info->u.pirq.vector;
-}
-
 static enum xen_irq_type type_from_irq(unsigned irq)
 {
 	return info_for_irq(irq)->type;
@@ -267,7 +278,7 @@ static inline unsigned long active_evtchns(unsigned int cpu,
 					   unsigned int idx)
 {
 	return (sh->evtchn_pending[idx] &
-		cpu_evtchn_mask(cpu)[idx] &
+		per_cpu(cpu_evtchn_mask, cpu)[idx] &
 		~sh->evtchn_mask[idx]);
 }
 
@@ -280,28 +291,28 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
 #endif
 
-	clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
-	set_bit(chn, cpu_evtchn_mask(cpu));
+	clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
+	set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));
 
-	irq_info[irq].cpu = cpu;
+	info_for_irq(irq)->cpu = cpu;
 }
 
 static void init_evtchn_cpu_bindings(void)
 {
 	int i;
 #ifdef CONFIG_SMP
-	struct irq_desc *desc;
+	struct irq_info *info;
 
 	/* By default all event channels notify CPU#0. */
-	for_each_irq_desc(i, desc) {
+	list_for_each_entry(info, &xen_irq_list_head, list) {
+		struct irq_desc *desc = irq_to_desc(info->irq);
 		cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
 	}
 #endif
 
 	for_each_possible_cpu(i)
-		memset(cpu_evtchn_mask(i),
-		       (i == 0) ? ~0 : 0, sizeof(struct cpu_evtchn_s));
-
+		memset(per_cpu(cpu_evtchn_mask, i),
+		       (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
 }
 
 static inline void clear_evtchn(int port)
@@ -376,7 +387,28 @@ static void unmask_evtchn(int port)
 	put_cpu();
 }
 
-static int xen_allocate_irq_dynamic(void)
+static void xen_irq_init(unsigned irq)
+{
+	struct irq_info *info;
+	struct irq_desc *desc = irq_to_desc(irq);
+
+#ifdef CONFIG_SMP
+	/* By default all event channels notify CPU#0. */
+	cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
+#endif
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (info == NULL)
+		panic("Unable to allocate metadata for IRQ%d\n", irq);
+
+	info->type = IRQT_UNBOUND;
+
+	set_irq_data(irq, info);
+
+	list_add_tail(&info->list, &xen_irq_list_head);
+}
+
+static int __must_check xen_allocate_irq_dynamic(void)
 {
 	int first = 0;
 	int irq;
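Every Xen interrupt now carries its own dynamically allocated irq_info, reachable via the irq_data of its descriptor and threaded onto xen_irq_list_head. Lookups that previously indexed an nr_irqs-sized array become list walks over only the IRQs Xen actually owns; the idiom used throughout the rest of this patch is:

	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)	/* filter by binding type */
			continue;
		/* info->irq, info->evtchn, info->u.pirq.gsi, ... in one place */
	}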
@@ -393,22 +425,14 @@ static int xen_allocate_irq_dynamic(void)
 	first = get_nr_irqs_gsi();
 #endif
 
-retry:
 	irq = irq_alloc_desc_from(first, -1);
 
-	if (irq == -ENOMEM && first > NR_IRQS_LEGACY) {
-		printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n");
-		first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY);
-		goto retry;
-	}
-
-	if (irq < 0)
-		panic("No available IRQ to bind to: increase nr_irqs!\n");
+	xen_irq_init(irq);
 
 	return irq;
 }
 
-static int xen_allocate_irq_gsi(unsigned gsi)
+static int __must_check xen_allocate_irq_gsi(unsigned gsi)
 {
 	int irq;
 
@@ -423,17 +447,25 @@ static int xen_allocate_irq_gsi(unsigned gsi)
 
 	/* Legacy IRQ descriptors are already allocated by the arch. */
 	if (gsi < NR_IRQS_LEGACY)
-		return gsi;
+		irq = gsi;
+	else
+		irq = irq_alloc_desc_at(gsi, -1);
 
-	irq = irq_alloc_desc_at(gsi, -1);
-	if (irq < 0)
-		panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq);
+	xen_irq_init(irq);
 
 	return irq;
 }
 
 static void xen_free_irq(unsigned irq)
 {
+	struct irq_info *info = get_irq_data(irq);
+
+	list_del(&info->list);
+
+	set_irq_data(irq, NULL);
+
+	kfree(info);
+
 	/* Legacy IRQ descriptors are managed by the arch. */
 	if (irq < NR_IRQS_LEGACY)
 		return;
@@ -563,51 +595,39 @@ static void ack_pirq(struct irq_data *data)
 
 static int find_irq_by_gsi(unsigned gsi)
 {
-	int irq;
+	struct irq_info *info;
 
-	for (irq = 0; irq < nr_irqs; irq++) {
-		struct irq_info *info = info_for_irq(irq);
-
-		if (info == NULL || info->type != IRQT_PIRQ)
+	list_for_each_entry(info, &xen_irq_list_head, list) {
+		if (info->type != IRQT_PIRQ)
 			continue;
 
-		if (gsi_from_irq(irq) == gsi)
-			return irq;
+		if (info->u.pirq.gsi == gsi)
+			return info->irq;
 	}
 
 	return -1;
 }
 
-int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
+int xen_allocate_pirq_gsi(unsigned gsi)
 {
-	return xen_map_pirq_gsi(gsi, gsi, shareable, name);
+	return gsi;
 }
 
-/* xen_map_pirq_gsi might allocate irqs from the top down, as a
- * consequence don't assume that the irq number returned has a low value
- * or can be used as a pirq number unless you know otherwise.
- *
- * One notable exception is when xen_map_pirq_gsi is called passing an
- * hardware gsi as argument, in that case the irq number returned
- * matches the gsi number passed as second argument.
+/*
+ * Do not make any assumptions regarding the relationship between the
+ * IRQ number returned here and the Xen pirq argument.
  *
  * Note: We don't assign an event channel until the irq actually started
  * up. Return an existing irq if we've already got one for the gsi.
  */
-int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
+int xen_bind_pirq_gsi_to_irq(unsigned gsi,
+			     unsigned pirq, int shareable, char *name)
 {
-	int irq = 0;
+	int irq = -1;
 	struct physdev_irq irq_op;
 
 	spin_lock(&irq_mapping_update_lock);
 
-	if ((pirq > nr_irqs) || (gsi > nr_irqs)) {
-		printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
-			pirq > nr_irqs ? "pirq" :"",
-			gsi > nr_irqs ? "gsi" : "");
-		goto out;
-	}
-
 	irq = find_irq_by_gsi(gsi);
 	if (irq != -1) {
 		printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
@@ -616,6 +636,8 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
 	}
 
 	irq = xen_allocate_irq_gsi(gsi);
+	if (irq < 0)
+		goto out;
 
 	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
 				      handle_level_irq, name);
@@ -633,9 +655,8 @@
 		goto out;
 	}
 
-	irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector);
-	irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
-	pirq_to_irq[pirq] = irq;
+	xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector,
+			       shareable ? PIRQ_SHAREABLE : 0);
 
 out:
 	spin_unlock(&irq_mapping_update_lock);
@@ -672,8 +693,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
 				      handle_level_irq, name);
 
-	irq_info[irq] = mk_pirq_info(0, pirq, 0, vector);
-	pirq_to_irq[pirq] = irq;
+	xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0);
 	ret = irq_set_msi_desc(irq, msidesc);
 	if (ret < 0)
 		goto error_irq;
@@ -709,9 +729,6 @@ int xen_destroy_irq(int irq)
 			goto out;
 		}
 	}
-	pirq_to_irq[info->u.pirq.pirq] = -1;
-
-	irq_info[irq] = mk_unbound_info();
 
 	xen_free_irq(irq);
 
@@ -720,19 +737,26 @@ out:
 	return rc;
 }
 
-int xen_vector_from_irq(unsigned irq)
+int xen_irq_from_pirq(unsigned pirq)
 {
-	return vector_from_irq(irq);
-}
+	int irq;
 
-int xen_gsi_from_irq(unsigned irq)
-{
-	return gsi_from_irq(irq);
-}
+	struct irq_info *info;
 
-int xen_irq_from_pirq(unsigned pirq)
-{
-	return pirq_to_irq[pirq];
+	spin_lock(&irq_mapping_update_lock);
+
+	list_for_each_entry(info, &xen_irq_list_head, list) {
+		if (info == NULL || info->type != IRQT_PIRQ)
+			continue;
+		irq = info->irq;
+		if (info->u.pirq.pirq == pirq)
+			goto out;
+	}
+	irq = -1;
+out:
+	spin_unlock(&irq_mapping_update_lock);
+
+	return irq;
 }
 
 int bind_evtchn_to_irq(unsigned int evtchn)
@@ -745,14 +769,16 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 
 	if (irq == -1) {
 		irq = xen_allocate_irq_dynamic();
+		if (irq == -1)
+			goto out;
 
 		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
 					      handle_fasteoi_irq, "event");
 
-		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_evtchn_info(evtchn);
+		xen_irq_info_evtchn_init(irq, evtchn);
 	}
 
+out:
 	spin_unlock(&irq_mapping_update_lock);
 
 	return irq;
@@ -782,9 +808,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 			BUG();
 		evtchn = bind_ipi.port;
 
-		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_ipi_info(evtchn, ipi);
-		per_cpu(ipi_to_irq, cpu)[ipi] = irq;
+		xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
 
 		bind_evtchn_to_cpu(evtchn, cpu);
 	}
@@ -821,6 +845,8 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 
 	if (irq == -1) {
 		irq = xen_allocate_irq_dynamic();
+		if (irq == -1)
+			goto out;
 
 		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
 					      handle_percpu_irq, "virq");
@@ -832,14 +858,12 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 			BUG();
 		evtchn = bind_virq.port;
 
-		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_virq_info(evtchn, virq);
-
-		per_cpu(virq_to_irq, cpu)[virq] = irq;
+		xen_irq_info_virq_init(cpu, irq, evtchn, virq);
 
 		bind_evtchn_to_cpu(evtchn, cpu);
 	}
 
+out:
 	spin_unlock(&irq_mapping_update_lock);
 
 	return irq;
@@ -876,11 +900,9 @@ static void unbind_from_irq(unsigned int irq)
 		evtchn_to_irq[evtchn] = -1;
 	}
 
-	if (irq_info[irq].type != IRQT_UNBOUND) {
-		irq_info[irq] = mk_unbound_info();
+	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
 
-		xen_free_irq(irq);
-	}
+	xen_free_irq(irq);
 
 	spin_unlock(&irq_mapping_update_lock);
 }
@@ -894,6 +916,8 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
 	int retval;
 
 	irq = bind_evtchn_to_irq(evtchn);
+	if (irq < 0)
+		return irq;
 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
 	if (retval != 0) {
 		unbind_from_irq(irq);
@@ -935,6 +959,8 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
 	int retval;
 
 	irq = bind_virq_to_irq(virq, cpu);
+	if (irq < 0)
+		return irq;
 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
 	if (retval != 0) {
 		unbind_from_irq(irq);
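Because xen_allocate_irq_dynamic() can now fail instead of panicking when IRQ descriptors run out, the failure propagates out of the bind_*_to_irqhandler() helpers as a negative return value. Callers therefore need a check along these lines (a hedged sketch; the handler, flags and device name are placeholders, not taken from this patch):

	int irq;

	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, my_virq_handler,
				      IRQF_PERCPU, "mydev-virq", NULL);
	if (irq < 0)
		return irq;	/* nothing was bound; no unbind needed */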
@@ -986,7 +1012,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 {
 	struct shared_info *sh = HYPERVISOR_shared_info;
 	int cpu = smp_processor_id();
-	unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
+	unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
 	int i;
 	unsigned long flags;
 	static DEFINE_SPINLOCK(debug_lock);
@@ -1064,6 +1090,13 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 }
 
 static DEFINE_PER_CPU(unsigned, xed_nesting_count);
+static DEFINE_PER_CPU(unsigned int, current_word_idx);
+static DEFINE_PER_CPU(unsigned int, current_bit_idx);
+
+/*
+ * Mask out the i least significant bits of w
+ */
+#define MASK_LSBS(w, i) (w & ((~0UL) << i))
 
 /*
  * Search the CPUs pending events bitmasks. For each one found, map
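MASK_LSBS(w, i) clears the i least significant bits of w, leaving the rest intact; combined with __ffs() it answers "which is the first pending bit at or above position i?", which is what lets the scan below resume mid-word. A small worked example:

	/* w = 0xb5 = 1011 0101b, i = 3 */
	MASK_LSBS(0xb5UL, 3)		/* == 0xb0 = 1011 0000b (bits 0-2 cleared) */
	__ffs(MASK_LSBS(0xb5UL, 3))	/* == 4, the first set bit at or above bit 3 */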
@@ -1076,6 +1109,9 @@ static DEFINE_PER_CPU(unsigned, xed_nesting_count);
  */
 static void __xen_evtchn_do_upcall(void)
 {
+	int start_word_idx, start_bit_idx;
+	int word_idx, bit_idx;
+	int i;
 	int cpu = get_cpu();
 	struct shared_info *s = HYPERVISOR_shared_info;
 	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
@@ -1094,17 +1130,57 @@ static void __xen_evtchn_do_upcall(void)
 		wmb();
 #endif
 		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
-		while (pending_words != 0) {
+
+		start_word_idx = __this_cpu_read(current_word_idx);
+		start_bit_idx = __this_cpu_read(current_bit_idx);
+
+		word_idx = start_word_idx;
+
+		for (i = 0; pending_words != 0; i++) {
 			unsigned long pending_bits;
-			int word_idx = __ffs(pending_words);
-			pending_words &= ~(1UL << word_idx);
+			unsigned long words;
 
-			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
-				int bit_idx = __ffs(pending_bits);
-				int port = (word_idx * BITS_PER_LONG) + bit_idx;
-				int irq = evtchn_to_irq[port];
+			words = MASK_LSBS(pending_words, word_idx);
+
+			/*
+			 * If we masked out all events, wrap to beginning.
+			 */
+			if (words == 0) {
+				word_idx = 0;
+				bit_idx = 0;
+				continue;
+			}
+			word_idx = __ffs(words);
+
+			pending_bits = active_evtchns(cpu, s, word_idx);
+			bit_idx = 0; /* usually scan entire word from start */
+			if (word_idx == start_word_idx) {
+				/* We scan the starting word in two parts */
+				if (i == 0)
+					/* 1st time: start in the middle */
+					bit_idx = start_bit_idx;
+				else
+					/* 2nd time: mask bits done already */
+					bit_idx &= (1UL << start_bit_idx) - 1;
+			}
+
+			do {
+				unsigned long bits;
+				int port, irq;
 				struct irq_desc *desc;
 
+				bits = MASK_LSBS(pending_bits, bit_idx);
+
+				/* If we masked out all events, move on. */
+				if (bits == 0)
+					break;
+
+				bit_idx = __ffs(bits);
+
+				/* Process port. */
+				port = (word_idx * BITS_PER_LONG) + bit_idx;
+				irq = evtchn_to_irq[port];
+
 				mask_evtchn(port);
 				clear_evtchn(port);
 
@@ -1113,7 +1189,21 @@ static void __xen_evtchn_do_upcall(void)
 				if (desc)
 					generic_handle_irq_desc(irq, desc);
 			}
-		}
+
+				bit_idx = (bit_idx + 1) % BITS_PER_LONG;
+
+				/* Next caller starts at last processed + 1 */
+				__this_cpu_write(current_word_idx,
+						 bit_idx ? word_idx :
+						 (word_idx+1) % BITS_PER_LONG);
+				__this_cpu_write(current_bit_idx, bit_idx);
+			} while (bit_idx != 0);
+
+			/* Scan start_l1i twice; all others once. */
+			if ((word_idx != start_word_idx) || (i != 0))
+				pending_words &= ~(1UL << word_idx);
+
+			word_idx = (word_idx + 1) % BITS_PER_LONG;
 		}
 
 		BUG_ON(!irqs_disabled());
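The rewritten loop saves its position in current_word_idx/current_bit_idx and resumes one bit past the last port it handled, so a constantly firing low-numbered event channel can no longer starve higher-numbered ones; the starting word is deliberately scanned in two parts (the tail first, then the wrapped-around head). The following stand-alone, user-space sketch shows the same round-robin discipline on a single word — an illustration of the technique, not kernel code, with __builtin_ctzl() standing in for __ffs():

	#include <stdio.h>

	#define BITS 64
	#define MASK_LSBS(w, i) ((w) & ((~0UL) << (i)))

	static unsigned int cursor;	/* persists across calls, like current_bit_idx */

	static void scan_round_robin(unsigned long pending)
	{
		unsigned int start = cursor;
		unsigned long w;
		int pass;

		for (pass = 0; pass < 2; pass++) {
			/* pass 0: bits at/above the cursor; pass 1: the wrapped head */
			w = pass == 0 ? MASK_LSBS(pending, start)
				      : pending & ~MASK_LSBS(pending, start);
			while (w != 0) {
				unsigned int bit = __builtin_ctzl(w);
				printf("handling port %u\n", bit);
				cursor = (bit + 1) % BITS;	/* resume point for next call */
				w &= w - 1;			/* clear the bit just handled */
			}
		}
	}

	int main(void)
	{
		scan_round_robin(0x89UL);	/* ports 0, 3, 7 pending: handles 0, 3, 7 */
		scan_round_robin(0x89UL);	/* cursor is now 8: wraps and handles 0, 3, 7 */
		return 0;
	}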
@@ -1163,8 +1253,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
 	   so there should be a proper type */
 	BUG_ON(info->type == IRQT_UNBOUND);
 
-	evtchn_to_irq[evtchn] = irq;
-	irq_info[irq] = mk_evtchn_info(evtchn);
+	xen_irq_info_evtchn_init(irq, evtchn);
 
 	spin_unlock(&irq_mapping_update_lock);
 
@@ -1181,10 +1270,14 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 	struct evtchn_bind_vcpu bind_vcpu;
 	int evtchn = evtchn_from_irq(irq);
 
-	/* events delivered via platform PCI interrupts are always
-	 * routed to vcpu 0 */
-	if (!VALID_EVTCHN(evtchn) ||
-	    (xen_hvm_domain() && !xen_have_vector_callback))
+	if (!VALID_EVTCHN(evtchn))
+		return -1;
+
+	/*
+	 * Events delivered via platform PCI interrupts are always
+	 * routed to vcpu 0 and hence cannot be rebound.
+	 */
+	if (xen_hvm_domain() && !xen_have_vector_callback)
 		return -1;
 
 	/* Send future instances of this interrupt to other vcpu. */
@@ -1271,19 +1364,22 @@ static int retrigger_dynirq(struct irq_data *data)
 	return ret;
 }
 
-static void restore_cpu_pirqs(void)
+static void restore_pirqs(void)
 {
 	int pirq, rc, irq, gsi;
 	struct physdev_map_pirq map_irq;
+	struct irq_info *info;
 
-	for (pirq = 0; pirq < nr_irqs; pirq++) {
-		irq = pirq_to_irq[pirq];
-		if (irq == -1)
+	list_for_each_entry(info, &xen_irq_list_head, list) {
+		if (info->type != IRQT_PIRQ)
 			continue;
 
+		pirq = info->u.pirq.pirq;
+		gsi = info->u.pirq.gsi;
+		irq = info->irq;
+
 		/* save/restore of PT devices doesn't work, so at this point the
 		 * only devices present are GSI based emulated devices */
-		gsi = gsi_from_irq(irq);
 		if (!gsi)
 			continue;
 
@@ -1296,8 +1392,7 @@ static void restore_cpu_pirqs(void)
 		if (rc) {
 			printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
 					gsi, irq, pirq, rc);
-			irq_info[irq] = mk_unbound_info();
-			pirq_to_irq[pirq] = -1;
+			xen_free_irq(irq);
 			continue;
 		}
 
@@ -1327,8 +1422,7 @@ static void restore_cpu_virqs(unsigned int cpu)
 		evtchn = bind_virq.port;
 
 		/* Record the new mapping. */
-		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_virq_info(evtchn, virq);
+		xen_irq_info_virq_init(cpu, irq, evtchn, virq);
 		bind_evtchn_to_cpu(evtchn, cpu);
 	}
 }
@@ -1352,8 +1446,7 @@ static void restore_cpu_ipis(unsigned int cpu)
 		evtchn = bind_ipi.port;
 
 		/* Record the new mapping. */
-		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_ipi_info(evtchn, ipi);
+		xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
 		bind_evtchn_to_cpu(evtchn, cpu);
 	}
 }
@@ -1413,7 +1506,8 @@ void xen_poll_irq(int irq)
 
 void xen_irq_resume(void)
 {
-	unsigned int cpu, irq, evtchn;
+	unsigned int cpu, evtchn;
+	struct irq_info *info;
 
 	init_evtchn_cpu_bindings();
 
@@ -1422,8 +1516,8 @@ void xen_irq_resume(void)
 		mask_evtchn(evtchn);
 
 	/* No IRQ <-> event-channel mappings. */
-	for (irq = 0; irq < nr_irqs; irq++)
-		irq_info[irq].evtchn = 0; /* zap event-channel binding */
+	list_for_each_entry(info, &xen_irq_list_head, list)
+		info->evtchn = 0; /* zap event-channel binding */
 
 	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
 		evtchn_to_irq[evtchn] = -1;
@@ -1433,7 +1527,7 @@
 		restore_cpu_ipis(cpu);
 	}
 
-	restore_cpu_pirqs();
+	restore_pirqs();
 }
 
 static struct irq_chip xen_dynamic_chip __read_mostly = {
@@ -1519,17 +1613,6 @@ void __init xen_init_IRQ(void)
 {
 	int i;
 
-	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
-				    GFP_KERNEL);
-	irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);
-
-	/* We are using nr_irqs as the maximum number of pirq available but
-	 * that number is actually chosen by Xen and we don't know exactly
-	 * what it is. Be careful choosing high pirq numbers. */
-	pirq_to_irq = kcalloc(nr_irqs, sizeof(*pirq_to_irq), GFP_KERNEL);
-	for (i = 0; i < nr_irqs; i++)
-		pirq_to_irq[i] = -1;
-
 	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
 				GFP_KERNEL);
 	for (i = 0; i < NR_EVENT_CHANNELS; i++)
diff --git a/include/xen/events.h b/include/xen/events.h
index d3b9010ee96a..f1b87ad48ac7 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -47,9 +47,9 @@ static inline void notify_remote_via_evtchn(int port)
 	(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
 }
 
-extern void notify_remote_via_irq(int irq);
+void notify_remote_via_irq(int irq);
 
-extern void xen_irq_resume(void);
+void xen_irq_resume(void);
 
 /* Clear an irq's pending state, in preparation for polling on it */
 void xen_clear_irq_pending(int irq);
@@ -68,20 +68,22 @@ void xen_poll_irq_timeout(int irq, u64 timeout);
 unsigned irq_from_evtchn(unsigned int evtchn);
 
 /* Xen HVM evtchn vector callback */
-extern void xen_hvm_callback_vector(void);
+void xen_hvm_callback_vector(void);
 extern int xen_have_vector_callback;
 int xen_set_callback_via(uint64_t via);
 void xen_evtchn_do_upcall(struct pt_regs *regs);
 void xen_hvm_evtchn_do_upcall(void);
 
-/* Allocate an irq for a physical interrupt, given a gsi. "Legacy"
- * GSIs are identity mapped; others are dynamically allocated as
- * usual. */
-int xen_allocate_pirq(unsigned gsi, int shareable, char *name);
-int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name);
+/* Allocate a pirq for a physical interrupt, given a gsi. */
+int xen_allocate_pirq_gsi(unsigned gsi);
+/* Bind a pirq for a physical interrupt to an irq. */
+int xen_bind_pirq_gsi_to_irq(unsigned gsi,
+			     unsigned pirq, int shareable, char *name);
 
 #ifdef CONFIG_PCI_MSI
+/* Allocate a pirq for a MSI style physical interrupt. */
 int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc);
+/* Bind an MSI pirq to an irq. */
 int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 			     int pirq, int vector, const char *name);
 #endif
@@ -89,12 +91,6 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 /* De-allocates the above mentioned physical interrupt. */
 int xen_destroy_irq(int irq);
 
-/* Return vector allocated to pirq */
-int xen_vector_from_irq(unsigned pirq);
-
-/* Return gsi allocated to pirq */
-int xen_gsi_from_irq(unsigned pirq);
-
 /* Return irq from pirq */
 int xen_irq_from_pirq(unsigned pirq);
 