Diffstat (limited to 'drivers/xen')

 drivers/xen/cpu_hotplug.c |   4
 drivers/xen/events.c      | 251
 drivers/xen/manage.c      |   2
 3 files changed, 177 insertions(+), 80 deletions(-)
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index 974f56d1ebe1..5f54c01c1568 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -10,7 +10,7 @@ static void enable_hotplug_cpu(int cpu)
 	if (!cpu_present(cpu))
 		arch_register_cpu(cpu);
 
-	cpu_set(cpu, cpu_present_map);
+	set_cpu_present(cpu, true);
 }
 
 static void disable_hotplug_cpu(int cpu)
@@ -18,7 +18,7 @@ static void disable_hotplug_cpu(int cpu)
 	if (cpu_present(cpu))
 		arch_unregister_cpu(cpu);
 
-	cpu_clear(cpu, cpu_present_map);
+	set_cpu_present(cpu, false);
 }
 
 static void vcpu_hotplug(unsigned int cpu)
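Note: the cpu_hotplug.c hunks swap the old cpu_set()/cpu_clear() macros, which poked the global cpu_present_map directly, for the set_cpu_present() accessor, which funnels all writers through one helper. A minimal standalone sketch of that accessor pattern (names and bit layout here are illustrative, not the kernel's actual definition):

/* Illustrative sketch of a set_cpu_present()-style accessor; the real
 * kernel operates on cpu_present_mask through cpumask helpers. */
#include <stdbool.h>
#include <limits.h>

#define NR_CPUS 64
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static unsigned long cpu_present_bits[(NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG];

static void set_cpu_present(unsigned int cpu, bool present)
{
        unsigned long bit = 1UL << (cpu % BITS_PER_LONG);
        unsigned long *word = &cpu_present_bits[cpu / BITS_PER_LONG];

        if (present)
                *word |= bit;   /* replaces cpu_set(cpu, cpu_present_map) */
        else
                *word &= ~bit;  /* replaces cpu_clear(cpu, cpu_present_map) */
}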
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index eb0dfdeaa949..30963af5dba0 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -26,9 +26,11 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/string.h>
+#include <linux/bootmem.h>
 
 #include <asm/ptrace.h>
 #include <asm/irq.h>
+#include <asm/idle.h>
 #include <asm/sync_bitops.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -50,36 +52,55 @@ static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
 /* IRQ <-> IPI mapping */
 static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};
 
-/* Packed IRQ information: binding type, sub-type index, and event channel. */
-struct packed_irq
-{
-	unsigned short evtchn;
-	unsigned char index;
-	unsigned char type;
-};
-
-static struct packed_irq irq_info[NR_IRQS];
-
-/* Binding types. */
-enum {
-	IRQT_UNBOUND,
+/* Interrupt types. */
+enum xen_irq_type {
+	IRQT_UNBOUND = 0,
 	IRQT_PIRQ,
 	IRQT_VIRQ,
 	IRQT_IPI,
 	IRQT_EVTCHN
 };
 
-/* Convenient shorthand for packed representation of an unbound IRQ. */
-#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)
+/*
+ * Packed IRQ information:
+ * type - enum xen_irq_type
+ * event channel - irq->event channel mapping
+ * cpu - cpu this event channel is bound to
+ * index - type-specific information:
+ *    PIRQ - vector, with MSB being "needs EIO"
+ *    VIRQ - virq number
+ *    IPI - IPI vector
+ *    EVTCHN -
+ */
+struct irq_info
+{
+	enum xen_irq_type type;	/* type */
+	unsigned short evtchn;	/* event channel */
+	unsigned short cpu;	/* cpu bound */
+
+	union {
+		unsigned short virq;
+		enum ipi_vector ipi;
+		struct {
+			unsigned short gsi;
+			unsigned short vector;
+		} pirq;
+	} u;
+};
+
+static struct irq_info irq_info[NR_IRQS];
 
 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
 	[0 ... NR_EVENT_CHANNELS-1] = -1
 };
-static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
-static u8 cpu_evtchn[NR_EVENT_CHANNELS];
-
-/* Reference counts for bindings to IRQs. */
-static int irq_bindcount[NR_IRQS];
+struct cpu_evtchn_s {
+	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
+};
+static struct cpu_evtchn_s *cpu_evtchn_mask_p;
+static inline unsigned long *cpu_evtchn_mask(int cpu)
+{
+	return cpu_evtchn_mask_p[cpu].bits;
+}
 
 /* Xen will never allocate port zero for any purpose. */
 #define VALID_EVTCHN(chn)	((chn) != 0)
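Note: the heart of the events.c rework is the new struct irq_info. The old packed_irq squeezed everything into a type byte and an opaque index byte, while irq_info is a tagged union whose valid member depends on the type field. A self-contained sketch of the pattern, with enum ipi_vector and the field widths as simplified stand-ins:

/* Standalone sketch of the tagged-union pattern behind struct irq_info;
 * enum ipi_vector and the types are trimmed stand-ins, not kernel code. */
#include <assert.h>
#include <stdio.h>

enum xen_irq_type { IRQT_UNBOUND = 0, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };
enum ipi_vector { IPI_VECTOR_0 = 0 };

struct irq_info {
        enum xen_irq_type type;         /* tag: which union member is live */
        unsigned short evtchn;
        unsigned short cpu;
        union {
                unsigned short virq;
                enum ipi_vector ipi;
                struct { unsigned short gsi, vector; } pirq;
        } u;
};

/* Constructor: one designated-initializer expression per binding type. */
static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
        return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
                        .cpu = 0, .u.virq = virq };
}

/* Accessor: validate the tag before reading the union member. */
static unsigned virq_from_info(const struct irq_info *info)
{
        assert(info->type == IRQT_VIRQ);        /* BUG_ON() in the kernel */
        return info->u.virq;
}

int main(void)
{
        struct irq_info info = mk_virq_info(5, 2);
        printf("virq %u bound to event channel %u\n",
               virq_from_info(&info), info.evtchn);
        return 0;
}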
@@ -87,27 +108,108 @@ static int irq_bindcount[NR_IRQS];
 static struct irq_chip xen_dynamic_chip;
 
 /* Constructor for packed IRQ information. */
-static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
+static struct irq_info mk_unbound_info(void)
+{
+	return (struct irq_info) { .type = IRQT_UNBOUND };
+}
+
+static struct irq_info mk_evtchn_info(unsigned short evtchn)
+{
+	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
+			.cpu = 0 };
+}
+
+static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
 {
-	return (struct packed_irq) { evtchn, index, type };
+	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
+			.cpu = 0, .u.ipi = ipi };
+}
+
+static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
+{
+	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
+			.cpu = 0, .u.virq = virq };
+}
+
+static struct irq_info mk_pirq_info(unsigned short evtchn,
+				    unsigned short gsi, unsigned short vector)
+{
+	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
+			.cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
 }
 
 /*
  * Accessors for packed IRQ information.
  */
-static inline unsigned int evtchn_from_irq(int irq)
+static struct irq_info *info_for_irq(unsigned irq)
+{
+	return &irq_info[irq];
+}
+
+static unsigned int evtchn_from_irq(unsigned irq)
 {
-	return irq_info[irq].evtchn;
+	return info_for_irq(irq)->evtchn;
 }
 
-static inline unsigned int index_from_irq(int irq)
+static enum ipi_vector ipi_from_irq(unsigned irq)
 {
-	return irq_info[irq].index;
+	struct irq_info *info = info_for_irq(irq);
+
+	BUG_ON(info == NULL);
+	BUG_ON(info->type != IRQT_IPI);
+
+	return info->u.ipi;
 }
 
-static inline unsigned int type_from_irq(int irq)
+static unsigned virq_from_irq(unsigned irq)
 {
-	return irq_info[irq].type;
+	struct irq_info *info = info_for_irq(irq);
+
+	BUG_ON(info == NULL);
+	BUG_ON(info->type != IRQT_VIRQ);
+
+	return info->u.virq;
+}
+
+static unsigned gsi_from_irq(unsigned irq)
+{
+	struct irq_info *info = info_for_irq(irq);
+
+	BUG_ON(info == NULL);
+	BUG_ON(info->type != IRQT_PIRQ);
+
+	return info->u.pirq.gsi;
+}
+
+static unsigned vector_from_irq(unsigned irq)
+{
+	struct irq_info *info = info_for_irq(irq);
+
+	BUG_ON(info == NULL);
+	BUG_ON(info->type != IRQT_PIRQ);
+
+	return info->u.pirq.vector;
+}
+
+static enum xen_irq_type type_from_irq(unsigned irq)
+{
+	return info_for_irq(irq)->type;
+}
+
+static unsigned cpu_from_irq(unsigned irq)
+{
+	return info_for_irq(irq)->cpu;
+}
+
+static unsigned int cpu_from_evtchn(unsigned int evtchn)
+{
+	int irq = evtchn_to_irq[evtchn];
+	unsigned ret = 0;
+
+	if (irq != -1)
+		ret = cpu_from_irq(irq);
+
+	return ret;
 }
 
 static inline unsigned long active_evtchns(unsigned int cpu,
@@ -115,7 +217,7 @@ static inline unsigned long active_evtchns(unsigned int cpu,
 				 unsigned int idx)
 {
 	return (sh->evtchn_pending[idx] &
-		cpu_evtchn_mask[cpu][idx] &
+		cpu_evtchn_mask(cpu)[idx] &
 		~sh->evtchn_mask[idx]);
 }
 
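Note: active_evtchns() intersects three bitmaps. An event is deliverable on a CPU only if it is pending in the shared info page, routed to that CPU via cpu_evtchn_mask(), and not globally masked. A toy illustration of the predicate, with made-up values:

/* Sketch of the event-selection predicate in active_evtchns();
 * the bitmap values below are invented for illustration. */
#include <stdio.h>

int main(void)
{
        unsigned long pending     = 0xf0f0;     /* sh->evtchn_pending[idx] */
        unsigned long cpu_mask    = 0xff00;     /* cpu_evtchn_mask(cpu)[idx] */
        unsigned long global_mask = 0x1000;     /* sh->evtchn_mask[idx] */

        unsigned long active = pending & cpu_mask & ~global_mask;

        printf("active events: %#lx\n", active);        /* prints 0xe000 */
        return 0;
}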
@@ -125,13 +227,13 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 
 	BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
-	irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
 #endif
 
-	__clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
-	__set_bit(chn, cpu_evtchn_mask[cpu]);
+	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
+	__set_bit(chn, cpu_evtchn_mask(cpu));
 
-	cpu_evtchn[chn] = cpu;
+	irq_info[irq].cpu = cpu;
 }
 
 static void init_evtchn_cpu_bindings(void)
@@ -142,17 +244,11 @@ static void init_evtchn_cpu_bindings(void)
 
 	/* By default all event channels notify CPU#0. */
 	for_each_irq_desc(i, desc) {
-		desc->affinity = cpumask_of_cpu(0);
+		cpumask_copy(desc->affinity, cpumask_of(0));
 	}
 #endif
 
-	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
-	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
-}
-
-static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
-{
-	return cpu_evtchn[evtchn];
+	memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
 }
 
 static inline void clear_evtchn(int port)
@@ -232,9 +328,8 @@ static int find_unbound_irq(void)
 	int irq;
 	struct irq_desc *desc;
 
-	/* Only allocate from dynirq range */
 	for (irq = 0; irq < nr_irqs; irq++)
-		if (irq_bindcount[irq] == 0)
+		if (irq_info[irq].type == IRQT_UNBOUND)
 			break;
 
 	if (irq == nr_irqs)
@@ -244,6 +339,8 @@
 	if (WARN_ON(desc == NULL))
 		return -1;
 
+	dynamic_irq_init(irq);
+
 	return irq;
 }
 
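Note: with irq_bindcount[] gone, find_unbound_irq() treats IRQT_UNBOUND itself as the "free slot" marker, and dynamic_irq_init() moves into the allocator so every caller gets an initialized descriptor. A trimmed sketch of that allocation rule (the struct is a stand-in, not the kernel's irq_info):

/* Sketch: an irq slot is free exactly when its type is unbound, so the
 * type field doubles as the allocation state. */
struct irq_slot { int type; };          /* 0 plays the role of IRQT_UNBOUND */

static int find_unbound_slot(const struct irq_slot *table, int nr)
{
        int irq;

        for (irq = 0; irq < nr; irq++)
                if (table[irq].type == 0)       /* was: irq_bindcount[irq] == 0 */
                        return irq;

        return -1;                              /* table exhausted */
}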
@@ -258,16 +355,13 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 	if (irq == -1) {
 		irq = find_unbound_irq();
 
-		dynamic_irq_init(irq);
 		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
 					      handle_level_irq, "event");
 
 		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
+		irq_info[irq] = mk_evtchn_info(evtchn);
 	}
 
-	irq_bindcount[irq]++;
-
 	spin_unlock(&irq_mapping_update_lock);
 
 	return irq;
@@ -282,12 +376,12 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 	spin_lock(&irq_mapping_update_lock);
 
 	irq = per_cpu(ipi_to_irq, cpu)[ipi];
+
 	if (irq == -1) {
 		irq = find_unbound_irq();
 		if (irq < 0)
 			goto out;
 
-		dynamic_irq_init(irq);
 		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
 					      handle_level_irq, "ipi");
 
@@ -298,15 +392,12 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 		evtchn = bind_ipi.port;
 
 		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
-
+		irq_info[irq] = mk_ipi_info(evtchn, ipi);
 		per_cpu(ipi_to_irq, cpu)[ipi] = irq;
 
 		bind_evtchn_to_cpu(evtchn, cpu);
 	}
 
-	irq_bindcount[irq]++;
-
  out:
 	spin_unlock(&irq_mapping_update_lock);
 	return irq;
@@ -332,20 +423,17 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 
 		irq = find_unbound_irq();
 
-		dynamic_irq_init(irq);
 		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
 					      handle_level_irq, "virq");
 
 		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+		irq_info[irq] = mk_virq_info(evtchn, virq);
 
 		per_cpu(virq_to_irq, cpu)[virq] = irq;
 
 		bind_evtchn_to_cpu(evtchn, cpu);
 	}
 
-	irq_bindcount[irq]++;
-
 	spin_unlock(&irq_mapping_update_lock);
 
 	return irq;
@@ -358,7 +446,7 @@ static void unbind_from_irq(unsigned int irq)
 
 	spin_lock(&irq_mapping_update_lock);
 
-	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
+	if (VALID_EVTCHN(evtchn)) {
 		close.port = evtchn;
 		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
 			BUG();
@@ -366,11 +454,11 @@
 		switch (type_from_irq(irq)) {
 		case IRQT_VIRQ:
 			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
-				[index_from_irq(irq)] = -1;
+				[virq_from_irq(irq)] = -1;
 			break;
 		case IRQT_IPI:
 			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
-				[index_from_irq(irq)] = -1;
+				[ipi_from_irq(irq)] = -1;
 			break;
 		default:
 			break;
@@ -380,7 +468,7 @@
 		bind_evtchn_to_cpu(evtchn, 0);
 
 		evtchn_to_irq[evtchn] = -1;
-		irq_info[irq] = IRQ_UNBOUND;
+		irq_info[irq] = mk_unbound_info();
 
 		dynamic_irq_cleanup(irq);
 	}
@@ -498,8 +586,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 	for(i = 0; i < NR_EVENT_CHANNELS; i++) {
 		if (sync_test_bit(i, sh->evtchn_pending)) {
 			printk(" %d: event %d -> irq %d\n",
-			       cpu_evtchn[i], i,
+			       cpu_from_evtchn(i), i,
 			       evtchn_to_irq[i]);
 		}
 	}
 
@@ -508,7 +596,6 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-
 /*
  * Search the CPUs pending events bitmasks. For each one found, map
  * the event number to an irq, and feed it into do_IRQ() for
@@ -521,11 +608,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 void xen_evtchn_do_upcall(struct pt_regs *regs)
 {
 	int cpu = get_cpu();
+	struct pt_regs *old_regs = set_irq_regs(regs);
 	struct shared_info *s = HYPERVISOR_shared_info;
 	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
 	static DEFINE_PER_CPU(unsigned, nesting_count);
 	unsigned count;
 
+	exit_idle();
+	irq_enter();
+
 	do {
 		unsigned long pending_words;
 
@@ -550,7 +641,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 				int irq = evtchn_to_irq[port];
 
 				if (irq != -1)
-					xen_do_IRQ(irq, regs);
+					handle_irq(irq, regs);
 			}
 		}
 
@@ -561,12 +652,17 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 	} while(count != 1);
 
 out:
+	irq_exit();
+	set_irq_regs(old_regs);
+
 	put_cpu();
 }
 
 /* Rebind a new event channel to an existing irq. */
 void rebind_evtchn_irq(int evtchn, int irq)
 {
+	struct irq_info *info = info_for_irq(irq);
+
 	/* Make sure the irq is masked, since the new event channel
 	   will also be masked. */
 	disable_irq(irq);
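Note: for orientation, xen_evtchn_do_upcall() walks a two-level bitmap: a per-vCPU selector word names the pending groups, and each group word in the shared info page names the pending ports. A standalone sketch of that scan, using __builtin_ctzl in place of the kernel's __ffs() and made-up data:

/* Sketch of the two-level pending-bitmap scan in the upcall handler;
 * the selector and group values below are invented for illustration. */
#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

int main(void)
{
        unsigned long pending_words = 0x5;              /* groups 0 and 2 pending */
        unsigned long pending_bits[3] = { 0x9, 0x0, 0x2 };

        while (pending_words != 0) {
                int word_idx = __builtin_ctzl(pending_words);   /* ~__ffs() */
                unsigned long bits = pending_bits[word_idx];

                pending_words &= ~(1UL << word_idx);

                while (bits != 0) {
                        int bit_idx = __builtin_ctzl(bits);
                        int port = word_idx * BITS_PER_LONG + bit_idx;

                        bits &= ~(1UL << bit_idx);
                        printf("deliver event port %d\n", port);
                }
        }
        return 0;
}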
@@ -576,11 +672,11 @@ void rebind_evtchn_irq(int evtchn, int irq)
 	/* After resume the irq<->evtchn mappings are all cleared out */
 	BUG_ON(evtchn_to_irq[evtchn] != -1);
 	/* Expect irq to have been bound before,
-	   so the bindcount should be non-0 */
-	BUG_ON(irq_bindcount[irq] == 0);
+	   so there should be a proper type */
+	BUG_ON(info->type == IRQT_UNBOUND);
 
 	evtchn_to_irq[evtchn] = irq;
-	irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
+	irq_info[irq] = mk_evtchn_info(evtchn);
 
 	spin_unlock(&irq_mapping_update_lock);
 
@@ -690,8 +786,7 @@ static void restore_cpu_virqs(unsigned int cpu)
 		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
 			continue;
 
-		BUG_ON(irq_info[irq].type != IRQT_VIRQ);
-		BUG_ON(irq_info[irq].index != virq);
+		BUG_ON(virq_from_irq(irq) != virq);
 
 		/* Get a new binding from Xen. */
 		bind_virq.virq = virq;
@@ -703,7 +798,7 @@ static void restore_cpu_virqs(unsigned int cpu)
 
 		/* Record the new mapping. */
 		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+		irq_info[irq] = mk_virq_info(evtchn, virq);
 		bind_evtchn_to_cpu(evtchn, cpu);
 
 		/* Ready for use. */
@@ -720,8 +815,7 @@ static void restore_cpu_ipis(unsigned int cpu)
 		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
 			continue;
 
-		BUG_ON(irq_info[irq].type != IRQT_IPI);
-		BUG_ON(irq_info[irq].index != ipi);
+		BUG_ON(ipi_from_irq(irq) != ipi);
 
 		/* Get a new binding from Xen. */
 		bind_ipi.vcpu = cpu;
@@ -732,7 +826,7 @@ static void restore_cpu_ipis(unsigned int cpu)
 
 		/* Record the new mapping. */
 		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
+		irq_info[irq] = mk_ipi_info(evtchn, ipi);
 		bind_evtchn_to_cpu(evtchn, cpu);
 
 		/* Ready for use. */
@@ -812,8 +906,11 @@ void xen_irq_resume(void)
 
 static struct irq_chip xen_dynamic_chip __read_mostly = {
 	.name		= "xen-dyn",
+
+	.disable	= disable_dynirq,
 	.mask		= disable_dynirq,
 	.unmask		= enable_dynirq,
+
 	.ack		= ack_dynirq,
 	.set_affinity	= set_affinity_irq,
 	.retrigger	= retrigger_dynirq,
@@ -822,6 +919,10 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
 void __init xen_init_IRQ(void)
 {
 	int i;
+	size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s);
+
+	cpu_evtchn_mask_p = alloc_bootmem(size);
+	BUG_ON(cpu_evtchn_mask_p == NULL);
 
 	init_evtchn_cpu_bindings();
 
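Note: the xen_init_IRQ() hunk replaces the static NR_CPUS-sized mask array with a boot-time allocation sized by nr_cpu_ids, the number of CPUs actually possible on this boot. A user-space sketch of the same sizing logic, with calloc() standing in for alloc_bootmem() and illustrative constants:

/* Sketch of sizing the per-CPU event masks at runtime instead of
 * statically; constants and calloc() are stand-ins for kernel facilities. */
#include <stdlib.h>

#define NR_EVENT_CHANNELS 1024
#define BITS_PER_LONG (8 * sizeof(unsigned long))

struct cpu_evtchn_s {
        unsigned long bits[NR_EVENT_CHANNELS / BITS_PER_LONG];
};

static struct cpu_evtchn_s *cpu_evtchn_mask_p;

static unsigned long *cpu_evtchn_mask(int cpu)
{
        return cpu_evtchn_mask_p[cpu].bits;
}

int main(void)
{
        size_t nr_cpu_ids = 4;          /* detected CPU count, not NR_CPUS */

        cpu_evtchn_mask_p = calloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s));
        if (cpu_evtchn_mask_p == NULL)
                return 1;               /* BUG_ON() in the kernel */

        cpu_evtchn_mask(0)[0] = ~0UL;   /* route everything to CPU 0 by default */
        free(cpu_evtchn_mask_p);
        return 0;
}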
@@ -829,9 +930,5 @@
 	for (i = 0; i < NR_EVENT_CHANNELS; i++)
 		mask_evtchn(i);
 
-	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
-	for (i = 0; i < nr_irqs; i++)
-		irq_bindcount[i] = 0;
-
 	irq_ctx_init(smp_processor_id());
 }
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 56892a142ee2..3ccd348d112d 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -108,7 +108,7 @@ static void do_suspend(void)
 	/* XXX use normal device tree? */
 	xenbus_suspend();
 
-	err = stop_machine(xen_suspend, &cancelled, &cpumask_of_cpu(0));
+	err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
 	if (err) {
 		printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
 		goto out;