commit    9a489f45a155fe96b9b55fbbef2b757ef7737cfc
tree      1104f50a1adaeea564344bdfe221a5edf2141077
parent    d2ba3166f23baa53f5ee9c5c2ca43b42fb4e9e62
author    David Vrabel <david.vrabel@citrix.com>         2013-03-13 11:29:25 -0400
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2014-01-06 10:07:41 -0500
xen/events: move 2-level specific code into its own file
In preparation for alternative event channel ABIs, move all the
functions accessing the shared data structures into their own file.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Diffstat (limited to 'drivers/xen')
-rw-r--r--  drivers/xen/events/Makefile           |   1
-rw-r--r--  drivers/xen/events/events_2l.c        | 348
-rw-r--r--  drivers/xen/events/events_base.c      | 379
-rw-r--r--  drivers/xen/events/events_internal.h  |  74
4 files changed, 440 insertions(+), 362 deletions(-)
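
The hot path in the new events_2l.c is a two-level bitmap scan: a per-vCPU
selector word flags which words of the shared pending array need visiting,
and each pending word flags individual event channels. The standalone sketch
below illustrates just that search strategy; the names (scan_events, handle)
and the fixed two-word array are illustrative assumptions, not the kernel's
API, and it omits the masking and fairness logic the real code layers on top.

#include <stdio.h>

#define BITS_PER_WORD (sizeof(unsigned long) * 8)

/* Report one fired "event channel" port (stand-in for the irq dispatch). */
static void handle(unsigned int port)
{
	printf("event channel %u fired\n", port);
}

/*
 * Two-level scan: the selector has one bit per pending word; each
 * pending word has one bit per event channel.  __builtin_ctzl() is a
 * GCC/Clang builtin returning the index of the lowest set bit.
 */
static void scan_events(unsigned long selector, const unsigned long *pending)
{
	while (selector) {
		unsigned int word = __builtin_ctzl(selector);	/* level 1 */
		unsigned long bits = pending[word];

		while (bits) {
			unsigned int bit = __builtin_ctzl(bits);	/* level 2 */
			handle(word * BITS_PER_WORD + bit);
			bits &= bits - 1;	/* clear lowest set bit */
		}
		selector &= selector - 1;
	}
}

int main(void)
{
	unsigned long pending[2] = { 0x6UL, 0x1UL };	/* ports 1 and 2, plus bit 0 of word 1 */
	scan_events(0x3UL, pending);	/* words 0 and 1 flagged */
	return 0;
}

The real scan additionally filters each word through active_evtchns() (the
global mask and the per-CPU binding mask) and resumes round-robin from the
last position, as seen in xen_evtchn_handle_events() below.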
diff --git a/drivers/xen/events/Makefile b/drivers/xen/events/Makefile
index f0bc6071fd84..08179fe04612 100644
--- a/drivers/xen/events/Makefile
+++ b/drivers/xen/events/Makefile
@@ -1,3 +1,4 @@
 obj-y += events.o
 
 events-y += events_base.o
+events-y += events_2l.o
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
new file mode 100644
index 000000000000..a77e98d025fa
--- /dev/null
+++ b/drivers/xen/events/events_2l.c
@@ -0,0 +1,348 @@
+/*
+ * Xen event channels (2-level ABI)
+ *
+ * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
+ */
+
+#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+
+#include <linux/linkage.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+
+#include <asm/sync_bitops.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+
+#include <xen/xen.h>
+#include <xen/xen-ops.h>
+#include <xen/events.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/event_channel.h>
+
+#include "events_internal.h"
+
+/*
+ * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be
+ * careful to only use bitops which allow for this (e.g.
+ * test_bit/find_first_bit and friends but not __ffs) and to pass
+ * BITS_PER_EVTCHN_WORD as the bitmask length.
+ */
+#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)
+/*
+ * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
+ * array. Primarily to avoid long lines (hence the terse name).
+ */
+#define BM(x) (unsigned long *)(x)
+/* Find the first set bit in an evtchn mask */
+#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)
+
+static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD],
+		      cpu_evtchn_mask);
+
+void xen_evtchn_port_bind_to_cpu(struct irq_info *info, int cpu)
+{
+	clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu)));
+	set_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
+}
+
+void clear_evtchn(int port)
+{
+	struct shared_info *s = HYPERVISOR_shared_info;
+	sync_clear_bit(port, BM(&s->evtchn_pending[0]));
+}
+
+void set_evtchn(int port)
+{
+	struct shared_info *s = HYPERVISOR_shared_info;
+	sync_set_bit(port, BM(&s->evtchn_pending[0]));
+}
+
+int test_evtchn(int port)
+{
+	struct shared_info *s = HYPERVISOR_shared_info;
+	return sync_test_bit(port, BM(&s->evtchn_pending[0]));
+}
+
+int test_and_set_mask(int port)
+{
+	struct shared_info *s = HYPERVISOR_shared_info;
+	return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
+}
+
+void mask_evtchn(int port)
+{
+	struct shared_info *s = HYPERVISOR_shared_info;
+	sync_set_bit(port, BM(&s->evtchn_mask[0]));
+}
+
+void unmask_evtchn(int port)
+{
+	struct shared_info *s = HYPERVISOR_shared_info;
+	unsigned int cpu = get_cpu();
+	int do_hypercall = 0, evtchn_pending = 0;
+
+	BUG_ON(!irqs_disabled());
+
+	if (unlikely((cpu != cpu_from_evtchn(port))))
+		do_hypercall = 1;
+	else {
+		/*
+		 * Need to clear the mask before checking pending to
+		 * avoid a race with an event becoming pending.
+		 *
+		 * EVTCHNOP_unmask will only trigger an upcall if the
+		 * mask bit was set, so if a hypercall is needed
+		 * remask the event.
+		 */
+		sync_clear_bit(port, BM(&s->evtchn_mask[0]));
+		evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));
+
+		if (unlikely(evtchn_pending && xen_hvm_domain())) {
+			sync_set_bit(port, BM(&s->evtchn_mask[0]));
+			do_hypercall = 1;
+		}
+	}
+
+	/* Slow path (hypercall) if this is a non-local port or if this is
+	 * an hvm domain and an event is pending (hvm domains don't have
+	 * their own implementation of irq_enable). */
+	if (do_hypercall) {
+		struct evtchn_unmask unmask = { .port = port };
+		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
+	} else {
+		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
+
+		/*
+		 * The following is basically the equivalent of
+		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
+		 * the interrupt edge' if the channel is masked.
+		 */
+		if (evtchn_pending &&
+		    !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
+					   BM(&vcpu_info->evtchn_pending_sel)))
+			vcpu_info->evtchn_upcall_pending = 1;
+	}
+
+	put_cpu();
+}
+
+static DEFINE_PER_CPU(unsigned int, current_word_idx);
+static DEFINE_PER_CPU(unsigned int, current_bit_idx);
+
+/*
+ * Mask out the i least significant bits of w
+ */
+#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))
+
+static inline xen_ulong_t active_evtchns(unsigned int cpu,
+					 struct shared_info *sh,
+					 unsigned int idx)
+{
+	return sh->evtchn_pending[idx] &
+		per_cpu(cpu_evtchn_mask, cpu)[idx] &
+		~sh->evtchn_mask[idx];
+}
+
+/*
+ * Search the CPU's pending events bitmasks.  For each one found, map
+ * the event number to an irq, and feed it into do_IRQ() for handling.
+ *
+ * Xen uses a two-level bitmap to speed searching.  The first level is
+ * a bitset of words which contain pending event bits.  The second
+ * level is a bitset of pending events themselves.
+ */
+void xen_evtchn_handle_events(int cpu)
+{
+	int irq;
+	xen_ulong_t pending_words;
+	xen_ulong_t pending_bits;
+	int start_word_idx, start_bit_idx;
+	int word_idx, bit_idx;
+	int i;
+	struct irq_desc *desc;
+	struct shared_info *s = HYPERVISOR_shared_info;
+	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
+
+	/* Timer interrupt has highest priority. */
+	irq = irq_from_virq(cpu, VIRQ_TIMER);
+	if (irq != -1) {
+		unsigned int evtchn = evtchn_from_irq(irq);
+		word_idx = evtchn / BITS_PER_LONG;
+		bit_idx = evtchn % BITS_PER_LONG;
+		if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx)) {
+			desc = irq_to_desc(irq);
+			if (desc)
+				generic_handle_irq_desc(irq, desc);
+		}
+	}
+
+	/*
+	 * Master flag must be cleared /before/ clearing
+	 * selector flag. xchg_xen_ulong must contain an
+	 * appropriate barrier.
+	 */
+	pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);
+
+	start_word_idx = __this_cpu_read(current_word_idx);
+	start_bit_idx = __this_cpu_read(current_bit_idx);
+
+	word_idx = start_word_idx;
+
+	for (i = 0; pending_words != 0; i++) {
+		xen_ulong_t words;
+
+		words = MASK_LSBS(pending_words, word_idx);
+
+		/*
+		 * If we masked out all events, wrap to beginning.
+		 */
+		if (words == 0) {
+			word_idx = 0;
+			bit_idx = 0;
+			continue;
+		}
+		word_idx = EVTCHN_FIRST_BIT(words);
+
+		pending_bits = active_evtchns(cpu, s, word_idx);
+		bit_idx = 0; /* usually scan entire word from start */
+		/*
+		 * We scan the starting word in two parts.
+		 *
+		 * 1st time: start in the middle, scanning the
+		 * upper bits.
+		 *
+		 * 2nd time: scan the whole word (not just the
+		 * parts skipped in the first pass) -- if an
+		 * event in the previously scanned bits is
+		 * pending again it would just be scanned on
+		 * the next loop anyway.
+		 */
+		if (word_idx == start_word_idx) {
+			if (i == 0)
+				bit_idx = start_bit_idx;
+		}
+
+		do {
+			xen_ulong_t bits;
+			int port;
+
+			bits = MASK_LSBS(pending_bits, bit_idx);
+
+			/* If we masked out all events, move on. */
+			if (bits == 0)
+				break;
+
+			bit_idx = EVTCHN_FIRST_BIT(bits);
+
+			/* Process port. */
+			port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
+			irq = evtchn_to_irq[port];
+
+			if (irq != -1) {
+				desc = irq_to_desc(irq);
+				if (desc)
+					generic_handle_irq_desc(irq, desc);
+			}
+
+			bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;
+
+			/* Next caller starts at last processed + 1 */
+			__this_cpu_write(current_word_idx,
+					 bit_idx ? word_idx :
+					 (word_idx+1) % BITS_PER_EVTCHN_WORD);
+			__this_cpu_write(current_bit_idx, bit_idx);
+		} while (bit_idx != 0);
+
+		/* Scan start_l1i twice; all others once. */
+		if ((word_idx != start_word_idx) || (i != 0))
+			pending_words &= ~(1UL << word_idx);
+
+		word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
+	}
+}
+
+irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
+{
+	struct shared_info *sh = HYPERVISOR_shared_info;
+	int cpu = smp_processor_id();
+	xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
+	int i;
+	unsigned long flags;
+	static DEFINE_SPINLOCK(debug_lock);
+	struct vcpu_info *v;
+
+	spin_lock_irqsave(&debug_lock, flags);
+
+	printk("\nvcpu %d\n  ", cpu);
+
+	for_each_online_cpu(i) {
+		int pending;
+		v = per_cpu(xen_vcpu, i);
+		pending = (get_irq_regs() && i == cpu)
+			? xen_irqs_disabled(get_irq_regs())
+			: v->evtchn_upcall_mask;
+		printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n  ", i,
+		       pending, v->evtchn_upcall_pending,
+		       (int)(sizeof(v->evtchn_pending_sel)*2),
+		       v->evtchn_pending_sel);
+	}
+	v = per_cpu(xen_vcpu, cpu);
+
+	printk("\npending:\n   ");
+	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
+		printk("%0*"PRI_xen_ulong"%s",
+		       (int)sizeof(sh->evtchn_pending[0])*2,
+		       sh->evtchn_pending[i],
+		       i % 8 == 0 ? "\n   " : " ");
+	printk("\nglobal mask:\n   ");
+	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+		printk("%0*"PRI_xen_ulong"%s",
+		       (int)(sizeof(sh->evtchn_mask[0])*2),
+		       sh->evtchn_mask[i],
+		       i % 8 == 0 ? "\n   " : " ");
+
+	printk("\nglobally unmasked:\n   ");
+	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+		printk("%0*"PRI_xen_ulong"%s",
+		       (int)(sizeof(sh->evtchn_mask[0])*2),
+		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
+		       i % 8 == 0 ? "\n   " : " ");
+
+	printk("\nlocal cpu%d mask:\n   ", cpu);
+	for (i = (NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--)
+		printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
+		       cpu_evtchn[i],
+		       i % 8 == 0 ? "\n   " : " ");
+
+	printk("\nlocally unmasked:\n   ");
+	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
+		xen_ulong_t pending = sh->evtchn_pending[i]
+			& ~sh->evtchn_mask[i]
+			& cpu_evtchn[i];
+		printk("%0*"PRI_xen_ulong"%s",
+		       (int)(sizeof(sh->evtchn_mask[0])*2),
+		       pending, i % 8 == 0 ? "\n   " : " ");
+	}
+
+	printk("\npending list:\n");
+	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
+		if (sync_test_bit(i, BM(sh->evtchn_pending))) {
+			int word_idx = i / BITS_PER_EVTCHN_WORD;
+			printk("  %d: event %d -> irq %d%s%s%s\n",
+			       cpu_from_evtchn(i), i,
+			       evtchn_to_irq[i],
+			       sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
+			       ? "" : " l2-clear",
+			       !sync_test_bit(i, BM(sh->evtchn_mask))
+			       ? "" : " globally-masked",
+			       sync_test_bit(i, BM(cpu_evtchn))
+			       ? "" : " locally-masked");
+		}
+	}
+
+	spin_unlock_irqrestore(&debug_lock, flags);
+
+	return IRQ_HANDLED;
+}
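
The per-CPU current_word_idx/current_bit_idx pair above exists purely for
fairness: each scan resumes one bit past the last port it handled, so a
chatty low-numbered port cannot starve higher-numbered ones. A minimal
sketch of that rotation in isolation (the single-word scope and the
pick_port/next_start names are simplifying assumptions, not the driver's):

/*
 * Round-robin pick within one pending word.  next_start persists
 * across calls, mirroring the per-CPU current_bit_idx in the driver.
 */
static unsigned int next_start;

static unsigned int pick_port(unsigned long pending)	/* pending != 0 */
{
	const unsigned int nbits = sizeof(pending) * 8;
	unsigned int i;

	for (i = 0; i < nbits; i++) {
		unsigned int bit = (next_start + i) % nbits;

		if (pending & (1UL << bit)) {
			next_start = (bit + 1) % nbits;	/* resume after the hit */
			return bit;
		}
	}
	return 0;	/* unreachable while pending != 0 */
}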
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index fec5da4ff3a0..8771b740e30f 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -59,6 +59,8 @@
 #include <xen/interface/vcpu.h>
 #include <asm/hw_irq.h>
 
+#include "events_internal.h"
+
 /*
  * This lock protects updates to the following mapping and reference-count
  * arrays. The lock does not need to be acquired to read the mapping tables.
@@ -73,72 +75,12 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
 /* IRQ <-> IPI mapping */
 static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
 
-/* Interrupt types. */
-enum xen_irq_type {
-	IRQT_UNBOUND = 0,
-	IRQT_PIRQ,
-	IRQT_VIRQ,
-	IRQT_IPI,
-	IRQT_EVTCHN
-};
-
-/*
- * Packed IRQ information:
- * type - enum xen_irq_type
- * event channel - irq->event channel mapping
- * cpu - cpu this event channel is bound to
- * index - type-specific information:
- *    PIRQ - physical IRQ, GSI, flags, and owner domain
- *    VIRQ - virq number
- *    IPI - IPI vector
- *    EVTCHN -
- */
-struct irq_info {
-	struct list_head list;
-	int refcnt;
-	enum xen_irq_type type;	/* type */
-	unsigned irq;
-	unsigned short evtchn;	/* event channel */
-	unsigned short cpu;	/* cpu bound */
-
-	union {
-		unsigned short virq;
-		enum ipi_vector ipi;
-		struct {
-			unsigned short pirq;
-			unsigned short gsi;
-			unsigned char flags;
-			uint16_t domid;
-		} pirq;
-	} u;
-};
-#define PIRQ_NEEDS_EOI	(1 << 0)
-#define PIRQ_SHAREABLE	(1 << 1)
-
-static int *evtchn_to_irq;
+int *evtchn_to_irq;
 #ifdef CONFIG_X86
 static unsigned long *pirq_eoi_map;
 #endif
 static bool (*pirq_needs_eoi)(unsigned irq);
 
-/*
- * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be
- * careful to only use bitops which allow for this (e.g
- * test_bit/find_first_bit and friends but not __ffs) and to pass
- * BITS_PER_EVTCHN_WORD as the bitmask length.
- */
-#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)
-/*
- * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
- * array. Primarily to avoid long lines (hence the terse name).
- */
-#define BM(x) (unsigned long *)(x)
-/* Find the first set bit in a evtchn mask */
-#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)
-
-static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD],
-		      cpu_evtchn_mask);
-
 /* Xen will never allocate port zero for any purpose. */
 #define VALID_EVTCHN(chn)	((chn) != 0)
 
@@ -149,7 +91,7 @@ static void enable_dynirq(struct irq_data *data);
 static void disable_dynirq(struct irq_data *data);
 
 /* Get info for IRQ */
-static struct irq_info *info_for_irq(unsigned irq)
+struct irq_info *info_for_irq(unsigned irq)
 {
 	return irq_get_handler_data(irq);
 }
@@ -230,7 +172,7 @@ static void xen_irq_info_pirq_init(unsigned irq,
 /*
  * Accessors for packed IRQ information.
  */
-static unsigned int evtchn_from_irq(unsigned irq)
+unsigned int evtchn_from_irq(unsigned irq)
 {
 	if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
 		return 0;
@@ -244,6 +186,11 @@ unsigned irq_from_evtchn(unsigned int evtchn)
 }
 EXPORT_SYMBOL_GPL(irq_from_evtchn);
 
+int irq_from_virq(unsigned int cpu, unsigned int virq)
+{
+	return per_cpu(virq_to_irq, cpu)[virq];
+}
+
 static enum ipi_vector ipi_from_irq(unsigned irq)
 {
 	struct irq_info *info = info_for_irq(irq);
@@ -279,12 +226,12 @@ static enum xen_irq_type type_from_irq(unsigned irq)
 	return info_for_irq(irq)->type;
 }
 
-static unsigned cpu_from_irq(unsigned irq)
+unsigned cpu_from_irq(unsigned irq)
 {
 	return info_for_irq(irq)->cpu;
 }
 
-static unsigned int cpu_from_evtchn(unsigned int evtchn)
+unsigned int cpu_from_evtchn(unsigned int evtchn)
 {
 	int irq = evtchn_to_irq[evtchn];
 	unsigned ret = 0;
@@ -310,55 +257,21 @@ static bool pirq_needs_eoi_flag(unsigned irq)
 	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
 }
 
-static inline xen_ulong_t active_evtchns(unsigned int cpu,
-					 struct shared_info *sh,
-					 unsigned int idx)
-{
-	return sh->evtchn_pending[idx] &
-		per_cpu(cpu_evtchn_mask, cpu)[idx] &
-		~sh->evtchn_mask[idx];
-}
-
 static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 {
 	int irq = evtchn_to_irq[chn];
+	struct irq_info *info = info_for_irq(irq);
 
 	BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
 	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
 #endif
 
-	clear_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu_from_irq(irq))));
-	set_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu)));
+	xen_evtchn_port_bind_to_cpu(info, cpu);
 
-	info_for_irq(irq)->cpu = cpu;
-}
-
-static inline void clear_evtchn(int port)
-{
-	struct shared_info *s = HYPERVISOR_shared_info;
-	sync_clear_bit(port, BM(&s->evtchn_pending[0]));
-}
-
-static inline void set_evtchn(int port)
-{
-	struct shared_info *s = HYPERVISOR_shared_info;
-	sync_set_bit(port, BM(&s->evtchn_pending[0]));
-}
-
-static inline int test_evtchn(int port)
-{
-	struct shared_info *s = HYPERVISOR_shared_info;
-	return sync_test_bit(port, BM(&s->evtchn_pending[0]));
-}
-
-static inline int test_and_set_mask(int port)
-{
-	struct shared_info *s = HYPERVISOR_shared_info;
-	return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
+	info->cpu = cpu;
 }
 
-
 /**
  * notify_remote_via_irq - send event to remote end of event channel via irq
  * @irq: irq of event channel to send event to
@@ -376,63 +289,6 @@ void notify_remote_via_irq(int irq)
 }
 EXPORT_SYMBOL_GPL(notify_remote_via_irq);
 
-static void mask_evtchn(int port)
-{
-	struct shared_info *s = HYPERVISOR_shared_info;
-	sync_set_bit(port, BM(&s->evtchn_mask[0]));
-}
-
-static void unmask_evtchn(int port)
-{
-	struct shared_info *s = HYPERVISOR_shared_info;
-	unsigned int cpu = get_cpu();
-	int do_hypercall = 0, evtchn_pending = 0;
-
-	BUG_ON(!irqs_disabled());
-
-	if (unlikely((cpu != cpu_from_evtchn(port))))
-		do_hypercall = 1;
-	else {
-		/*
-		 * Need to clear the mask before checking pending to
-		 * avoid a race with an event becoming pending.
-		 *
-		 * EVTCHNOP_unmask will only trigger an upcall if the
-		 * mask bit was set, so if a hypercall is needed
-		 * remask the event.
-		 */
-		sync_clear_bit(port, BM(&s->evtchn_mask[0]));
-		evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));
-
-		if (unlikely(evtchn_pending && xen_hvm_domain())) {
-			sync_set_bit(port, BM(&s->evtchn_mask[0]));
-			do_hypercall = 1;
-		}
-	}
-
-	/* Slow path (hypercall) if this is a non-local port or if this is
-	 * an hvm domain and an event is pending (hvm domains don't have
-	 * their own implementation of irq_enable). */
-	if (do_hypercall) {
-		struct evtchn_unmask unmask = { .port = port };
-		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
-	} else {
-		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
-
-		/*
-		 * The following is basically the equivalent of
-		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
-		 * the interrupt edge' if the channel is masked.
-		 */
-		if (evtchn_pending &&
-		    !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
-					   BM(&vcpu_info->evtchn_pending_sel)))
-			vcpu_info->evtchn_upcall_pending = 1;
-	}
-
-	put_cpu();
-}
-
 static void xen_irq_init(unsigned irq)
 {
 	struct irq_info *info;
@@ -1216,222 +1072,21 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
 	notify_remote_via_irq(irq);
 }
 
-irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
-{
-	struct shared_info *sh = HYPERVISOR_shared_info;
-	int cpu = smp_processor_id();
-	xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
-	int i;
-	unsigned long flags;
-	static DEFINE_SPINLOCK(debug_lock);
-	struct vcpu_info *v;
-
-	spin_lock_irqsave(&debug_lock, flags);
-
-	printk("\nvcpu %d\n  ", cpu);
-
-	for_each_online_cpu(i) {
-		int pending;
-		v = per_cpu(xen_vcpu, i);
-		pending = (get_irq_regs() && i == cpu)
-			? xen_irqs_disabled(get_irq_regs())
-			: v->evtchn_upcall_mask;
-		printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n  ", i,
-		       pending, v->evtchn_upcall_pending,
-		       (int)(sizeof(v->evtchn_pending_sel)*2),
-		       v->evtchn_pending_sel);
-	}
-	v = per_cpu(xen_vcpu, cpu);
-
-	printk("\npending:\n   ");
-	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
-		printk("%0*"PRI_xen_ulong"%s",
-		       (int)sizeof(sh->evtchn_pending[0])*2,
-		       sh->evtchn_pending[i],
-		       i % 8 == 0 ? "\n   " : " ");
-	printk("\nglobal mask:\n   ");
-	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
-		printk("%0*"PRI_xen_ulong"%s",
-		       (int)(sizeof(sh->evtchn_mask[0])*2),
-		       sh->evtchn_mask[i],
-		       i % 8 == 0 ? "\n   " : " ");
-
-	printk("\nglobally unmasked:\n   ");
-	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
-		printk("%0*"PRI_xen_ulong"%s",
-		       (int)(sizeof(sh->evtchn_mask[0])*2),
-		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
-		       i % 8 == 0 ? "\n   " : " ");
-
-	printk("\nlocal cpu%d mask:\n   ", cpu);
-	for (i = (NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--)
-		printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
-		       cpu_evtchn[i],
-		       i % 8 == 0 ? "\n   " : " ");
-
-	printk("\nlocally unmasked:\n   ");
-	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
-		xen_ulong_t pending = sh->evtchn_pending[i]
-			& ~sh->evtchn_mask[i]
-			& cpu_evtchn[i];
-		printk("%0*"PRI_xen_ulong"%s",
-		       (int)(sizeof(sh->evtchn_mask[0])*2),
-		       pending, i % 8 == 0 ? "\n   " : " ");
-	}
-
-	printk("\npending list:\n");
-	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
-		if (sync_test_bit(i, BM(sh->evtchn_pending))) {
-			int word_idx = i / BITS_PER_EVTCHN_WORD;
-			printk("  %d: event %d -> irq %d%s%s%s\n",
-			       cpu_from_evtchn(i), i,
-			       evtchn_to_irq[i],
-			       sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
-			       ? "" : " l2-clear",
-			       !sync_test_bit(i, BM(sh->evtchn_mask))
-			       ? "" : " globally-masked",
-			       sync_test_bit(i, BM(cpu_evtchn))
-			       ? "" : " locally-masked");
-		}
-	}
-
-	spin_unlock_irqrestore(&debug_lock, flags);
-
-	return IRQ_HANDLED;
-}
-
 static DEFINE_PER_CPU(unsigned, xed_nesting_count);
-static DEFINE_PER_CPU(unsigned int, current_word_idx);
-static DEFINE_PER_CPU(unsigned int, current_bit_idx);
 
-/*
- * Mask out the i least significant bits of w
- */
-#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))
-
-/*
- * Search the CPUs pending events bitmasks.  For each one found, map
- * the event number to an irq, and feed it into do_IRQ() for
- * handling.
- *
- * Xen uses a two-level bitmap to speed searching.  The first level is
- * a bitset of words which contain pending event bits.  The second
- * level is a bitset of pending events themselves.
- */
 static void __xen_evtchn_do_upcall(void)
 {
-	int start_word_idx, start_bit_idx;
-	int word_idx, bit_idx;
-	int i, irq;
-	int cpu = get_cpu();
-	struct shared_info *s = HYPERVISOR_shared_info;
 	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
+	int cpu = get_cpu();
 	unsigned count;
 
 	do {
-		xen_ulong_t pending_words;
-		xen_ulong_t pending_bits;
-		struct irq_desc *desc;
-
 		vcpu_info->evtchn_upcall_pending = 0;
 
 		if (__this_cpu_inc_return(xed_nesting_count) - 1)
 			goto out;
 
-		/*
-		 * Master flag must be cleared /before/ clearing
-		 * selector flag. xchg_xen_ulong must contain an
-		 * appropriate barrier.
-		 */
-		if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) {
-			int evtchn = evtchn_from_irq(irq);
-			word_idx = evtchn / BITS_PER_LONG;
-			pending_bits = evtchn % BITS_PER_LONG;
-			if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) {
-				desc = irq_to_desc(irq);
-				if (desc)
-					generic_handle_irq_desc(irq, desc);
-			}
-		}
-
-		pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);
-
-		start_word_idx = __this_cpu_read(current_word_idx);
-		start_bit_idx = __this_cpu_read(current_bit_idx);
-
-		word_idx = start_word_idx;
-
-		for (i = 0; pending_words != 0; i++) {
-			xen_ulong_t words;
-
-			words = MASK_LSBS(pending_words, word_idx);
-
-			/*
-			 * If we masked out all events, wrap to beginning.
-			 */
-			if (words == 0) {
-				word_idx = 0;
-				bit_idx = 0;
-				continue;
-			}
-			word_idx = EVTCHN_FIRST_BIT(words);
-
-			pending_bits = active_evtchns(cpu, s, word_idx);
-			bit_idx = 0; /* usually scan entire word from start */
-			/*
-			 * We scan the starting word in two parts.
-			 *
-			 * 1st time: start in the middle, scanning the
-			 * upper bits.
-			 *
-			 * 2nd time: scan the whole word (not just the
-			 * parts skipped in the first pass) -- if an
-			 * event in the previously scanned bits is
-			 * pending again it would just be scanned on
-			 * the next loop anyway.
-			 */
-			if (word_idx == start_word_idx) {
-				if (i == 0)
-					bit_idx = start_bit_idx;
-			}
-
-			do {
-				xen_ulong_t bits;
-				int port;
-
-				bits = MASK_LSBS(pending_bits, bit_idx);
-
-				/* If we masked out all events, move on. */
-				if (bits == 0)
-					break;
-
-				bit_idx = EVTCHN_FIRST_BIT(bits);
-
-				/* Process port. */
-				port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
-				irq = evtchn_to_irq[port];
-
-				if (irq != -1) {
-					desc = irq_to_desc(irq);
-					if (desc)
-						generic_handle_irq_desc(irq, desc);
-				}
-
-				bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;
-
-				/* Next caller starts at last processed + 1 */
-				__this_cpu_write(current_word_idx,
-						 bit_idx ? word_idx :
-						 (word_idx+1) % BITS_PER_EVTCHN_WORD);
-				__this_cpu_write(current_bit_idx, bit_idx);
-			} while (bit_idx != 0);
-
-			/* Scan start_l1i twice; all others once. */
-			if ((word_idx != start_word_idx) || (i != 0))
-				pending_words &= ~(1UL << word_idx);
-
-			word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
-		}
+		xen_evtchn_handle_events(cpu);
 
 		BUG_ON(!irqs_disabled());
 
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
new file mode 100644
index 000000000000..79ac70bbbd26
--- /dev/null
+++ b/drivers/xen/events/events_internal.h
@@ -0,0 +1,74 @@
+/*
+ * Xen Event Channels (internal header)
+ *
+ * Copyright (C) 2013 Citrix Systems R&D Ltd.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2 or later.  See the file COPYING for more details.
+ */
+#ifndef __EVENTS_INTERNAL_H__
+#define __EVENTS_INTERNAL_H__
+
+/* Interrupt types. */
+enum xen_irq_type {
+	IRQT_UNBOUND = 0,
+	IRQT_PIRQ,
+	IRQT_VIRQ,
+	IRQT_IPI,
+	IRQT_EVTCHN
+};
+
+/*
+ * Packed IRQ information:
+ * type - enum xen_irq_type
+ * event channel - irq->event channel mapping
+ * cpu - cpu this event channel is bound to
+ * index - type-specific information:
+ *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
+ *           guest, or GSI (real passthrough IRQ) of the device.
+ *    VIRQ - virq number
+ *    IPI - IPI vector
+ *    EVTCHN -
+ */
+struct irq_info {
+	struct list_head list;
+	int refcnt;
+	enum xen_irq_type type;	/* type */
+	unsigned irq;
+	unsigned short evtchn;	/* event channel */
+	unsigned short cpu;	/* cpu bound */
+
+	union {
+		unsigned short virq;
+		enum ipi_vector ipi;
+		struct {
+			unsigned short pirq;
+			unsigned short gsi;
+			unsigned char vector;
+			unsigned char flags;
+			uint16_t domid;
+		} pirq;
+	} u;
+};
+
+#define PIRQ_NEEDS_EOI	(1 << 0)
+#define PIRQ_SHAREABLE	(1 << 1)
+
+extern int *evtchn_to_irq;
+
+struct irq_info *info_for_irq(unsigned irq);
+unsigned cpu_from_irq(unsigned irq);
+unsigned int cpu_from_evtchn(unsigned int evtchn);
+
+void xen_evtchn_port_bind_to_cpu(struct irq_info *info, int cpu);
+
+void clear_evtchn(int port);
+void set_evtchn(int port);
+int test_evtchn(int port);
+int test_and_set_mask(int port);
+void mask_evtchn(int port);
+void unmask_evtchn(int port);
+
+void xen_evtchn_handle_events(int cpu);
+
+#endif /* #ifndef __EVENTS_INTERNAL_H__ */
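
With every shared-page access now routed through the small surface in
events_internal.h, an alternative ABI (the FIFO-based ABI this series
prepares for) only needs to supply the same entry points. Later kernels
turned this seam into an ops table selected at init time; the sketch
below shows that general shape with simplified, assumed names rather
than the exact definition the kernel eventually adopted:

/*
 * Hypothetical ABI seam: init code picks one backend (e.g. a 2-level
 * or FIFO implementation) and assigns evtchn_ops once at boot; the
 * generic code then calls through the table.
 */
struct evtchn_ops {
	void (*clear_pending)(int port);
	void (*set_pending)(int port);
	int  (*is_pending)(int port);
	void (*mask)(int port);
	void (*unmask)(int port);
	void (*handle_events)(int cpu);
};

static const struct evtchn_ops *evtchn_ops;	/* set once during init */

void clear_evtchn(int port)
{
	evtchn_ops->clear_pending(port);
}

void xen_evtchn_handle_events(int cpu)
{
	evtchn_ops->handle_events(cpu);
}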