author		Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2009-09-18 19:31:22 -0400
committer	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2009-09-18 20:13:14 -0400
commit		e3cc067b0a79d3a3672bfe7cfba12f2e8ae56039 (patch)
tree		645b3eed7f7a4e426a731a5cc4f906834d0e6189
parent		74fca6a42863ffacaf7ba6f1936a9f228950f657 (diff)
xen/evtchn: track enabled state for each port
enable_irq() and disable_irq() complain if their calls are unbalanced,
so keep track of each port's enabled state and avoid redundant enables.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
-rw-r--r--	drivers/xen/evtchn.c	71
1 file changed, 56 insertions(+), 15 deletions(-)
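The key trick in this patch is pointer tagging: struct per_user_data objects are at least word-aligned, so bit 0 of their address is always zero and can carry the port's enabled flag without widening the port_user[] array. Below is a minimal, self-contained sketch of the same encoding in plain C; the names here (struct user, slot, and the helpers) are illustrative stand-ins, not part of the driver:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct per_user_data; any type with alignment >= 2
 * leaves bit 0 of its address free to hold a flag. */
struct user {
	const char *name;
};

static uintptr_t slot;	/* one tagged entry, like port_user[port] */

static struct user *get_user(void)
{
	return (struct user *)(slot & ~(uintptr_t)1);	/* mask off the flag */
}

static void set_user(struct user *u)
{
	assert(((uintptr_t)u & 1) == 0);	/* the alignment assumption */
	slot = (uintptr_t)u;	/* stores the bare pointer, clearing the flag,
				 * just as set_port_user() does in the patch below */
}

static bool get_enabled(void)
{
	return slot & 1;
}

static void set_enabled(bool enabled)
{
	if (enabled)
		slot |= 1;
	else
		slot &= ~(uintptr_t)1;
}

int main(void)
{
	struct user u = { .name = "demo" };

	set_user(&u);
	set_enabled(true);
	printf("%s: enabled=%d\n", get_user()->name, (int)get_enabled());
	return 0;
}

One consequence worth noticing: because set_port_user() assigns the raw pointer, writing a new binding also clears the flag bit, so a freshly bound port reads back as not enabled.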
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index af031950f9b1..4356a9a030df 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -69,10 +69,36 @@ struct per_user_data {
 	const char *name;
 };
 
-/* Who's bound to each port? */
-static struct per_user_data *port_user[NR_EVENT_CHANNELS];
+/*
+ * Who's bound to each port?  This is logically an array of struct
+ * per_user_data *, but we encode the current enabled-state in bit 0.
+ */
+static unsigned long port_user[NR_EVENT_CHANNELS];
 static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */
 
+static inline struct per_user_data *get_port_user(unsigned port)
+{
+	return (struct per_user_data *)(port_user[port] & ~1);
+}
+
+static inline void set_port_user(unsigned port, struct per_user_data *u)
+{
+	port_user[port] = (unsigned long)u;
+}
+
+static inline bool get_port_enabled(unsigned port)
+{
+	return port_user[port] & 1;
+}
+
+static inline void set_port_enabled(unsigned port, bool enabled)
+{
+	if (enabled)
+		port_user[port] |= 1;
+	else
+		port_user[port] &= ~1;
+}
+
 irqreturn_t evtchn_interrupt(int irq, void *data)
 {
 	unsigned int port = (unsigned long)data;
@@ -80,9 +106,15 @@ irqreturn_t evtchn_interrupt(int irq, void *data)
 
 	spin_lock(&port_user_lock);
 
-	u = port_user[port];
+	u = get_port_user(port);
+
+	if (WARN(!get_port_enabled(port),
+		 "Interrupt for port %d, but apparently not enabled; per-user %p\n",
+		 port, u))
+		goto out;
 
 	disable_irq_nosync(irq);
+	set_port_enabled(port, false);
 
 	if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
 		u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
@@ -92,10 +124,10 @@ irqreturn_t evtchn_interrupt(int irq, void *data)
 			kill_fasync(&u->evtchn_async_queue,
 				    SIGIO, POLL_IN);
 		}
-	} else {
+	} else
 		u->ring_overflow = 1;
-	}
 
+out:
 	spin_unlock(&port_user_lock);
 
 	return IRQ_HANDLED;
@@ -198,9 +230,18 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf,
 		goto out;
 
 	spin_lock_irq(&port_user_lock);
-	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++)
-		if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
-			enable_irq(irq_from_evtchn(kbuf[i]));
+
+	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
+		unsigned port = kbuf[i];
+
+		if (port < NR_EVENT_CHANNELS &&
+		    get_port_user(port) == u &&
+		    !get_port_enabled(port)) {
+			set_port_enabled(port, true);
+			enable_irq(irq_from_evtchn(port));
+		}
+	}
+
 	spin_unlock_irq(&port_user_lock);
 
 	rc = count;
@@ -222,8 +263,8 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
 	 * interrupt handler yet, and our caller has already
 	 * serialized bind operations.)
 	 */
-	BUG_ON(port_user[port] != NULL);
-	port_user[port] = u;
+	BUG_ON(get_port_user(port) != NULL);
+	set_port_user(port, u);
 
 	rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
 				       u->name, (void *)(unsigned long)port);
@@ -242,7 +283,7 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port)
 	/* make sure we unbind the irq handler before clearing the port */
 	barrier();
 
-	port_user[port] = NULL;
+	set_port_user(port, NULL);
 }
 
 static long evtchn_ioctl(struct file *file,
@@ -333,7 +374,7 @@ static long evtchn_ioctl(struct file *file,
 	spin_lock_irq(&port_user_lock);
 
 	rc = -ENOTCONN;
-	if (port_user[unbind.port] != u) {
+	if (get_port_user(unbind.port) != u) {
 		spin_unlock_irq(&port_user_lock);
 		break;
 	}
@@ -355,7 +396,7 @@ static long evtchn_ioctl(struct file *file,
 
 	if (notify.port >= NR_EVENT_CHANNELS) {
 		rc = -EINVAL;
-	} else if (port_user[notify.port] != u) {
+	} else if (get_port_user(notify.port) != u) {
 		rc = -ENOTCONN;
 	} else {
 		notify_remote_via_evtchn(notify.port);
@@ -444,10 +485,10 @@ static int evtchn_release(struct inode *inode, struct file *filp)
 	free_page((unsigned long)u->ring);
 
 	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
-		if (port_user[i] != u)
+		if (get_port_user(i) != u)
 			continue;
 
-		evtchn_unbind_from_user(port_user[i], i);
+		evtchn_unbind_from_user(get_port_user(i), i);
 	}
 
 	spin_unlock_irq(&port_user_lock);
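The !get_port_enabled(port) test added to evtchn_write() is what actually keeps the pairing balanced: the kernel maintains a per-irq disable depth, and enable_irq() warns when it is called more often than disable_irq(). The toy model below illustrates that invariant with a plain counter standing in for the kernel's depth bookkeeping; every name in it (toy_*, on_interrupt, on_write, port_enabled) is hypothetical:

#include <stdbool.h>
#include <stdio.h>

static int depth;			/* stand-in for the per-irq disable depth */
static bool port_enabled = true;	/* stand-in for bit 0 of port_user[] */

static void toy_disable_irq(void)
{
	depth++;
}

static void toy_enable_irq(void)
{
	if (depth == 0) {
		/* the "unbalanced" complaint the commit message refers to */
		fprintf(stderr, "WARN: unbalanced enable\n");
		return;
	}
	depth--;
}

/* Interrupt path: disable exactly once and record that we did. */
static void on_interrupt(void)
{
	if (!port_enabled)
		return;
	toy_disable_irq();
	port_enabled = false;
}

/* Write path: only re-enable a port we actually disabled. */
static void on_write(void)
{
	if (!port_enabled) {
		port_enabled = true;
		toy_enable_irq();
	}
}

int main(void)
{
	on_interrupt();
	on_write();
	on_write();	/* redundant notification: the guard skips it */
	printf("depth = %d\n", depth);	/* prints 0: enables and disables paired */
	return 0;
}

Without the flag, a second write naming the same port would call enable_irq() again and trip the unbalanced-enable warning; with it, redundant enables are simply skipped.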