author     Wei Liu <wei.liu2@citrix.com>             2013-05-22 02:34:45 -0400
committer  David S. Miller <davem@davemloft.net>     2013-05-23 21:40:37 -0400
commit     e1f00a69ec26e3eb9847c61c665b8fb3f0c6b477
tree       5958617283d231a3a58229e0e78a4d4623a569a5  /drivers/net/xen-netback/interface.c
parent     8892475386e819aa50856947948c546ccc964d96
xen-netback: split event channels support for Xen backend driver
Netback and netfront currently share a single event channel for TX and RX notification,
which can cause unnecessary wake-ups of the processing routines. This patch adds a
new feature, feature-split-event-channels, to netback, enabling it to
handle TX and RX events separately.
Netback uses tx_irq to notify the guest of TX completion and rx_irq for RX
notification.
If the frontend does not support this feature, tx_irq equals rx_irq.
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/xen-netback/interface.c')
-rw-r--r--  drivers/net/xen-netback/interface.c  85
1 file changed, 70 insertions(+), 15 deletions(-)
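The per-file diff below only shows how interface.c consumes the two event channels; the xenstore negotiation itself lives in drivers/net/xen-netback/xenbus.c, which is outside this diffstat. As a rough, illustrative sketch (the helper name and the separate_tx_rx_irq knob are assumptions, not taken from this patch), the backend advertises the feature to the frontend roughly like this during probe:

```c
#include <linux/types.h>
#include <xen/xenbus.h>

/* Illustrative sketch: tell the frontend it may use one event channel
 * per direction (event-channel-tx / event-channel-rx) instead of the
 * single shared "event-channel" key.  "separate_tx_rx_irq" is a
 * hypothetical policy knob, not a name taken from this diff.
 */
static int advertise_split_evtchn(struct xenbus_device *dev,
				  bool separate_tx_rx_irq)
{
	return xenbus_printf(XBT_NIL, dev->nodename,
			     "feature-split-event-channels", "%u",
			     separate_tx_rx_irq ? 1 : 0);
}
```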
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 82202c2b1bd1..087d2db0389d 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -60,21 +60,39 @@ static int xenvif_rx_schedulable(struct xenvif *vif)
 	return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
 }
 
-static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif *vif = dev_id;
 
 	if (vif->netbk == NULL)
-		return IRQ_NONE;
+		return IRQ_HANDLED;
 
 	xen_netbk_schedule_xenvif(vif);
 
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
+{
+	struct xenvif *vif = dev_id;
+
+	if (vif->netbk == NULL)
+		return IRQ_HANDLED;
+
 	if (xenvif_rx_schedulable(vif))
 		netif_wake_queue(vif->dev);
 
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+{
+	xenvif_tx_interrupt(irq, dev_id);
+	xenvif_rx_interrupt(irq, dev_id);
+
+	return IRQ_HANDLED;
+}
+
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
@@ -125,13 +143,17 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 static void xenvif_up(struct xenvif *vif)
 {
 	xen_netbk_add_xenvif(vif);
-	enable_irq(vif->irq);
+	enable_irq(vif->tx_irq);
+	if (vif->tx_irq != vif->rx_irq)
+		enable_irq(vif->rx_irq);
 	xen_netbk_check_rx_xenvif(vif);
 }
 
 static void xenvif_down(struct xenvif *vif)
 {
-	disable_irq(vif->irq);
+	disable_irq(vif->tx_irq);
+	if (vif->tx_irq != vif->rx_irq)
+		disable_irq(vif->rx_irq);
 	del_timer_sync(&vif->credit_timeout);
 	xen_netbk_deschedule_xenvif(vif);
 	xen_netbk_remove_xenvif(vif);
@@ -308,12 +330,13 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 }
 
 int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
-		   unsigned long rx_ring_ref, unsigned int evtchn)
+		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
+		   unsigned int rx_evtchn)
 {
 	int err = -ENOMEM;
 
 	/* Already connected through? */
-	if (vif->irq)
+	if (vif->tx_irq)
 		return 0;
 
 	__module_get(THIS_MODULE);
@@ -322,13 +345,37 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 	if (err < 0)
 		goto err;
 
-	err = bind_interdomain_evtchn_to_irqhandler(
-		vif->domid, evtchn, xenvif_interrupt, 0,
-		vif->dev->name, vif);
-	if (err < 0)
-		goto err_unmap;
-	vif->irq = err;
-	disable_irq(vif->irq);
+	if (tx_evtchn == rx_evtchn) {
+		/* feature-split-event-channels == 0 */
+		err = bind_interdomain_evtchn_to_irqhandler(
+			vif->domid, tx_evtchn, xenvif_interrupt, 0,
+			vif->dev->name, vif);
+		if (err < 0)
+			goto err_unmap;
+		vif->tx_irq = vif->rx_irq = err;
+		disable_irq(vif->tx_irq);
+	} else {
+		/* feature-split-event-channels == 1 */
+		snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
+			 "%s-tx", vif->dev->name);
+		err = bind_interdomain_evtchn_to_irqhandler(
+			vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
+			vif->tx_irq_name, vif);
+		if (err < 0)
+			goto err_unmap;
+		vif->tx_irq = err;
+		disable_irq(vif->tx_irq);
+
+		snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
+			 "%s-rx", vif->dev->name);
+		err = bind_interdomain_evtchn_to_irqhandler(
+			vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
+			vif->rx_irq_name, vif);
+		if (err < 0)
+			goto err_tx_unbind;
+		vif->rx_irq = err;
+		disable_irq(vif->rx_irq);
+	}
 
 	xenvif_get(vif);
 
@@ -342,6 +389,9 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 	rtnl_unlock();
 
 	return 0;
+err_tx_unbind:
+	unbind_from_irqhandler(vif->tx_irq, vif);
+	vif->tx_irq = 0;
 err_unmap:
 	xen_netbk_unmap_frontend_rings(vif);
 err:
@@ -375,8 +425,13 @@ void xenvif_disconnect(struct xenvif *vif)
 	atomic_dec(&vif->refcnt);
 	wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
 
-	if (vif->irq) {
-		unbind_from_irqhandler(vif->irq, vif);
+	if (vif->tx_irq) {
+		if (vif->tx_irq == vif->rx_irq)
+			unbind_from_irqhandler(vif->tx_irq, vif);
+		else {
+			unbind_from_irqhandler(vif->tx_irq, vif);
+			unbind_from_irqhandler(vif->rx_irq, vif);
+		}
 		/* vif->irq is valid, we had a module_get in
 		 * xenvif_connect.
 		 */