Diffstat (limited to 'drivers/net/xen-netback/interface.c')
 drivers/net/xen-netback/interface.c | 219
 1 file changed, 163 insertions(+), 56 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index d98414168485..01bb854c7f62 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -30,6 +30,7 @@
 
 #include "common.h"
 
+#include <linux/kthread.h>
 #include <linux/ethtool.h>
 #include <linux/rtnetlink.h>
 #include <linux/if_vlan.h>
@@ -38,50 +39,93 @@
 #include <asm/xen/hypercall.h>
 
 #define XENVIF_QUEUE_LENGTH 32
+#define XENVIF_NAPI_WEIGHT 64
 
-void xenvif_get(struct xenvif *vif)
+int xenvif_schedulable(struct xenvif *vif)
 {
-        atomic_inc(&vif->refcnt);
+        return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
 }
 
-void xenvif_put(struct xenvif *vif)
+static int xenvif_rx_schedulable(struct xenvif *vif)
 {
-        if (atomic_dec_and_test(&vif->refcnt))
-                wake_up(&vif->waiting_to_free);
+        return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
 }
 
-int xenvif_schedulable(struct xenvif *vif)
+static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 {
-        return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
+        struct xenvif *vif = dev_id;
+
+        if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
+                napi_schedule(&vif->napi);
+
+        return IRQ_HANDLED;
 }
 
-static int xenvif_rx_schedulable(struct xenvif *vif)
+static int xenvif_poll(struct napi_struct *napi, int budget)
 {
-        return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
+        struct xenvif *vif = container_of(napi, struct xenvif, napi);
+        int work_done;
+
+        work_done = xenvif_tx_action(vif, budget);
+
+        if (work_done < budget) {
+                int more_to_do = 0;
+                unsigned long flags;
+
+                /* It is necessary to disable IRQ before calling
+                 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
+                 * lose event from the frontend.
+                 *
+                 * Consider:
+                 *   RING_HAS_UNCONSUMED_REQUESTS
+                 *   <frontend generates event to trigger napi_schedule>
+                 *   __napi_complete
+                 *
+                 * This handler is still in scheduled state so the
+                 * event has no effect at all. After __napi_complete
+                 * this handler is descheduled and cannot get
+                 * scheduled again. We lose event in this case and the ring
+                 * will be completely stalled.
+                 */
+
+                local_irq_save(flags);
+
+                RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
+                if (!more_to_do)
+                        __napi_complete(napi);
+
+                local_irq_restore(flags);
+        }
+
+        return work_done;
 }
 
-static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
         struct xenvif *vif = dev_id;
 
-        if (vif->netbk == NULL)
-                return IRQ_NONE;
-
-        xen_netbk_schedule_xenvif(vif);
-
         if (xenvif_rx_schedulable(vif))
                 netif_wake_queue(vif->dev);
 
         return IRQ_HANDLED;
 }
 
+static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+{
+        xenvif_tx_interrupt(irq, dev_id);
+        xenvif_rx_interrupt(irq, dev_id);
+
+        return IRQ_HANDLED;
+}
+
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
         struct xenvif *vif = netdev_priv(dev);
 
         BUG_ON(skb->dev != dev);
 
-        if (vif->netbk == NULL)
+        /* Drop the packet if vif is not ready */
+        if (vif->task == NULL)
                 goto drop;
 
         /* Drop the packet if the target domain has no receive buffers. */
@@ -89,13 +133,12 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 goto drop;
 
         /* Reserve ring slots for the worst-case number of fragments. */
-        vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb);
-        xenvif_get(vif);
+        vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
 
-        if (vif->can_queue && xen_netbk_must_stop_queue(vif))
+        if (vif->can_queue && xenvif_must_stop_queue(vif))
                 netif_stop_queue(dev);
 
-        xen_netbk_queue_tx_skb(vif, skb);
+        xenvif_queue_tx_skb(vif, skb);
 
         return NETDEV_TX_OK;
 
@@ -105,11 +148,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
         return NETDEV_TX_OK;
 }
 
-void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb)
-{
-        netif_rx_ni(skb);
-}
-
 void xenvif_notify_tx_completion(struct xenvif *vif)
 {
         if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
@@ -124,17 +162,20 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 
 static void xenvif_up(struct xenvif *vif)
 {
-        xen_netbk_add_xenvif(vif);
-        enable_irq(vif->irq);
-        xen_netbk_check_rx_xenvif(vif);
+        napi_enable(&vif->napi);
+        enable_irq(vif->tx_irq);
+        if (vif->tx_irq != vif->rx_irq)
+                enable_irq(vif->rx_irq);
+        xenvif_check_rx_xenvif(vif);
 }
 
 static void xenvif_down(struct xenvif *vif)
 {
-        disable_irq(vif->irq);
+        napi_disable(&vif->napi);
+        disable_irq(vif->tx_irq);
+        if (vif->tx_irq != vif->rx_irq)
+                disable_irq(vif->rx_irq);
         del_timer_sync(&vif->credit_timeout);
-        xen_netbk_deschedule_xenvif(vif);
-        xen_netbk_remove_xenvif(vif);
 }
 
 static int xenvif_open(struct net_device *dev)
@@ -250,11 +291,12 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
         struct net_device *dev;
         struct xenvif *vif;
         char name[IFNAMSIZ] = {};
+        int i;
 
         snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
         dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
         if (dev == NULL) {
-                pr_warn("Could not allocate netdev\n");
+                pr_warn("Could not allocate netdev for %s\n", name);
                 return ERR_PTR(-ENOMEM);
         }
 
@@ -263,14 +305,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
         vif = netdev_priv(dev);
         vif->domid = domid;
         vif->handle = handle;
-        vif->netbk = NULL;
         vif->can_sg = 1;
         vif->csum = 1;
-        atomic_set(&vif->refcnt, 1);
-        init_waitqueue_head(&vif->waiting_to_free);
         vif->dev = dev;
-        INIT_LIST_HEAD(&vif->schedule_list);
-        INIT_LIST_HEAD(&vif->notify_list);
 
         vif->credit_bytes = vif->remaining_credit = ~0UL;
         vif->credit_usec = 0UL;
@@ -285,6 +322,16 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 
         dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
 
+        skb_queue_head_init(&vif->rx_queue);
+        skb_queue_head_init(&vif->tx_queue);
+
+        vif->pending_cons = 0;
+        vif->pending_prod = MAX_PENDING_REQS;
+        for (i = 0; i < MAX_PENDING_REQS; i++)
+                vif->pending_ring[i] = i;
+        for (i = 0; i < MAX_PENDING_REQS; i++)
+                vif->mmap_pages[i] = NULL;
+
         /*
          * Initialise a dummy MAC address. We choose the numerically
          * largest non-broadcast address to prevent the address getting
@@ -294,6 +341,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
         memset(dev->dev_addr, 0xFF, ETH_ALEN);
         dev->dev_addr[0] &= ~0x01;
 
+        netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);
+
         netif_carrier_off(dev);
 
         err = register_netdev(dev);
@@ -304,31 +353,66 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
         }
 
         netdev_dbg(dev, "Successfully created xenvif\n");
+
+        __module_get(THIS_MODULE);
+
         return vif;
 }
 
 int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
-                   unsigned long rx_ring_ref, unsigned int evtchn)
+                   unsigned long rx_ring_ref, unsigned int tx_evtchn,
+                   unsigned int rx_evtchn)
 {
         int err = -ENOMEM;
 
         /* Already connected through? */
-        if (vif->irq)
+        if (vif->tx_irq)
                 return 0;
 
-        err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
+        err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
         if (err < 0)
                 goto err;
 
-        err = bind_interdomain_evtchn_to_irqhandler(
-                vif->domid, evtchn, xenvif_interrupt, 0,
-                vif->dev->name, vif);
-        if (err < 0)
-                goto err_unmap;
-        vif->irq = err;
-        disable_irq(vif->irq);
+        if (tx_evtchn == rx_evtchn) {
+                /* feature-split-event-channels == 0 */
+                err = bind_interdomain_evtchn_to_irqhandler(
+                        vif->domid, tx_evtchn, xenvif_interrupt, 0,
+                        vif->dev->name, vif);
+                if (err < 0)
+                        goto err_unmap;
+                vif->tx_irq = vif->rx_irq = err;
+                disable_irq(vif->tx_irq);
+        } else {
+                /* feature-split-event-channels == 1 */
+                snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
+                         "%s-tx", vif->dev->name);
+                err = bind_interdomain_evtchn_to_irqhandler(
+                        vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
+                        vif->tx_irq_name, vif);
+                if (err < 0)
+                        goto err_unmap;
+                vif->tx_irq = err;
+                disable_irq(vif->tx_irq);
+
+                snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
+                         "%s-rx", vif->dev->name);
+                err = bind_interdomain_evtchn_to_irqhandler(
+                        vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
+                        vif->rx_irq_name, vif);
+                if (err < 0)
+                        goto err_tx_unbind;
+                vif->rx_irq = err;
+                disable_irq(vif->rx_irq);
+        }
 
-        xenvif_get(vif);
+        init_waitqueue_head(&vif->wq);
+        vif->task = kthread_create(xenvif_kthread,
+                                   (void *)vif, "%s", vif->dev->name);
+        if (IS_ERR(vif->task)) {
+                pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+                err = PTR_ERR(vif->task);
+                goto err_rx_unbind;
+        }
 
         rtnl_lock();
         if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
@@ -339,10 +423,20 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
                 xenvif_up(vif);
         rtnl_unlock();
 
+        wake_up_process(vif->task);
+
         return 0;
+
+err_rx_unbind:
+        unbind_from_irqhandler(vif->rx_irq, vif);
+        vif->rx_irq = 0;
+err_tx_unbind:
+        unbind_from_irqhandler(vif->tx_irq, vif);
+        vif->tx_irq = 0;
 err_unmap:
-        xen_netbk_unmap_frontend_rings(vif);
+        xenvif_unmap_frontend_rings(vif);
 err:
+        module_put(THIS_MODULE);
         return err;
 }
 
@@ -355,7 +449,6 @@ void xenvif_carrier_off(struct xenvif *vif)
         if (netif_running(dev))
                 xenvif_down(vif);
         rtnl_unlock();
-        xenvif_put(vif);
 }
 
 void xenvif_disconnect(struct xenvif *vif)
@@ -363,15 +456,29 @@ void xenvif_disconnect(struct xenvif *vif)
         if (netif_carrier_ok(vif->dev))
                 xenvif_carrier_off(vif);
 
-        atomic_dec(&vif->refcnt);
-        wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
+        if (vif->tx_irq) {
+                if (vif->tx_irq == vif->rx_irq)
+                        unbind_from_irqhandler(vif->tx_irq, vif);
+                else {
+                        unbind_from_irqhandler(vif->tx_irq, vif);
+                        unbind_from_irqhandler(vif->rx_irq, vif);
+                }
+                vif->tx_irq = 0;
+        }
 
-        if (vif->irq)
-                unbind_from_irqhandler(vif->irq, vif);
+        if (vif->task)
+                kthread_stop(vif->task);
 
-        unregister_netdev(vif->dev);
+        xenvif_unmap_frontend_rings(vif);
+}
 
-        xen_netbk_unmap_frontend_rings(vif);
+void xenvif_free(struct xenvif *vif)
+{
+        netif_napi_del(&vif->napi);
+
+        unregister_netdev(vif->dev);
 
         free_netdev(vif->dev);
+
+        module_put(THIS_MODULE);
 }