Diffstat (limited to 'drivers/net/xen-netback/interface.c')
-rw-r--r--	drivers/net/xen-netback/interface.c | 162
1 file changed, 98 insertions(+), 64 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 087d2db0389d..459935a6bfae 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -30,6 +30,7 @@
 
 #include "common.h"
 
+#include <linux/kthread.h>
 #include <linux/ethtool.h>
 #include <linux/rtnetlink.h>
 #include <linux/if_vlan.h>
@@ -38,17 +39,7 @@
 #include <asm/xen/hypercall.h>
 
 #define XENVIF_QUEUE_LENGTH 32
-
-void xenvif_get(struct xenvif *vif)
-{
-	atomic_inc(&vif->refcnt);
-}
-
-void xenvif_put(struct xenvif *vif)
-{
-	if (atomic_dec_and_test(&vif->refcnt))
-		wake_up(&vif->waiting_to_free);
-}
+#define XENVIF_NAPI_WEIGHT 64
 
 int xenvif_schedulable(struct xenvif *vif)
 {
@@ -57,28 +48,62 @@ int xenvif_schedulable(struct xenvif *vif)
 
 static int xenvif_rx_schedulable(struct xenvif *vif)
 {
-	return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
+	return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
 }
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif *vif = dev_id;
 
-	if (vif->netbk == NULL)
-		return IRQ_HANDLED;
-
-	xen_netbk_schedule_xenvif(vif);
+	if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
+		napi_schedule(&vif->napi);
 
 	return IRQ_HANDLED;
 }
 
+static int xenvif_poll(struct napi_struct *napi, int budget)
+{
+	struct xenvif *vif = container_of(napi, struct xenvif, napi);
+	int work_done;
+
+	work_done = xenvif_tx_action(vif, budget);
+
+	if (work_done < budget) {
+		int more_to_do = 0;
+		unsigned long flags;
+
+		/* It is necessary to disable IRQs before calling
+		 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
+		 * lose an event from the frontend.
+		 *
+		 * Consider:
+		 *   RING_HAS_UNCONSUMED_REQUESTS
+		 *   <frontend generates event to trigger napi_schedule>
+		 *   __napi_complete
+		 *
+		 * This handler is still in scheduled state so the
+		 * event has no effect at all. After __napi_complete
+		 * this handler is descheduled and cannot get
+		 * scheduled again. We lose the event in this case
+		 * and the ring will be completely stalled.
+		 */
+
+		local_irq_save(flags);
+
+		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
+		if (!more_to_do)
+			__napi_complete(napi);
+
+		local_irq_restore(flags);
+	}
+
+	return work_done;
+}
+
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif *vif = dev_id;
 
-	if (vif->netbk == NULL)
-		return IRQ_HANDLED;
-
 	if (xenvif_rx_schedulable(vif))
 		netif_wake_queue(vif->dev);
 
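The lost-event race described in the comment in xenvif_poll() is why the final emptiness test uses RING_FINAL_CHECK_FOR_REQUESTS: it re-arms frontend event delivery before checking one last time. A paraphrase of that macro from include/xen/interface/io/ring.h (illustrative, not verbatim; spelling varies slightly by tree):

/* Paraphrase of RING_FINAL_CHECK_FOR_REQUESTS; not the verbatim macro. */
#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {	\
	(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);	\
	if (_work_to_do)					\
		break;						\
	/* Looked empty: ask the frontend to raise an event	\
	   for the very next request it posts... */		\
	(_r)->sring->req_event = (_r)->req_cons + 1;		\
	mb();							\
	/* ...then re-check in case a request raced in. */	\
	(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);	\
} while (0)

Even with that re-arming, the final check and __napi_complete() must be atomic with respect to the interrupt handler, hence the local_irq_save()/local_irq_restore() pair around them.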
@@ -99,7 +124,8 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	BUG_ON(skb->dev != dev);
 
-	if (vif->netbk == NULL)
+	/* Drop the packet if vif is not ready */
+	if (vif->task == NULL)
 		goto drop;
 
 	/* Drop the packet if the target domain has no receive buffers. */
@@ -107,13 +133,12 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto drop;
 
 	/* Reserve ring slots for the worst-case number of fragments. */
-	vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb);
-	xenvif_get(vif);
+	vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
 
-	if (vif->can_queue && xen_netbk_must_stop_queue(vif))
+	if (vif->can_queue && xenvif_must_stop_queue(vif))
 		netif_stop_queue(dev);
 
-	xen_netbk_queue_tx_skb(vif, skb);
+	xenvif_queue_tx_skb(vif, skb);
 
 	return NETDEV_TX_OK;
 
@@ -123,11 +148,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
-void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb)
-{
-	netif_rx_ni(skb);
-}
-
 void xenvif_notify_tx_completion(struct xenvif *vif)
 {
 	if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
@@ -142,21 +162,20 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 
 static void xenvif_up(struct xenvif *vif)
 {
-	xen_netbk_add_xenvif(vif);
+	napi_enable(&vif->napi);
 	enable_irq(vif->tx_irq);
 	if (vif->tx_irq != vif->rx_irq)
 		enable_irq(vif->rx_irq);
-	xen_netbk_check_rx_xenvif(vif);
+	xenvif_check_rx_xenvif(vif);
 }
 
 static void xenvif_down(struct xenvif *vif)
 {
+	napi_disable(&vif->napi);
 	disable_irq(vif->tx_irq);
 	if (vif->tx_irq != vif->rx_irq)
 		disable_irq(vif->rx_irq);
 	del_timer_sync(&vif->credit_timeout);
-	xen_netbk_deschedule_xenvif(vif);
-	xen_netbk_remove_xenvif(vif);
 }
 
 static int xenvif_open(struct net_device *dev)
@@ -272,11 +291,12 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	struct net_device *dev;
 	struct xenvif *vif;
 	char name[IFNAMSIZ] = {};
+	int i;
 
 	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
 	dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
 	if (dev == NULL) {
-		pr_warn("Could not allocate netdev\n");
+		pr_warn("Could not allocate netdev for %s\n", name);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -285,20 +305,14 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	vif = netdev_priv(dev);
 	vif->domid = domid;
 	vif->handle = handle;
-	vif->netbk = NULL;
 	vif->can_sg = 1;
 	vif->csum = 1;
-	atomic_set(&vif->refcnt, 1);
-	init_waitqueue_head(&vif->waiting_to_free);
 	vif->dev = dev;
-	INIT_LIST_HEAD(&vif->schedule_list);
-	INIT_LIST_HEAD(&vif->notify_list);
 
 	vif->credit_bytes = vif->remaining_credit = ~0UL;
 	vif->credit_usec = 0UL;
 	init_timer(&vif->credit_timeout);
-	/* Initialize 'expires' now: it's used to track the credit window. */
-	vif->credit_timeout.expires = jiffies;
+	vif->credit_window_start = get_jiffies_64();
 
 	dev->netdev_ops = &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
@@ -307,6 +321,16 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 
 	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
 
+	skb_queue_head_init(&vif->rx_queue);
+	skb_queue_head_init(&vif->tx_queue);
+
+	vif->pending_cons = 0;
+	vif->pending_prod = MAX_PENDING_REQS;
+	for (i = 0; i < MAX_PENDING_REQS; i++)
+		vif->pending_ring[i] = i;
+	for (i = 0; i < MAX_PENDING_REQS; i++)
+		vif->mmap_pages[i] = NULL;
+
 	/*
 	 * Initialise a dummy MAC address. We choose the numerically
 	 * largest non-broadcast address to prevent the address getting
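The pending ring initialised above is a free-list of slot indices: pending_ring[] starts holding 0..MAX_PENDING_REQS-1, with pending_prod running ahead of pending_cons by the number of free slots. A minimal sketch of the index arithmetic, assuming (as in this driver) that MAX_PENDING_REQS is a power of two; the helper names are illustrative, the real consumers live in netback.c:

/* Free-running indices, masked on access (power-of-two ring size). */
static inline u16 pending_index(unsigned int i)
{
	return (u16)(i & (MAX_PENDING_REQS - 1));
}

/* prod - cons free slots; everything is free right after xenvif_alloc(). */
static inline unsigned int nr_free_slots(struct xenvif *vif)
{
	return vif->pending_prod - vif->pending_cons;
}

/* Claim a free slot index from the ring... */
static inline u16 claim_pending_slot(struct xenvif *vif)
{
	return vif->pending_ring[pending_index(vif->pending_cons++)];
}

/* ...and produce it back once its grant operation has completed. */
static inline void release_pending_slot(struct xenvif *vif, u16 pending_idx)
{
	vif->pending_ring[pending_index(vif->pending_prod++)] = pending_idx;
}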
@@ -316,6 +340,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	memset(dev->dev_addr, 0xFF, ETH_ALEN);
 	dev->dev_addr[0] &= ~0x01;
 
+	netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);
+
 	netif_carrier_off(dev);
 
 	err = register_netdev(dev);
@@ -326,6 +352,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	}
 
 	netdev_dbg(dev, "Successfully created xenvif\n");
+
+	__module_get(THIS_MODULE);
+
 	return vif;
 }
 
@@ -339,9 +368,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 	if (vif->tx_irq)
 		return 0;
 
-	__module_get(THIS_MODULE);
-
-	err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
+	err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
 	if (err < 0)
 		goto err;
 
@@ -377,7 +404,14 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 		disable_irq(vif->rx_irq);
 	}
 
-	xenvif_get(vif);
+	init_waitqueue_head(&vif->wq);
+	vif->task = kthread_create(xenvif_kthread,
+				   (void *)vif, "%s", vif->dev->name);
+	if (IS_ERR(vif->task)) {
+		pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+		err = PTR_ERR(vif->task);
+		goto err_rx_unbind;
+	}
 
 	rtnl_lock();
 	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
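kthread_create() returns the task in a stopped state, which is why the error paths below can still unwind safely: the thread only begins running at the wake_up_process() call after xenvif_up(). A sketch of the thread body this hands control to; it is close to, but not necessarily identical to, the xenvif_kthread() added to netback.c, and rx_work_todo() is that file's helper:

int xenvif_kthread(void *data)
{
	struct xenvif *vif = data;

	while (!kthread_should_stop()) {
		/* Sleep until there is RX work or we are asked to stop;
		 * kthread_stop() in xenvif_disconnect() wakes us too.
		 */
		wait_event_interruptible(vif->wq,
					 rx_work_todo(vif) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;

		if (rx_work_todo(vif))
			xenvif_rx_action(vif); /* drain vif->rx_queue */

		cond_resched();
	}

	return 0;
}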
@@ -388,12 +422,18 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 	xenvif_up(vif);
 	rtnl_unlock();
 
+	wake_up_process(vif->task);
+
 	return 0;
+
+err_rx_unbind:
+	unbind_from_irqhandler(vif->rx_irq, vif);
+	vif->rx_irq = 0;
 err_tx_unbind:
 	unbind_from_irqhandler(vif->tx_irq, vif);
 	vif->tx_irq = 0;
 err_unmap:
-	xen_netbk_unmap_frontend_rings(vif);
+	xenvif_unmap_frontend_rings(vif);
 err:
 	module_put(THIS_MODULE);
 	return err;
@@ -408,23 +448,13 @@ void xenvif_carrier_off(struct xenvif *vif)
 	if (netif_running(dev))
 		xenvif_down(vif);
 	rtnl_unlock();
-	xenvif_put(vif);
 }
 
 void xenvif_disconnect(struct xenvif *vif)
 {
-	/* Disconnect function might get called by generic framework
-	 * even before vif connects, so we need to check if we really
-	 * need to do a module_put.
-	 */
-	int need_module_put = 0;
-
 	if (netif_carrier_ok(vif->dev))
 		xenvif_carrier_off(vif);
 
-	atomic_dec(&vif->refcnt);
-	wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
-
 	if (vif->tx_irq) {
 		if (vif->tx_irq == vif->rx_irq)
 			unbind_from_irqhandler(vif->tx_irq, vif);
@@ -432,18 +462,22 @@ void xenvif_disconnect(struct xenvif *vif)
 			unbind_from_irqhandler(vif->tx_irq, vif);
 			unbind_from_irqhandler(vif->rx_irq, vif);
 		}
-		/* vif->irq is valid, we had a module_get in
-		 * xenvif_connect.
-		 */
-		need_module_put = 1;
+		vif->tx_irq = 0;
 	}
 
-	unregister_netdev(vif->dev);
+	if (vif->task)
+		kthread_stop(vif->task);
+
+	xenvif_unmap_frontend_rings(vif);
+}
 
-	xen_netbk_unmap_frontend_rings(vif);
+void xenvif_free(struct xenvif *vif)
+{
+	netif_napi_del(&vif->napi);
+
+	unregister_netdev(vif->dev);
 
 	free_netdev(vif->dev);
 
-	if (need_module_put)
-		module_put(THIS_MODULE);
+	module_put(THIS_MODULE);
 }
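With the refcount-based xenvif_get()/xenvif_put() scheme gone, teardown splits into two stages: xenvif_disconnect() undoes xenvif_connect(), while the new xenvif_free() undoes xenvif_alloc(). A sketch of how a caller is expected to sequence them; the function below is hypothetical, and the real call sites live in xenbus.c:

static void backend_teardown(struct xenvif *vif)
{
	/* Stage 1: undo xenvif_connect() - stop the kthread,
	 * unbind the IRQs, unmap the shared rings.
	 */
	xenvif_disconnect(vif);

	/* Stage 2: undo xenvif_alloc() - delete the NAPI instance,
	 * unregister and free the netdev, and drop the module
	 * reference taken by __module_get() in xenvif_alloc().
	 */
	xenvif_free(vif);
}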