Diffstat (limited to 'drivers/net/xen-netback/interface.c')
 drivers/net/xen-netback/interface.c | 78 +-
 1 file changed, 47 insertions(+), 31 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 2329cccf1fa6..b9de31ea7fc4 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -34,6 +34,7 @@
 #include <linux/ethtool.h>
 #include <linux/rtnetlink.h>
 #include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
 
 #include <xen/events.h>
 #include <asm/xen/hypercall.h>
@@ -46,11 +47,6 @@ int xenvif_schedulable(struct xenvif *vif)
         return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
 }
 
-static int xenvif_rx_schedulable(struct xenvif *vif)
-{
-        return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
-}
-
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 {
         struct xenvif *vif = dev_id;
@@ -104,8 +100,8 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
         struct xenvif *vif = dev_id;
 
-        if (xenvif_rx_schedulable(vif))
-                netif_wake_queue(vif->dev);
+        vif->rx_event = true;
+        xenvif_kick_thread(vif);
 
         return IRQ_HANDLED;
 }
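The RX interrupt handler above no longer wakes the netdev queue itself; it only records that the frontend posted buffers and wakes the per-vif kernel thread, which now does all receive-side work. As a rough sketch (the real helper lives in netback.c and is not part of this diff), xenvif_kick_thread() is assumed to be little more than a wake-up on the wait queue initialised in xenvif_connect():

/* Assumed shape of the kick helper; defined in netback.c, not shown here. */
void xenvif_kick_thread(struct xenvif *vif)
{
        wake_up(&vif->wq);      /* wakes xenvif_kthread() sleeping on vif->wq */
}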
@@ -121,24 +117,35 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
         struct xenvif *vif = netdev_priv(dev);
+        int min_slots_needed;
 
         BUG_ON(skb->dev != dev);
 
         /* Drop the packet if vif is not ready */
-        if (vif->task == NULL)
+        if (vif->task == NULL || !xenvif_schedulable(vif))
                 goto drop;
 
-        /* Drop the packet if the target domain has no receive buffers. */
-        if (!xenvif_rx_schedulable(vif))
-                goto drop;
+        /* At best we'll need one slot for the header and one for each
+         * frag.
+         */
+        min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
 
-        /* Reserve ring slots for the worst-case number of fragments. */
-        vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
+        /* If the skb is GSO then we'll also need an extra slot for the
+         * metadata.
+         */
+        if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+            skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+                min_slots_needed++;
 
-        if (vif->can_queue && xenvif_must_stop_queue(vif))
-                netif_stop_queue(dev);
+        /* If the skb can't possibly fit in the remaining slots
+         * then turn off the queue to give the ring a chance to
+         * drain.
+         */
+        if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
+                xenvif_stop_queue(vif);
 
-        xenvif_queue_tx_skb(vif, skb);
+        skb_queue_tail(&vif->rx_queue, skb);
+        xenvif_kick_thread(vif);
 
         return NETDEV_TX_OK;
 
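xenvif_start_xmit() now makes a cheap estimate of how many RX ring slots the skb could need instead of counting them exactly: one slot for the linear header, one per page fragment, and one extra slot for GSO metadata. A self-contained sketch of the same arithmetic, purely illustrative (the helper name below is hypothetical):

/* Illustrative only: mirrors the min_slots_needed estimate above.
 * Example: a TCPv4 GSO skb with 3 frags needs at least 1 + 3 + 1 = 5 slots.
 */
static int min_rx_slots_for_skb(struct sk_buff *skb)
{
        int slots = 1 + skb_shinfo(skb)->nr_frags;

        if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
                slots++;

        return slots;
}

If xenvif_rx_ring_slots_available() reports less space than this estimate, the queue is stopped so the ring can drain; the skb itself is still queued for the kthread rather than dropped.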
@@ -148,12 +155,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
         return NETDEV_TX_OK;
 }
 
-void xenvif_notify_tx_completion(struct xenvif *vif)
-{
-        if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
-                netif_wake_queue(vif->dev);
-}
-
 static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
         struct xenvif *vif = netdev_priv(dev);
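With xenvif_notify_tx_completion() removed, restarting a stopped queue is no longer triggered from this file; it is expected to happen from the receive path in netback.c once enough ring slots have been freed. A hypothetical sketch of that responsibility (illustrative name; only xenvif_rx_ring_slots_available() and the netdev calls are taken from the surrounding code):

/* Hypothetical: where the queue-restart logic is assumed to move. */
static void xenvif_maybe_wake_queue(struct xenvif *vif, int needed_slots)
{
        if (netif_queue_stopped(vif->dev) &&
            xenvif_rx_ring_slots_available(vif, needed_slots))
                netif_wake_queue(vif->dev);
}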
@@ -307,6 +308,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
         SET_NETDEV_DEV(dev, parent);
 
         vif = netdev_priv(dev);
+
+        vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
+                                     MAX_GRANT_COPY_OPS);
+        if (vif->grant_copy_op == NULL) {
+                pr_warn("Could not allocate grant copy space for %s\n", name);
+                free_netdev(dev);
+                return ERR_PTR(-ENOMEM);
+        }
+
         vif->domid = domid;
         vif->handle = handle;
         vif->can_sg = 1;
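The per-vif array of grant-copy operations is now allocated with vmalloc() when the interface is created, since the worst-case number of operations is large enough that an embedded array or kmalloc() allocation would be awkward. The bound is assumed to be defined in common.h along the following lines (not shown in this diff), and the matching vfree() appears in xenvif_free() below:

/* Assumed definition from common.h (not part of this diff): worst case is
 * one copy operation per fragment for every slot in the RX ring.
 */
#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)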
@@ -368,16 +378,18 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
                    unsigned long rx_ring_ref, unsigned int tx_evtchn,
                    unsigned int rx_evtchn)
 {
+        struct task_struct *task;
         int err = -ENOMEM;
 
-        /* Already connected through? */
-        if (vif->tx_irq)
-                return 0;
+        BUG_ON(vif->tx_irq);
+        BUG_ON(vif->task);
 
         err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
         if (err < 0)
                 goto err;
 
+        init_waitqueue_head(&vif->wq);
+
         if (tx_evtchn == rx_evtchn) {
                 /* feature-split-event-channels == 0 */
                 err = bind_interdomain_evtchn_to_irqhandler(
@@ -410,15 +422,16 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
                 disable_irq(vif->rx_irq);
         }
 
-        init_waitqueue_head(&vif->wq);
-        vif->task = kthread_create(xenvif_kthread,
-                                   (void *)vif, "%s", vif->dev->name);
-        if (IS_ERR(vif->task)) {
+        task = kthread_create(xenvif_kthread,
+                              (void *)vif, "%s", vif->dev->name);
+        if (IS_ERR(task)) {
                 pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
-                err = PTR_ERR(vif->task);
+                err = PTR_ERR(task);
                 goto err_rx_unbind;
         }
 
+        vif->task = task;
+
         rtnl_lock();
         if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                 dev_set_mtu(vif->dev, ETH_DATA_LEN);
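The kthread is now created into a local variable and only published to vif->task once kthread_create() has succeeded. That matters because other paths test vif->task directly: xenvif_start_xmit() treats a NULL vif->task as "not ready" and drops the packet, so vif->task should never transiently hold an ERR_PTR value. A condensed sketch of the publish-last pattern, with the IRQ setup elided (the function name is hypothetical, and waking the thread is assumed to happen once the rest of the setup succeeds):

/* Sketch of the publish-last pattern used by xenvif_connect(). */
static int start_rx_kthread(struct xenvif *vif)
{
        struct task_struct *task;

        task = kthread_create(xenvif_kthread, (void *)vif, "%s", vif->dev->name);
        if (IS_ERR(task))
                return PTR_ERR(task);  /* vif->task stays NULL, xmit keeps dropping */

        vif->task = task;              /* publish only a valid pointer */
        wake_up_process(task);
        return 0;
}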
@@ -461,8 +474,10 @@ void xenvif_disconnect(struct xenvif *vif)
         if (netif_carrier_ok(vif->dev))
                 xenvif_carrier_off(vif);
 
-        if (vif->task)
+        if (vif->task) {
                 kthread_stop(vif->task);
+                vif->task = NULL;
+        }
 
         if (vif->tx_irq) {
                 if (vif->tx_irq == vif->rx_irq)
@@ -483,6 +498,7 @@ void xenvif_free(struct xenvif *vif)
 
         unregister_netdev(vif->dev);
 
+        vfree(vif->grant_copy_op);
         free_netdev(vif->dev);
 
         module_put(THIS_MODULE);