author     H. Peter Anvin <hpa@linux.intel.com>  2014-02-07 14:27:30 -0500
committer  H. Peter Anvin <hpa@linux.intel.com>  2014-02-07 14:27:30 -0500
commit     a3b072cd180c12e8fe0ece9487b9065808327640 (patch)
tree       62b982041be84748852d77cdf6ca5639ef40858f /drivers/net/xen-netback/interface.c
parent     75a1ba5b2c529db60ca49626bcaf0bddf4548438 (diff)
parent     081cd62a010f97b5bc1d2b0cd123c5abc692b68a (diff)

Merge tag 'efi-urgent' into x86/urgent

* Avoid WARN_ON() when mapping BGRT on Baytrail (EFI 32-bit)

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'drivers/net/xen-netback/interface.c')
-rw-r--r--  drivers/net/xen-netback/interface.c  47
1 file changed, 24 insertions(+), 23 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index fff8cddfed81..b9de31ea7fc4 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -47,11 +47,6 @@ int xenvif_schedulable(struct xenvif *vif)
 	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
 }
 
-static int xenvif_rx_schedulable(struct xenvif *vif)
-{
-	return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
-}
-
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif *vif = dev_id;
@@ -105,8 +100,8 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif *vif = dev_id;
 
-	if (xenvif_rx_schedulable(vif))
-		netif_wake_queue(vif->dev);
+	vif->rx_event = true;
+	xenvif_kick_thread(vif);
 
 	return IRQ_HANDLED;
 }
@@ -122,24 +117,35 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
+	int min_slots_needed;
 
 	BUG_ON(skb->dev != dev);
 
 	/* Drop the packet if vif is not ready */
-	if (vif->task == NULL)
+	if (vif->task == NULL || !xenvif_schedulable(vif))
 		goto drop;
 
-	/* Drop the packet if the target domain has no receive buffers. */
-	if (!xenvif_rx_schedulable(vif))
-		goto drop;
+	/* At best we'll need one slot for the header and one for each
+	 * frag.
+	 */
+	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
 
-	/* Reserve ring slots for the worst-case number of fragments. */
-	vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
+	/* If the skb is GSO then we'll also need an extra slot for the
+	 * metadata.
+	 */
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+	    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+		min_slots_needed++;
 
-	if (vif->can_queue && xenvif_must_stop_queue(vif))
-		netif_stop_queue(dev);
+	/* If the skb can't possibly fit in the remaining slots
+	 * then turn off the queue to give the ring a chance to
+	 * drain.
+	 */
+	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
+		xenvif_stop_queue(vif);
 
-	xenvif_queue_tx_skb(vif, skb);
+	skb_queue_tail(&vif->rx_queue, skb);
+	xenvif_kick_thread(vif);
 
 	return NETDEV_TX_OK;
 
@@ -149,12 +155,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
-void xenvif_notify_tx_completion(struct xenvif *vif)
-{
-	if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
-		netif_wake_queue(vif->dev);
-}
-
 static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
@@ -388,6 +388,8 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 	if (err < 0)
 		goto err;
 
+	init_waitqueue_head(&vif->wq);
+
 	if (tx_evtchn == rx_evtchn) {
 		/* feature-split-event-channels == 0 */
 		err = bind_interdomain_evtchn_to_irqhandler(
@@ -420,7 +422,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 		disable_irq(vif->rx_irq);
 	}
 
-	init_waitqueue_head(&vif->wq);
 	task = kthread_create(xenvif_kthread,
 			      (void *)vif, "%s", vif->dev->name);
 	if (IS_ERR(task)) {
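
Note: the receive-slot estimate that this patch introduces in xenvif_start_xmit() can be modeled outside the kernel. The sketch below is a minimal, self-contained userspace approximation, not the kernel code: the struct and the GSO flag values (model_skb, MODEL_GSO_TCPV4/V6) are stand-ins for struct skb_shared_info and SKB_GSO_TCPV4/SKB_GSO_TCPV6. It shows the same arithmetic as the patch: one slot for the linear header, one per fragment, plus one extra slot for GSO metadata.

/* Userspace model of the min_slots_needed logic above.
 * Types and flag values are hypothetical stand-ins for the
 * real kernel definitions.
 */
#include <stdio.h>

#define MODEL_GSO_TCPV4 0x1  /* stand-in for SKB_GSO_TCPV4 */
#define MODEL_GSO_TCPV6 0x2  /* stand-in for SKB_GSO_TCPV6 */

struct model_skb {
	int nr_frags;          /* number of paged fragments */
	unsigned int gso_type; /* GSO flags, if any */
};

/* One slot for the header, one per frag, and one extra slot
 * for GSO metadata -- mirroring the new xenvif_start_xmit().
 */
static int min_slots_needed(const struct model_skb *skb)
{
	int slots = 1 + skb->nr_frags;

	if (skb->gso_type & (MODEL_GSO_TCPV4 | MODEL_GSO_TCPV6))
		slots++;

	return slots;
}

int main(void)
{
	struct model_skb plain = { .nr_frags = 2,  .gso_type = 0 };
	struct model_skb gso   = { .nr_frags = 17, .gso_type = MODEL_GSO_TCPV4 };

	printf("plain skb: %d slots\n", min_slots_needed(&plain)); /* prints 3 */
	printf("gso skb:   %d slots\n", min_slots_needed(&gso));   /* prints 19 */
	return 0;
}

If fewer than this many slots remain in the shared ring, the patch stops the queue (xenvif_stop_queue) and queues the skb for the dedicated kthread (skb_queue_tail plus xenvif_kick_thread) so the ring can drain before transmission resumes.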