diff options
Diffstat (limited to 'drivers/net/xen-netback/interface.c')
-rw-r--r-- | drivers/net/xen-netback/interface.c | 47 |
1 file changed, 24 insertions, 23 deletions
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 870f1fa58370..1dcb9606e6e0 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -46,11 +46,6 @@ int xenvif_schedulable(struct xenvif *vif) | |||
46 | return netif_running(vif->dev) && netif_carrier_ok(vif->dev); | 46 | return netif_running(vif->dev) && netif_carrier_ok(vif->dev); |
47 | } | 47 | } |
48 | 48 | ||
49 | static int xenvif_rx_schedulable(struct xenvif *vif) | ||
50 | { | ||
51 | return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif); | ||
52 | } | ||
53 | |||
54 | static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) | 49 | static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) |
55 | { | 50 | { |
56 | struct xenvif *vif = dev_id; | 51 | struct xenvif *vif = dev_id; |
@@ -104,8 +99,8 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id) | |||
104 | { | 99 | { |
105 | struct xenvif *vif = dev_id; | 100 | struct xenvif *vif = dev_id; |
106 | 101 | ||
107 | if (xenvif_rx_schedulable(vif)) | 102 | vif->rx_event = true; |
108 | netif_wake_queue(vif->dev); | 103 | xenvif_kick_thread(vif); |
109 | 104 | ||
110 | return IRQ_HANDLED; | 105 | return IRQ_HANDLED; |
111 | } | 106 | } |
@@ -121,24 +116,35 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id) | |||
121 | static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) | 116 | static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) |
122 | { | 117 | { |
123 | struct xenvif *vif = netdev_priv(dev); | 118 | struct xenvif *vif = netdev_priv(dev); |
119 | int min_slots_needed; | ||
124 | 120 | ||
125 | BUG_ON(skb->dev != dev); | 121 | BUG_ON(skb->dev != dev); |
126 | 122 | ||
127 | /* Drop the packet if vif is not ready */ | 123 | /* Drop the packet if vif is not ready */ |
128 | if (vif->task == NULL) | 124 | if (vif->task == NULL || !xenvif_schedulable(vif)) |
129 | goto drop; | 125 | goto drop; |
130 | 126 | ||
131 | /* Drop the packet if the target domain has no receive buffers. */ | 127 | /* At best we'll need one slot for the header and one for each |
132 | if (!xenvif_rx_schedulable(vif)) | 128 | * frag. |
133 | goto drop; | 129 | */ |
130 | min_slots_needed = 1 + skb_shinfo(skb)->nr_frags; | ||
134 | 131 | ||
135 | /* Reserve ring slots for the worst-case number of fragments. */ | 132 | /* If the skb is GSO then we'll also need an extra slot for the |
136 | vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb); | 133 | * metadata. |
134 | */ | ||
135 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || | ||
136 | skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | ||
137 | min_slots_needed++; | ||
137 | 138 | ||
138 | if (vif->can_queue && xenvif_must_stop_queue(vif)) | 139 | /* If the skb can't possibly fit in the remaining slots |
139 | netif_stop_queue(dev); | 140 | * then turn off the queue to give the ring a chance to |
141 | * drain. | ||
142 | */ | ||
143 | if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) | ||
144 | xenvif_stop_queue(vif); | ||
140 | 145 | ||
141 | xenvif_queue_tx_skb(vif, skb); | 146 | skb_queue_tail(&vif->rx_queue, skb); |
147 | xenvif_kick_thread(vif); | ||
142 | 148 | ||
143 | return NETDEV_TX_OK; | 149 | return NETDEV_TX_OK; |
144 | 150 | ||
@@ -148,12 +154,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
148 | return NETDEV_TX_OK; | 154 | return NETDEV_TX_OK; |
149 | } | 155 | } |
150 | 156 | ||
151 | void xenvif_notify_tx_completion(struct xenvif *vif) | ||
152 | { | ||
153 | if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif)) | ||
154 | netif_wake_queue(vif->dev); | ||
155 | } | ||
156 | |||
157 | static struct net_device_stats *xenvif_get_stats(struct net_device *dev) | 157 | static struct net_device_stats *xenvif_get_stats(struct net_device *dev) |
158 | { | 158 | { |
159 | struct xenvif *vif = netdev_priv(dev); | 159 | struct xenvif *vif = netdev_priv(dev); |
@@ -378,6 +378,8 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, | |||
378 | if (err < 0) | 378 | if (err < 0) |
379 | goto err; | 379 | goto err; |
380 | 380 | ||
381 | init_waitqueue_head(&vif->wq); | ||
382 | |||
381 | if (tx_evtchn == rx_evtchn) { | 383 | if (tx_evtchn == rx_evtchn) { |
382 | /* feature-split-event-channels == 0 */ | 384 | /* feature-split-event-channels == 0 */ |
383 | err = bind_interdomain_evtchn_to_irqhandler( | 385 | err = bind_interdomain_evtchn_to_irqhandler( |
@@ -410,7 +412,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, | |||
410 | disable_irq(vif->rx_irq); | 412 | disable_irq(vif->rx_irq); |
411 | } | 413 | } |
412 | 414 | ||
413 | init_waitqueue_head(&vif->wq); | ||
414 | task = kthread_create(xenvif_kthread, | 415 | task = kthread_create(xenvif_kthread, |
415 | (void *)vif, "%s", vif->dev->name); | 416 | (void *)vif, "%s", vif->dev->name); |
416 | if (IS_ERR(task)) { | 417 | if (IS_ERR(task)) { |