Diffstat (limited to 'drivers/net/xen-netback/interface.c')
-rw-r--r--  drivers/net/xen-netback/interface.c | 74
 1 file changed, 15 insertions(+), 59 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f379689dde30..895fe84011e7 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -43,6 +43,9 @@
 #define XENVIF_QUEUE_LENGTH 32
 #define XENVIF_NAPI_WEIGHT  64
 
+/* Number of bytes allowed on the internal guest Rx queue. */
+#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
+
 /* This function is used to set SKBTX_DEV_ZEROCOPY as well as
  * increasing the inflight counter. We need to increase the inflight
  * counter because core driver calls into xenvif_zerocopy_callback
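
This hunk introduces a simple byte-count cap on the internal guest Rx queue: half a ring's worth of pages. The cap is enforced on the enqueue side in the companion netback.c change, which this file-limited view does not show. A minimal sketch of what that enqueue helper plausibly looks like; the running byte counter queue->rx_queue_len is an assumption here, not visible in these hunks:

void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
        unsigned long flags;

        spin_lock_irqsave(&queue->rx_queue.lock, flags);

        __skb_queue_tail(&queue->rx_queue, skb);

        /* Once more than rx_queue_max bytes are backlogged, stop the
         * netdev tx queue; the dequeue side wakes it again when the
         * backlog drains below the limit.  (rx_queue_len is an assumed
         * companion field.)
         */
        queue->rx_queue_len += skb->len;
        if (queue->rx_queue_len > queue->rx_queue_max)
                netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev,
                                                        queue->id));

        spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}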
@@ -60,20 +63,11 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
 	atomic_dec(&queue->inflight_packets);
 }
 
-static inline void xenvif_stop_queue(struct xenvif_queue *queue)
-{
-	struct net_device *dev = queue->vif->dev;
-
-	if (!queue->vif->can_queue)
-		return;
-
-	netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
-}
-
 int xenvif_schedulable(struct xenvif *vif)
 {
 	return netif_running(vif->dev) &&
-		test_bit(VIF_STATUS_CONNECTED, &vif->status);
+		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
+		!vif->disabled;
 }
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
@@ -114,16 +108,7 @@ int xenvif_poll(struct napi_struct *napi, int budget)
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif_queue *queue = dev_id;
-	struct netdev_queue *net_queue =
-		netdev_get_tx_queue(queue->vif->dev, queue->id);
 
-	/* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
-	 * the carrier went down and this queue was previously blocked
-	 */
-	if (unlikely(netif_tx_queue_stopped(net_queue) ||
-		     (!netif_carrier_ok(queue->vif->dev) &&
-		      test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
-		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
 	xenvif_kick_thread(queue);
 
 	return IRQ_HANDLED;
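
With the QDisc/purge bookkeeping gone, the Rx interrupt only kicks the per-queue kernel thread, and the thread's wait condition decides whether there is anything to do. A plausible sketch of that condition, assuming the stall helpers sketched after the xenvif_connect() hunk below and the existing XEN_NETBK_RX_SLOTS_MAX worst-case slot count:

static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
        /* Work exists when queued skbs can make progress on the ring,
         * or when the queue's stall state needs to change.
         */
        return (!skb_queue_empty(&queue->rx_queue) &&
                xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)) ||
               xenvif_rx_queue_stalled(queue) ||
               xenvif_rx_queue_ready(queue) ||
               kthread_should_stop() ||
               queue->vif->disabled;
}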
@@ -151,24 +136,13 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
 	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
 }
 
-/* Callback to wake the queue's thread and turn the carrier off on timeout */
-static void xenvif_rx_stalled(unsigned long data)
-{
-	struct xenvif_queue *queue = (struct xenvif_queue *)data;
-
-	if (xenvif_queue_stopped(queue)) {
-		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
-		xenvif_kick_thread(queue);
-	}
-}
-
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
 	unsigned int num_queues = vif->num_queues;
 	u16 index;
-	int min_slots_needed;
+	struct xenvif_rx_cb *cb;
 
 	BUG_ON(skb->dev != dev);
 
@@ -191,30 +165,10 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	    !xenvif_schedulable(vif))
 		goto drop;
 
-	/* At best we'll need one slot for the header and one for each
-	 * frag.
-	 */
-	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
-
-	/* If the skb is GSO then we'll also need an extra slot for the
-	 * metadata.
-	 */
-	if (skb_is_gso(skb))
-		min_slots_needed++;
+	cb = XENVIF_RX_CB(skb);
+	cb->expires = jiffies + rx_drain_timeout_jiffies;
 
-	/* If the skb can't possibly fit in the remaining slots
-	 * then turn off the queue to give the ring a chance to
-	 * drain.
-	 */
-	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
-		queue->rx_stalled.function = xenvif_rx_stalled;
-		queue->rx_stalled.data = (unsigned long)queue;
-		xenvif_stop_queue(queue);
-		mod_timer(&queue->rx_stalled,
-			  jiffies + rx_drain_timeout_jiffies);
-	}
-
-	skb_queue_tail(&queue->rx_queue, skb);
+	xenvif_rx_queue_tail(queue, skb);
 	xenvif_kick_thread(queue);
 
 	return NETDEV_TX_OK;
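
Rather than stopping the queue and arming the rx_stalled timer, each skb now carries its own deadline in its control block, and the guest Rx thread discards anything the frontend has not consumed in time. A sketch of that purge step, assuming an xenvif_rx_dequeue() helper in the companion netback.c change that also maintains the queued-byte count:

static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
        struct sk_buff *skb;

        for (;;) {
                skb = skb_peek(&queue->rx_queue);
                if (!skb)
                        break;
                /* skbs are queued in arrival order, so the head has the
                 * earliest deadline; stop at the first unexpired one.
                 */
                if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
                        break;
                xenvif_rx_dequeue(queue);       /* assumed helper */
                kfree_skb(skb);
        }
}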
@@ -465,6 +419,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	vif->queues = NULL;
 	vif->num_queues = 0;
 
+	spin_lock_init(&vif->lock);
+
 	dev->netdev_ops = &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
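
The new vif->lock exists so that per-queue stall events toggle the device carrier exactly once, however many queues stall or recover. A sketch of the accounting it would protect; the vif->stalled_queues counter is an assumed companion field from the same series:

static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
        struct xenvif *vif = queue->vif;

        queue->stalled = true;

        /* The first queue to stall takes the carrier down. */
        spin_lock(&vif->lock);
        if (vif->stalled_queues++ == 0)
                netif_carrier_off(vif->dev);
        spin_unlock(&vif->lock);
}

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
        struct xenvif *vif = queue->vif;

        queue->stalled = false;

        /* The last queue to recover brings the carrier back up. */
        spin_lock(&vif->lock);
        if (--vif->stalled_queues == 0)
                netif_carrier_on(vif->dev);
        spin_unlock(&vif->lock);
}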
@@ -508,6 +464,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 	init_timer(&queue->credit_timeout);
 	queue->credit_window_start = get_jiffies_64();
 
+	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;
+
 	skb_queue_head_init(&queue->rx_queue);
 	skb_queue_head_init(&queue->tx_queue);
 
@@ -539,8 +497,6 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
 	}
 
-	init_timer(&queue->rx_stalled);
-
 	return 0;
 }
 
@@ -551,7 +507,6 @@ void xenvif_carrier_on(struct xenvif *vif)
 	dev_set_mtu(vif->dev, ETH_DATA_LEN);
 	netdev_update_features(vif->dev);
 	set_bit(VIF_STATUS_CONNECTED, &vif->status);
-	netif_carrier_on(vif->dev);
 	if (netif_running(vif->dev))
 		xenvif_up(vif);
 	rtnl_unlock();
@@ -611,6 +566,8 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 		disable_irq(queue->rx_irq);
 	}
 
+	queue->stalled = true;
+
 	task = kthread_create(xenvif_kthread_guest_rx,
 			      (void *)queue, "%s-guest-rx", queue->name);
 	if (IS_ERR(task)) {
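
Every queue starts out stalled, so the carrier is only raised once the frontend shows it is consuming requests; that is also why the unconditional netif_carrier_on() disappears from xenvif_carrier_on() above. A sketch of the detection the Rx thread might run, assuming a queue->last_rx_time timestamp and an rx_stall_timeout_jiffies module parameter alongside the existing rx_drain_timeout_jiffies:

static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
{
        RING_IDX prod = queue->rx.sring->req_prod;
        RING_IDX cons = queue->rx.req_cons;

        /* Stalled: not yet marked, the frontend offers too few request
         * slots to place a worst-case packet, and it has made no
         * progress for a full stall timeout.
         */
        return !queue->stalled &&
               prod - cons < XEN_NETBK_RX_SLOTS_MAX &&
               time_after(jiffies,
                          queue->last_rx_time + rx_stall_timeout_jiffies);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
        RING_IDX prod = queue->rx.sring->req_prod;
        RING_IDX cons = queue->rx.req_cons;

        return queue->stalled && prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
}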
@@ -674,7 +631,6 @@ void xenvif_disconnect(struct xenvif *vif)
 	netif_napi_del(&queue->napi);
 
 	if (queue->task) {
-		del_timer_sync(&queue->rx_stalled);
 		kthread_stop(queue->task);
 		queue->task = NULL;
 	}