 drivers/net/xen-netback/common.h    | 19 ++++++++++++++++---
 drivers/net/xen-netback/interface.c | 68 ++++++++++++++++-----------
 drivers/net/xen-netback/netback.c   | 99 +++++++++++++++++++++++--------
 3 files changed, 140 insertions(+), 46 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 28c98229e95f..ef3026f46a37 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -176,9 +176,9 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 	struct xen_netif_rx_back_ring rx;
 	struct sk_buff_head rx_queue;
 	RING_IDX rx_last_skb_slots;
-	bool rx_queue_purge;
+	unsigned long status;
 
-	struct timer_list wake_queue;
+	struct timer_list rx_stalled;
 
 	struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];
 
@@ -198,6 +198,20 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 	struct xenvif_stats stats;
 };
 
+enum state_bit_shift {
+	/* This bit marks that the vif is connected */
+	VIF_STATUS_CONNECTED,
+	/* This bit signals the RX thread that queuing was stopped (in
+	 * start_xmit), and either the timer fired or an RX interrupt came
+	 */
+	QUEUE_STATUS_RX_PURGE_EVENT,
+	/* This bit tells the interrupt handler that this queue was the reason
+	 * for the carrier off, so it should kick the thread. Only queues which
+	 * brought it down can turn on the carrier.
+	 */
+	QUEUE_STATUS_RX_STALLED
+};
+
 struct xenvif {
 	/* Unique identifier for this interface. */
 	domid_t domid;
@@ -220,6 +234,7 @@ struct xenvif {
 	 * frontend is rogue.
 	 */
 	bool disabled;
+	unsigned long status;
 
 	/* Queues */
 	struct xenvif_queue *queues;
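
The common.h changes above replace the old per-queue bool flag with a single
unsigned long status word per vif and per queue, manipulated through the
kernel's atomic bitops (set_bit, test_bit, test_and_clear_bit), so the
interrupt handler and the RX kthread can signal each other without taking a
lock. As a rough aid, here is a minimal userspace sketch of that pattern
built on GCC/Clang atomic builtins; it only mimics the kernel helpers and is
not the kernel implementation:

#include <stdio.h>

enum state_bit_shift {
	VIF_STATUS_CONNECTED,
	QUEUE_STATUS_RX_PURGE_EVENT,
	QUEUE_STATUS_RX_STALLED
};

/* Userspace stand-ins for the kernel's atomic bitops */
static void set_bit_ul(int nr, unsigned long *addr)
{
	__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_SEQ_CST);
}

static int test_bit_ul(int nr, const unsigned long *addr)
{
	return (__atomic_load_n(addr, __ATOMIC_SEQ_CST) >> nr) & 1UL;
}

static int test_and_clear_bit_ul(int nr, unsigned long *addr)
{
	unsigned long old = __atomic_fetch_and(addr, ~(1UL << nr),
					       __ATOMIC_SEQ_CST);
	return (old >> nr) & 1UL;
}

int main(void)
{
	unsigned long status = 0;

	/* interrupt side: raise the event */
	set_bit_ul(QUEUE_STATUS_RX_PURGE_EVENT, &status);
	/* thread side: consume it exactly once */
	if (test_and_clear_bit_ul(QUEUE_STATUS_RX_PURGE_EVENT, &status))
		printf("purge event consumed\n");
	printf("stalled: %d\n",
	       test_bit_ul(QUEUE_STATUS_RX_STALLED, &status));
	return 0;
}

test_and_clear_bit is the key primitive here: the kthread consumes the purge
event atomically, so a racing interrupt either lands before the clear (and is
handled now) or after it (and wakes the thread again).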
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index bd59d9dbf27b..48a55cda979b 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -55,7 +55,8 @@ static inline void xenvif_stop_queue(struct xenvif_queue *queue)
 
 int xenvif_schedulable(struct xenvif *vif)
 {
-	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
+	return netif_running(vif->dev) &&
+		test_bit(VIF_STATUS_CONNECTED, &vif->status);
 }
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
@@ -77,8 +78,12 @@ int xenvif_poll(struct napi_struct *napi, int budget)
 	/* This vif is rogue, we pretend we've there is nothing to do
 	 * for this vif to deschedule it from NAPI. But this interface
 	 * will be turned off in thread context later.
+	 * Also, if a guest doesn't post enough slots to receive data on one of
+	 * its queues, the carrier goes down and NAPI is descheduled here so
+	 * the guest can't send more packets until it's ready to receive.
 	 */
-	if (unlikely(queue->vif->disabled)) {
+	if (unlikely(queue->vif->disabled ||
+		     !netif_carrier_ok(queue->vif->dev))) {
 		napi_complete(napi);
 		return 0;
 	}
@@ -96,7 +101,16 @@ int xenvif_poll(struct napi_struct *napi, int budget)
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif_queue *queue = dev_id;
+	struct netdev_queue *net_queue =
+		netdev_get_tx_queue(queue->vif->dev, queue->id);
 
+	/* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
+	 * the carrier went down and this queue was previously blocked
+	 */
+	if (unlikely(netif_tx_queue_stopped(net_queue) ||
+		     (!netif_carrier_ok(queue->vif->dev) &&
+		      test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
+		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
 	xenvif_kick_thread(queue);
 
 	return IRQ_HANDLED;
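
The reworked RX interrupt raises a purge event in exactly two cases: the TX
queue was stopped by start_xmit, or the carrier is already down and this
queue is the one flagged RX_STALLED. A standalone sketch of that predicate
(hypothetical helper name, for illustration only):

#include <stdbool.h>
#include <stdio.h>

static bool should_signal_purge(bool tx_queue_stopped, bool carrier_ok,
				bool this_queue_stalled)
{
	return tx_queue_stopped ||
	       (!carrier_ok && this_queue_stalled);
}

int main(void)
{
	/* QDisc stopped in start_xmit: event raised whatever the carrier */
	printf("%d\n", should_signal_purge(true, true, false));   /* 1 */
	/* carrier down, but a different queue caused it: no event */
	printf("%d\n", should_signal_purge(false, false, false)); /* 0 */
	/* carrier down and this queue stalled: it may restore the link */
	printf("%d\n", should_signal_purge(false, false, true));  /* 1 */
	return 0;
}

The second case is what the QUEUE_STATUS_RX_STALLED comment in common.h
describes: only a queue that helped bring the carrier down gets to kick the
thread that can bring it back up.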
@@ -124,16 +138,14 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
 	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
 }
 
-/* Callback to wake the queue and drain it on timeout */
-static void xenvif_wake_queue_callback(unsigned long data)
+/* Callback to wake the queue's thread and turn the carrier off on timeout */
+static void xenvif_rx_stalled(unsigned long data)
 {
 	struct xenvif_queue *queue = (struct xenvif_queue *)data;
 
 	if (xenvif_queue_stopped(queue)) {
-		netdev_err(queue->vif->dev, "draining TX queue\n");
-		queue->rx_queue_purge = true;
+		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
 		xenvif_kick_thread(queue);
-		xenvif_wake_queue(queue);
 	}
 }
 
@@ -182,11 +194,11 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * drain.
 	 */
 	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
-		queue->wake_queue.function = xenvif_wake_queue_callback;
-		queue->wake_queue.data = (unsigned long)queue;
+		queue->rx_stalled.function = xenvif_rx_stalled;
+		queue->rx_stalled.data = (unsigned long)queue;
 		xenvif_stop_queue(queue);
-		mod_timer(&queue->wake_queue,
+		mod_timer(&queue->rx_stalled,
 			  jiffies + rx_drain_timeout_jiffies);
 	}
 
 	skb_queue_tail(&queue->rx_queue, skb);
@@ -267,7 +279,7 @@ static void xenvif_down(struct xenvif *vif)
 static int xenvif_open(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
-	if (netif_carrier_ok(dev))
+	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
 		xenvif_up(vif);
 	netif_tx_start_all_queues(dev);
 	return 0;
@@ -276,7 +288,7 @@ static int xenvif_open(struct net_device *dev)
 static int xenvif_close(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
-	if (netif_carrier_ok(dev))
+	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
 		xenvif_down(vif);
 	netif_tx_stop_all_queues(dev);
 	return 0;
@@ -514,7 +526,7 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
 	}
 
-	init_timer(&queue->wake_queue);
+	init_timer(&queue->rx_stalled);
 
 	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
 			XENVIF_NAPI_WEIGHT);
@@ -528,6 +540,7 @@ void xenvif_carrier_on(struct xenvif *vif)
 	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
 		dev_set_mtu(vif->dev, ETH_DATA_LEN);
 	netdev_update_features(vif->dev);
+	set_bit(VIF_STATUS_CONNECTED, &vif->status);
 	netif_carrier_on(vif->dev);
 	if (netif_running(vif->dev))
 		xenvif_up(vif);
@@ -625,9 +638,11 @@ void xenvif_carrier_off(struct xenvif *vif)
 	struct net_device *dev = vif->dev;
 
 	rtnl_lock();
-	netif_carrier_off(dev); /* discard queued packets */
-	if (netif_running(dev))
-		xenvif_down(vif);
+	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
+		netif_carrier_off(dev); /* discard queued packets */
+		if (netif_running(dev))
+			xenvif_down(vif);
+	}
 	rtnl_unlock();
 }
 
@@ -656,14 +671,13 @@ void xenvif_disconnect(struct xenvif *vif)
 	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 
-	if (netif_carrier_ok(vif->dev))
-		xenvif_carrier_off(vif);
+	xenvif_carrier_off(vif);
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
 		queue = &vif->queues[queue_index];
 
 		if (queue->task) {
-			del_timer_sync(&queue->wake_queue);
+			del_timer_sync(&queue->rx_stalled);
 			kthread_stop(queue->task);
 			queue->task = NULL;
 		}
@@ -705,16 +719,12 @@ void xenvif_free(struct xenvif *vif)
 	/* Here we want to avoid timeout messages if an skb can be legitimately
 	 * stuck somewhere else. Realistically this could be an another vif's
 	 * internal or QDisc queue. That another vif also has this
-	 * rx_drain_timeout_msecs timeout, but the timer only ditches the
-	 * internal queue. After that, the QDisc queue can put in worst case
-	 * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that another vif's
-	 * internal queue, so we need several rounds of such timeouts until we
-	 * can be sure that no another vif should have skb's from us. We are
-	 * not sending more skb's, so newly stuck packets are not interesting
-	 * for us here.
+	 * rx_drain_timeout_msecs timeout, so give it time to drain out.
+	 * Although if that other guest wakes up just before its timeout happens
+	 * and takes only one skb from QDisc, it can hold onto other skbs for a
+	 * longer period.
 	 */
-	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
-		DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
+	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000);
 
 	unregister_netdev(vif->dev);
 
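
With the purge event draining everything in one round, the worst-case skb
lifetime collapses from several timeout periods to a single one. A worked
comparison of the old and new formulas, using assumed defaults of that
kernel version (rx_drain_timeout_msecs = 10000, XENVIF_QUEUE_LENGTH = 32,
XEN_NETIF_RX_RING_SIZE = 256, MAX_SKB_FRAGS = 17; treat these constants as
assumptions, not quotes from the tree):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))
#define RX_DRAIN_TIMEOUT_MSECS		10000	/* assumed default */
#define XENVIF_QUEUE_LENGTH		32	/* assumed */
#define XEN_NETIF_RX_RING_SIZE		256	/* assumed */
#define MAX_SKB_FRAGS			17	/* assumed */

int main(void)
{
	unsigned int old_lifetime = (RX_DRAIN_TIMEOUT_MSECS / 1000) *
		DIV_ROUND_UP(XENVIF_QUEUE_LENGTH,
			     (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
	unsigned int new_lifetime = RX_DRAIN_TIMEOUT_MSECS / 1000;

	/* old: 10 * DIV_ROUND_UP(32, 15) = 10 * 3 = 30 seconds */
	printf("old worst case: %us\n", old_lifetime);
	/* new: a single drain round, 10 seconds */
	printf("new worst case: %us\n", new_lifetime);
	return 0;
}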
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 769e553d3f45..aa2093325be1 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1869,8 +1869,7 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
 static inline int rx_work_todo(struct xenvif_queue *queue)
 {
 	return (!skb_queue_empty(&queue->rx_queue) &&
-	       xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots)) ||
-	       queue->rx_queue_purge;
+	       xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots));
 }
 
 static inline int tx_work_todo(struct xenvif_queue *queue)
@@ -1935,6 +1934,75 @@ static void xenvif_start_queue(struct xenvif_queue *queue)
 		xenvif_wake_queue(queue);
 }
 
+/* Only called from the queue's thread, it handles the situation when the guest
+ * doesn't post enough requests on the receiving ring.
+ * First xenvif_start_xmit disables QDisc and start a timer, and then either the
+ * timer fires, or the guest send an interrupt after posting new request. If it
+ * is the timer, the carrier is turned off here.
+ * */
+static void xenvif_rx_purge_event(struct xenvif_queue *queue)
+{
+	/* Either the last unsuccesful skb or at least 1 slot should fit */
+	int needed = queue->rx_last_skb_slots ?
+		     queue->rx_last_skb_slots : 1;
+
+	/* It is assumed that if the guest post new slots after this, the RX
+	 * interrupt will set the QUEUE_STATUS_RX_PURGE_EVENT bit and wake up
+	 * the thread again
+	 */
+	set_bit(QUEUE_STATUS_RX_STALLED, &queue->status);
+	if (!xenvif_rx_ring_slots_available(queue, needed)) {
+		rtnl_lock();
+		if (netif_carrier_ok(queue->vif->dev)) {
+			/* Timer fired and there are still no slots. Turn off
+			 * everything except the interrupts
+			 */
+			netif_carrier_off(queue->vif->dev);
+			skb_queue_purge(&queue->rx_queue);
+			queue->rx_last_skb_slots = 0;
+			if (net_ratelimit())
+				netdev_err(queue->vif->dev, "Carrier off due to lack of guest response on queue %d\n", queue->id);
+		} else {
+			/* Probably an another queue already turned the carrier
+			 * off, make sure nothing is stucked in the internal
+			 * queue of this queue
+			 */
+			skb_queue_purge(&queue->rx_queue);
+			queue->rx_last_skb_slots = 0;
+		}
+		rtnl_unlock();
+	} else if (!netif_carrier_ok(queue->vif->dev)) {
+		unsigned int num_queues = queue->vif->num_queues;
+		unsigned int i;
+		/* The carrier was down, but an interrupt kicked
+		 * the thread again after new requests were
+		 * posted
+		 */
+		clear_bit(QUEUE_STATUS_RX_STALLED,
+			  &queue->status);
+		rtnl_lock();
+		netif_carrier_on(queue->vif->dev);
+		netif_tx_wake_all_queues(queue->vif->dev);
+		rtnl_unlock();
+
+		for (i = 0; i < num_queues; i++) {
+			struct xenvif_queue *temp = &queue->vif->queues[i];
+
+			xenvif_napi_schedule_or_enable_events(temp);
+		}
+		if (net_ratelimit())
+			netdev_err(queue->vif->dev, "Carrier on again\n");
+	} else {
+		/* Queuing were stopped, but the guest posted
+		 * new requests and sent an interrupt
+		 */
+		clear_bit(QUEUE_STATUS_RX_STALLED,
+			  &queue->status);
+		del_timer_sync(&queue->rx_stalled);
+		xenvif_start_queue(queue);
+	}
+}
+
 int xenvif_kthread_guest_rx(void *data)
 {
 	struct xenvif_queue *queue = data;
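
xenvif_rx_purge_event above resolves into exactly three outcomes, keyed on
ring-slot availability and carrier state. A condensed, compilable sketch of
that decision (stubbed predicates, not the kernel code):

#include <stdio.h>

enum purge_outcome { PURGE_AND_CARRIER_OFF, CARRIER_BACK_ON, QUEUE_RESTART };

static enum purge_outcome classify(int slots_available, int carrier_ok)
{
	if (!slots_available)
		return PURGE_AND_CARRIER_OFF;	/* guest still stuck: purge
						 * rx_queue, and drop the
						 * carrier if it was still up */
	if (!carrier_ok)
		return CARRIER_BACK_ON;		/* guest posted slots after a
						 * stall: carrier on, wake all
						 * queues and their NAPI */
	return QUEUE_RESTART;			/* guest recovered before the
						 * timer: delete the timer and
						 * restart just this queue */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(0, 1),	/* 0: carrier goes down */
	       classify(1, 0),	/* 1: carrier comes back */
	       classify(1, 1));	/* 2: plain queue restart */
	return 0;
}

Note that QUEUE_STATUS_RX_STALLED is set before the slot check, so an
interrupt that races with new guest requests re-raises the purge event
instead of being lost.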
@@ -1944,8 +2012,12 @@ int xenvif_kthread_guest_rx(void *data)
 		wait_event_interruptible(queue->wq,
 					 rx_work_todo(queue) ||
 					 queue->vif->disabled ||
+					 test_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status) ||
 					 kthread_should_stop());
 
+		if (kthread_should_stop())
+			break;
+
 		/* This frontend is found to be rogue, disable it in
 		 * kthread context. Currently this is only set when
 		 * netback finds out frontend sends malformed packet,
@@ -1953,26 +2025,23 @@ int xenvif_kthread_guest_rx(void *data)
 		 * context so we defer it here, if this thread is
 		 * associated with queue 0.
 		 */
-		if (unlikely(queue->vif->disabled && netif_carrier_ok(queue->vif->dev) && queue->id == 0))
+		if (unlikely(queue->vif->disabled && queue->id == 0))
 			xenvif_carrier_off(queue->vif);
-
-		if (kthread_should_stop())
-			break;
-
-		if (queue->rx_queue_purge) {
+		else if (unlikely(test_and_clear_bit(QUEUE_STATUS_RX_PURGE_EVENT,
+						     &queue->status))) {
+			xenvif_rx_purge_event(queue);
+		} else if (!netif_carrier_ok(queue->vif->dev)) {
+			/* Another queue stalled and turned the carrier off, so
+			 * purge the internal queue of queues which were not
+			 * blocked
+			 */
 			skb_queue_purge(&queue->rx_queue);
-			queue->rx_queue_purge = false;
+			queue->rx_last_skb_slots = 0;
 		}
 
 		if (!skb_queue_empty(&queue->rx_queue))
 			xenvif_rx_action(queue);
 
-		if (skb_queue_empty(&queue->rx_queue) &&
-		    xenvif_queue_stopped(queue)) {
-			del_timer_sync(&queue->wake_queue);
-			xenvif_start_queue(queue);
-		}
-
 		cond_resched();
 	}
 