Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--  drivers/net/xen-netback/netback.c | 142
1 file changed, 120 insertions(+), 22 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index c65b636bcab9..08f65996534c 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1525,10 +1525,12 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
 	/* remove traces of mapped pages and frag_list */
 	skb_frag_list_init(skb);
 	uarg = skb_shinfo(skb)->destructor_arg;
+	/* increase inflight counter to offset decrement in callback */
+	atomic_inc(&queue->inflight_packets);
 	uarg->callback(uarg, true);
 	skb_shinfo(skb)->destructor_arg = NULL;
 
-	skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+	xenvif_skb_zerocopy_prepare(queue, nskb);
 	kfree_skb(nskb);
 
 	return 0;
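
[Editor's note] The open-coded SKBTX_DEV_ZEROCOPY flag set is replaced throughout by a helper that also accounts for the packet in a new per-queue inflight counter; that is why the frag_list path above needs an extra atomic_inc() to offset the decrement done when the callback fires. The helper definitions are not part of this diff (they would live in the driver's common.h); a minimal sketch of what they are assumed to look like:

/* Sketch only: assumed helper definitions, not shown in this diff. */
static inline void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
					       struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	atomic_inc(&queue->inflight_packets);
}

static inline void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);
}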
@@ -1589,7 +1591,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 			if (net_ratelimit())
 				netdev_err(queue->vif->dev,
 					   "Not enough memory to consolidate frag_list!\n");
-			skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+			xenvif_skb_zerocopy_prepare(queue, skb);
 			kfree_skb(skb);
 			continue;
 		}
@@ -1609,7 +1611,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 				   "Can't setup checksum in net_tx_action\n");
 			/* We have to set this flag to trigger the callback */
 			if (skb_shinfo(skb)->destructor_arg)
-				skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+				xenvif_skb_zerocopy_prepare(queue, skb);
 			kfree_skb(skb);
 			continue;
 		}
@@ -1641,7 +1643,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
 		 */
 		if (skb_shinfo(skb)->destructor_arg) {
-			skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+			xenvif_skb_zerocopy_prepare(queue, skb);
 			queue->stats.tx_zerocopy_sent++;
 		}
 
@@ -1681,6 +1683,7 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
 		queue->stats.tx_zerocopy_success++;
 	else
 		queue->stats.tx_zerocopy_fail++;
+	xenvif_skb_zerocopy_complete(queue);
 }
 
 static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
@@ -1869,8 +1872,7 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
 static inline int rx_work_todo(struct xenvif_queue *queue)
 {
 	return (!skb_queue_empty(&queue->rx_queue) &&
-		xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots)) ||
-		queue->rx_queue_purge;
+		xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots));
 }
 
 static inline int tx_work_todo(struct xenvif_queue *queue)
@@ -1935,6 +1937,75 @@ static void xenvif_start_queue(struct xenvif_queue *queue)
 	xenvif_wake_queue(queue);
 }
 
+/* Only called from the queue's thread, it handles the situation when the guest
+ * doesn't post enough requests on the receiving ring.
+ * First xenvif_start_xmit disables QDisc and starts a timer, and then either
+ * the timer fires, or the guest sends an interrupt after posting new requests.
+ * If it is the timer, the carrier is turned off here.
+ */
+static void xenvif_rx_purge_event(struct xenvif_queue *queue)
+{
+	/* Either the last unsuccessful skb or at least 1 slot should fit */
+	int needed = queue->rx_last_skb_slots ?
+		     queue->rx_last_skb_slots : 1;
+
+	/* It is assumed that if the guest posts new slots after this, the RX
+	 * interrupt will set the QUEUE_STATUS_RX_PURGE_EVENT bit and wake up
+	 * the thread again
+	 */
+	set_bit(QUEUE_STATUS_RX_STALLED, &queue->status);
+	if (!xenvif_rx_ring_slots_available(queue, needed)) {
+		rtnl_lock();
+		if (netif_carrier_ok(queue->vif->dev)) {
+			/* Timer fired and there are still no slots. Turn off
+			 * everything except the interrupts
+			 */
+			netif_carrier_off(queue->vif->dev);
+			skb_queue_purge(&queue->rx_queue);
+			queue->rx_last_skb_slots = 0;
+			if (net_ratelimit())
+				netdev_err(queue->vif->dev, "Carrier off due to lack of guest response on queue %d\n", queue->id);
+		} else {
+			/* Probably another queue already turned the carrier
+			 * off, make sure nothing is stuck in the internal
+			 * queue of this queue
+			 */
+			skb_queue_purge(&queue->rx_queue);
+			queue->rx_last_skb_slots = 0;
+		}
+		rtnl_unlock();
+	} else if (!netif_carrier_ok(queue->vif->dev)) {
+		unsigned int num_queues = queue->vif->num_queues;
+		unsigned int i;
+		/* The carrier was down, but an interrupt kicked
+		 * the thread again after new requests were
+		 * posted
+		 */
+		clear_bit(QUEUE_STATUS_RX_STALLED,
+			  &queue->status);
+		rtnl_lock();
+		netif_carrier_on(queue->vif->dev);
+		netif_tx_wake_all_queues(queue->vif->dev);
+		rtnl_unlock();
+
+		for (i = 0; i < num_queues; i++) {
+			struct xenvif_queue *temp = &queue->vif->queues[i];
+
+			xenvif_napi_schedule_or_enable_events(temp);
+		}
+		if (net_ratelimit())
+			netdev_err(queue->vif->dev, "Carrier on again\n");
+	} else {
+		/* Queuing was stopped, but the guest posted
+		 * new requests and sent an interrupt
+		 */
+		clear_bit(QUEUE_STATUS_RX_STALLED,
+			  &queue->status);
+		del_timer_sync(&queue->rx_stalled);
+		xenvif_start_queue(queue);
+	}
+}
+
 int xenvif_kthread_guest_rx(void *data)
 {
 	struct xenvif_queue *queue = data;
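
[Editor's note] xenvif_rx_purge_event() is only the consumer side of the protocol described in its comment: something must arm the stall timer and set QUEUE_STATUS_RX_PURGE_EVENT. Neither the timer callback nor the interrupt-side change appears in this file, so the following is a hypothetical sketch of the producer side; the function name and its home (interface.c) are assumptions:

/* Hypothetical sketch: fires when the guest posted no RX requests
 * before the drain timeout; asks the queue's kthread to act.
 */
static void xenvif_rx_stalled(unsigned long data)
{
	struct xenvif_queue *queue = (struct xenvif_queue *)data;

	set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
	wake_up(&queue->wq);
}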
@@ -1944,8 +2015,12 @@ int xenvif_kthread_guest_rx(void *data)
 		wait_event_interruptible(queue->wq,
 					 rx_work_todo(queue) ||
 					 queue->vif->disabled ||
+					 test_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status) ||
 					 kthread_should_stop());
 
+		if (kthread_should_stop())
+			break;
+
 		/* This frontend is found to be rogue, disable it in
 		 * kthread context. Currently this is only set when
 		 * netback finds out frontend sends malformed packet,
@@ -1953,26 +2028,29 @@ int xenvif_kthread_guest_rx(void *data)
 		 * context so we defer it here, if this thread is
 		 * associated with queue 0.
 		 */
-		if (unlikely(queue->vif->disabled && netif_carrier_ok(queue->vif->dev) && queue->id == 0))
+		if (unlikely(queue->vif->disabled && queue->id == 0)) {
 			xenvif_carrier_off(queue->vif);
-
-		if (kthread_should_stop())
-			break;
-
-		if (queue->rx_queue_purge) {
+		} else if (unlikely(queue->vif->disabled)) {
+			/* kthread_stop() would be called upon this thread soon,
+			 * be a bit proactive
+			 */
+			skb_queue_purge(&queue->rx_queue);
+			queue->rx_last_skb_slots = 0;
+		} else if (unlikely(test_and_clear_bit(QUEUE_STATUS_RX_PURGE_EVENT,
+						       &queue->status))) {
+			xenvif_rx_purge_event(queue);
+		} else if (!netif_carrier_ok(queue->vif->dev)) {
+			/* Another queue stalled and turned the carrier off, so
+			 * purge the internal queue of queues which were not
+			 * blocked
+			 */
 			skb_queue_purge(&queue->rx_queue);
-			queue->rx_queue_purge = false;
+			queue->rx_last_skb_slots = 0;
 		}
 
 		if (!skb_queue_empty(&queue->rx_queue))
 			xenvif_rx_action(queue);
 
-		if (skb_queue_empty(&queue->rx_queue) &&
-		    xenvif_queue_stopped(queue)) {
-			del_timer_sync(&queue->wake_queue);
-			xenvif_start_queue(queue);
-		}
-
 		cond_resched();
 	}
 
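[Editor's note] The reworked loop checks its wake-up reasons in strict priority order: stop request, rogue (disabled) frontend, purge event, carrier already off. The two status bits it relies on are defined outside this file; their assumed semantics, inferred from the hunks above:

/* Assumed meaning of the queue->status bits (the definitions live in
 * common.h, which is not part of this diff):
 *
 *   QUEUE_STATUS_RX_PURGE_EVENT - set by the stall timer or the RX
 *     interrupt path; tells the kthread to run xenvif_rx_purge_event()
 *   QUEUE_STATUS_RX_STALLED - set while this queue is waiting for the
 *     guest to post RX requests; cleared once the queue recovers
 */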
@@ -1983,15 +2061,24 @@ int xenvif_kthread_guest_rx(void *data)
 	return 0;
 }
 
+static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
+{
+	/* Dealloc thread must remain running until all inflight
+	 * packets complete.
+	 */
+	return kthread_should_stop() &&
+		!atomic_read(&queue->inflight_packets);
+}
+
 int xenvif_dealloc_kthread(void *data)
 {
 	struct xenvif_queue *queue = data;
 
-	while (!kthread_should_stop()) {
+	for (;;) {
 		wait_event_interruptible(queue->dealloc_wq,
 					 tx_dealloc_work_todo(queue) ||
-					 kthread_should_stop());
-		if (kthread_should_stop())
+					 xenvif_dealloc_kthread_should_stop(queue));
+		if (xenvif_dealloc_kthread_should_stop(queue))
 			break;
 
 		xenvif_tx_dealloc_action(queue);
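
[Editor's note] Switching to for (;;) gated by xenvif_dealloc_kthread_should_stop() means kthread_stop() on the dealloc task now blocks until every inflight zerocopy packet has completed, instead of letting the thread exit with grants still mapped. A hedged sketch of the teardown ordering this enables; the helper name is hypothetical and the real logic is assumed to live in xenvif_disconnect() in interface.c:

/* Sketch, assuming teardown stops the RX thread first and then the
 * dealloc thread:
 */
static void xenvif_queue_stop_threads(struct xenvif_queue *queue)
{
	if (queue->task)
		kthread_stop(queue->task);	/* RX thread exits promptly */

	if (queue->dealloc_task)
		/* Returns only once inflight_packets has dropped to 0,
		 * because xenvif_dealloc_kthread_should_stop() gates the
		 * thread's exit on the counter.
		 */
		kthread_stop(queue->dealloc_task);
}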
@@ -2027,6 +2114,13 @@ static int __init netback_init(void)
 
 	rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
 
+#ifdef CONFIG_DEBUG_FS
+	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
+	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
+		pr_warn("Init of debugfs returned %ld!\n",
+			PTR_ERR(xen_netback_dbg_root));
+#endif /* CONFIG_DEBUG_FS */
+
 	return 0;
 
 failed_init:
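
[Editor's note] The IS_ERR_OR_NULL() check covers both of debugfs' failure conventions (NULL on creation failure, ERR_PTR when debugfs is unavailable). A hypothetical illustration of hanging a per-queue file off the new root; the file name and fops are placeholders, not part of this diff:

#ifdef CONFIG_DEBUG_FS
/* Hypothetical: attach a per-queue debugfs file under the new root. */
static void xenvif_debugfs_add_queue(struct xenvif_queue *queue,
				     const struct file_operations *fops)
{
	char name[32];

	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
		return;	/* debugfs unavailable or init failed */

	snprintf(name, sizeof(name), "io_ring_q%d", queue->id);
	debugfs_create_file(name, 0600, xen_netback_dbg_root,
			    queue, fops);
}
#endif /* CONFIG_DEBUG_FS */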
@@ -2037,6 +2131,10 @@ module_init(netback_init);
 
 static void __exit netback_fini(void)
 {
+#ifdef CONFIG_DEBUG_FS
+	if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
+		debugfs_remove_recursive(xen_netback_dbg_root);
+#endif /* CONFIG_DEBUG_FS */
 	xenvif_xenbus_fini();
 }
 module_exit(netback_fini);