about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDavid Vrabel <david.vrabel@citrix.com>2014-07-02 11:09:14 -0400
committerDavid S. Miller <davem@davemloft.net>2014-07-08 14:21:03 -0400
commitf50b407653f64e76d1c9abda61d0d85cde3ca9ca (patch)
tree83856002d96da7aacba68de416ca34e44169b894
parent6e08d5e3c8236e7484229e46fdf92006e1dd4c49 (diff)
xen-netfront: don't nest queue locks in xennet_connect()
The nesting of the per-queue rx_lock and tx_lock in xennet_connect() is confusing to both humans and lockdep. The locking is safe because this is the only place where the locks are nested in this way, but lockdep still warns. Instead of adding the missing lockdep annotations, refactor the locking to avoid the confusing nesting. This is still safe, because the xenbus connection state changes are all serialized by the xenwatch thread.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reported-by: Sander Eikelenboom <linux@eikelenboom.it>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- drivers/net/xen-netfront.c | 16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 2ccb4a02368b..6a37d62de40b 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -2046,13 +2046,15 @@ static int xennet_connect(struct net_device *dev)
 	/* By now, the queue structures have been set up */
 	for (j = 0; j < num_queues; ++j) {
 		queue = &np->queues[j];
-		spin_lock_bh(&queue->rx_lock);
-		spin_lock_irq(&queue->tx_lock);
 
 		/* Step 1: Discard all pending TX packet fragments. */
+		spin_lock_irq(&queue->tx_lock);
 		xennet_release_tx_bufs(queue);
+		spin_unlock_irq(&queue->tx_lock);
 
 		/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
+		spin_lock_bh(&queue->rx_lock);
+
 		for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
 			skb_frag_t *frag;
 			const struct page *page;
@@ -2076,6 +2078,8 @@ static int xennet_connect(struct net_device *dev)
 		}
 
 		queue->rx.req_prod_pvt = requeue_idx;
+
+		spin_unlock_bh(&queue->rx_lock);
 	}
 
 	/*
@@ -2087,13 +2091,17 @@ static int xennet_connect(struct net_device *dev)
 	netif_carrier_on(np->netdev);
 	for (j = 0; j < num_queues; ++j) {
 		queue = &np->queues[j];
+
 		notify_remote_via_irq(queue->tx_irq);
 		if (queue->tx_irq != queue->rx_irq)
 			notify_remote_via_irq(queue->rx_irq);
-		xennet_tx_buf_gc(queue);
-		xennet_alloc_rx_buffers(queue);
 
+		spin_lock_irq(&queue->tx_lock);
+		xennet_tx_buf_gc(queue);
 		spin_unlock_irq(&queue->tx_lock);
+
+		spin_lock_bh(&queue->rx_lock);
+		xennet_alloc_rx_buffers(queue);
 		spin_unlock_bh(&queue->rx_lock);
 	}
 