Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--  drivers/net/xen-netfront.c | 27 ++++++++++++++++-----------
1 file changed, 16 insertions(+), 11 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 2ccb4a02368b..055222bae6e4 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1439,16 +1439,11 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 	unsigned int i = 0;
 	unsigned int num_queues = info->netdev->real_num_tx_queues;
 
+	netif_carrier_off(info->netdev);
+
 	for (i = 0; i < num_queues; ++i) {
 		struct netfront_queue *queue = &info->queues[i];
 
-		/* Stop old i/f to prevent errors whilst we rebuild the state. */
-		spin_lock_bh(&queue->rx_lock);
-		spin_lock_irq(&queue->tx_lock);
-		netif_carrier_off(queue->info->netdev);
-		spin_unlock_irq(&queue->tx_lock);
-		spin_unlock_bh(&queue->rx_lock);
-
 		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
 			unbind_from_irqhandler(queue->tx_irq, queue);
 		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
@@ -1458,6 +1453,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 		queue->tx_evtchn = queue->rx_evtchn = 0;
 		queue->tx_irq = queue->rx_irq = 0;
 
+		napi_synchronize(&queue->napi);
+
 		/* End access and free the pages */
 		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
 		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
@@ -2046,13 +2043,15 @@ static int xennet_connect(struct net_device *dev)
 	/* By now, the queue structures have been set up */
 	for (j = 0; j < num_queues; ++j) {
 		queue = &np->queues[j];
-		spin_lock_bh(&queue->rx_lock);
-		spin_lock_irq(&queue->tx_lock);
 
 		/* Step 1: Discard all pending TX packet fragments. */
+		spin_lock_irq(&queue->tx_lock);
 		xennet_release_tx_bufs(queue);
+		spin_unlock_irq(&queue->tx_lock);
 
 		/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
+		spin_lock_bh(&queue->rx_lock);
+
 		for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
 			skb_frag_t *frag;
 			const struct page *page;
@@ -2076,6 +2075,8 @@ static int xennet_connect(struct net_device *dev)
 		}
 
 		queue->rx.req_prod_pvt = requeue_idx;
+
+		spin_unlock_bh(&queue->rx_lock);
 	}
 
 	/*
@@ -2087,13 +2088,17 @@ static int xennet_connect(struct net_device *dev)
 	netif_carrier_on(np->netdev);
 	for (j = 0; j < num_queues; ++j) {
 		queue = &np->queues[j];
+
 		notify_remote_via_irq(queue->tx_irq);
 		if (queue->tx_irq != queue->rx_irq)
 			notify_remote_via_irq(queue->rx_irq);
-		xennet_tx_buf_gc(queue);
-		xennet_alloc_rx_buffers(queue);
 
+		spin_lock_irq(&queue->tx_lock);
+		xennet_tx_buf_gc(queue);
 		spin_unlock_irq(&queue->tx_lock);
+
+		spin_lock_bh(&queue->rx_lock);
+		xennet_alloc_rx_buffers(queue);
 		spin_unlock_bh(&queue->rx_lock);
 	}
 
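Taken together, the patch brings the carrier down once for the whole device instead of once per queue under both ring locks, drains NAPI with napi_synchronize() before the shared rings are torn down, and narrows each lock in xennet_connect() to the single step that needs it. A condensed sketch of the resulting teardown ordering in xennet_disconnect_backend() follows; this is a sketch distilled from the hunks above, not the verbatim kernel source, and the IRQ-unbind details are elided:

/* Sketch only, condensed from the hunks above: post-patch teardown order. */
netif_carrier_off(info->netdev);        /* stop the interface once, up front */

for (i = 0; i < num_queues; ++i) {
	struct netfront_queue *queue = &info->queues[i];

	/* ... unbind tx/rx event-channel IRQ handlers, zero evtchn/irq ... */

	napi_synchronize(&queue->napi);     /* wait for in-flight polls to finish */

	/* Only now is it safe to end access and free the ring pages. */
	xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
	xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
}

The ordering is the point: once the IRQs are unbound and NAPI is quiesced, no poll can still be walking the rings when xennet_end_access() releases the pages, which is what allows the old per-queue carrier-off sequence under both locks to be dropped.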
