diff options
Diffstat (limited to 'drivers/net/xen-netfront.c')
| -rw-r--r-- | drivers/net/xen-netfront.c | 81 |
1 file changed, 14 insertions, 67 deletions
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 055222bae6e4..ca82f545ec2c 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
| @@ -628,9 +628,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 628 | slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) + | 628 | slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) + |
| 629 | xennet_count_skb_frag_slots(skb); | 629 | xennet_count_skb_frag_slots(skb); |
| 630 | if (unlikely(slots > MAX_SKB_FRAGS + 1)) { | 630 | if (unlikely(slots > MAX_SKB_FRAGS + 1)) { |
| 631 | net_alert_ratelimited( | 631 | net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n", |
| 632 | "xennet: skb rides the rocket: %d slots\n", slots); | 632 | slots, skb->len); |
| 633 | goto drop; | 633 | if (skb_linearize(skb)) |
| 634 | goto drop; | ||
| 634 | } | 635 | } |
| 635 | 636 | ||
| 636 | spin_lock_irqsave(&queue->tx_lock, flags); | 637 | spin_lock_irqsave(&queue->tx_lock, flags); |
| @@ -1196,22 +1197,6 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue) | |||
| 1196 | spin_unlock_bh(&queue->rx_lock); | 1197 | spin_unlock_bh(&queue->rx_lock); |
| 1197 | } | 1198 | } |
| 1198 | 1199 | ||
| 1199 | static void xennet_uninit(struct net_device *dev) | ||
| 1200 | { | ||
| 1201 | struct netfront_info *np = netdev_priv(dev); | ||
| 1202 | unsigned int num_queues = dev->real_num_tx_queues; | ||
| 1203 | struct netfront_queue *queue; | ||
| 1204 | unsigned int i; | ||
| 1205 | |||
| 1206 | for (i = 0; i < num_queues; ++i) { | ||
| 1207 | queue = &np->queues[i]; | ||
| 1208 | xennet_release_tx_bufs(queue); | ||
| 1209 | xennet_release_rx_bufs(queue); | ||
| 1210 | gnttab_free_grant_references(queue->gref_tx_head); | ||
| 1211 | gnttab_free_grant_references(queue->gref_rx_head); | ||
| 1212 | } | ||
| 1213 | } | ||
| 1214 | |||
| 1215 | static netdev_features_t xennet_fix_features(struct net_device *dev, | 1200 | static netdev_features_t xennet_fix_features(struct net_device *dev, |
| 1216 | netdev_features_t features) | 1201 | netdev_features_t features) |
| 1217 | { | 1202 | { |
| @@ -1313,7 +1298,6 @@ static void xennet_poll_controller(struct net_device *dev) | |||
| 1313 | 1298 | ||
| 1314 | static const struct net_device_ops xennet_netdev_ops = { | 1299 | static const struct net_device_ops xennet_netdev_ops = { |
| 1315 | .ndo_open = xennet_open, | 1300 | .ndo_open = xennet_open, |
| 1316 | .ndo_uninit = xennet_uninit, | ||
| 1317 | .ndo_stop = xennet_close, | 1301 | .ndo_stop = xennet_close, |
| 1318 | .ndo_start_xmit = xennet_start_xmit, | 1302 | .ndo_start_xmit = xennet_start_xmit, |
| 1319 | .ndo_change_mtu = xennet_change_mtu, | 1303 | .ndo_change_mtu = xennet_change_mtu, |
| @@ -1455,6 +1439,11 @@ static void xennet_disconnect_backend(struct netfront_info *info) | |||
| 1455 | 1439 | ||
| 1456 | napi_synchronize(&queue->napi); | 1440 | napi_synchronize(&queue->napi); |
| 1457 | 1441 | ||
| 1442 | xennet_release_tx_bufs(queue); | ||
| 1443 | xennet_release_rx_bufs(queue); | ||
| 1444 | gnttab_free_grant_references(queue->gref_tx_head); | ||
| 1445 | gnttab_free_grant_references(queue->gref_rx_head); | ||
| 1446 | |||
| 1458 | /* End access and free the pages */ | 1447 | /* End access and free the pages */ |
| 1459 | xennet_end_access(queue->tx_ring_ref, queue->tx.sring); | 1448 | xennet_end_access(queue->tx_ring_ref, queue->tx.sring); |
| 1460 | xennet_end_access(queue->rx_ring_ref, queue->rx.sring); | 1449 | xennet_end_access(queue->rx_ring_ref, queue->rx.sring); |
| @@ -1827,8 +1816,8 @@ static int xennet_create_queues(struct netfront_info *info, | |||
| 1827 | 1816 | ||
| 1828 | ret = xennet_init_queue(queue); | 1817 | ret = xennet_init_queue(queue); |
| 1829 | if (ret < 0) { | 1818 | if (ret < 0) { |
| 1830 | dev_warn(&info->netdev->dev, "only created %d queues\n", | 1819 | dev_warn(&info->netdev->dev, |
| 1831 | num_queues); | 1820 | "only created %d queues\n", i); |
| 1832 | num_queues = i; | 1821 | num_queues = i; |
| 1833 | break; | 1822 | break; |
| 1834 | } | 1823 | } |
| @@ -2001,7 +1990,7 @@ abort_transaction_no_dev_fatal: | |||
| 2001 | info->queues = NULL; | 1990 | info->queues = NULL; |
| 2002 | rtnl_lock(); | 1991 | rtnl_lock(); |
| 2003 | netif_set_real_num_tx_queues(info->netdev, 0); | 1992 | netif_set_real_num_tx_queues(info->netdev, 0); |
| 2004 | rtnl_lock(); | 1993 | rtnl_unlock(); |
| 2005 | out: | 1994 | out: |
| 2006 | return err; | 1995 | return err; |
| 2007 | } | 1996 | } |
| @@ -2010,10 +1999,7 @@ static int xennet_connect(struct net_device *dev) | |||
| 2010 | { | 1999 | { |
| 2011 | struct netfront_info *np = netdev_priv(dev); | 2000 | struct netfront_info *np = netdev_priv(dev); |
| 2012 | unsigned int num_queues = 0; | 2001 | unsigned int num_queues = 0; |
| 2013 | int i, requeue_idx, err; | 2002 | int err; |
| 2014 | struct sk_buff *skb; | ||
| 2015 | grant_ref_t ref; | ||
| 2016 | struct xen_netif_rx_request *req; | ||
| 2017 | unsigned int feature_rx_copy; | 2003 | unsigned int feature_rx_copy; |
| 2018 | unsigned int j = 0; | 2004 | unsigned int j = 0; |
| 2019 | struct netfront_queue *queue = NULL; | 2005 | struct netfront_queue *queue = NULL; |
| @@ -2040,47 +2026,8 @@ static int xennet_connect(struct net_device *dev) | |||
| 2040 | netdev_update_features(dev); | 2026 | netdev_update_features(dev); |
| 2041 | rtnl_unlock(); | 2027 | rtnl_unlock(); |
| 2042 | 2028 | ||
| 2043 | /* By now, the queue structures have been set up */ | ||
| 2044 | for (j = 0; j < num_queues; ++j) { | ||
| 2045 | queue = &np->queues[j]; | ||
| 2046 | |||
| 2047 | /* Step 1: Discard all pending TX packet fragments. */ | ||
| 2048 | spin_lock_irq(&queue->tx_lock); | ||
| 2049 | xennet_release_tx_bufs(queue); | ||
| 2050 | spin_unlock_irq(&queue->tx_lock); | ||
| 2051 | |||
| 2052 | /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ | ||
| 2053 | spin_lock_bh(&queue->rx_lock); | ||
| 2054 | |||
| 2055 | for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { | ||
| 2056 | skb_frag_t *frag; | ||
| 2057 | const struct page *page; | ||
| 2058 | if (!queue->rx_skbs[i]) | ||
| 2059 | continue; | ||
| 2060 | |||
| 2061 | skb = queue->rx_skbs[requeue_idx] = xennet_get_rx_skb(queue, i); | ||
| 2062 | ref = queue->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(queue, i); | ||
| 2063 | req = RING_GET_REQUEST(&queue->rx, requeue_idx); | ||
| 2064 | |||
| 2065 | frag = &skb_shinfo(skb)->frags[0]; | ||
| 2066 | page = skb_frag_page(frag); | ||
| 2067 | gnttab_grant_foreign_access_ref( | ||
| 2068 | ref, queue->info->xbdev->otherend_id, | ||
| 2069 | pfn_to_mfn(page_to_pfn(page)), | ||
| 2070 | 0); | ||
| 2071 | req->gref = ref; | ||
| 2072 | req->id = requeue_idx; | ||
| 2073 | |||
| 2074 | requeue_idx++; | ||
| 2075 | } | ||
| 2076 | |||
| 2077 | queue->rx.req_prod_pvt = requeue_idx; | ||
| 2078 | |||
| 2079 | spin_unlock_bh(&queue->rx_lock); | ||
| 2080 | } | ||
| 2081 | |||
| 2082 | /* | 2029 | /* |
| 2083 | * Step 3: All public and private state should now be sane. Get | 2030 | * All public and private state should now be sane. Get |
| 2084 | * ready to start sending and receiving packets and give the driver | 2031 | * ready to start sending and receiving packets and give the driver |
| 2085 | * domain a kick because we've probably just requeued some | 2032 | * domain a kick because we've probably just requeued some |
| 2086 | * packets. | 2033 | * packets. |
