Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--	drivers/net/xen-netfront.c	60
1 file changed, 36 insertions(+), 24 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 4dd0668003e7..5b97cc946d70 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -87,7 +87,7 @@ struct netfront_cb {
 /* IRQ name is queue name with "-tx" or "-rx" appended */
 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
 
-static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
+static DECLARE_WAIT_QUEUE_HEAD(module_wq);
 
 struct netfront_stats {
 	u64			packets;
@@ -239,7 +239,7 @@ static void rx_refill_timeout(struct timer_list *t)
 static int netfront_tx_slot_available(struct netfront_queue *queue)
 {
 	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
-		(NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
+		(NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
 }
 
 static void xennet_maybe_wake_tx(struct netfront_queue *queue)
@@ -545,7 +545,8 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
 }
 
 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
-			       void *accel_priv, select_queue_fallback_t fallback)
+			       struct net_device *sb_dev,
+			       select_queue_fallback_t fallback)
 {
 	unsigned int num_queues = dev->real_num_tx_queues;
 	u32 hash;
@@ -564,7 +565,7 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 
 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
 
-static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct netfront_info *np = netdev_priv(dev);
 	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
@@ -790,7 +791,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
 	RING_IDX cons = queue->rx.rsp_cons;
 	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
 	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
-	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
+	int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
 	int slots = 1;
 	int err = 0;
 	unsigned long ret;
@@ -893,7 +894,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 				 struct sk_buff *skb,
 				 struct sk_buff_head *list)
 {
-	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	RING_IDX cons = queue->rx.rsp_cons;
 	struct sk_buff *nskb;
 
@@ -902,15 +902,20 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 			RING_GET_RESPONSE(&queue->rx, ++cons);
 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
-		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-			BUG_ON(pull_to <= skb_headlen(skb));
+			BUG_ON(pull_to < skb_headlen(skb));
 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 		}
-		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+			queue->rx.rsp_cons = ++cons;
+			kfree_skb(nskb);
+			return ~0U;
+		}
 
-		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				skb_frag_page(nfrag),
 				rx->offset, rx->status, PAGE_SIZE);
 
 		skb_shinfo(nskb)->nr_frags = 0;
@@ -1044,6 +1049,8 @@ err:
 		skb->len += rx->status;
 
 		i = xennet_fill_frags(queue, skb, &tmpq);
+		if (unlikely(i == ~0U))
+			goto err;
 
 		if (rx->flags & XEN_NETRXF_csum_blank)
 			skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1330,6 +1337,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 	netif_carrier_off(netdev);
 
 	xenbus_switch_state(dev, XenbusStateInitialising);
+	wait_event(module_wq,
+		   xenbus_read_driver_state(dev->otherend) !=
+		   XenbusStateClosed &&
+		   xenbus_read_driver_state(dev->otherend) !=
+		   XenbusStateUnknown);
 	return netdev;
 
  exit:
@@ -1597,14 +1609,16 @@ static int xennet_init_queue(struct netfront_queue *queue)
 {
 	unsigned short i;
 	int err = 0;
+	char *devid;
 
 	spin_lock_init(&queue->tx_lock);
 	spin_lock_init(&queue->rx_lock);
 
 	timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
 
-	snprintf(queue->name, sizeof(queue->name), "%s-q%u",
-		 queue->info->netdev->name, queue->id);
+	devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
+	snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
+		 devid, queue->id);
 
 	/* Initialise tx_skbs as a free chain containing every entry. */
 	queue->tx_skb_freelist = 0;
@@ -1810,7 +1824,7 @@ static int talk_to_netback(struct xenbus_device *dev,
 	err = xen_net_read_mac(dev, info->netdev->dev_addr);
 	if (err) {
 		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
-		goto out;
+		goto out_unlocked;
 	}
 
 	rtnl_lock();
@@ -1925,6 +1939,7 @@ abort_transaction_no_dev_fatal:
 	xennet_destroy_queues(info);
  out:
 	rtnl_unlock();
+out_unlocked:
 	device_unregister(&dev->dev);
 	return err;
 }
@@ -1950,10 +1965,6 @@ static int xennet_connect(struct net_device *dev)
 	/* talk_to_netback() sets the correct number of queues */
 	num_queues = dev->real_num_tx_queues;
 
-	rtnl_lock();
-	netdev_update_features(dev);
-	rtnl_unlock();
-
 	if (dev->reg_state == NETREG_UNINITIALIZED) {
 		err = register_netdev(dev);
 		if (err) {
@@ -1963,6 +1974,10 @@ static int xennet_connect(struct net_device *dev)
 		}
 	}
 
+	rtnl_lock();
+	netdev_update_features(dev);
+	rtnl_unlock();
+
 	/*
 	 * All public and private state should now be sane. Get
 	 * ready to start sending and receiving packets and give the driver
@@ -2000,15 +2015,14 @@ static void netback_changed(struct xenbus_device *dev,
 
 	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
 
+	wake_up_all(&module_wq);
+
 	switch (backend_state) {
 	case XenbusStateInitialising:
 	case XenbusStateInitialised:
 	case XenbusStateReconfiguring:
 	case XenbusStateReconfigured:
-		break;
-
 	case XenbusStateUnknown:
-		wake_up_all(&module_unload_q);
 		break;
 
 	case XenbusStateInitWait:
@@ -2024,12 +2038,10 @@ static void netback_changed(struct xenbus_device *dev,
 		break;
 
 	case XenbusStateClosed:
-		wake_up_all(&module_unload_q);
 		if (dev->state == XenbusStateClosed)
 			break;
 		/* Missed the backend's CLOSING state -- fallthrough */
 	case XenbusStateClosing:
-		wake_up_all(&module_unload_q);
 		xenbus_frontend_closed(dev);
 		break;
 	}
@@ -2137,14 +2149,14 @@ static int xennet_remove(struct xenbus_device *dev)
 
 	if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
 		xenbus_switch_state(dev, XenbusStateClosing);
-		wait_event(module_unload_q,
+		wait_event(module_wq,
 			   xenbus_read_driver_state(dev->otherend) ==
 			   XenbusStateClosing ||
 			   xenbus_read_driver_state(dev->otherend) ==
 			   XenbusStateUnknown);
 
 		xenbus_switch_state(dev, XenbusStateClosed);
-		wait_event(module_unload_q,
+		wait_event(module_wq,
 			   xenbus_read_driver_state(dev->otherend) ==
 			   XenbusStateClosed ||
 			   xenbus_read_driver_state(dev->otherend) ==
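
/*
 * For reference: a minimal sketch (not part of the patch) of the wait-queue
 * pattern the diff consolidates around the single module_wq head -- one
 * waiter sleeping until the peer's xenbus state satisfies a condition, and a
 * state-change handler that wakes all waiters unconditionally so each
 * re-evaluates its own condition. The names example_wq,
 * example_wait_for_backend and example_backend_changed are illustrative
 * only and do not appear in xen-netfront.c.
 */
#include <linux/wait.h>
#include <xen/xenbus.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);

/* Block until the backend has left the Closed and Unknown states. */
static void example_wait_for_backend(struct xenbus_device *dev)
{
	wait_event(example_wq,
		   xenbus_read_driver_state(dev->otherend) !=
		   XenbusStateClosed &&
		   xenbus_read_driver_state(dev->otherend) !=
		   XenbusStateUnknown);
}

/* otherend_changed-style callback: wake every waiter on any transition. */
static void example_backend_changed(struct xenbus_device *dev,
				    enum xenbus_state backend_state)
{
	wake_up_all(&example_wq);
	/* per-state handling would follow, as in netback_changed() */
}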