author		Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 12:38:14 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 12:38:14 -0400
commit		ae045e2455429c418a418a3376301a9e5753a0a8 (patch)
tree		b445bdeecd3f38aa0d0a29c9585cee49e4ccb0f1 /drivers/net/xen-netback/interface.c
parent		f4f142ed4ef835709c7e6d12eaca10d190bcebed (diff)
parent		d247b6ab3ce6dd43665780865ec5fa145d9ab6bd (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Highlights:

   1) Steady transitioning of the BPF infrastructure to a generic spot
      so all kernel subsystems can make use of it, from Alexei
      Starovoitov.

   2) SFC driver supports busy polling, from Alexandre Rames.

   3) Take advantage of hash table in UDP multicast delivery, from David
      Held.

   4) Lighten locking, in particular by getting rid of the LRU lists, in
      inet frag handling.  From Florian Westphal.

   5) Add support for various RFC6458 control messages in SCTP, from
      Geir Ola Vaagland.

   6) Allow to filter bridge forwarding database dumps by device, from
      Jamal Hadi Salim.

   7) virtio-net also now supports busy polling, from Jason Wang.

   8) Some low level optimization tweaks in pktgen from Jesper Dangaard
      Brouer.

   9) Add support for ipv6 address generation modes, so that userland
      can have some input into the process.  From Jiri Pirko.

  10) Consolidate common TCP connection request code in ipv4 and ipv6,
      from Octavian Purdila.

  11) New ARP packet logger in netfilter, from Pablo Neira Ayuso.

  12) Generic resizable RCU hash table, with initial users in netlink
      and nftables.  From Thomas Graf.

  13) Maintain a name assignment type so that userspace can see where a
      network device name came from (enumerated by kernel, assigned
      explicitly by userspace, etc.)  From Tom Gundersen.

  14) Automatic flow label generation on transmit in ipv6, from Tom
      Herbert.

  15) New packet timestamping facilities from Willem de Bruijn, meant to
      assist in measuring latencies going into/out-of the packet
      scheduler, latency from TCP data transmission to ACK, etc"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1536 commits)
  cxgb4 : Disable recursive mailbox commands when enabling vi
  net: reduce USB network driver config options.
  tg3: Modify tg3_tso_bug() to handle multiple TX rings
  amd-xgbe: Perform phy connect/disconnect at dev open/stop
  amd-xgbe: Use dma_set_mask_and_coherent to set DMA mask
  net: sun4i-emac: fix memory leak on bad packet
  sctp: fix possible seqlock seadlock in sctp_packet_transmit()
  Revert "net: phy: Set the driver when registering an MDIO bus device"
  cxgb4vf: Turn off SGE RX/TX Callback Timers and interrupts in PCI shutdown routine
  team: Simplify return path of team_newlink
  bridge: Update outdated comment on promiscuous mode
  net-timestamp: ACK timestamp for bytestreams
  net-timestamp: TCP timestamping
  net-timestamp: SCHED timestamp on entering packet scheduler
  net-timestamp: add key to disambiguate concurrent datagrams
  net-timestamp: move timestamp flags out of sk_flags
  net-timestamp: extend SCM_TIMESTAMPING ancillary data struct
  cxgb4i : Move stray CPL definitions to cxgb4 driver
  tcp: reduce spurious retransmits due to transient SACK reneging
  qlcnic: Initialize dcbnl_ops before register_netdev
  ...
Diffstat (limited to 'drivers/net/xen-netback/interface.c')
-rw-r--r--	drivers/net/xen-netback/interface.c	74
1 file changed, 42 insertions(+), 32 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 9e97c7ca0ddd..48a55cda979b 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -55,7 +55,8 @@ static inline void xenvif_stop_queue(struct xenvif_queue *queue)
 
 int xenvif_schedulable(struct xenvif *vif)
 {
-	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
+	return netif_running(vif->dev) &&
+		test_bit(VIF_STATUS_CONNECTED, &vif->status);
 }
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
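The new check reads a driver-private status word instead of the netdev carrier flag. A minimal sketch of the supporting declarations, assuming they live in drivers/net/xen-netback/common.h (only VIF_STATUS_CONNECTED and the status member are taken from this diff; the enum name and struct layout are guesses):

/* Sketch only -- enum name and member layout are assumptions */
enum state_bit_shift {
	/* This bit marks that the vif is connected */
	VIF_STATUS_CONNECTED,
};

struct xenvif {
	/* ... other members elided ... */
	unsigned long status;	/* accessed with set_bit()/test_bit() */
};

Keeping "connected" separate from netif_carrier_ok() lets the later hunks turn the carrier off for flow control without making the vif unschedulable.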
@@ -77,8 +78,12 @@ int xenvif_poll(struct napi_struct *napi, int budget)
 	/* This vif is rogue, we pretend we've there is nothing to do
 	 * for this vif to deschedule it from NAPI. But this interface
 	 * will be turned off in thread context later.
+	 * Also, if a guest doesn't post enough slots to receive data on one of
+	 * its queues, the carrier goes down and NAPI is descheduled here so
+	 * the guest can't send more packets until it's ready to receive.
 	 */
-	if (unlikely(queue->vif->disabled)) {
+	if (unlikely(queue->vif->disabled ||
+		     !netif_carrier_ok(queue->vif->dev))) {
 		napi_complete(napi);
 		return 0;
 	}
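This is the standard way for a NAPI poll handler to deschedule itself: report completion and return zero work. A generic, hedged sketch of that contract, independent of this driver (do_rx_work() is a hypothetical helper):

static int example_poll(struct napi_struct *napi, int budget)
{
	/* do_rx_work() is hypothetical; it processes up to 'budget' packets */
	int work_done = do_rx_work(napi, budget);

	/* Finishing under budget means the ring is drained: tell the core
	 * to stop polling and re-enable the device interrupt.
	 */
	if (work_done < budget)
		napi_complete(napi);

	return work_done;
}

Returning 0 after napi_complete(), as the hunk above does for a rogue or carrier-off vif, parks the queue until the next interrupt reschedules NAPI.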
@@ -96,13 +101,22 @@ int xenvif_poll(struct napi_struct *napi, int budget)
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif_queue *queue = dev_id;
+	struct netdev_queue *net_queue =
+		netdev_get_tx_queue(queue->vif->dev, queue->id);
 
+	/* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
+	 * the carrier went down and this queue was previously blocked
+	 */
+	if (unlikely(netif_tx_queue_stopped(net_queue) ||
+		     (!netif_carrier_ok(queue->vif->dev) &&
+		      test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
+		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
 	xenvif_kick_thread(queue);
 
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 {
 	xenvif_tx_interrupt(irq, dev_id);
 	xenvif_rx_interrupt(irq, dev_id);
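QUEUE_STATUS_RX_PURGE_EVENT and QUEUE_STATUS_RX_STALLED are per-queue flags consumed by the kthread that xenvif_kick_thread() wakes. A sketch of the assumed declarations (the two bit names appear in the diff; the enum name and exact layout are guesses):

/* Sketch only -- assumed declarations in common.h */
enum queue_status_bit {
	QUEUE_STATUS_RX_PURGE_EVENT,	/* kthread should purge the rx queue */
	QUEUE_STATUS_RX_STALLED,	/* guest stopped posting rx slots */
};

struct xenvif_queue {
	/* ... other members elided ... */
	unsigned long status;	/* queue state bits, atomic bit ops */
};

Setting the purge bit in hard-irq context and deferring the actual work to the kthread keeps the interrupt handler short.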
@@ -124,16 +138,14 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
 	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
 }
 
-/* Callback to wake the queue and drain it on timeout */
-static void xenvif_wake_queue_callback(unsigned long data)
+/* Callback to wake the queue's thread and turn the carrier off on timeout */
+static void xenvif_rx_stalled(unsigned long data)
 {
 	struct xenvif_queue *queue = (struct xenvif_queue *)data;
 
 	if (xenvif_queue_stopped(queue)) {
-		netdev_err(queue->vif->dev, "draining TX queue\n");
-		queue->rx_queue_purge = true;
+		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
 		xenvif_kick_thread(queue);
-		xenvif_wake_queue(queue);
 	}
 }
 
@@ -182,11 +194,11 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * drain.
 	 */
 	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
-		queue->wake_queue.function = xenvif_wake_queue_callback;
-		queue->wake_queue.data = (unsigned long)queue;
+		queue->rx_stalled.function = xenvif_rx_stalled;
+		queue->rx_stalled.data = (unsigned long)queue;
 		xenvif_stop_queue(queue);
-		mod_timer(&queue->wake_queue,
+		mod_timer(&queue->rx_stalled,
 			  jiffies + rx_drain_timeout_jiffies);
 	}
 
 	skb_queue_tail(&queue->rx_queue, skb);
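The rx_stalled timer uses the kernel timer API of this generation (before timer_setup() existed): init_timer() once, fill in the .function/.data pair, arm with mod_timer(), tear down with del_timer_sync(). A self-contained sketch of that life cycle with a hypothetical context struct:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_ctx {				/* hypothetical driver context */
	struct timer_list timer;
};

static void my_timeout(unsigned long data)	/* runs in softirq context */
{
	struct my_ctx *ctx = (struct my_ctx *)data;
	/* e.g. set a status bit and kick a thread, as the driver does */
}

static void my_init(struct my_ctx *ctx)
{
	init_timer(&ctx->timer);		/* once, before first arming */
}

static void my_arm(struct my_ctx *ctx, unsigned long delay)
{
	ctx->timer.function = my_timeout;
	ctx->timer.data = (unsigned long)ctx;
	mod_timer(&ctx->timer, jiffies + delay);	/* (re)arms the timer */
}

static void my_teardown(struct my_ctx *ctx)
{
	del_timer_sync(&ctx->timer);	/* waits out a running callback */
}

del_timer_sync() is exactly what xenvif_disconnect() calls on rx_stalled further down.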
@@ -267,7 +279,7 @@ static void xenvif_down(struct xenvif *vif)
 static int xenvif_open(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
-	if (netif_carrier_ok(dev))
+	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
 		xenvif_up(vif);
 	netif_tx_start_all_queues(dev);
 	return 0;
@@ -276,7 +288,7 @@ static int xenvif_open(struct net_device *dev)
 static int xenvif_close(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
-	if (netif_carrier_ok(dev))
+	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
 		xenvif_down(vif);
 	netif_tx_stop_all_queues(dev);
 	return 0;
@@ -418,8 +430,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	 * When the guest selects the desired number, it will be updated
 	 * via netif_set_real_num_*_queues().
 	 */
-	dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
-			      xenvif_max_queues);
+	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
+			      ether_setup, xenvif_max_queues);
 	if (dev == NULL) {
 		pr_warn("Could not allocate netdev for %s\n", name);
 		return ERR_PTR(-ENOMEM);
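The extra argument comes from the name-assignment-type work in highlight 13 above: alloc_netdev_mq() (a wrapper around alloc_netdev_mqs()) gained a name_assign_type parameter this cycle, so every caller had to be updated. NET_NAME_UNKNOWN marks names whose origin is not exposed to userspace; a rough sketch of the call shape as this driver now makes it:

/* 3.17-era shape of the allocator (argument order per this diff):
 *   alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count)
 * Other NET_NAME_* values exist (e.g. NET_NAME_USER for names assigned
 * explicitly by userspace); NET_NAME_UNKNOWN is the conservative default.
 */
dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
		      ether_setup, xenvif_max_queues);
if (!dev)
	return ERR_PTR(-ENOMEM);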
@@ -514,7 +526,7 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
 	}
 
-	init_timer(&queue->wake_queue);
+	init_timer(&queue->rx_stalled);
 
 	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
 		       XENVIF_NAPI_WEIGHT);
@@ -528,6 +540,7 @@ void xenvif_carrier_on(struct xenvif *vif)
 	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
 		dev_set_mtu(vif->dev, ETH_DATA_LEN);
 	netdev_update_features(vif->dev);
+	set_bit(VIF_STATUS_CONNECTED, &vif->status);
 	netif_carrier_on(vif->dev);
 	if (netif_running(vif->dev))
 		xenvif_up(vif);
@@ -625,9 +638,11 @@ void xenvif_carrier_off(struct xenvif *vif)
 	struct net_device *dev = vif->dev;
 
 	rtnl_lock();
-	netif_carrier_off(dev); /* discard queued packets */
-	if (netif_running(dev))
-		xenvif_down(vif);
+	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
+		netif_carrier_off(dev); /* discard queued packets */
+		if (netif_running(dev))
+			xenvif_down(vif);
+	}
 	rtnl_unlock();
 }
 
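Guarding the teardown with test_and_clear_bit() makes xenvif_carrier_off() idempotent: only the caller that atomically clears the bit does the work, which is why the next hunk lets xenvif_disconnect() call it unconditionally. A minimal sketch of the pattern:

#include <linux/bitops.h>

static void example_teardown(struct xenvif *vif)
{
	/* test_and_clear_bit() is atomic: exactly one caller wins even if
	 * teardown races in from two paths; later calls are no-ops.
	 */
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		/* shutdown work runs at most once per connect cycle */
	}
}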
@@ -656,14 +671,13 @@ void xenvif_disconnect(struct xenvif *vif)
 	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 
-	if (netif_carrier_ok(vif->dev))
-		xenvif_carrier_off(vif);
+	xenvif_carrier_off(vif);
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
 		queue = &vif->queues[queue_index];
 
 		if (queue->task) {
-			del_timer_sync(&queue->wake_queue);
+			del_timer_sync(&queue->rx_stalled);
 			kthread_stop(queue->task);
 			queue->task = NULL;
 		}
@@ -705,16 +719,12 @@ void xenvif_free(struct xenvif *vif)
 	/* Here we want to avoid timeout messages if an skb can be legitimately
 	 * stuck somewhere else. Realistically this could be an another vif's
 	 * internal or QDisc queue. That another vif also has this
-	 * rx_drain_timeout_msecs timeout, but the timer only ditches the
-	 * internal queue. After that, the QDisc queue can put in worst case
-	 * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that another vif's
-	 * internal queue, so we need several rounds of such timeouts until we
-	 * can be sure that no another vif should have skb's from us. We are
-	 * not sending more skb's, so newly stuck packets are not interesting
-	 * for us here.
+	 * rx_drain_timeout_msecs timeout, so give it time to drain out.
+	 * Although if that other guest wakes up just before its timeout happens
+	 * and takes only one skb from QDisc, it can hold onto other skbs for a
+	 * longer period.
 	 */
-	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
-				DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
+	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000);
 
 	unregister_netdev(vif->dev);
 
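For scale, a worked example of what the simplification changes, using illustrative constants (assumptions, not taken from this diff; check the driver's actual values before relying on them):

/* Assumed: rx_drain_timeout_msecs = 10000, XENVIF_QUEUE_LENGTH = 32,
 * XEN_NETIF_RX_RING_SIZE = 256, MAX_SKB_FRAGS = 17.
 *
 * New bound:  10000 / 1000                      = 10 s
 * Old bound:  10 s * DIV_ROUND_UP(32, 256 / 17)
 *          =  10 s * DIV_ROUND_UP(32, 15)
 *          =  10 s * 3                          = 30 s
 *
 * The multiplier existed because the old timer only drained a vif's
 * internal queue; with the carrier-off scheme a stalled guest stops
 * receiving altogether, so a single timeout period suffices.
 */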