author	Eric W. Biederman <ebiederm@xmission.com>	2014-03-27 18:42:20 -0400
committer	David S. Miller <davem@davemloft.net>	2014-03-29 17:58:37 -0400
commit	5efeac44cfca62f66a1b2919fc8ec7f7c726d15b (patch)
tree	b4d7d338f3ace293b905014afc70afc3189a89ff
parent	080b3c19a4ffe4677d7449880f4d0cea07182474 (diff)
netpoll: Respect NETIF_F_LLTX
Stop taking the transmit lock when a network device has specified
NETIF_F_LLTX.

If no locks are needed to transmit a packet, this is the ideal scenario
for netpoll, as all packets can be transmitted immediately.

Even if some locks are needed in ndo_start_xmit, skipping any
unnecessary serialization is desirable for netpoll, as it makes it more
likely that a debugging packet can be transmitted immediately instead
of being deferred until later.

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
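The change boils down to a conditional-locking pattern: the per-queue transmit lock is only taken when the driver has not declared NETIF_F_LLTX, so an LLTX device never has to wait for (or fail to get) that lock. Below is a minimal userspace sketch of the same pattern, assuming hypothetical names (fake_dev, hard_tx_trylock, hard_tx_unlock); the real macros HARD_TX_TRYLOCK/HARD_TX_UNLOCK added by this patch operate on struct netdev_queue, not a pthread mutex.

	/* Userspace model of the conditional tx-lock pattern (hypothetical
	 * names, not the kernel code).  Build: cc -pthread lltx_sketch.c */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define NETIF_F_LLTX (1u << 0)	/* driver does its own tx locking */

	struct fake_dev {
		unsigned int features;
		pthread_mutex_t xmit_lock;	/* stands in for the queue's xmit lock */
	};

	/* Take the tx lock only when the driver has not declared LLTX. */
	static bool hard_tx_trylock(struct fake_dev *dev)
	{
		if (dev->features & NETIF_F_LLTX)
			return true;		/* lockless tx: nothing to take */
		return pthread_mutex_trylock(&dev->xmit_lock) == 0;
	}

	static void hard_tx_unlock(struct fake_dev *dev)
	{
		if (!(dev->features & NETIF_F_LLTX))
			pthread_mutex_unlock(&dev->xmit_lock);
	}

	int main(void)
	{
		struct fake_dev dev = {
			.features  = NETIF_F_LLTX,
			.xmit_lock = PTHREAD_MUTEX_INITIALIZER,
		};

		/* An LLTX device always "acquires" immediately, so a
		 * netpoll-style sender never defers the packet just to
		 * wait for the lock. */
		if (hard_tx_trylock(&dev)) {
			printf("xmit now (lltx=%d)\n",
			       !!(dev.features & NETIF_F_LLTX));
			hard_tx_unlock(&dev);
		}
		return 0;
	}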
-rw-r--r--	include/linux/netdevice.h	 5
-rw-r--r--	net/core/netpoll.c	10
2 files changed, 10 insertions(+), 5 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0d8c8718980a..4cd5e9e13c87 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2909,6 +2909,11 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 	}				\
 }
 
+#define HARD_TX_TRYLOCK(dev, txq)		\
+	(((dev->features & NETIF_F_LLTX) == 0) ?	\
+		__netif_tx_trylock(txq) :	\
+		true )
+
 #define HARD_TX_UNLOCK(dev, txq) {		\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
 		__netif_tx_unlock(txq);		\
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index d44af2306f23..ed7740f7a94d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -119,17 +119,17 @@ static void queue_process(struct work_struct *work)
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
 		local_irq_save(flags);
-		__netif_tx_lock(txq, smp_processor_id());
+		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (netif_xmit_frozen_or_stopped(txq) ||
 		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
-			__netif_tx_unlock(txq);
+			HARD_TX_UNLOCK(dev, txq);
 			local_irq_restore(flags);
 
 			schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
-		__netif_tx_unlock(txq);
+		HARD_TX_UNLOCK(dev, txq);
 		local_irq_restore(flags);
 	}
 }
@@ -345,11 +345,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
 		     tries > 0; --tries) {
-			if (__netif_tx_trylock(txq)) {
+			if (HARD_TX_TRYLOCK(dev, txq)) {
 				if (!netif_xmit_stopped(txq))
 					status = netpoll_start_xmit(skb, dev, txq);
 
-				__netif_tx_unlock(txq);
+				HARD_TX_UNLOCK(dev, txq);
 
 				if (status == NETDEV_TX_OK)
 					break;