author | Daniel Borkmann <dborkman@redhat.com> | 2014-03-27 11:38:30 -0400
committer | David S. Miller <davem@davemloft.net> | 2014-03-28 16:49:48 -0400
commit | 43279500decad66ccdddacae7948a1d23be1bef6 (patch)
tree | 5832fb327c95c8bb04a0aca9fcba7690e17c4dea /net/packet
parent | b7d47ca2fdfd3b613b800c923553a8437dd8da40 (diff)
packet: respect devices with LLTX flag in direct xmit
Quite often it can be useful to test with dummy or similar
devices as a blackhole sink for skbs. Such devices are only
equipped with a single txq, but are marked NETIF_F_LLTX as
they do not require locking their internal queues on xmit
(or they implement the locking themselves). Therefore, use
the HARD_TX_{UN,}LOCK API instead, so that NETIF_F_LLTX is
respected.
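For reference, the HARD_TX_{UN,}LOCK helpers only take the per-queue
xmit lock when the device does not advertise NETIF_F_LLTX, roughly
along the lines of the following simplified sketch (illustrative, not
a verbatim copy of the in-tree macros):

  /* Take/release the txq lock only for devices that do not handle
   * (or do not need) xmit serialization themselves, i.e. !NETIF_F_LLTX.
   */
  #define HARD_TX_LOCK(dev, txq, cpu) {			\
          if (((dev)->features & NETIF_F_LLTX) == 0)	\
                  __netif_tx_lock(txq, cpu);		\
  }

  #define HARD_TX_UNLOCK(dev, txq) {			\
          if (((dev)->features & NETIF_F_LLTX) == 0)	\
                  __netif_tx_unlock(txq);		\
  }

With that, an LLTX device is driven with only local_bh_disable()/
local_bh_enable() around ->ndo_start_xmit(), which is where the cycle
savings below come from.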
A trafgen mmap/TX_RING run against a dummy device with the config
foo: { fill(0xff, 64) } yields the following performance improvement
for such scenarios on an ordinary Core i7/2.80GHz:
Before:
Performance counter stats for 'trafgen -i foo -o du0 -n100000000' (10 runs):
160,975,944,159 instructions:k # 0.55 insns per cycle ( +- 0.09% )
293,319,390,278 cycles:k # 0.000 GHz ( +- 0.35% )
192,501,104 branch-misses:k ( +- 1.63% )
831 context-switches:k ( +- 9.18% )
7 cpu-migrations:k ( +- 7.40% )
69,382 cache-misses:k # 0.010 % of all cache refs ( +- 2.18% )
671,552,021 cache-references:k ( +- 1.29% )
22.856401569 seconds time elapsed ( +- 0.33% )
After:
Performance counter stats for 'trafgen -i foo -o du0 -n100000000' (10 runs):
133,788,739,692 instructions:k # 0.92 insns per cycle ( +- 0.06% )
145,853,213,256 cycles:k # 0.000 GHz ( +- 0.17% )
59,867,100 branch-misses:k ( +- 4.72% )
384 context-switches:k ( +- 3.76% )
6 cpu-migrations:k ( +- 6.28% )
70,304 cache-misses:k # 0.077 % of all cache refs ( +- 1.73% )
90,879,408 cache-references:k ( +- 1.35% )
11.719372413 seconds time elapsed ( +- 0.24% )
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/packet')
-rw-r--r-- | net/packet/af_packet.c | 40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 097a354ec8cd..01039d2b1695 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -243,40 +243,40 @@ static int packet_direct_xmit(struct sk_buff *skb)
 	const struct net_device_ops *ops = dev->netdev_ops;
 	netdev_features_t features;
 	struct netdev_queue *txq;
+	int ret = NETDEV_TX_BUSY;
 	u16 queue_map;
-	int ret;
 
 	if (unlikely(!netif_running(dev) ||
-		     !netif_carrier_ok(dev))) {
-		kfree_skb(skb);
-		return NET_XMIT_DROP;
-	}
+		     !netif_carrier_ok(dev)))
+		goto drop;
 
 	features = netif_skb_features(skb);
 	if (skb_needs_linearize(skb, features) &&
-	    __skb_linearize(skb)) {
-		kfree_skb(skb);
-		return NET_XMIT_DROP;
-	}
+	    __skb_linearize(skb))
+		goto drop;
 
 	queue_map = skb_get_queue_mapping(skb);
 	txq = netdev_get_tx_queue(dev, queue_map);
 
-	__netif_tx_lock_bh(txq);
-	if (unlikely(netif_xmit_frozen_or_stopped(txq))) {
-		ret = NETDEV_TX_BUSY;
-		kfree_skb(skb);
-		goto out;
+	local_bh_disable();
+
+	HARD_TX_LOCK(dev, txq, smp_processor_id());
+	if (!netif_xmit_frozen_or_stopped(txq)) {
+		ret = ops->ndo_start_xmit(skb, dev);
+		if (ret == NETDEV_TX_OK)
+			txq_trans_update(txq);
 	}
+	HARD_TX_UNLOCK(dev, txq);
 
-	ret = ops->ndo_start_xmit(skb, dev);
-	if (likely(dev_xmit_complete(ret)))
-		txq_trans_update(txq);
-	else
+	local_bh_enable();
+
+	if (!dev_xmit_complete(ret))
 		kfree_skb(skb);
-out:
-	__netif_tx_unlock_bh(txq);
+
 	return ret;
+drop:
+	kfree_skb(skb);
+	return NET_XMIT_DROP;
 }
 
 static struct net_device *packet_cached_dev_get(struct packet_sock *po)