Diffstat (limited to 'net/packet/af_packet.c')
-rw-r--r--	net/packet/af_packet.c	42
1 file changed, 5 insertions, 37 deletions
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 01f3515cada0..611a26d5235c 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -209,7 +209,7 @@ static void prb_clear_rxhash(struct tpacket_kbdq_core *,
 static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
 		struct tpacket3_hdr *);
 static void packet_flush_mclist(struct sock *sk);
-static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb);
+static u16 packet_pick_tx_queue(struct sk_buff *skb);
 
 struct packet_skb_cb {
 	union {
@@ -243,40 +243,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
 
 static int packet_direct_xmit(struct sk_buff *skb)
 {
-	struct net_device *dev = skb->dev;
-	struct sk_buff *orig_skb = skb;
-	struct netdev_queue *txq;
-	int ret = NETDEV_TX_BUSY;
-	bool again = false;
-
-	if (unlikely(!netif_running(dev) ||
-		     !netif_carrier_ok(dev)))
-		goto drop;
-
-	skb = validate_xmit_skb_list(skb, dev, &again);
-	if (skb != orig_skb)
-		goto drop;
-
-	packet_pick_tx_queue(dev, skb);
-	txq = skb_get_tx_queue(dev, skb);
-
-	local_bh_disable();
-
-	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_xmit_frozen_or_drv_stopped(txq))
-		ret = netdev_start_xmit(skb, dev, txq, false);
-	HARD_TX_UNLOCK(dev, txq);
-
-	local_bh_enable();
-
-	if (!dev_xmit_complete(ret))
-		kfree_skb(skb);
-
-	return ret;
-drop:
-	atomic_long_inc(&dev->tx_dropped);
-	kfree_skb_list(skb);
-	return NET_XMIT_DROP;
+	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
 }
 
 static struct net_device *packet_cached_dev_get(struct packet_sock *po)
@@ -313,8 +280,9 @@ static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
 	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
 }
 
-static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 packet_pick_tx_queue(struct sk_buff *skb)
 {
+	struct net_device *dev = skb->dev;
 	const struct net_device_ops *ops = dev->netdev_ops;
 	u16 queue_index;
 
@@ -326,7 +294,7 @@ static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
 		queue_index = __packet_pick_tx_queue(dev, skb);
 	}
 
-	skb_set_queue_mapping(skb, queue_index);
+	return queue_index;
 }
 
 /* __register_prot_hook must be invoked through register_prot_hook
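Note on the removed transmit path: the logic deleted from packet_direct_xmit() does not reappear elsewhere in this file. The new one-line body hands the skb, together with the queue index returned by the reworked packet_pick_tx_queue(), to a shared dev_direct_xmit() helper. A minimal sketch of what such a helper would have to cover, reconstructed from the lines removed above; its location outside af_packet.c and anything beyond the signature implied by the call site are assumptions, not shown in this diff:

/*
 * Sketch only: the deleted packet_direct_xmit() logic, reshaped so the
 * caller supplies the queue index instead of setting skb->queue_mapping
 * beforehand.
 */
int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *orig_skb = skb;
	struct netdev_queue *txq;
	int ret = NETDEV_TX_BUSY;
	bool again = false;

	/* Give up early if the device is down or has lost carrier. */
	if (unlikely(!netif_running(dev) || !netif_carrier_ok(dev)))
		goto drop;

	/* GSO/checksum validation; if the head skb was replaced, drop the list. */
	skb = validate_xmit_skb_list(skb, dev, &again);
	if (skb != orig_skb)
		goto drop;

	/* Queue selection now happens in the caller; record and resolve it here. */
	skb_set_queue_mapping(skb, queue_id);
	txq = skb_get_tx_queue(dev, skb);

	local_bh_disable();
	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_drv_stopped(txq))
		ret = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);
	local_bh_enable();

	if (!dev_xmit_complete(ret))
		kfree_skb(skb);
	return ret;

drop:
	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return NET_XMIT_DROP;
}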