Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c             31
-rw-r--r--  net/core/drop_monitor.c     1
-rw-r--r--  net/core/flow_dissector.c  10
-rw-r--r--  net/core/neighbour.c        3
-rw-r--r--  net/core/netpoll.c         13
-rw-r--r--  net/core/skbuff.c           1
-rw-r--r--  net/core/sock.c             2
7 files changed, 39 insertions, 22 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index ba3b7ea5ebb3..0ce469e5ec80 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2539,7 +2539,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb,
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			struct netdev_queue *txq, void *accel_priv)
+			struct netdev_queue *txq)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 	int rc = NETDEV_TX_OK;
@@ -2605,13 +2605,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 		dev_queue_xmit_nit(skb, dev);
 
 		skb_len = skb->len;
-		if (accel_priv)
-			rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
-		else
-			rc = ops->ndo_start_xmit(skb, dev);
+		rc = ops->ndo_start_xmit(skb, dev);
 
 		trace_net_dev_xmit(skb, rc, dev, skb_len);
-		if (rc == NETDEV_TX_OK && txq)
+		if (rc == NETDEV_TX_OK)
 			txq_trans_update(txq);
 		return rc;
 	}
@@ -2627,10 +2624,7 @@ gso:
 		dev_queue_xmit_nit(nskb, dev);
 
 		skb_len = nskb->len;
-		if (accel_priv)
-			rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
-		else
-			rc = ops->ndo_start_xmit(nskb, dev);
+		rc = ops->ndo_start_xmit(nskb, dev);
 		trace_net_dev_xmit(nskb, rc, dev, skb_len);
 		if (unlikely(rc != NETDEV_TX_OK)) {
 			if (rc & ~NETDEV_TX_MASK)
@@ -2811,7 +2805,7 @@ EXPORT_SYMBOL(dev_loopback_xmit);
  *	the BH enable code must have IRQs enabled so that it will not deadlock.
  *	--BLG
  */
-int dev_queue_xmit(struct sk_buff *skb)
+int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 {
 	struct net_device *dev = skb->dev;
 	struct netdev_queue *txq;
@@ -2827,7 +2821,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
 	skb_update_prio(skb);
 
-	txq = netdev_pick_tx(dev, skb);
+	txq = netdev_pick_tx(dev, skb, accel_priv);
 	q = rcu_dereference_bh(txq->qdisc);
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -2863,7 +2857,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
 	if (!netif_xmit_stopped(txq)) {
 		__this_cpu_inc(xmit_recursion);
-		rc = dev_hard_start_xmit(skb, dev, txq, NULL);
+		rc = dev_hard_start_xmit(skb, dev, txq);
 		__this_cpu_dec(xmit_recursion);
 		if (dev_xmit_complete(rc)) {
 			HARD_TX_UNLOCK(dev, txq);
@@ -2892,8 +2886,19 @@ out:
 	rcu_read_unlock_bh();
 	return rc;
 }
+
+int dev_queue_xmit(struct sk_buff *skb)
+{
+	return __dev_queue_xmit(skb, NULL);
+}
 EXPORT_SYMBOL(dev_queue_xmit);
 
+int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+{
+	return __dev_queue_xmit(skb, accel_priv);
+}
+EXPORT_SYMBOL(dev_queue_xmit_accel);
+
 
 /*=======================================================================
 			Receiver routines
@@ -4500,7 +4505,7 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
 {
 	struct netdev_adjacent *upper;
 
-	WARN_ON_ONCE(!rcu_read_lock_held());
+	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
 
 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
 
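
With accel_priv gone from dev_hard_start_xmit(), the txq for an L2-forwarding-offload flow is chosen before queueing, and offload users go through the new dev_queue_xmit_accel() entry point instead. A minimal caller-side sketch, assuming a macvlan-style upper device; example_fwd_xmit and fwd_priv are illustrative names, with fwd_priv standing in for the opaque handle a driver hands back from ndo_dfwd_add_station():

/* Sketch only: modeled on how an upper device would use the new entry
 * point; not taken verbatim from this diff.
 */
static int example_fwd_xmit(struct sk_buff *skb, struct net_device *lowerdev,
			    void *fwd_priv)
{
	skb->dev = lowerdev;	/* transmit through the lower device */
	if (fwd_priv)
		/* accel_priv reaches ndo_select_queue() via __dev_queue_xmit() */
		return dev_queue_xmit_accel(skb, fwd_priv);
	return dev_queue_xmit(skb);
}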
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 95897183226e..e70301eb7a4a 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -64,7 +64,6 @@ static struct genl_family net_drop_monitor_family = {
 	.hdrsize = 0,
 	.name = "NET_DM",
 	.version = 2,
-	.maxattr = NET_DM_CMD_MAX,
 };
 
 static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index d6ef17322500..2fc5beaf5783 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -395,17 +395,21 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 EXPORT_SYMBOL(__netdev_pick_tx);
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-				    struct sk_buff *skb)
+				    struct sk_buff *skb,
+				    void *accel_priv)
 {
 	int queue_index = 0;
 
 	if (dev->real_num_tx_queues != 1) {
 		const struct net_device_ops *ops = dev->netdev_ops;
 		if (ops->ndo_select_queue)
-			queue_index = ops->ndo_select_queue(dev, skb);
+			queue_index = ops->ndo_select_queue(dev, skb,
+							    accel_priv);
 		else
 			queue_index = __netdev_pick_tx(dev, skb);
-		queue_index = dev_cap_txqueue(dev, queue_index);
+
+		if (!accel_priv)
+			queue_index = dev_cap_txqueue(dev, queue_index);
 	}
 
 	skb_set_queue_mapping(skb, queue_index);
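
Drivers implementing ndo_select_queue() must adopt the three-argument signature; when accel_priv is non-NULL they can return a queue reserved for the forwarded station, which is why netdev_pick_tx() above skips the dev_cap_txqueue() clamp in that case. A hedged sketch: struct example_fwd and base_queue are hypothetical stand-ins for the per-station state a driver such as ixgbe keeps behind the handle:

struct example_fwd {
	u16 base_queue;		/* first hw queue reserved for this station */
};

static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				void *accel_priv)
{
	struct example_fwd *fwd = accel_priv;

	if (fwd)
		/* may lie beyond real_num_tx_queues, so it must not be
		 * clamped by dev_cap_txqueue() */
		return fwd->base_queue;

	return __netdev_pick_tx(dev, skb);
}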
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index ca15f32821fb..932c6d7cf666 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1161,6 +1161,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 						 neigh->parms->reachable_time :
 						 0)));
 		neigh->nud_state = new;
+		notify = 1;
 	}
 
 	if (lladdr != neigh->ha) {
@@ -1274,7 +1275,7 @@ int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
 
 	if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
 			    skb->len) < 0 &&
-	    dev->header_ops->rebuild(skb))
+	    dev_rebuild_header(skb))
 		return 0;
 
 	return dev_queue_xmit(skb);
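
dev_rebuild_header() replaces the bare dev->header_ops->rebuild(skb) call, so neigh_compat_output() no longer dereferences header_ops unconditionally. The wrapper is presumably along these lines (a sketch; see include/linux/netdevice.h for the real helper):

static inline int dev_rebuild_header(struct sk_buff *skb)
{
	const struct net_device *dev = skb->dev;

	/* devices with no header_ops, or no rebuild method, are a no-op */
	if (!dev->header_ops || !dev->header_ops->rebuild)
		return 0;
	return dev->header_ops->rebuild(skb);
}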
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 8f971990677c..19fe9c717ced 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -375,7 +375,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
 		struct netdev_queue *txq;
 
-		txq = netdev_pick_tx(dev, skb);
+		txq = netdev_pick_tx(dev, skb, NULL);
 
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
@@ -386,8 +386,14 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 			    !vlan_hw_offload_capable(netif_skb_features(skb),
 						     skb->vlan_proto)) {
 				skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
-				if (unlikely(!skb))
-					break;
+				if (unlikely(!skb)) {
+					/* This is actually a packet drop, but we
+					 * don't want the code at the end of this
+					 * function to try and re-queue a NULL skb.
+					 */
+					status = NETDEV_TX_OK;
+					goto unlock_txq;
+				}
 				skb->vlan_tci = 0;
 			}
 
@@ -395,6 +401,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 			if (status == NETDEV_TX_OK)
 				txq_trans_update(txq);
 		}
+	unlock_txq:
 		__netif_tx_unlock(txq);
 
 		if (status == NETDEV_TX_OK)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2718fed53d8c..06e72d3cdf60 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3584,6 +3584,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
 	skb->tstamp.tv64 = 0;
 	skb->pkt_type = PACKET_HOST;
 	skb->skb_iif = 0;
+	skb->local_df = 0;
 	skb_dst_drop(skb);
 	skb->mark = 0;
 	secpath_reset(skb);
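
Clearing local_df in skb_scrub_packet() stops a scrubbed skb from carrying over permission to fragment. The function runs wherever a packet crosses a boundary; a typical call site (a sketch mirroring the ip_tunnel usage, with the hypothetical helper name example_cross_boundary) passes xnet = true only when the packet changes network namespace:

static void example_cross_boundary(struct sk_buff *skb,
				   struct net *from, struct net *to)
{
	/* xnet: wipe namespace-local state (mark, secpath, now also
	 * local_df, ...) only when the namespaces actually differ */
	skb_scrub_packet(skb, !net_eq(from, to));
}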
diff --git a/net/core/sock.c b/net/core/sock.c
index ab20ed9b0f31..5393b4b719d7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -882,7 +882,7 @@ set_rcvbuf:
 
 	case SO_PEEK_OFF:
 		if (sock->ops->set_peek_off)
-			sock->ops->set_peek_off(sk, val);
+			ret = sock->ops->set_peek_off(sk, val);
 		else
 			ret = -EOPNOTSUPP;
 		break;
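
With the return value propagated, a failing sock->ops->set_peek_off() is now visible to the caller instead of being reported as success. A hedged userspace sketch against an AF_UNIX datagram socket, which implements the op:

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
	int off = 0;	/* begin MSG_PEEK reads at offset 0 */

	/* Before the fix, an error inside set_peek_off() was lost and
	 * setsockopt() returned 0 anyway. */
	if (setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off)) < 0)
		perror("SO_PEEK_OFF");
	close(fd);
	return 0;
}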