Diffstat (limited to 'net/core/dev.c')

 net/core/dev.c | 100 ++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 76 insertions(+), 24 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index be9924f60ec3..94c1eeed25e5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1448,13 +1448,10 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 	if (skb->len > (dev->mtu + dev->hard_header_len))
 		return NET_RX_DROP;
 
-	skb_dst_drop(skb);
+	skb_set_dev(skb, dev);
 	skb->tstamp.tv64 = 0;
 	skb->pkt_type = PACKET_HOST;
 	skb->protocol = eth_type_trans(skb, dev);
-	skb->mark = 0;
-	secpath_reset(skb);
-	nf_reset(skb);
 	return netif_rx(skb);
 }
 EXPORT_SYMBOL_GPL(dev_forward_skb);
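
Commentary: dev_forward_skb() previously scrubbed the skb inline, dropping the dst and clearing mark, secpath and netfilter state unconditionally; it now delegates to the new skb_set_dev() helper (added below), which performs that scrubbing only when the skb actually crosses a network-namespace boundary. For context, a minimal sketch of the kind of caller this serves, loosely modeled on a veth-style pair device (struct my_priv and my_xmit are hypothetical names, not part of this patch):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Hypothetical driver-private data: each device keeps its peer. */
    struct my_priv {
        struct net_device *peer;
    };

    /* Sketch of a veth-style transmit routine: a frame sent on one
     * device is injected into the receive path of its peer, which may
     * live in a different network namespace.  dev_forward_skb() now
     * delegates the cross-namespace scrubbing to skb_set_dev().
     */
    static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
    {
        struct my_priv *priv = netdev_priv(dev);

        if (dev_forward_skb(priv->peer, skb) == NET_RX_SUCCESS)
            dev->stats.tx_packets++;
        else
            dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }
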
@@ -1614,6 +1611,36 @@ static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
 	return false;
 }
 
+/**
+ *	skb_dev_set -- assign a new device to a buffer
+ *	@skb: buffer for the new device
+ *	@dev: network device
+ *
+ *	If an skb is owned by a device already, we have to reset
+ *	all data private to the namespace a device belongs to
+ *	before assigning it a new device.
+ */
+#ifdef CONFIG_NET_NS
+void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
+{
+	skb_dst_drop(skb);
+	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
+		secpath_reset(skb);
+		nf_reset(skb);
+		skb_init_secmark(skb);
+		skb->mark = 0;
+		skb->priority = 0;
+		skb->nf_trace = 0;
+		skb->ipvs_property = 0;
+#ifdef CONFIG_NET_SCHED
+		skb->tc_index = 0;
+#endif
+	}
+	skb->dev = dev;
+}
+EXPORT_SYMBOL(skb_set_dev);
+#endif /* CONFIG_NET_NS */
+
 /*
  * Invalidate hardware checksum when packet is to be mangled, and
  * complete checksum manually on outgoing path.
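
Commentary: skb_set_dev() is only built when CONFIG_NET_NS is enabled, so the paired header change (not shown in this diff) presumably supplies a trivial inline for the single-namespace case. Note also that the kerneldoc title reads skb_dev_set while the function is named skb_set_dev. A hedged sketch of what the header side plausibly looks like; this is an assumption, the actual declaration lives in include/linux/netdevice.h:

    /* Sketch of the likely header counterpart.  With CONFIG_NET_NS off
     * every device shares the one namespace, so an skb can never cross
     * a namespace boundary and plain assignment suffices.
     */
    #ifdef CONFIG_NET_NS
    extern void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
    #else
    static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
    {
        skb->dev = dev;
    }
    #endif
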
@@ -1853,6 +1880,14 @@ gso:
 
 		skb->next = nskb->next;
 		nskb->next = NULL;
+
+		/*
+		 * If device doesnt need nskb->dst, release it right now while
+		 * its hot in this cpu cache
+		 */
+		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+			skb_dst_drop(nskb);
+
 		rc = ops->ndo_start_xmit(nskb, dev);
 		if (unlikely(rc != NETDEV_TX_OK)) {
 			if (rc & ~NETDEV_TX_MASK)
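
Commentary: the new early skb_dst_drop() in the GSO segment loop applies only to devices that advertise IFF_XMIT_DST_RELEASE in priv_flags, i.e. devices whose transmit path never dereferences skb->dst; releasing the reference here, while the dst is still hot in this CPU's cache, is cheaper than releasing it later at kfree_skb() time. A sketch of how a driver opts in (my_setup is a hypothetical name):

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>

    /* Sketch: a driver whose ndo_start_xmit never looks at the skb's
     * dst entry can set IFF_XMIT_DST_RELEASE so the core drops the
     * dst reference before handing each segment to the driver.
     */
    static void my_setup(struct net_device *dev)
    {
        ether_setup(dev);
        dev->priv_flags |= IFF_XMIT_DST_RELEASE;
    }
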
@@ -1974,6 +2009,21 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	return rc;
 }
 
+/*
+ * Returns true if either:
+ *	1. skb has frag_list and the device doesn't support FRAGLIST, or
+ *	2. skb is fragmented and the device does not support SG, or if
+ *	   at least one of fragments is in highmem and device does not
+ *	   support DMA from it.
+ */
+static inline int skb_needs_linearize(struct sk_buff *skb,
+				      struct net_device *dev)
+{
+	return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
+	       (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
+					      illegal_highdma(dev, skb)));
+}
+
 /**
  *	dev_queue_xmit - transmit a buffer
  *	@skb: buffer to transmit
@@ -2010,18 +2060,8 @@ int dev_queue_xmit(struct sk_buff *skb)
 	if (netif_needs_gso(dev, skb))
 		goto gso;
 
-	if (skb_has_frags(skb) &&
-	    !(dev->features & NETIF_F_FRAGLIST) &&
-	    __skb_linearize(skb))
-		goto out_kfree_skb;
-
-	/* Fragmented skb is linearized if device does not support SG,
-	 * or if at least one of fragments is in highmem and device
-	 * does not support DMA from it.
-	 */
-	if (skb_shinfo(skb)->nr_frags &&
-	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
-	    __skb_linearize(skb))
+	/* Convert a paged skb to linear, if required */
+	if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
 		goto out_kfree_skb;
 
 	/* If packet is not checksummed and device does not support
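
Commentary: the two open-coded linearization checks in dev_queue_xmit() collapse into the single skb_needs_linearize() predicate added in the previous hunk; the behavior is unchanged. The same predicate, decomposed into named conditions purely for illustration (this rewrite is not part of the patch; illegal_highdma() is the static helper in dev.c):

    /* Equivalent expansion of skb_needs_linearize(), for readability. */
    static inline int skb_needs_linearize_expanded(struct sk_buff *skb,
                                                   struct net_device *dev)
    {
        int has_fraglist = skb_has_frags(skb);
        int dev_fraglist = !!(dev->features & NETIF_F_FRAGLIST);
        int has_frags    = skb_shinfo(skb)->nr_frags != 0;
        int dev_sg       = !!(dev->features & NETIF_F_SG);

        if (has_fraglist && !dev_fraglist)
            return 1;   /* chained skbs the device cannot walk */
        if (has_frags && !dev_sg)
            return 1;   /* paged data without scatter/gather */
        if (has_frags && illegal_highdma(dev, skb))
            return 1;   /* a fragment in highmem the device cannot DMA */
        return 0;
    }
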
@@ -2422,6 +2462,7 @@ int netif_receive_skb(struct sk_buff *skb)
 	struct packet_type *ptype, *pt_prev;
 	struct net_device *orig_dev;
 	struct net_device *null_or_orig;
+	struct net_device *null_or_bond;
 	int ret = NET_RX_DROP;
 	__be16 type;
 
@@ -2487,12 +2528,24 @@ ncls:
 	if (!skb)
 		goto out;
 
+	/*
+	 * Make sure frames received on VLAN interfaces stacked on
+	 * bonding interfaces still make their way to any base bonding
+	 * device that may have registered for a specific ptype.  The
+	 * handler may have to adjust skb->dev and orig_dev.
+	 */
+	null_or_bond = NULL;
+	if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
+	    (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
+		null_or_bond = vlan_dev_real_dev(skb->dev);
+	}
+
 	type = skb->protocol;
 	list_for_each_entry_rcu(ptype,
 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
-		if (ptype->type == type &&
-		    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
-		     ptype->dev == orig_dev)) {
+		if (ptype->type == type && (ptype->dev == null_or_orig ||
+		    ptype->dev == skb->dev || ptype->dev == orig_dev ||
+		    ptype->dev == null_or_bond)) {
 			if (pt_prev)
 				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
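
Commentary: with null_or_bond set, a frame received on a VLAN stacked on a bonding master now also matches packet_type handlers bound to the bond device itself, for example the bonding driver's 802.3ad (LACP) receive hook. A hedged sketch of such a device-bound handler (my_rcv and my_ptype are hypothetical names):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/if_ether.h>

    /* Sketch: a protocol handler bound to one specific device (say a
     * bond master), as the bonding driver does for LACPDUs.  With the
     * hunk above, a frame arriving on a VLAN stacked on the bond still
     * reaches this handler, because null_or_bond points at the VLAN's
     * real device.
     */
    static int my_rcv(struct sk_buff *skb, struct net_device *dev,
                      struct packet_type *pt, struct net_device *orig_dev)
    {
        /* ... inspect the 802.3ad slow-protocol frame here ... */
        kfree_skb(skb);
        return 0;
    }

    static struct packet_type my_ptype = {
        .type = cpu_to_be16(ETH_P_SLOW),  /* 802.3ad slow protocols */
        .func = my_rcv,
        /* .dev is set to the bond master before dev_add_pack() */
    };
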
@@ -2561,7 +2614,7 @@ out:
 	return netif_receive_skb(skb);
 }
 
-void napi_gro_flush(struct napi_struct *napi)
+static void napi_gro_flush(struct napi_struct *napi)
 {
 	struct sk_buff *skb, *next;
 
@@ -2574,7 +2627,6 @@ void napi_gro_flush(struct napi_struct *napi)
 	napi->gro_count = 0;
 	napi->gro_list = NULL;
 }
-EXPORT_SYMBOL(napi_gro_flush);
 
 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
@@ -3185,7 +3237,7 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
 {
 	const struct net_device_stats *stats = dev_get_stats(dev);
 
-	seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
+	seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
 		   "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
 		   dev->name, stats->rx_bytes, stats->rx_packets,
 		   stats->rx_errors,
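
Commentary: the old /proc/net/dev format printed the colon flush against an 8-wide rx_bytes field, so once the counter reached eight digits the interface name and the number ran together, confusing parsers that split on whitespace; the new format guarantees a space after the colon at the same overall width. A small user-space demonstration with made-up counter values:

    #include <stdio.h>

    /* Demo of the field-width change.  "%6s:%8lu" leaves no separator
     * once the counter needs 8 or more digits; "%6s: %7lu" always
     * emits a space after the colon.
     */
    int main(void)
    {
        unsigned long rx_bytes = 123456789UL;   /* hypothetical value */

        printf("old: |%6s:%8lu|\n", "eth0", rx_bytes);  /* |  eth0:123456789|  */
        printf("new: |%6s: %7lu|\n", "eth0", rx_bytes); /* |  eth0: 123456789| */
        return 0;
    }
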
@@ -3640,10 +3692,10 @@ void __dev_set_rx_mode(struct net_device *dev)
 	/* Unicast addresses changes may only happen under the rtnl,
 	 * therefore calling __dev_set_promiscuity here is safe.
 	 */
-	if (dev->uc.count > 0 && !dev->uc_promisc) {
+	if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
 		__dev_set_promiscuity(dev, 1);
 		dev->uc_promisc = 1;
-	} else if (dev->uc.count == 0 && dev->uc_promisc) {
+	} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
 		__dev_set_promiscuity(dev, -1);
 		dev->uc_promisc = 0;
 	}
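
Commentary: the direct reads of dev->uc.count give way to the netdev_uc_empty() accessor, letting the unicast-list representation change later without touching callers. Its definition lives in include/linux/netdevice.h, not in this diff; presumably it is a thin wrapper along these lines (an assumption, shown for illustration only):

    /* Sketch of the accessor's likely shape; the real definition is in
     * include/linux/netdevice.h.
     */
    #define my_netdev_uc_empty(dev) ((dev)->uc.count == 0)
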