Diffstat (limited to 'net/core/dev.c')
 net/core/dev.c | 149 +++++++++++++++++++++----------------
 1 file changed, 79 insertions(+), 70 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index a215269d2e35..a3ef808b5e36 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1732,33 +1732,6 @@ void netif_device_attach(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_device_attach);
 
-static bool can_checksum_protocol(unsigned long features, __be16 protocol)
-{
-	return ((features & NETIF_F_NO_CSUM) ||
-		((features & NETIF_F_V4_CSUM) &&
-		 protocol == htons(ETH_P_IP)) ||
-		((features & NETIF_F_V6_CSUM) &&
-		 protocol == htons(ETH_P_IPV6)) ||
-		((features & NETIF_F_FCOE_CRC) &&
-		 protocol == htons(ETH_P_FCOE)));
-}
-
-static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
-{
-	__be16 protocol = skb->protocol;
-	int features = dev->features;
-
-	if (vlan_tx_tag_present(skb)) {
-		features &= dev->vlan_features;
-	} else if (protocol == htons(ETH_P_8021Q)) {
-		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
-		protocol = veh->h_vlan_encapsulated_proto;
-		features &= dev->vlan_features;
-	}
-
-	return can_checksum_protocol(features, protocol);
-}
-
 /**
  * skb_dev_set -- assign a new device to a buffer
  * @skb: buffer for the new device
@@ -1971,16 +1944,14 @@ static void dev_gso_skb_destructor(struct sk_buff *skb)
 /**
  * dev_gso_segment - Perform emulated hardware segmentation on skb.
  * @skb: buffer to segment
+ * @features: device features as applicable to this skb
  *
  * This function segments the given skb and stores the list of segments
  * in skb->next.
  */
-static int dev_gso_segment(struct sk_buff *skb)
+static int dev_gso_segment(struct sk_buff *skb, int features)
 {
-	struct net_device *dev = skb->dev;
 	struct sk_buff *segs;
-	int features = dev->features & ~(illegal_highdma(dev, skb) ?
-					 NETIF_F_SG : 0);
 
 	segs = skb_gso_segment(skb, features);
 
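With the new @features parameter, dev_gso_segment() no longer derives the mask itself; callers compute it once per skb and reuse it. A minimal sketch of the intended call pattern, using only names introduced elsewhere in this patch:

	int features = netif_skb_features(skb);

	if (netif_needs_gso(skb, features)) {
		/* let software GSO build the segment list in skb->next */
		if (unlikely(dev_gso_segment(skb, features)))
			goto out_kfree_skb;
	}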
@@ -2017,22 +1988,52 @@ static inline void skb_orphan_try(struct sk_buff *skb)
 	}
 }
 
-int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev)
+static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+{
+	return ((features & NETIF_F_GEN_CSUM) ||
+		((features & NETIF_F_V4_CSUM) &&
+		 protocol == htons(ETH_P_IP)) ||
+		((features & NETIF_F_V6_CSUM) &&
+		 protocol == htons(ETH_P_IPV6)) ||
+		((features & NETIF_F_FCOE_CRC) &&
+		 protocol == htons(ETH_P_FCOE)));
+}
+
+static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
+{
+	if (!can_checksum_protocol(features, protocol)) {
+		features &= ~NETIF_F_ALL_CSUM;
+		features &= ~NETIF_F_SG;
+	} else if (illegal_highdma(skb->dev, skb)) {
+		features &= ~NETIF_F_SG;
+	}
+
+	return features;
+}
+
+int netif_skb_features(struct sk_buff *skb)
 {
 	__be16 protocol = skb->protocol;
+	int features = skb->dev->features;
 
 	if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
-	} else if (!skb->vlan_tci)
-		return dev->features;
+	} else if (!vlan_tx_tag_present(skb)) {
+		return harmonize_features(skb, protocol, features);
+	}
 
-	if (protocol != htons(ETH_P_8021Q))
-		return dev->features & dev->vlan_features;
-	else
-		return 0;
+	features &= skb->dev->vlan_features;
+
+	if (protocol != htons(ETH_P_8021Q)) {
+		return harmonize_features(skb, protocol, features);
+	} else {
+		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
+			    NETIF_F_GEN_CSUM;
+		return harmonize_features(skb, protocol, features);
+	}
 }
-EXPORT_SYMBOL(netif_get_vlan_features);
+EXPORT_SYMBOL(netif_skb_features);
 
 /*
  * Returns true if either:
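One subtlety in netif_skb_features() above: when the frame is still ETH_P_8021Q after looking past one tag (a stacked-VLAN packet), the mask is cut down before harmonizing. An annotated restatement of that branch; the code matches the hunk, the comments are editorial:

	if (protocol != htons(ETH_P_8021Q)) {
		/* single-tagged (or accelerated tag): vlan_features apply */
		return harmonize_features(skb, protocol, features);
	} else {
		/* tag inside tag: keep only features safe for any payload */
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
			    NETIF_F_GEN_CSUM;
		return harmonize_features(skb, protocol, features);
	}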
@@ -2042,22 +2043,13 @@ EXPORT_SYMBOL(netif_get_vlan_features);
 	 * support DMA from it.
 	 */
 static inline int skb_needs_linearize(struct sk_buff *skb,
-				      struct net_device *dev)
+				      int features)
 {
-	if (skb_is_nonlinear(skb)) {
-		int features = dev->features;
-
-		if (vlan_tx_tag_present(skb))
-			features &= dev->vlan_features;
-
-		return (skb_has_frag_list(skb) &&
-			!(features & NETIF_F_FRAGLIST)) ||
-		       (skb_shinfo(skb)->nr_frags &&
-			(!(features & NETIF_F_SG) ||
-			 illegal_highdma(dev, skb)));
-	}
-
-	return 0;
+	return skb_is_nonlinear(skb) &&
+	       ((skb_has_frag_list(skb) &&
+		 !(features & NETIF_F_FRAGLIST)) ||
+		(skb_shinfo(skb)->nr_frags &&
+		 !(features & NETIF_F_SG)));
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
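The linearize test loses its vlan_features and illegal_highdma() logic because the features argument now arrives pre-harmonized. A sketch of why the shorter predicate is equivalent, assuming features came from netif_skb_features():

/*
 * harmonize_features() clears NETIF_F_SG when illegal_highdma(dev, skb)
 * holds (and when checksumming must fall back), so
 *
 *	!(features & NETIF_F_SG)
 *
 * already covers the removed test
 *
 *	!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)
 */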
@@ -2067,6 +2059,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 	int rc = NETDEV_TX_OK;
 
 	if (likely(!skb->next)) {
+		int features;
+
 		/*
 		 * If device doesnt need skb->dst, release it right now while
 		 * its hot in this cpu cache
@@ -2079,8 +2073,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 
 		skb_orphan_try(skb);
 
+		features = netif_skb_features(skb);
+
 		if (vlan_tx_tag_present(skb) &&
-		    !(dev->features & NETIF_F_HW_VLAN_TX)) {
+		    !(features & NETIF_F_HW_VLAN_TX)) {
 			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
 			if (unlikely(!skb))
 				goto out;
@@ -2088,13 +2084,13 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			skb->vlan_tci = 0;
 		}
 
-		if (netif_needs_gso(dev, skb)) {
-			if (unlikely(dev_gso_segment(skb)))
+		if (netif_needs_gso(skb, features)) {
+			if (unlikely(dev_gso_segment(skb, features)))
 				goto out_kfree_skb;
 			if (skb->next)
 				goto gso;
 		} else {
-			if (skb_needs_linearize(skb, dev) &&
+			if (skb_needs_linearize(skb, features) &&
 			    __skb_linearize(skb))
 				goto out_kfree_skb;
 
@@ -2105,7 +2101,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			skb_set_transport_header(skb,
 				skb_checksum_start_offset(skb));
-			if (!dev_can_checksum(dev, skb) &&
+			if (!(features & NETIF_F_ALL_CSUM) &&
 			    skb_checksum_help(skb))
 				goto out_kfree_skb;
 		}
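The dev_can_checksum() call disappears here because the per-protocol reasoning moved into the mask itself; a brief editorial comment on why a flat test suffices:

/*
 * netif_skb_features() has already dropped any NETIF_F_*_CSUM bit that
 * does not match skb->protocol, so checking features against
 * NETIF_F_ALL_CSUM is equivalent to the old dev_can_checksum(dev, skb).
 */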
@@ -2301,7 +2297,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	 */
 	if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
 		skb_dst_force(skb);
-	__qdisc_update_bstats(q, skb->len);
+
+	qdisc_skb_cb(skb)->pkt_len = skb->len;
+	qdisc_bstats_update(q, skb);
+
 	if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
 		if (unlikely(contended)) {
 			spin_unlock(&q->busylock);
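Storing skb->len in qdisc_skb_cb() before the update matters because the new helper reads qdisc_pkt_len(skb) rather than taking a length argument. A sketch of the accounting this change pairs with, assuming the bstats_update() helper from the same series (details may differ slightly):

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 struct sk_buff *skb)
{
	bstats->bytes += qdisc_pkt_len(skb);	/* reads cb->pkt_len */
	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}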
@@ -5621,18 +5620,20 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
 }
 
 /**
- * alloc_netdev_mq - allocate network device
+ * alloc_netdev_mqs - allocate network device
  * @sizeof_priv: size of private data to allocate space for
  * @name: device name format string
  * @setup: callback to initialize device
- * @queue_count: the number of subqueues to allocate
+ * @txqs: the number of TX subqueues to allocate
+ * @rxqs: the number of RX subqueues to allocate
  *
  * Allocates a struct net_device with private data area for driver use
  * and performs basic initialization. Also allocates subquue structs
- * for each queue on the device at the end of the netdevice.
+ * for each queue on the device.
  */
-struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
-	void (*setup)(struct net_device *), unsigned int queue_count)
+struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+	void (*setup)(struct net_device *),
+	unsigned int txqs, unsigned int rxqs)
 {
 	struct net_device *dev;
 	size_t alloc_size;
@@ -5640,12 +5641,20 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
-	if (queue_count < 1) {
+	if (txqs < 1) {
 		pr_err("alloc_netdev: Unable to allocate device "
 		       "with zero queues.\n");
 		return NULL;
 	}
 
+#ifdef CONFIG_RPS
+	if (rxqs < 1) {
+		pr_err("alloc_netdev: Unable to allocate device "
+		       "with zero RX queues.\n");
+		return NULL;
+	}
+#endif
+
 	alloc_size = sizeof(struct net_device);
 	if (sizeof_priv) {
 		/* ensure 32-byte alignment of private area */
@@ -5676,14 +5685,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
 	dev_net_set(dev, &init_net);
 
-	dev->num_tx_queues = queue_count;
-	dev->real_num_tx_queues = queue_count;
+	dev->num_tx_queues = txqs;
+	dev->real_num_tx_queues = txqs;
 	if (netif_alloc_netdev_queues(dev))
 		goto free_pcpu;
 
 #ifdef CONFIG_RPS
-	dev->num_rx_queues = queue_count;
-	dev->real_num_rx_queues = queue_count;
+	dev->num_rx_queues = rxqs;
+	dev->real_num_rx_queues = rxqs;
 	if (netif_alloc_rx_queues(dev))
 		goto free_pcpu;
 #endif
@@ -5711,7 +5720,7 @@ free_p:
 	kfree(p);
 	return NULL;
 }
-EXPORT_SYMBOL(alloc_netdev_mq);
+EXPORT_SYMBOL(alloc_netdev_mqs);
 
 /**
  * free_netdev - free network device
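For reference, a hedged usage sketch of the renamed allocator; the private struct and queue counts are illustrative, and alloc_netdev_mq() is presumably kept as a wrapper that passes the same count for both directions:

struct my_priv {
	int illustrative;	/* hypothetical driver state */
};

static struct net_device *my_create(void)
{
	/* 4 TX and 4 RX subqueues; ether_setup() does the L2 init */
	return alloc_netdev_mqs(sizeof(struct my_priv), "eth%d",
				ether_setup, 4, 4);
}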