Diffstat (limited to 'net/core/dev.c')
-rw-r--r--  net/core/dev.c | 80
1 file changed, 74 insertions(+), 6 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 7c6a46f80372..d162ba8d622d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -749,7 +749,8 @@ EXPORT_SYMBOL(dev_get_by_index);
  *	@ha: hardware address
  *
  *	Search for an interface by MAC address. Returns NULL if the device
- *	is not found or a pointer to the device. The caller must hold RCU
+ *	is not found or a pointer to the device.
+ *	The caller must hold RCU or RTNL.
  *	The returned device has not had its ref count increased
  *	and the caller must therefore be careful about locking
  *
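The relaxed rule matters to callers: either lock pins the device, but no reference is taken. A minimal caller-side sketch, assuming this kernel-doc belongs to dev_getbyhwaddr_rcu() (as it does in this part of the tree); net, addr and do_something() are placeholders:

    struct net_device *dev;

    rcu_read_lock();
    dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, addr);
    if (dev)
        /* no refcount was taken: dev is only safe to use
         * inside this RCU read-side critical section */
        do_something(dev);
    rcu_read_unlock();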
@@ -1285,7 +1286,7 @@ static int __dev_close(struct net_device *dev)
 	return __dev_close_many(&single);
 }
 
-int dev_close_many(struct list_head *head)
+static int dev_close_many(struct list_head *head)
 {
 	struct net_device *dev, *tmp;
 	LIST_HEAD(tmp_list);
@@ -1593,6 +1594,48 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 	rcu_read_unlock();
 }
 
+/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
+ * @dev: Network device
+ * @txq: number of queues available
+ *
+ * If real_num_tx_queues is changed the tc mappings may no longer be
+ * valid. To resolve this verify the tc mapping remains valid and if
+ * not, zero the mapping. Once no priorities map to a given
+ * offset/count pair it is no longer used. In the worst case TC0
+ * itself is invalid and nothing can be done, so disable all priority
+ * mappings. It is expected that drivers will fix this mapping if
+ * they can before calling netif_set_real_num_tx_queues.
+ */
+static void netif_setup_tc(struct net_device *dev, unsigned int txq)
+{
+	int i;
+	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
+
+	/* If TC0 is invalidated disable TC mapping */
+	if (tc->offset + tc->count > txq) {
+		pr_warning("Number of in use tx queues changed "
+			   "invalidating tc mappings. Priority "
+			   "traffic classification disabled!\n");
+		dev->num_tc = 0;
+		return;
+	}
+
+	/* Invalidated prio to tc mappings set to TC0 */
+	for (i = 1; i < TC_BITMASK + 1; i++) {
+		int q = netdev_get_prio_tc_map(dev, i);
+
+		tc = &dev->tc_to_txq[q];
+		if (tc->offset + tc->count > txq) {
+			pr_warning("Number of in use tx queues "
+				   "changed. Priority %i to tc "
+				   "mapping %i is no longer valid "
+				   "setting map to 0\n",
+				   i, q);
+			netdev_set_prio_tc_map(dev, i, 0);
+		}
+	}
+}
+
 /*
  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
  * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
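What netif_setup_tc() enforces can be modelled outside the kernel. Below is a hypothetical, self-contained userspace model of the same tables: struct tc_txq mirrors the kernel's netdev_tc_txq, while validate_tc() and the names around it are invented stand-ins, not kernel API:

    #include <stdio.h>

    /* Mirrors struct netdev_tc_txq: each traffic class owns the
     * contiguous queue range [offset, offset + count). */
    struct tc_txq { unsigned int offset, count; };

    #define NUM_PRIO 16    /* TC_BITMASK + 1 in the kernel */

    /* Stand-in for netif_setup_tc(): remap to TC0 any priority whose
     * traffic class now reaches past the available queues. */
    static void validate_tc(struct tc_txq *tc_to_txq,
                            unsigned char *prio_tc_map, unsigned int txq)
    {
        int i;

        if (tc_to_txq[0].offset + tc_to_txq[0].count > txq) {
            printf("TC0 invalid: priority mapping disabled\n");
            return;
        }
        for (i = 1; i < NUM_PRIO; i++) {
            struct tc_txq *tc = &tc_to_txq[prio_tc_map[i]];

            if (tc->offset + tc->count > txq) {
                printf("prio %d: tc %d out of range, remapped to 0\n",
                       i, prio_tc_map[i]);
                prio_tc_map[i] = 0;
            }
        }
    }

    int main(void)
    {
        /* TC0 = queues 0-3, TC1 = queues 4-7; priority 5 maps to TC1 */
        struct tc_txq tc_to_txq[2] = { { 0, 4 }, { 4, 4 } };
        unsigned char prio_tc_map[NUM_PRIO] = { [5] = 1 };

        validate_tc(tc_to_txq, prio_tc_map, 6);    /* only 6 queues left */
        return 0;
    }

Run against txq = 6, TC1's range 4..7 no longer fits, so priority 5 falls back to TC0, which is exactly the remapping the kernel function performs.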
@@ -1612,6 +1655,9 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
 		if (rc)
 			return rc;
 
+		if (dev->num_tc)
+			netif_setup_tc(dev, txq);
+
 		if (txq < dev->real_num_tx_queues)
 			qdisc_reset_all_tx_gt(dev, txq);
 	}
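Drivers reach this path when queue counts change at runtime. A hypothetical driver fragment (netif_set_real_num_tx_queues() is the function patched here; adapter and err are invented):

    /* device renegotiated down to 4 tx queues; any tc mapping that
     * reaches past queue 3 is rewritten by netif_setup_tc() */
    err = netif_set_real_num_tx_queues(adapter->netdev, 4);
    if (err)
        return err;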
@@ -2161,6 +2207,8 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
 			  unsigned int num_tx_queues)
 {
 	u32 hash;
+	u16 qoffset = 0;
+	u16 qcount = num_tx_queues;
 
 	if (skb_rx_queue_recorded(skb)) {
 		hash = skb_get_rx_queue(skb);
@@ -2169,13 +2217,19 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
 		return hash;
 	}
 
+	if (dev->num_tc) {
+		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+		qoffset = dev->tc_to_txq[tc].offset;
+		qcount = dev->tc_to_txq[tc].count;
+	}
+
 	if (skb->sk && skb->sk->sk_hash)
 		hash = skb->sk->sk_hash;
 	else
 		hash = (__force u16) skb->protocol ^ skb->rxhash;
 	hash = jhash_1word(hash, hashrnd);
 
-	return (u16) (((u64) hash * num_tx_queues) >> 32);
+	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
 }
 EXPORT_SYMBOL(__skb_tx_hash);
 
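The new return line is reciprocal scaling into the class's queue slice: hash is uniform over 32 bits, so ((u64) hash * qcount) >> 32 lands uniformly in [0, qcount), and qoffset shifts the result into the tc's range. A standalone numeric check (plain C; the values are invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t hash = 0xC0000000;       /* pretend jhash_1word() output */
        uint16_t qcount = 2, qoffset = 4; /* tc owns queues 4 and 5 */

        /* fixed-point (hash / 2^32) * qcount: 0.75 * 2 = 1, plus offset 4 */
        uint16_t q = (uint16_t)(((uint64_t)hash * qcount) >> 32) + qoffset;

        printf("queue %u\n", q);          /* prints: queue 5 */
        return 0;
    }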
@@ -2272,15 +2326,18 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 				 struct netdev_queue *txq)
 {
 	spinlock_t *root_lock = qdisc_lock(q);
-	bool contended = qdisc_is_running(q);
+	bool contended;
 	int rc;
 
+	qdisc_skb_cb(skb)->pkt_len = skb->len;
+	qdisc_calculate_pkt_len(skb, q);
 	/*
 	 * Heuristic to force contended enqueues to serialize on a
 	 * separate lock before trying to get qdisc main lock.
 	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
 	 * and dequeue packets faster.
 	 */
+	contended = qdisc_is_running(q);
 	if (unlikely(contended))
 		spin_lock(&q->busylock);
 
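The busylock heuristic described in the comment is a generic pattern: enqueuers that observe the qdisc already running serialize on a secondary lock, so the thread in the dequeue loop reacquires the main lock with little competition. A rough pthreads analogue, a sketch of the idea rather than the kernel code (the unsynchronized read of running is tolerable only in a sketch):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t main_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t busylock  = PTHREAD_MUTEX_INITIALIZER;
    static bool running;                /* models __QDISC_STATE_RUNNING */

    static void enqueue_one(void)
    {
        /* sample late, just before taking locks, as the patch now does */
        bool contended = running;

        if (contended)
            pthread_mutex_lock(&busylock);  /* contenders pile up here... */
        pthread_mutex_lock(&main_lock);     /* ...keeping this lock cheap
                                             * for the dequeue-loop owner */
        /* enqueue work would go here */
        pthread_mutex_unlock(&main_lock);
        if (contended)
            pthread_mutex_unlock(&busylock);
    }

    int main(void)
    {
        enqueue_one();
        return 0;
    }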
@@ -2298,7 +2355,6 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
 			skb_dst_force(skb);
 
-		qdisc_skb_cb(skb)->pkt_len = skb->len;
 		qdisc_bstats_update(q, skb);
 
 		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
@@ -2313,7 +2369,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 		rc = NET_XMIT_SUCCESS;
 	} else {
 		skb_dst_force(skb);
-		rc = qdisc_enqueue_root(skb, q);
+		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
 		if (qdisc_run_begin(q)) {
 			if (unlikely(contended)) {
 				spin_unlock(&q->busylock);
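Open-coding q->enqueue() instead of qdisc_enqueue_root() makes the masking explicit: enqueue may set qdisc-internal flag bits (such as __NET_XMIT_STOLEN) above NET_XMIT_MASK, and those must not leak to callers. A standalone illustration; the constant values are my assumption of that era's headers (net/sch_generic.h and linux/netdevice.h), not quoted from this diff:

    #include <stdio.h>

    #define NET_XMIT_DROP      0x01        /* ordinary result code */
    #define NET_XMIT_MASK      0xffff      /* results live below this */
    #define __NET_XMIT_STOLEN  0x00010000  /* qdisc-internal flag */

    int main(void)
    {
        int raw = __NET_XMIT_STOLEN | NET_XMIT_DROP; /* what enqueue may return */
        int rc  = raw & NET_XMIT_MASK;               /* what the caller sees */

        printf("rc = 0x%x\n", rc);                   /* prints: rc = 0x1 */
        return 0;
    }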
@@ -4572,6 +4628,17 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
 EXPORT_SYMBOL(dev_set_mtu);
 
 /**
+ *	dev_set_group - Change group this device belongs to
+ *	@dev: device
+ *	@new_group: group this device should belong to
+ */
+void dev_set_group(struct net_device *dev, int new_group)
+{
+	dev->group = new_group;
+}
+EXPORT_SYMBOL(dev_set_group);
+
+/**
  *	dev_set_mac_address - Change Media Access Control Address
  *	@dev: device
  *	@sa: new address
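dev_set_group() is a bare setter with no locking or notification of its own; by analogy with its neighbours dev_set_mtu() and dev_set_mac_address(), the assumption is that callers write it under the rtnl lock. A hypothetical caller:

    rtnl_lock();
    dev_set_group(dev, 42);    /* move the device into group 42 */
    rtnl_unlock();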
@@ -5678,6 +5745,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 	dev->priv_flags = IFF_XMIT_DST_RELEASE;
 	setup(dev);
 	strcpy(dev->name, name);
+	dev->group = INIT_NETDEV_GROUP;
 	return dev;
 
 free_pcpu: