author     John Fastabend <john.r.fastabend@intel.com>   2010-07-01 09:21:57 -0400
committer  David S. Miller <davem@davemloft.net>          2010-07-03 00:59:07 -0400
commit     f0796d5c73e59786d09a1e617689d1d415f2db44 (patch)
tree       ecb17c65c5b6a162824a1e11fee24364852837f8 /net/core
parent     4ef6acff83222f4496ceef7d1f0ee9e50a5bb403 (diff)
net: decreasing real_num_tx_queues needs to flush qdisc
Reducing real_num_tx_queues needs to flush the qdisc; otherwise
skbs with a queue_mapping at or above the new real_num_tx_queues
can be sent to the underlying driver.
The flow for this is:

    dev_queue_xmit()
        dev_pick_tx()
            skb_tx_hash()            => hash using real_num_tx_queues
            skb_set_queue_mapping()
    ...
    qdisc_enqueue_root()             => enqueue skb on txq from hash
    ...
    dev->real_num_tx_queues -= n
    ...
    sch_direct_xmit()
        dev_hard_start_xmit()
            ndo_start_xmit(skb, dev) => skb queue set with old hash
skbs are enqueued on the qdisc with skb->queue_mapping set in the
range 0 <= queue_mapping < real_num_tx_queues. When the driver
later decreases real_num_tx_queues, skbs may be dequeued from the
qdisc with a queue_mapping at or above the new real_num_tx_queues.
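To make the race concrete, here is a minimal user-space sketch (not kernel code): pick_tx_queue() is a hypothetical stand-in for skb_tx_hash(), and the modulo is only a stand-in for the scaling the kernel actually performs into [0, real_num_tx_queues).

```c
#include <stdio.h>

/* Simplified model of skb_tx_hash(): pick a queue from a flow hash,
 * bounded by the queue count that is current *at enqueue time*. */
static unsigned int pick_tx_queue(unsigned int flow_hash,
                                  unsigned int real_num_tx_queues)
{
	return flow_hash % real_num_tx_queues;
}

int main(void)
{
	unsigned int real_num_tx_queues = 8;

	/* skb enqueued on the qdisc while 8 queues exist */
	unsigned int queue_mapping = pick_tx_queue(0x1234u, real_num_tx_queues);

	/* driver later shrinks the active queue set (e.g. a DCB reconfig) */
	real_num_tx_queues = 4;

	/* at dequeue time the stale mapping can point past the new range */
	printf("queue_mapping=%u, real_num_tx_queues=%u -> %s\n",
	       queue_mapping, real_num_tx_queues,
	       queue_mapping >= real_num_tx_queues ? "stale (out of range)" : "ok");
	return 0;
}
```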
This fixes a case in ixgbe where this was occurring with DCB
and FCoE. Because the driver uses queue_mapping to map
skbs to tx descriptor rings, it could map skbs to rings that
no longer exist.
Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--   net/core/dev.c | 18
1 file changed, 18 insertions, 0 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 2b3bf53bc687..723a34710ad4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1553,6 +1553,24 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 	rcu_read_unlock();
 }
 
+/*
+ * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
+ * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
+ */
+void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+{
+	unsigned int real_num = dev->real_num_tx_queues;
+
+	if (unlikely(txq > dev->num_tx_queues))
+		;
+	else if (txq > real_num)
+		dev->real_num_tx_queues = txq;
+	else if (txq < real_num) {
+		dev->real_num_tx_queues = txq;
+		qdisc_reset_all_tx_gt(dev, txq);
+	}
+}
+EXPORT_SYMBOL(netif_set_real_num_tx_queues);
 
 static inline void __netif_reschedule(struct Qdisc *q)
 {
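The flush itself is done by qdisc_reset_all_tx_gt(), which is defined outside net/core and therefore not visible in this diffstat-limited view. A rough sketch of what such a helper has to do, assuming the existing qdisc_reset() and netdev_get_tx_queue() APIs, might look like this (details of the real helper may differ):

```c
#include <linux/netdevice.h>
#include <net/sch_generic.h>

/* Sketch only: reset the qdisc attached to every tx queue whose index is
 * at or above new_txq, so no stale skb with an out-of-range queue_mapping
 * survives the shrink. */
static inline void qdisc_reset_all_tx_gt_sketch(struct net_device *dev,
						unsigned int new_txq)
{
	unsigned int i;

	for (i = new_txq; i < dev->num_tx_queues; i++)
		qdisc_reset(netdev_get_tx_queue(dev, i)->qdisc);
}
```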
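With the helper exported, a driver shrinks its active queue count through it rather than writing dev->real_num_tx_queues directly. A hypothetical reconfiguration path (names invented for illustration, not taken from the ixgbe patch):

```c
#include <linux/netdevice.h>

/* Hypothetical driver reconfiguration path: routing the change through
 * netif_set_real_num_tx_queues() ensures qdiscs holding skbs mapped to
 * removed rings are flushed when the count decreases. */
static void example_set_tx_rings(struct net_device *dev, unsigned int new_rings)
{
	/* was (racy): dev->real_num_tx_queues = new_rings; */
	netif_set_real_num_tx_queues(dev, new_rings);
}
```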