author		David S. Miller <davem@davemloft.net>	2008-07-17 04:56:23 -0400
committer	David S. Miller <davem@davemloft.net>	2008-07-17 22:21:07 -0400
commit		fd2ea0a79faad824258af5dcec1927aa24d81c16 (patch)
tree		644fd4ce92227cc319c7a54c63ea07a96b8c6b8d /net/core/dev.c
parent		24344d2600108b9b79a60c0e4c43b3c499856d14 (diff)
net: Use queue aware tests throughout.
This effectively "flips the switch" by making the core networking and multiqueue-aware drivers use the new TX multiqueue structures.

Non-multiqueue drivers need no changes. The interfaces they use, such as netif_stop_queue(), degenerate into an operation on TX queue zero, so everything "just works" for them.

Code that really wants to do "X" to all TX queues now invokes a routine that does so, such as netif_tx_wake_all_queues(), netif_tx_stop_all_queues(), etc.

pktgen and netpoll required a little more surgery than the others. In particular, the pktgen changes, whilst functional, could be largely improved: the initial check in pktgen_xmit() will sometimes check the wrong queue, which is mostly harmless, and the thing to do is probably to invoke fill_packet() earlier. The bulk of the netpoll changes make the code operate solely on the TX queue indicated by the SKB queue mapping.

Setting of the SKB queue mapping is entirely confined inside of net/core/dev.c:dev_pick_tx(). If we end up needing any kind of special semantics (drops, for example), they will be implemented there.

Finally, we now have "real_num_tx_queues", which is where the driver indicates how many TX queues are actually active.

With IGB changes from Jeff Kirsher.

Signed-off-by: David S. Miller <davem@davemloft.net>
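As a rough illustration of the queue model described above, the following standalone userspace sketch mirrors the names used in the commit message with simplified stand-in structures. It is not the kernel code itself: the real definitions live in include/linux/netdevice.h and differ in detail, but the shape is the same: per-queue stop state, an "all queues" helper that loops over every TX queue, and the legacy netif_stop_queue() degenerating into an operation on queue zero.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Simplified stand-ins for the kernel structures; field layout and helper
 * bodies are illustrative only, not the include/linux/netdevice.h versions.
 */
struct netdev_queue {
	bool stopped;				/* models the per-queue XOFF state */
};

struct net_device {
	unsigned int num_tx_queues;		/* TX queues allocated */
	unsigned int real_num_tx_queues;	/* TX queues the driver actually uses */
	struct netdev_queue *_tx;		/* array of TX queues */
};

static struct netdev_queue *netdev_get_tx_queue(struct net_device *dev,
						 unsigned int index)
{
	return &dev->_tx[index];
}

static void netif_tx_stop_queue(struct netdev_queue *txq)
{
	txq->stopped = true;
}

/* "Do X to all TX queues" helpers simply loop over every allocated queue. */
static void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_tx_stop_queue(netdev_get_tx_queue(dev, i));
}

/* The legacy single-queue interface degenerates into an operation on queue zero. */
static void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

int main(void)
{
	struct net_device dev = {
		.num_tx_queues = 4,
		.real_num_tx_queues = 2,
		._tx = calloc(4, sizeof(struct netdev_queue)),
	};

	netif_stop_queue(&dev);		/* non-multiqueue driver path: queue 0 only */
	printf("queue 0 stopped: %d, queue 1 stopped: %d\n",
	       dev._tx[0].stopped, dev._tx[1].stopped);

	netif_tx_stop_all_queues(&dev);	/* multiqueue-aware path: every queue */
	printf("queue 3 stopped: %d\n", dev._tx[3].stopped);

	free(dev._tx);
	return 0;
}

In this model a legacy driver keeps calling netif_stop_queue() and only ever touches queue zero, while a multiqueue-aware driver can stop, wake, or test individual queues (as netif_tx_queue_stopped(txq) does in the hunks below) or all of them at once.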
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c	28
1 file changed, 12 insertions(+), 16 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 69378f250695..f027a1ac4fbb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1598,7 +1598,8 @@ static int dev_gso_segment(struct sk_buff *skb)
 	return 0;
 }
 
-int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+			struct netdev_queue *txq)
 {
 	if (likely(!skb->next)) {
 		if (!list_empty(&ptype_all))
@@ -1627,9 +1628,7 @@ gso:
 			skb->next = nskb;
 			return rc;
 		}
-		if (unlikely((netif_queue_stopped(dev) ||
-			     netif_subqueue_stopped(dev, skb)) &&
-			     skb->next))
+		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
 			return NETDEV_TX_BUSY;
 	} while (skb->next);
 
@@ -1669,7 +1668,10 @@ out_kfree_skb:
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 					struct sk_buff *skb)
 {
-	return netdev_get_tx_queue(dev, 0);
+	u16 queue_index = 0;
+
+	skb_set_queue_mapping(skb, queue_index);
+	return netdev_get_tx_queue(dev, queue_index);
 }
 
 int dev_queue_xmit(struct sk_buff *skb)
@@ -1737,8 +1739,6 @@ gso:
 	spin_lock(&txq->lock);
 	q = txq->qdisc;
 	if (q->enqueue) {
-		/* reset queue_mapping to zero */
-		skb_set_queue_mapping(skb, 0);
 		rc = q->enqueue(skb, q);
 		qdisc_run(txq);
 		spin_unlock(&txq->lock);
@@ -1768,10 +1768,9 @@ gso:
 
 			HARD_TX_LOCK(dev, txq, cpu);
 
-			if (!netif_queue_stopped(dev) &&
-			    !netif_subqueue_stopped(dev, skb)) {
+			if (!netif_tx_queue_stopped(txq)) {
 				rc = 0;
-				if (!dev_hard_start_xmit(skb, dev)) {
+				if (!dev_hard_start_xmit(skb, dev, txq)) {
 					HARD_TX_UNLOCK(dev, txq);
 					goto out;
 				}
@@ -4160,8 +4159,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
-	alloc_size = sizeof(struct net_device) +
-		     sizeof(struct net_device_subqueue) * (queue_count - 1);
+	alloc_size = sizeof(struct net_device);
 	if (sizeof_priv) {
 		/* ensure 32-byte alignment of private area */
 		alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
@@ -4191,16 +4189,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
 	dev->_tx = tx;
 	dev->num_tx_queues = queue_count;
+	dev->real_num_tx_queues = queue_count;
 
 	if (sizeof_priv) {
 		dev->priv = ((char *)dev +
-			     ((sizeof(struct net_device) +
-			       (sizeof(struct net_device_subqueue) *
-				(queue_count - 1)) + NETDEV_ALIGN_CONST)
+			     ((sizeof(struct net_device) + NETDEV_ALIGN_CONST)
 			      & ~NETDEV_ALIGN_CONST));
 	}
 
-	dev->egress_subqueue_count = queue_count;
 	dev->gso_max_size = GSO_MAX_SIZE;
 
 	netdev_init_queues(dev);