Diffstat (limited to 'net')
-rw-r--r--   net/core/dev.c            28
-rw-r--r--   net/core/netpoll.c        24
-rw-r--r--   net/core/pktgen.c         69
-rw-r--r--   net/sched/sch_generic.c    5
-rw-r--r--   net/sched/sch_teql.c       6
5 files changed, 74 insertions, 58 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 69378f25069..f027a1ac4fb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1598,7 +1598,8 @@ static int dev_gso_segment(struct sk_buff *skb)
 	return 0;
 }
 
-int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+			struct netdev_queue *txq)
 {
 	if (likely(!skb->next)) {
 		if (!list_empty(&ptype_all))
@@ -1627,9 +1628,7 @@ gso:
 			skb->next = nskb;
 			return rc;
 		}
-		if (unlikely((netif_queue_stopped(dev) ||
-			     netif_subqueue_stopped(dev, skb)) &&
-			     skb->next))
+		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
 			return NETDEV_TX_BUSY;
 	} while (skb->next);
 
@@ -1669,7 +1668,10 @@ out_kfree_skb:
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 					struct sk_buff *skb)
 {
-	return netdev_get_tx_queue(dev, 0);
+	u16 queue_index = 0;
+
+	skb_set_queue_mapping(skb, queue_index);
+	return netdev_get_tx_queue(dev, queue_index);
 }
 
 int dev_queue_xmit(struct sk_buff *skb)
@@ -1737,8 +1739,6 @@ gso:
 	spin_lock(&txq->lock);
 	q = txq->qdisc;
 	if (q->enqueue) {
-		/* reset queue_mapping to zero */
-		skb_set_queue_mapping(skb, 0);
 		rc = q->enqueue(skb, q);
 		qdisc_run(txq);
 		spin_unlock(&txq->lock);
@@ -1768,10 +1768,9 @@ gso:
 
 			HARD_TX_LOCK(dev, txq, cpu);
 
-			if (!netif_queue_stopped(dev) &&
-			    !netif_subqueue_stopped(dev, skb)) {
+			if (!netif_tx_queue_stopped(txq)) {
 				rc = 0;
-				if (!dev_hard_start_xmit(skb, dev)) {
+				if (!dev_hard_start_xmit(skb, dev, txq)) {
 					HARD_TX_UNLOCK(dev, txq);
 					goto out;
 				}
@@ -4160,8 +4159,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
-	alloc_size = sizeof(struct net_device) +
-		     sizeof(struct net_device_subqueue) * (queue_count - 1);
+	alloc_size = sizeof(struct net_device);
 	if (sizeof_priv) {
 		/* ensure 32-byte alignment of private area */
 		alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
@@ -4191,16 +4189,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
 	dev->_tx = tx;
 	dev->num_tx_queues = queue_count;
+	dev->real_num_tx_queues = queue_count;
 
 	if (sizeof_priv) {
 		dev->priv = ((char *)dev +
-			     ((sizeof(struct net_device) +
-			       (sizeof(struct net_device_subqueue) *
-				(queue_count - 1)) + NETDEV_ALIGN_CONST)
+			     ((sizeof(struct net_device) + NETDEV_ALIGN_CONST)
 			      & ~NETDEV_ALIGN_CONST));
 	}
 
-	dev->egress_subqueue_count = queue_count;
 	dev->gso_max_size = GSO_MAX_SIZE;
 
 	netdev_init_queues(dev);
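
The dev.c hunks above replace the per-device tests (netif_queue_stopped(), netif_subqueue_stopped()) with a per-queue test on the struct netdev_queue chosen for the skb. A minimal sketch of the resulting pattern, assuming the post-patch helpers; example_xmit_one() is a hypothetical caller and the HARD_TX_LOCK locking is elided:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical caller: resolve the queue the skb was mapped to, then
 * test that one queue rather than the whole device before transmitting.
 * Locking (HARD_TX_LOCK/HARD_TX_UNLOCK) elided for brevity. */
static int example_xmit_one(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	if (netif_tx_queue_stopped(txq))
		return NETDEV_TX_BUSY;

	/* dev_hard_start_xmit() now takes the queue explicitly */
	return dev_hard_start_xmit(skb, dev, txq);
}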
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 8fb134da034..c12720895ec 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -58,25 +58,27 @@ static void queue_process(struct work_struct *work)
 
 	while ((skb = skb_dequeue(&npinfo->txq))) {
 		struct net_device *dev = skb->dev;
+		struct netdev_queue *txq;
 
 		if (!netif_device_present(dev) || !netif_running(dev)) {
 			__kfree_skb(skb);
 			continue;
 		}
 
+		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+
 		local_irq_save(flags);
-		netif_tx_lock(dev);
-		if ((netif_queue_stopped(dev) ||
-		     netif_subqueue_stopped(dev, skb)) ||
-		     dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
+		__netif_tx_lock(txq, smp_processor_id());
+		if (netif_tx_queue_stopped(txq) ||
+		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
-			netif_tx_unlock(dev);
+			__netif_tx_unlock(txq);
 			local_irq_restore(flags);
 
 			schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
-		netif_tx_unlock(dev);
+		__netif_tx_unlock(txq);
 		local_irq_restore(flags);
 	}
 }
@@ -278,17 +280,19 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 
 	/* don't get messages out of order, and no recursion */
 	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
+		struct netdev_queue *txq;
 		unsigned long flags;
 
+		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+
 		local_irq_save(flags);
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
 		     tries > 0; --tries) {
-			if (netif_tx_trylock(dev)) {
-				if (!netif_queue_stopped(dev) &&
-				    !netif_subqueue_stopped(dev, skb))
+			if (__netif_tx_trylock(txq)) {
+				if (!netif_tx_queue_stopped(txq))
 					status = dev->hard_start_xmit(skb, dev);
-				netif_tx_unlock(dev);
+				__netif_tx_unlock(txq);
 
 				if (status == NETDEV_TX_OK)
 					break;
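
The netpoll conversion swaps the device-wide netif_tx_lock() for the per-queue __netif_tx_lock() / __netif_tx_trylock(), so senders on different queues no longer serialize on one lock. A sketch of the trylock variant as used in netpoll_send_skb(); example_try_send() is a hypothetical wrapper:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical wrapper around the netpoll_send_skb() inner loop:
 * lock only the queue this skb maps to, and skip the send if the
 * driver has stopped that queue. */
static int example_try_send(struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	int status = NETDEV_TX_BUSY;

	if (__netif_tx_trylock(txq)) {
		if (!netif_tx_queue_stopped(txq))
			status = dev->hard_start_xmit(skb, dev);
		__netif_tx_unlock(txq);
	}
	return status;
}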
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index fdf537707e5..906802db4ed 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2123,6 +2123,24 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
 	}
 }
 #endif
+static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
+{
+	if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
+		__u16 t;
+		if (pkt_dev->flags & F_QUEUE_MAP_RND) {
+			t = random32() %
+				(pkt_dev->queue_map_max -
+				 pkt_dev->queue_map_min + 1)
+				+ pkt_dev->queue_map_min;
+		} else {
+			t = pkt_dev->cur_queue_map + 1;
+			if (t > pkt_dev->queue_map_max)
+				t = pkt_dev->queue_map_min;
+		}
+		pkt_dev->cur_queue_map = t;
+	}
+}
+
 /* Increment/randomize headers according to flags and current values
  * for IP src/dest, UDP src/dst port, MAC-Addr src/dst
  */
@@ -2325,19 +2343,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 		pkt_dev->cur_pkt_size = t;
 	}
 
-	if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
-		__u16 t;
-		if (pkt_dev->flags & F_QUEUE_MAP_RND) {
-			t = random32() %
-				(pkt_dev->queue_map_max - pkt_dev->queue_map_min + 1)
-				+ pkt_dev->queue_map_min;
-		} else {
-			t = pkt_dev->cur_queue_map + 1;
-			if (t > pkt_dev->queue_map_max)
-				t = pkt_dev->queue_map_min;
-		}
-		pkt_dev->cur_queue_map = t;
-	}
+	set_cur_queue_map(pkt_dev);
 
 	pkt_dev->flows[flow].count++;
 }
@@ -2458,7 +2464,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	__be16 *vlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for VLAN tag */
 	__be16 *svlan_tci = NULL;                /* Encapsulates priority and SVLAN ID */
 	__be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */
-
+	u16 queue_map;
 
 	if (pkt_dev->nr_labels)
 		protocol = htons(ETH_P_MPLS_UC);
@@ -2469,6 +2475,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	/* Update any of the values, used when we're incrementing various
 	 * fields.
 	 */
+	queue_map = pkt_dev->cur_queue_map;
 	mod_cur_headers(pkt_dev);
 
 	datalen = (odev->hard_header_len + 16) & ~0xf;
@@ -2507,7 +2514,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	skb->network_header = skb->tail;
 	skb->transport_header = skb->network_header + sizeof(struct iphdr);
 	skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr));
-	skb_set_queue_mapping(skb, pkt_dev->cur_queue_map);
+	skb_set_queue_mapping(skb, queue_map);
 	iph = ip_hdr(skb);
 	udph = udp_hdr(skb);
 
@@ -2797,6 +2804,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	__be16 *vlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for VLAN tag */
 	__be16 *svlan_tci = NULL;                /* Encapsulates priority and SVLAN ID */
 	__be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */
+	u16 queue_map;
 
 	if (pkt_dev->nr_labels)
 		protocol = htons(ETH_P_MPLS_UC);
@@ -2807,6 +2815,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	/* Update any of the values, used when we're incrementing various
 	 * fields.
 	 */
+	queue_map = pkt_dev->cur_queue_map;
 	mod_cur_headers(pkt_dev);
 
 	skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 +
@@ -2844,7 +2853,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	skb->network_header = skb->tail;
 	skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
 	skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr));
-	skb_set_queue_mapping(skb, pkt_dev->cur_queue_map);
+	skb_set_queue_mapping(skb, queue_map);
 	iph = ipv6_hdr(skb);
 	udph = udp_hdr(skb);
 
@@ -3263,7 +3272,9 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
 static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 {
 	struct net_device *odev = NULL;
+	struct netdev_queue *txq;
 	__u64 idle_start = 0;
+	u16 queue_map;
 	int ret;
 
 	odev = pkt_dev->odev;
@@ -3285,9 +3296,15 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		}
 	}
 
-	if ((netif_queue_stopped(odev) ||
-	     (pkt_dev->skb &&
-	      netif_subqueue_stopped(odev, pkt_dev->skb))) ||
+	if (!pkt_dev->skb) {
+		set_cur_queue_map(pkt_dev);
+		queue_map = pkt_dev->cur_queue_map;
+	} else {
+		queue_map = skb_get_queue_mapping(pkt_dev->skb);
+	}
+
+	txq = netdev_get_tx_queue(odev, queue_map);
+	if (netif_tx_queue_stopped(txq) ||
 	    need_resched()) {
 		idle_start = getCurUs();
 
@@ -3303,8 +3320,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 		pkt_dev->idle_acc += getCurUs() - idle_start;
 
-		if (netif_queue_stopped(odev) ||
-		    netif_subqueue_stopped(odev, pkt_dev->skb)) {
+		if (netif_tx_queue_stopped(txq)) {
 			pkt_dev->next_tx_us = getCurUs();	/* TODO */
 			pkt_dev->next_tx_ns = 0;
 			goto out;	/* Try the next interface */
@@ -3331,9 +3347,12 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		}
 	}
 
-	netif_tx_lock_bh(odev);
-	if (!netif_queue_stopped(odev) &&
-	    !netif_subqueue_stopped(odev, pkt_dev->skb)) {
+	/* fill_packet() might have changed the queue */
+	queue_map = skb_get_queue_mapping(pkt_dev->skb);
+	txq = netdev_get_tx_queue(odev, queue_map);
+
+	__netif_tx_lock_bh(txq);
+	if (!netif_tx_queue_stopped(txq)) {
 
 		atomic_inc(&(pkt_dev->skb->users));
 	retry_now:
@@ -3377,7 +3396,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		pkt_dev->next_tx_ns = 0;
 	}
 
-	netif_tx_unlock_bh(odev);
+	__netif_tx_unlock_bh(txq);
 
 	/* If pkt_dev->count is zero, then run forever */
 	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
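
The queue-map logic factored into set_cur_queue_map() is easy to check in isolation. A userspace model of the same arithmetic, with rand() standing in for the kernel's random32() and next_queue() as a hypothetical stand-in:

#include <stdio.h>
#include <stdlib.h>

/* Userspace model of pktgen's set_cur_queue_map(): either pick a
 * random queue in [min, max], or walk the range round-robin. */
static unsigned short next_queue(unsigned short cur,
				 unsigned short min, unsigned short max,
				 int randomize)
{
	if (min >= max)
		return cur;	/* single queue: nothing to vary */
	if (randomize)
		return rand() % (max - min + 1) + min;
	return (cur + 1 > max) ? min : cur + 1;
}

int main(void)
{
	unsigned short q = 2;
	for (int i = 0; i < 6; i++) {
		q = next_queue(q, 2, 4, 0);
		printf("%u ", q);	/* round-robin: 3 4 2 3 4 2 */
	}
	printf("\n");
	return 0;
}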
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 4e2b865cbba..2f575b9017d 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -166,7 +166,7 @@ static inline int qdisc_restart(struct netdev_queue *txq)
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
 	if (!netif_subqueue_stopped(dev, skb))
-		ret = dev_hard_start_xmit(skb, dev);
+		ret = dev_hard_start_xmit(skb, dev, txq);
 	HARD_TX_UNLOCK(dev, txq);
 
 	spin_lock(&txq->lock);
@@ -198,11 +198,10 @@ static inline int qdisc_restart(struct netdev_queue *txq)
 
 void __qdisc_run(struct netdev_queue *txq)
 {
-	struct net_device *dev = txq->dev;
 	unsigned long start_time = jiffies;
 
 	while (qdisc_restart(txq)) {
-		if (netif_queue_stopped(dev))
+		if (netif_tx_queue_stopped(txq))
 			break;
 
 		/*
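
With the qdisc now hanging off a struct netdev_queue, __qdisc_run() no longer needs the device pointer at all; the stop test reads the same queue it is draining. A condensed model of the converted loop (the need_resched()/requeue handling of the real function is elided):

#include <linux/netdevice.h>
#include <linux/jiffies.h>

/* Condensed model of the converted __qdisc_run(): both the work
 * (qdisc_restart) and the flow-control test operate on one tx queue. */
static void example_qdisc_run(struct netdev_queue *txq)
{
	unsigned long start_time = jiffies;

	while (qdisc_restart(txq)) {
		if (netif_tx_queue_stopped(txq))
			break;		/* driver stopped this queue */
		if (jiffies != start_time)
			break;		/* held the CPU for a full tick */
	}
}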
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 44a2c3451f4..ade3372221c 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -295,8 +295,7 @@ restart:
 		slave_txq = netdev_get_tx_queue(slave, 0);
 		if (slave_txq->qdisc_sleeping != q)
 			continue;
-		if (netif_queue_stopped(slave) ||
-		    __netif_subqueue_stopped(slave, subq) ||
+		if (__netif_subqueue_stopped(slave, subq) ||
 		    !netif_running(slave)) {
 			busy = 1;
 			continue;
@@ -305,8 +304,7 @@ restart:
 		switch (teql_resolve(skb, skb_res, slave)) {
 		case 0:
 			if (netif_tx_trylock(slave)) {
-				if (!netif_queue_stopped(slave) &&
-				    !__netif_subqueue_stopped(slave, subq) &&
+				if (!__netif_subqueue_stopped(slave, subq) &&
 				    slave->hard_start_xmit(skb, slave) == 0) {
 					netif_tx_unlock(slave);
 					master->slaves = NEXT_SLAVE(q);
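
teql stays on the subqueue helpers because the slave queue index (subq) comes from the master device's skb rather than from a queue already resolved on the slave, so the redundant whole-device test is simply dropped. A sketch of the readiness check in isolation; example_slave_ready() is hypothetical:

#include <linux/netdevice.h>

/* Hypothetical predicate mirroring the teql xmit checks: the slave
 * must be running and subqueue `subq` must not be stopped. */
static bool example_slave_ready(struct net_device *slave, u16 subq)
{
	return netif_running(slave) &&
	       !__netif_subqueue_stopped(slave, subq);
}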