author     David S. Miller <davem@davemloft.net>    2008-07-17 04:56:23 -0400
committer  David S. Miller <davem@davemloft.net>    2008-07-17 22:21:07 -0400
commit     fd2ea0a79faad824258af5dcec1927aa24d81c16
tree       644fd4ce92227cc319c7a54c63ea07a96b8c6b8d    /net/core/pktgen.c
parent     24344d2600108b9b79a60c0e4c43b3c499856d14
net: Use queue aware tests throughout.
This effectively "flips the switch" by making the core networking and multiqueue-aware drivers use the new TX multiqueue structures.

Non-multiqueue drivers need no changes. The interfaces they use such as netif_stop_queue() degenerate into an operation on TX queue zero. So everything "just works" for them.

Code that really wants to do "X" to all TX queues now invokes a routine that does so, such as netif_tx_wake_all_queues(), netif_tx_stop_all_queues(), etc.

pktgen and netpoll required a little bit more surgery than the others.

In particular the pktgen changes, whilst functional, could be largely improved. The initial check in pktgen_xmit() will sometimes check the wrong queue, which is mostly harmless. The thing to do is probably to invoke fill_packet() earlier.

The bulk of the netpoll changes is to make the code operate solely on the TX queue indicated by the SKB queue mapping.

Setting of the SKB queue mapping is entirely confined inside of net/core/dev.c:dev_pick_tx(). If we end up needing any kind of special semantics (drops, for example) it will be implemented here.

Finally, we now have a "real_num_tx_queues" which is where the driver indicates how many TX queues are actually active.

With IGB changes from Jeff Kirsher.

Signed-off-by: David S. Miller <davem@davemloft.net>
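For readers following along, here is a minimal sketch of the pattern the message describes, assuming the netdevice.h helpers introduced by this series (this sketch is not part of the pktgen.c diff below, and the bodies are simplified rather than copied from the patch): single-queue entry points collapse onto TX queue zero, while the new *_all_queues helpers walk every allocated TX queue.

/*
 * Simplified, illustrative versions of the helpers named above;
 * see include/linux/netdevice.h in the TX multiqueue series for
 * the real definitions.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	/* non-multiqueue drivers: degenerates into an op on TX queue 0 */
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	/* queue-aware code stops each TX queue individually */
	for (i = 0; i < dev->num_tx_queues; i++)
		netif_tx_stop_queue(netdev_get_tx_queue(dev, i));
}

The pktgen changes below follow the same idea: the skb's queue mapping is resolved to a struct netdev_queue with netdev_get_tx_queue(), and that specific queue is tested with netif_tx_queue_stopped() instead of testing the whole device.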
Diffstat (limited to 'net/core/pktgen.c')
-rw-r--r--    net/core/pktgen.c    69
1 file changed, 44 insertions, 25 deletions
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index fdf537707e51..906802db4ed4 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2123,6 +2123,24 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
 	}
 }
 #endif
+static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
+{
+	if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
+		__u16 t;
+		if (pkt_dev->flags & F_QUEUE_MAP_RND) {
+			t = random32() %
+				(pkt_dev->queue_map_max -
+				 pkt_dev->queue_map_min + 1)
+				+ pkt_dev->queue_map_min;
+		} else {
+			t = pkt_dev->cur_queue_map + 1;
+			if (t > pkt_dev->queue_map_max)
+				t = pkt_dev->queue_map_min;
+		}
+		pkt_dev->cur_queue_map = t;
+	}
+}
+
 /* Increment/randomize headers according to flags and current values
  * for IP src/dest, UDP src/dst port, MAC-Addr src/dst
  */
@@ -2325,19 +2343,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 		pkt_dev->cur_pkt_size = t;
 	}
 
-	if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
-		__u16 t;
-		if (pkt_dev->flags & F_QUEUE_MAP_RND) {
-			t = random32() %
-				(pkt_dev->queue_map_max - pkt_dev->queue_map_min + 1)
-				+ pkt_dev->queue_map_min;
-		} else {
-			t = pkt_dev->cur_queue_map + 1;
-			if (t > pkt_dev->queue_map_max)
-				t = pkt_dev->queue_map_min;
-		}
-		pkt_dev->cur_queue_map = t;
-	}
+	set_cur_queue_map(pkt_dev);
 
 	pkt_dev->flows[flow].count++;
 }
@@ -2458,7 +2464,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	__be16 *vlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for VLAN tag */
 	__be16 *svlan_tci = NULL;                /* Encapsulates priority and SVLAN ID */
 	__be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */
-
+	u16 queue_map;
 
 	if (pkt_dev->nr_labels)
 		protocol = htons(ETH_P_MPLS_UC);
@@ -2469,6 +2475,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	/* Update any of the values, used when we're incrementing various
 	 * fields.
 	 */
+	queue_map = pkt_dev->cur_queue_map;
 	mod_cur_headers(pkt_dev);
 
 	datalen = (odev->hard_header_len + 16) & ~0xf;
@@ -2507,7 +2514,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	skb->network_header = skb->tail;
 	skb->transport_header = skb->network_header + sizeof(struct iphdr);
 	skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr));
-	skb_set_queue_mapping(skb, pkt_dev->cur_queue_map);
+	skb_set_queue_mapping(skb, queue_map);
 	iph = ip_hdr(skb);
 	udph = udp_hdr(skb);
 
@@ -2797,6 +2804,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	__be16 *vlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for VLAN tag */
 	__be16 *svlan_tci = NULL;                /* Encapsulates priority and SVLAN ID */
 	__be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */
+	u16 queue_map;
 
 	if (pkt_dev->nr_labels)
 		protocol = htons(ETH_P_MPLS_UC);
@@ -2807,6 +2815,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	/* Update any of the values, used when we're incrementing various
 	 * fields.
 	 */
+	queue_map = pkt_dev->cur_queue_map;
 	mod_cur_headers(pkt_dev);
 
 	skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 +
@@ -2844,7 +2853,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	skb->network_header = skb->tail;
 	skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
 	skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr));
-	skb_set_queue_mapping(skb, pkt_dev->cur_queue_map);
+	skb_set_queue_mapping(skb, queue_map);
 	iph = ipv6_hdr(skb);
 	udph = udp_hdr(skb);
 
@@ -3263,7 +3272,9 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
 static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 {
 	struct net_device *odev = NULL;
+	struct netdev_queue *txq;
 	__u64 idle_start = 0;
+	u16 queue_map;
 	int ret;
 
 	odev = pkt_dev->odev;
@@ -3285,9 +3296,15 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		}
 	}
 
-	if ((netif_queue_stopped(odev) ||
-	     (pkt_dev->skb &&
-	      netif_subqueue_stopped(odev, pkt_dev->skb))) ||
+	if (!pkt_dev->skb) {
+		set_cur_queue_map(pkt_dev);
+		queue_map = pkt_dev->cur_queue_map;
+	} else {
+		queue_map = skb_get_queue_mapping(pkt_dev->skb);
+	}
+
+	txq = netdev_get_tx_queue(odev, queue_map);
+	if (netif_tx_queue_stopped(txq) ||
 	    need_resched()) {
 		idle_start = getCurUs();
 
@@ -3303,8 +3320,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 		pkt_dev->idle_acc += getCurUs() - idle_start;
 
-		if (netif_queue_stopped(odev) ||
-		    netif_subqueue_stopped(odev, pkt_dev->skb)) {
+		if (netif_tx_queue_stopped(txq)) {
 			pkt_dev->next_tx_us = getCurUs();	/* TODO */
 			pkt_dev->next_tx_ns = 0;
 			goto out;	/* Try the next interface */
@@ -3331,9 +3347,12 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		}
 	}
 
-	netif_tx_lock_bh(odev);
-	if (!netif_queue_stopped(odev) &&
-	    !netif_subqueue_stopped(odev, pkt_dev->skb)) {
+	/* fill_packet() might have changed the queue */
+	queue_map = skb_get_queue_mapping(pkt_dev->skb);
+	txq = netdev_get_tx_queue(odev, queue_map);
+
+	__netif_tx_lock_bh(txq);
+	if (!netif_tx_queue_stopped(txq)) {
 
 		atomic_inc(&(pkt_dev->skb->users));
 	retry_now:
@@ -3377,7 +3396,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		pkt_dev->next_tx_ns = 0;
 	}
 
-	netif_tx_unlock_bh(odev);
+	__netif_tx_unlock_bh(txq);
 
 	/* If pkt_dev->count is zero, then run forever */
 	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {