diff options
author | Daniel Borkmann <dborkman@redhat.com> | 2013-06-08 08:18:16 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2013-06-12 03:47:25 -0400 |
commit | 7a6e288d2745611bef5b614acf19644283765732 (patch) | |
tree | e99fd60ae5781b8cddffac6e30f41927ee172512 /net/core | |
parent | da5bab079f9b7d90ba234965a14914ace55e45e9 (diff) |
pktgen: ipv6: numa: consolidate skb allocation to pktgen_alloc_skb
We currently allow for numa-node aware skb allocation only within the
fill_packet_ipv4() path, but not in fill_packet_ipv6(). Consolidate that
code to a common allocation helper to enable numa-node aware skb
allocation for ipv6, and use it in both paths. This also makes both
functions a bit more readable.
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r-- | net/core/pktgen.c | 52 |
1 file changed, 27 insertions(+), 25 deletions(-)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 303412d8332b..9640972ec50e 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -2627,6 +2627,29 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb, | |||
2627 | pgh->tv_usec = htonl(timestamp.tv_usec); | 2627 | pgh->tv_usec = htonl(timestamp.tv_usec); |
2628 | } | 2628 | } |
2629 | 2629 | ||
2630 | static struct sk_buff *pktgen_alloc_skb(struct net_device *dev, | ||
2631 | struct pktgen_dev *pkt_dev, | ||
2632 | unsigned int extralen) | ||
2633 | { | ||
2634 | struct sk_buff *skb = NULL; | ||
2635 | unsigned int size = pkt_dev->cur_pkt_size + 64 + extralen + | ||
2636 | pkt_dev->pkt_overhead; | ||
2637 | |||
2638 | if (pkt_dev->flags & F_NODE) { | ||
2639 | int node = pkt_dev->node >= 0 ? pkt_dev->node : numa_node_id(); | ||
2640 | |||
2641 | skb = __alloc_skb(NET_SKB_PAD + size, GFP_NOWAIT, 0, node); | ||
2642 | if (likely(skb)) { | ||
2643 | skb_reserve(skb, NET_SKB_PAD); | ||
2644 | skb->dev = dev; | ||
2645 | } | ||
2646 | } else { | ||
2647 | skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT); | ||
2648 | } | ||
2649 | |||
2650 | return skb; | ||
2651 | } | ||
2652 | |||
2630 | static struct sk_buff *fill_packet_ipv4(struct net_device *odev, | 2653 | static struct sk_buff *fill_packet_ipv4(struct net_device *odev, |
2631 | struct pktgen_dev *pkt_dev) | 2654 | struct pktgen_dev *pkt_dev) |
2632 | { | 2655 | { |
@@ -2657,32 +2680,13 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, | |||
2657 | 2680 | ||
2658 | datalen = (odev->hard_header_len + 16) & ~0xf; | 2681 | datalen = (odev->hard_header_len + 16) & ~0xf; |
2659 | 2682 | ||
2660 | if (pkt_dev->flags & F_NODE) { | 2683 | skb = pktgen_alloc_skb(odev, pkt_dev, datalen); |
2661 | int node; | ||
2662 | |||
2663 | if (pkt_dev->node >= 0) | ||
2664 | node = pkt_dev->node; | ||
2665 | else | ||
2666 | node = numa_node_id(); | ||
2667 | |||
2668 | skb = __alloc_skb(NET_SKB_PAD + pkt_dev->cur_pkt_size + 64 | ||
2669 | + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT, 0, node); | ||
2670 | if (likely(skb)) { | ||
2671 | skb_reserve(skb, NET_SKB_PAD); | ||
2672 | skb->dev = odev; | ||
2673 | } | ||
2674 | } | ||
2675 | else | ||
2676 | skb = __netdev_alloc_skb(odev, | ||
2677 | pkt_dev->cur_pkt_size + 64 | ||
2678 | + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT); | ||
2679 | |||
2680 | if (!skb) { | 2684 | if (!skb) { |
2681 | sprintf(pkt_dev->result, "No memory"); | 2685 | sprintf(pkt_dev->result, "No memory"); |
2682 | return NULL; | 2686 | return NULL; |
2683 | } | 2687 | } |
2684 | prefetchw(skb->data); | ||
2685 | 2688 | ||
2689 | prefetchw(skb->data); | ||
2686 | skb_reserve(skb, datalen); | 2690 | skb_reserve(skb, datalen); |
2687 | 2691 | ||
2688 | /* Reserve for ethernet and IP header */ | 2692 | /* Reserve for ethernet and IP header */ |
@@ -2786,15 +2790,13 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, | |||
2786 | mod_cur_headers(pkt_dev); | 2790 | mod_cur_headers(pkt_dev); |
2787 | queue_map = pkt_dev->cur_queue_map; | 2791 | queue_map = pkt_dev->cur_queue_map; |
2788 | 2792 | ||
2789 | skb = __netdev_alloc_skb(odev, | 2793 | skb = pktgen_alloc_skb(odev, pkt_dev, 16); |
2790 | pkt_dev->cur_pkt_size + 64 | ||
2791 | + 16 + pkt_dev->pkt_overhead, GFP_NOWAIT); | ||
2792 | if (!skb) { | 2794 | if (!skb) { |
2793 | sprintf(pkt_dev->result, "No memory"); | 2795 | sprintf(pkt_dev->result, "No memory"); |
2794 | return NULL; | 2796 | return NULL; |
2795 | } | 2797 | } |
2796 | prefetchw(skb->data); | ||
2797 | 2798 | ||
2799 | prefetchw(skb->data); | ||
2798 | skb_reserve(skb, 16); | 2800 | skb_reserve(skb, 16); |
2799 | 2801 | ||
2800 | /* Reserve for ethernet and IP header */ | 2802 | /* Reserve for ethernet and IP header */ |