diff options
author | Eric Dumazet <eric.dumazet@gmail.com> | 2009-06-05 00:04:16 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-06-08 03:21:48 -0400 |
commit | 042a53a9e437feaf2230dd2cadcecfae9c7bfe05 (patch) | |
tree | ae9078f61e390a3014aecb3fe80d3438ab25ee51 /net/core | |
parent | eae3f29cc73f83cc3f1891d3ad40021b5172c630 (diff) |
net: skb_shared_info optimization
skb_dma_unmap() is quite expensive for small packets,
because we use two different cache lines from skb_shared_info.
One to access nr_frags, the other to access dma_maps[0].
Instead of dma_maps being an array of MAX_SKB_FRAGS + 1 elements,
store the head mapping on its own in a new dma_head field, close to nr_frags,
to reduce cache line misses.
Tested on my dev machine (bnx2 & tg3 adapters), nice speedup !
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r-- | net/core/skb_dma_map.c | 12 |
1 file changed, 6 insertions, 6 deletions
diff --git a/net/core/skb_dma_map.c b/net/core/skb_dma_map.c index 7adb623ef664..79687dfd6957 100644 --- a/net/core/skb_dma_map.c +++ b/net/core/skb_dma_map.c | |||
@@ -20,7 +20,7 @@ int skb_dma_map(struct device *dev, struct sk_buff *skb, | |||
20 | if (dma_mapping_error(dev, map)) | 20 | if (dma_mapping_error(dev, map)) |
21 | goto out_err; | 21 | goto out_err; |
22 | 22 | ||
23 | sp->dma_maps[0] = map; | 23 | sp->dma_head = map; |
24 | for (i = 0; i < sp->nr_frags; i++) { | 24 | for (i = 0; i < sp->nr_frags; i++) { |
25 | skb_frag_t *fp = &sp->frags[i]; | 25 | skb_frag_t *fp = &sp->frags[i]; |
26 | 26 | ||
@@ -28,7 +28,7 @@ int skb_dma_map(struct device *dev, struct sk_buff *skb, | |||
28 | fp->size, dir); | 28 | fp->size, dir); |
29 | if (dma_mapping_error(dev, map)) | 29 | if (dma_mapping_error(dev, map)) |
30 | goto unwind; | 30 | goto unwind; |
31 | sp->dma_maps[i + 1] = map; | 31 | sp->dma_maps[i] = map; |
32 | } | 32 | } |
33 | 33 | ||
34 | return 0; | 34 | return 0; |
@@ -37,10 +37,10 @@ unwind: | |||
37 | while (--i >= 0) { | 37 | while (--i >= 0) { |
38 | skb_frag_t *fp = &sp->frags[i]; | 38 | skb_frag_t *fp = &sp->frags[i]; |
39 | 39 | ||
40 | dma_unmap_page(dev, sp->dma_maps[i + 1], | 40 | dma_unmap_page(dev, sp->dma_maps[i], |
41 | fp->size, dir); | 41 | fp->size, dir); |
42 | } | 42 | } |
43 | dma_unmap_single(dev, sp->dma_maps[0], | 43 | dma_unmap_single(dev, sp->dma_head, |
44 | skb_headlen(skb), dir); | 44 | skb_headlen(skb), dir); |
45 | out_err: | 45 | out_err: |
46 | return -ENOMEM; | 46 | return -ENOMEM; |
@@ -53,12 +53,12 @@ void skb_dma_unmap(struct device *dev, struct sk_buff *skb, | |||
53 | struct skb_shared_info *sp = skb_shinfo(skb); | 53 | struct skb_shared_info *sp = skb_shinfo(skb); |
54 | int i; | 54 | int i; |
55 | 55 | ||
56 | dma_unmap_single(dev, sp->dma_maps[0], | 56 | dma_unmap_single(dev, sp->dma_head, |
57 | skb_headlen(skb), dir); | 57 | skb_headlen(skb), dir); |
58 | for (i = 0; i < sp->nr_frags; i++) { | 58 | for (i = 0; i < sp->nr_frags; i++) { |
59 | skb_frag_t *fp = &sp->frags[i]; | 59 | skb_frag_t *fp = &sp->frags[i]; |
60 | 60 | ||
61 | dma_unmap_page(dev, sp->dma_maps[i + 1], | 61 | dma_unmap_page(dev, sp->dma_maps[i], |
62 | fp->size, dir); | 62 | fp->size, dir); |
63 | } | 63 | } |
64 | } | 64 | } |