author    Eric Dumazet <eric.dumazet@gmail.com>    2009-06-05 00:04:16 -0400
committer David S. Miller <davem@davemloft.net>    2009-06-08 03:21:48 -0400
commit    042a53a9e437feaf2230dd2cadcecfae9c7bfe05 (patch)
tree      ae9078f61e390a3014aecb3fe80d3438ab25ee51 /include/linux/skbuff.h
parent    eae3f29cc73f83cc3f1891d3ad40021b5172c630 (diff)
net: skb_shared_info optimization
skb_dma_unmap() is quite expensive for small packets, because it touches two different cache lines of skb_shared_info: one to access nr_frags, one to access dma_maps[0].

Instead of dma_maps being an array of MAX_SKB_FRAGS + 1 elements, store the DMA mapping of the head (linear) area in a new dma_head field, close to nr_frags, to reduce cache line misses.

Tested on my dev machine (bnx2 & tg3 adapters), nice speedup!

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
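The unmap helper itself is not part of this header diff; a rough sketch of how the skb_dma_unmap() access pattern changes under this layout (assuming the net/core/skb_dma_map.c helper of that era) could look like this:

/* Sketch only: the real change lives in net/core/skb_dma_map.c, not in this
 * header diff.  Before the patch, the head mapping sat in dma_maps[0],
 * placed after frags[MAX_SKB_FRAGS] and therefore far from nr_frags, so
 * unmapping even a small packet touched two cache lines of skb_shared_info.
 */
void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
		   enum dma_data_direction dir)
{
	struct skb_shared_info *sp = skb_shinfo(skb);
	int i;

	/* Old: dma_unmap_single(dev, sp->dma_maps[0], skb_headlen(skb), dir);
	 * New: dma_head sits next to nr_frags near the top of the struct,
	 * so a packet with no fragments needs only that one cache line. */
	dma_unmap_single(dev, sp->dma_head, skb_headlen(skb), dir);

	for (i = 0; i < sp->nr_frags; i++)
		dma_unmap_page(dev, sp->dma_maps[i],	/* was dma_maps[i + 1] */
			       sp->frags[i].size, dir);
}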
Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--  include/linux/skbuff.h | 5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 7485058125e3..aad484cd5863 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -189,6 +189,9 @@ struct skb_shared_info {
 	atomic_t	dataref;
 	unsigned short	nr_frags;
 	unsigned short	gso_size;
+#ifdef CONFIG_HAS_DMA
+	dma_addr_t	dma_head;
+#endif
 	/* Warning: this field is not always filled in (UFO)! */
 	unsigned short	gso_segs;
 	unsigned short  gso_type;
@@ -198,7 +201,7 @@ struct skb_shared_info {
 	struct skb_shared_hwtstamps hwtstamps;
 	skb_frag_t	frags[MAX_SKB_FRAGS];
 #ifdef CONFIG_HAS_DMA
-	dma_addr_t	dma_maps[MAX_SKB_FRAGS + 1];
+	dma_addr_t	dma_maps[MAX_SKB_FRAGS];
 #endif
 	/* Intermediate layers must ensure that destructor_arg
 	 * remains valid until skb destructor */
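The mapping side follows the same shift. A sketch of the corresponding skb_dma_map() (again assuming the net/core/skb_dma_map.c helper of that era, which is not shown in this diff), purely as an illustration of where dma_head gets filled in:

/* Sketch only: the head mapping now lands in dma_head instead of
 * dma_maps[0], and fragment mappings move down by one slot. */
int skb_dma_map(struct device *dev, struct sk_buff *skb,
		enum dma_data_direction dir)
{
	struct skb_shared_info *sp = skb_shinfo(skb);
	dma_addr_t map;
	int i;

	map = dma_map_single(dev, skb->data, skb_headlen(skb), dir);
	if (dma_mapping_error(dev, map))
		return -ENOMEM;

	sp->dma_head = map;			/* was sp->dma_maps[0] */

	for (i = 0; i < sp->nr_frags; i++) {
		skb_frag_t *fp = &sp->frags[i];

		map = dma_map_page(dev, fp->page, fp->page_offset,
				   fp->size, dir);
		if (dma_mapping_error(dev, map))
			goto unwind;
		sp->dma_maps[i] = map;		/* was sp->dma_maps[i + 1] */
	}
	return 0;

unwind:
	while (--i >= 0)
		dma_unmap_page(dev, sp->dma_maps[i],
			       sp->frags[i].size, dir);
	dma_unmap_single(dev, sp->dma_head, skb_headlen(skb), dir);
	return -ENOMEM;
}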