diff options
author | Patrick McHardy <kaber@trash.net> | 2007-06-25 07:35:20 -0400 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2007-07-11 01:15:37 -0400 |
commit | 334a8132d9950f769f390f0f35c233d099688e7a (patch) | |
tree | 0a9e988971d4c20e720e99bccfa6f5feeca5d94a /net | |
parent | e50c41b53d7aa48152dd9c633b04fc7abd536f1f (diff) |
[SKBUFF]: Keep track of writable header len of headerless clones
Currently NAT (and others) that want to modify cloned skbs copy them,
even if in the vast majority of cases it's not necessary because the
skb is a clone made by TCP and the portion NAT wants to modify is
actually writable because TCP releases the header reference before
cloning.
The problem is that there is no clean way for NAT to find out how
long the writable header area is, so this patch introduces skb->hdr_len
to hold this length. When a headerless skb is cloned skb->hdr_len
is set to the current headroom, for regular clones it is copied from
the original. A new function skb_clone_writable(skb, len) returns
whether the skb is writable up to len bytes from skb->data. To avoid
enlarging the skb the mac_len field is reduced to 16 bit and the
new hdr_len field is put in the remaining 16 bit.
I've done a few rough benchmarks of NAT (not with this exact patch,
but a very similar one). As expected it saves huge amounts of system
time in case of sendfile, bringing it down to basically the same
amount as without NAT, with sendmsg it only helps on loopback,
probably because of the large MTU.
Transmit a 1GB file using sendfile/sendmsg over eth0/lo with and
without NAT:
- sendfile eth0, no NAT: sys 0m0.388s
- sendfile eth0, NAT: sys 0m1.835s
- sendfile eth0, NAT + patch: sys 0m0.370s (~ -80%)
- sendfile lo, no NAT: sys 0m0.258s
- sendfile lo, NAT: sys 0m2.609s
- sendfile lo, NAT + patch: sys 0m0.260s (~ -90%)
- sendmsg eth0, no NAT: sys 0m2.508s
- sendmsg eth0, NAT: sys 0m2.539s
- sendmsg eth0, NAT + patch: sys 0m2.445s (no change)
- sendmsg lo, no NAT: sys 0m2.151s
- sendmsg lo, NAT: sys 0m3.557s
- sendmsg lo, NAT + patch: sys 0m2.159s (~ -40%)
I expect other users can see a similar performance improvement;
packet mangling iptables targets, ipip and ip_gre come to mind.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r-- | net/core/skbuff.c | 2 | ||||
-rw-r--r-- | net/netfilter/core.c | 4 |
2 files changed, 5 insertions, 1 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 3943c3ad9145..c989c3a0f907 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -415,6 +415,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) | |||
415 | C(csum); | 415 | C(csum); |
416 | C(local_df); | 416 | C(local_df); |
417 | n->cloned = 1; | 417 | n->cloned = 1; |
418 | n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; | ||
418 | n->nohdr = 0; | 419 | n->nohdr = 0; |
419 | C(pkt_type); | 420 | C(pkt_type); |
420 | C(ip_summed); | 421 | C(ip_summed); |
@@ -676,6 +677,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, | |||
676 | skb->network_header += off; | 677 | skb->network_header += off; |
677 | skb->mac_header += off; | 678 | skb->mac_header += off; |
678 | skb->cloned = 0; | 679 | skb->cloned = 0; |
680 | skb->hdr_len = 0; | ||
679 | skb->nohdr = 0; | 681 | skb->nohdr = 0; |
680 | atomic_set(&skb_shinfo(skb)->dataref, 1); | 682 | atomic_set(&skb_shinfo(skb)->dataref, 1); |
681 | return 0; | 683 | return 0; |
diff --git a/net/netfilter/core.c b/net/netfilter/core.c index a84478ee2ded..3aaabec70d19 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c | |||
@@ -203,7 +203,9 @@ int skb_make_writable(struct sk_buff **pskb, unsigned int writable_len) | |||
203 | return 0; | 203 | return 0; |
204 | 204 | ||
205 | /* Not exclusive use of packet? Must copy. */ | 205 | /* Not exclusive use of packet? Must copy. */ |
206 | if (skb_shared(*pskb) || skb_cloned(*pskb)) | 206 | if (skb_cloned(*pskb) && !skb_clone_writable(*pskb, writable_len)) |
207 | goto copy_skb; | ||
208 | if (skb_shared(*pskb)) | ||
207 | goto copy_skb; | 209 | goto copy_skb; |
208 | 210 | ||
209 | return pskb_may_pull(*pskb, writable_len); | 211 | return pskb_may_pull(*pskb, writable_len); |