aboutsummaryrefslogtreecommitdiffstats
path: root/include/net/sock.h
diff options
context:
space:
mode:
authorTom Herbert <therbert@google.com>2011-04-05 01:30:30 -0400
committerDavid S. Miller <davem@davemloft.net>2011-04-05 01:30:30 -0400
commitc6e1a0d12ca7b4f22c58e55a16beacfb7d3d8462 (patch)
tree6955c20538050329d0bdffdf24a787507ae6fdf1 /include/net/sock.h
parent14f98f258f1936e0dba77474bd7eda63f61a9826 (diff)
net: Allow no-cache copy from user on transmit
This patch uses __copy_from_user_nocache on transmit to bypass data cache for a performance improvement. skb_add_data_nocache and skb_copy_to_page_nocache can be called by sendmsg functions to use this feature, initial support is in tcp_sendmsg. This functionality is configurable per device using ethtool. Presumably, this feature would only be useful when the driver does not touch the data. The feature is turned on by default if a device indicates that it does some form of checksum offload; it is off by default for devices that do no checksum offload or indicate no checksum is necessary. For the former case copy-checksum is probably done anyway, in the latter case the device is likely loopback in which case the no cache copy is probably not beneficial. This patch was tested using 200 instances of netperf TCP_RR with 1400 byte request and one byte reply. Platform is 16 core AMD x86. No-cache copy disabled: 672703 tps, 97.13% utilization 50/90/99% latency: 244.31 484.205 1028.41 No-cache copy enabled: 702113 tps, 96.16% utilization, 50/90/99% latency 238.56 467.56 956.955 Using 14000 byte request and response sizes demonstrates the effects more dramatically: No-cache copy disabled: 79571 tps, 34.34% utilization 50/90/95% latency 1584.46 2319.59 5001.76 No-cache copy enabled: 83856 tps, 34.81% utilization 50/90/95% latency 2508.42 2622.62 2735.88 Note especially the effect on latency tail (95th percentile). This seems to provide a nice performance improvement and is consistent in the tests I ran. Presumably, this would provide the greatest benefits in the presence of an application workload stressing the cache and a lot of transmit data happening. Signed-off-by: Tom Herbert <therbert@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net/sock.h')
-rw-r--r--include/net/sock.h53
1 files changed, 53 insertions, 0 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index da0534d3401c..43bd515e92fd 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -52,6 +52,7 @@
52#include <linux/mm.h> 52#include <linux/mm.h>
53#include <linux/security.h> 53#include <linux/security.h>
54#include <linux/slab.h> 54#include <linux/slab.h>
55#include <linux/uaccess.h>
55 56
56#include <linux/filter.h> 57#include <linux/filter.h>
57#include <linux/rculist_nulls.h> 58#include <linux/rculist_nulls.h>
@@ -1389,6 +1390,58 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
1389 sk->sk_route_caps &= ~flags; 1390 sk->sk_route_caps &= ~flags;
1390} 1391}
1391 1392
/*
 * Copy @copy bytes of user data from @from into @to, choosing the copy
 * strategy based on the skb's checksum state and the socket's route
 * capabilities:
 *
 *  - CHECKSUM_NONE: no device checksum offload, so fold the checksum in
 *    while copying (csum_and_copy_from_user); a cache-bypassing copy would
 *    be pointless because the data is read for checksumming anyway.
 *  - NETIF_F_NOCACHE_COPY set on the route: copy while bypassing the data
 *    cache, for data the CPU will not touch again before transmit.
 *  - otherwise: plain copy_from_user().
 *
 * Returns 0 on success or a negative errno (-EFAULT on a faulting user
 * access).  Does NOT update skb length accounting; callers do that.
 */
static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
					   char __user *from, char *to,
					   int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, to, copy, 0, &err);
		if (err)
			return err;
		/* Fold the partial checksum in at the current offset
		 * (skb->len), which must still be the pre-copy length.
		 */
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
		/* __copy_from_user_nocache() does no access checking of its
		 * own, so access_ok() must be called explicitly first.
		 */
		if (!access_ok(VERIFY_READ, from, copy) ||
		    __copy_from_user_nocache(to, from, copy))
			return -EFAULT;
	} else if (copy_from_user(to, from, copy))
		return -EFAULT;

	return 0;
}
1412
1413static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
1414 char __user *from, int copy)
1415{
1416 int err;
1417
1418 err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy), copy);
1419 if (err)
1420 __skb_trim(skb, skb->len);
1421
1422 return err;
1423}
1424
1425static inline int skb_copy_to_page_nocache(struct sock *sk, char __user *from,
1426 struct sk_buff *skb,
1427 struct page *page,
1428 int off, int copy)
1429{
1430 int err;
1431
1432 err = skb_do_copy_data_nocache(sk, skb, from,
1433 page_address(page) + off, copy);
1434 if (err)
1435 return err;
1436
1437 skb->len += copy;
1438 skb->data_len += copy;
1439 skb->truesize += copy;
1440 sk->sk_wmem_queued += copy;
1441 sk_mem_charge(sk, copy);
1442 return 0;
1443}
1444
1392static inline int skb_copy_to_page(struct sock *sk, char __user *from, 1445static inline int skb_copy_to_page(struct sock *sk, char __user *from,
1393 struct sk_buff *skb, struct page *page, 1446 struct sk_buff *skb, struct page *page,
1394 int off, int copy) 1447 int off, int copy)