author	Eric Dumazet <edumazet@google.com>	2014-11-07 00:09:44 -0500
committer	David S. Miller <davem@davemloft.net>	2014-11-10 12:05:59 -0500
commit	3b47d30396bae4f0bd1ff0dbcd7c4f5077e7df4e (patch)
tree	ab6ce3881e5672496f89f556d01d3d316bf6e7eb /net/core/net-sysfs.c
parent	be955b2984822e2a66176bccb3e0dbaf9cd569b6 (diff)
net: gro: add a per device gro flush timer
Tuning coalescing parameters on a NIC can be really hard.

Servers can handle both bulk and RPC-like traffic, with conflicting goals: bulk flows want GRO packets as big as possible, RPC traffic wants minimal latencies.

To reach big GRO packets on a 10GbE NIC, one can use:

ethtool -C eth0 rx-usecs 4 rx-frames 44

But this penalizes RPC sessions, with an increase of latencies, up to 50% in some cases, as NICs generally do not force an interrupt when a packet with the TCP Push flag is received. Some NICs do not have an absolute timer, only a timer rearmed for every incoming packet.

This patch uses a different strategy: let the GRO stack decide what to do, based on the traffic pattern. Packets with the Push flag won't be delayed. Packets without the Push flag might be held in the GRO engine, if we keep receiving data.

This new mechanism is off by default, and shall be enabled by setting /sys/class/net/ethX/gro_flush_timeout to a value in nanoseconds.

To fully enable this mechanism, drivers should use napi_complete_done() instead of napi_complete().

Tested: Ran 200 netperf TCP_STREAM from A to B (10GbE mlx4 link, 8 RX queues). Without this feature, we send back about 305,000 ACKs per second, and the GRO aggregation ratio is low (811/305 = 2.65 segments per GRO packet). Setting a timer of 2000 nsec is enough to increase GRO packet sizes and reduce the number of ACK packets (811/19.2 = 42). The receiver performs fewer calls to upper stacks and fewer wakeups. This also reduces CPU usage on the sender, as it receives fewer ACK packets. Note that reducing the number of wakeups increases CPU efficiency, but can decrease QPS, as applications won't have the chance to warm up CPU caches doing a partial read of RPC requests/answers if they fit in one skb.

B:~# sar -n DEV 1 10 | grep eth0 | tail -1
Average:       eth0 811269.80 305732.30 1199462.57 19705.72 0.00 0.00 0.50
B:~# echo 2000 >/sys/class/net/eth0/gro_flush_timeout
B:~# sar -n DEV 1 10 | grep eth0 | tail -1
Average:       eth0 811577.30  19230.80 1199916.51  1239.80 0.00 0.00 0.50

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
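For illustration, here is a minimal sketch of the driver-side change the message asks for: a NAPI poll routine calling napi_complete_done() instead of napi_complete(). The my_clean_rx() and my_enable_rx_irq() helpers are hypothetical placeholders, not taken from any in-tree driver.

/*
 * Illustrative sketch: a driver NAPI poll routine opting in to the new
 * timer.  my_clean_rx() and my_enable_rx_irq() are hypothetical helpers
 * standing in for whatever RX processing the driver actually does.
 */
static int my_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = my_clean_rx(napi, budget);

	if (work_done < budget) {
		/*
		 * napi_complete() would flush GRO packets right away;
		 * napi_complete_done() reports how much work was done so
		 * the core can instead arm the gro_flush_timeout timer
		 * and hold non-PSH packets a little longer.
		 */
		napi_complete_done(napi, work_done);
		my_enable_rx_irq(napi);
	}

	return work_done;
}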
Diffstat (limited to 'net/core/net-sysfs.c')
-rw-r--r--	net/core/net-sysfs.c	18
1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 9dd06699b09c..1a24602cd54e 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -325,6 +325,23 @@ static ssize_t tx_queue_len_store(struct device *dev,
 }
 NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);
 
+static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
+{
+	dev->gro_flush_timeout = val;
+	return 0;
+}
+
+static ssize_t gro_flush_timeout_store(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t len)
+{
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
+}
+NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);
+
 static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
 			     const char *buf, size_t len)
 {
@@ -422,6 +439,7 @@ static struct attribute *net_class_attrs[] = {
 	&dev_attr_mtu.attr,
 	&dev_attr_flags.attr,
 	&dev_attr_tx_queue_len.attr,
+	&dev_attr_gro_flush_timeout.attr,
 	&dev_attr_phys_port_id.attr,
 	NULL,
 };
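For context, the sysfs attribute added above only stores the value; it is consumed on the NAPI completion path in net/core/dev.c, which is outside this diffstat. Below is a simplified sketch of that consumption, not the exact in-tree code, assuming the hrtimer-based deferral described in the commit message.

/*
 * Simplified sketch, not the exact in-tree code: how the core might use
 * dev->gro_flush_timeout when a driver completes polling via
 * napi_complete_done().  n->timer stands for the per-NAPI hrtimer this
 * feature introduces; NAPI state handling is reduced to __napi_complete().
 */
void napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags;

	if (n->gro_list) {
		unsigned long timeout = 0;

		/* Only defer the flush if the poll actually did work and
		 * the administrator enabled the timeout via sysfs. */
		if (work_done)
			timeout = n->dev->gro_flush_timeout;

		if (timeout)
			hrtimer_start(&n->timer, ns_to_ktime(timeout),
				      HRTIMER_MODE_REL_PINNED);
		else
			napi_gro_flush(n, false);
	}

	/* Clear NAPI_STATE_SCHED and leave the poll list, as before. */
	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}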