author     Linus Torvalds <torvalds@linux-foundation.org>  2014-10-08 21:40:54 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-10-08 21:40:54 -0400
commit     35a9ad8af0bb0fa3525e6d0d20e32551d226f38e (patch)
tree       15b4b33206818886d9cff371fd2163e073b70568 /net/ipv4/tcp_cubic.c
parent     d5935b07da53f74726e2a65dd4281d0f2c70e5d4 (diff)
parent     64b1f00a0830e1c53874067273a096b228d83d36 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Most notable changes in here:

  1) By far the biggest accomplishment, thanks to a large range of
     contributors, is the addition of multi-send for transmit. This is
     the result of discussions back in Chicago, and the hard work of
     several individuals.

     Now, when the ->ndo_start_xmit() method of a driver sees
     skb->xmit_more as true, it can choose to defer the doorbell
     telling the driver to start processing the new TX queue entries.

     skb->xmit_more means that the generic networking is guaranteed to
     call the driver immediately with another SKB to send.

     There is logic added to the qdisc layer to dequeue multiple
     packets at a time, and the handling of mis-predicted offloads in
     software is now done with no locks held.

     Finally, pktgen is extended to have a "burst" parameter that can
     be used to test a multi-send implementation.

     Several drivers have xmit_more support: i40e, igb, ixgbe, mlx4,
     virtio_net. Adding support is almost trivial, so expect more
     drivers to support this optimization soon. (A minimal sketch of
     the driver-side pattern appears after this message.)

     I want to thank, in no particular or implied order, Jesper
     Dangaard Brouer, Eric Dumazet, Alexander Duyck, Tom Herbert,
     Jamal Hadi Salim, John Fastabend, Florian Westphal, Daniel
     Borkmann, David Tat, Hannes Frederic Sowa, and Rusty Russell.

  2) PTP and timestamping support in bnx2x, from Michal Kalderon.

  3) Allow adjusting the rx_copybreak threshold for a driver via
     ethtool, and add rx_copybreak support to the enic driver. From
     Govindarajulu Varadarajan.

  4) Significant enhancements to the generic PHY layer and the bcm7xxx
     driver in particular (EEE support, auto power down, etc.) from
     Florian Fainelli.

  5) Allow raw buffers to be used for flow dissection, allowing drivers
     to determine the optimal "linear pull" size for devices that DMA
     into pools of pages. The objective is to get exactly the necessary
     amount of headers into the linear SKB area pre-pulled, but no
     more. The new interface drivers use is eth_get_headlen(). From
     WANG Cong, with driver conversions (several had their own by-hand
     duplicated implementations) by Alexander Duyck and Eric Dumazet.

  6) Support checksumming more smoothly and efficiently for
     encapsulations, and add "foo over UDP" facility. From Tom Herbert.

  7) Add Broadcom SF2 switch driver to DSA layer, from Florian
     Fainelli.

  8) eBPF now can load programs via a system call and has an extensive
     testsuite. From Alexei Starovoitov and Daniel Borkmann.

  9) Major overhaul of the packet scheduler to use RCU in several major
     areas such as the classifiers and rate estimators. From John
     Fastabend.

 10) Add driver for Intel FM10000 Ethernet Switch, from Alexander
     Duyck.

 11) Rearrange TCP_SKB_CB() to reduce cache line misses, from Eric
     Dumazet.

 12) Add Datacenter TCP congestion control algorithm support, from
     Florian Westphal.

 13) Reorganize sk_buff so that __copy_skb_header() is significantly
     faster. From Eric Dumazet"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1558 commits)
  netlabel: directly return netlbl_unlabel_genl_init()
  net: add netdev_txq_bql_{enqueue, complete}_prefetchw() helpers
  net: description of dma_cookie cause make xmldocs warning
  cxgb4: clean up a type issue
  cxgb4: potential shift wrapping bug
  i40e: skb->xmit_more support
  net: fs_enet: Add NAPI TX
  net: fs_enet: Remove non NAPI RX
  r8169: add support for RTL8168EP
  net_sched: copy exts->type in tcf_exts_change()
  wimax: convert printk to pr_foo()
  af_unix: remove 0 assignment on static
  ipv6: Do not warn for informational ICMP messages, regardless of type.
  Update Intel Ethernet Driver maintainers list
  bridge: Save frag_max_size between PRE_ROUTING and POST_ROUTING
  tipc: fix bug in multicast congestion handling
  net: better IFF_XMIT_DST_RELEASE support
  net/mlx4_en: remove NETDEV_TX_BUSY
  3c59x: fix bad split of cpu_to_le32(pci_map_single())
  net: bcmgenet: fix Tx ring priority programming
  ...
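A minimal, hypothetical sketch of the driver-side xmit_more pattern described in
point 1 above. This is an illustration, not code from this merge: the foo_*
structures and helpers are invented, while skb->xmit_more, netdev_get_tx_queue(),
netif_xmit_stopped(), writel() and the netdev_tx_t return convention are real
kernel interfaces.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/io.h>

/* Hypothetical per-queue TX ring state; a real driver's layout will differ. */
struct foo_tx_ring {
	u16		 index;		/* TX queue index           */
	u16		 next_to_use;	/* producer (tail) position */
	void __iomem	*doorbell;	/* MMIO doorbell register   */
};

struct foo_priv {
	struct foo_tx_ring tx_ring[8];
};

/* Invented helper: map the skb, write a descriptor, advance next_to_use. */
void foo_post_tx_descriptor(struct foo_tx_ring *ring, struct sk_buff *skb);

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);
	struct foo_tx_ring *ring = &priv->tx_ring[skb_get_queue_mapping(skb)];
	struct netdev_queue *txq = netdev_get_tx_queue(dev, ring->index);

	foo_post_tx_descriptor(ring, skb);

	/* Defer the doorbell while the stack promises to call us again
	 * immediately with another skb; flush at the end of the burst or
	 * if the queue has been stopped in the meantime. */
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		writel(ring->next_to_use, ring->doorbell);

	return NETDEV_TX_OK;
}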
Diffstat (limited to 'net/ipv4/tcp_cubic.c')
-rw-r--r--  net/ipv4/tcp_cubic.c | 18
1 file changed, 10 insertions, 8 deletions
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index a9bd8a4828a9..20de0118c98e 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -82,12 +82,13 @@ MODULE_PARM_DESC(hystart_ack_delta, "spacing between ack's indicating train (mse
 /* BIC TCP Parameters */
 struct bictcp {
 	u32	cnt;		/* increase cwnd by 1 after ACKs */
 	u32	last_max_cwnd;	/* last maximum snd_cwnd */
 	u32	loss_cwnd;	/* congestion window at last loss */
 	u32	last_cwnd;	/* the last snd_cwnd */
 	u32	last_time;	/* time when updated last_cwnd */
 	u32	bic_origin_point;/* origin point of bic function */
-	u32	bic_K;		/* time to origin point from the beginning of the current epoch */
+	u32	bic_K;		/* time to origin point
+				   from the beginning of the current epoch */
 	u32	delay_min;	/* min delay (msec << 3) */
 	u32	epoch_start;	/* beginning of an epoch */
 	u32	ack_cnt;	/* number of acks */
@@ -219,7 +220,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 	ca->last_time = tcp_time_stamp;
 
 	if (ca->epoch_start == 0) {
-		ca->epoch_start = tcp_time_stamp;	/* record the beginning of an epoch */
+		ca->epoch_start = tcp_time_stamp;	/* record beginning */
 		ca->ack_cnt = 1;			/* start counting */
 		ca->tcp_cwnd = cwnd;			/* syn with cubic */
 
@@ -263,9 +264,9 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 
 	/* c/rtt * (t-K)^3 */
 	delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ);
 	if (t < ca->bic_K)		/* below origin*/
 		bic_target = ca->bic_origin_point - delta;
 	else				/* above origin*/
 		bic_target = ca->bic_origin_point + delta;
 
 	/* cubic function - calc bictcp_cnt*/
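For context (not part of the patch): the hunk above computes the CUBIC window
curve. With W_max kept in ca->bic_origin_point and K in ca->bic_K, the target
congestion window at time t since the start of the epoch is

	W(t) = C (t - K)^3 + W_max

Here delta holds the magnitude of the cubic term in fixed point: cube_rtt_scale
folds in the constant C, a 1/RTT factor and a 1024 scale (per the "1024*c/rtt"
comment later in the file), and the >> (10 + 3*BICTCP_HZ) shift removes that
scale together with the fixed-point time units of offs; the sign of the term
flips depending on whether t is below or above the origin K.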
@@ -285,13 +286,14 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 	/* TCP Friendly */
 	if (tcp_friendliness) {
 		u32 scale = beta_scale;
+
 		delta = (cwnd * scale) >> 3;
 		while (ca->ack_cnt > delta) {		/* update tcp cwnd */
 			ca->ack_cnt -= delta;
 			ca->tcp_cwnd++;
 		}
 
-		if (ca->tcp_cwnd > cwnd){	/* if bic is slower than tcp */
+		if (ca->tcp_cwnd > cwnd) {	/* if bic is slower than tcp */
 			delta = ca->tcp_cwnd - cwnd;
 			max_cnt = cwnd / delta;
 			if (ca->cnt > max_cnt)
@@ -320,7 +322,6 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		bictcp_update(ca, tp->snd_cwnd);
 		tcp_cong_avoid_ai(tp, ca->cnt);
 	}
-
 }
 
 static u32 bictcp_recalc_ssthresh(struct sock *sk)
@@ -452,7 +453,8 @@ static int __init cubictcp_register(void)
 	 * based on SRTT of 100ms
 	 */
 
-	beta_scale = 8*(BICTCP_BETA_SCALE+beta)/ 3 / (BICTCP_BETA_SCALE - beta);
+	beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3
+		/ (BICTCP_BETA_SCALE - beta);
 
 	cube_rtt_scale = (bic_scale * 10);	/* 1024*c/rtt */
 
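As a quick check of the re-wrapped expression above, a small stand-alone
snippet using the module defaults defined earlier in tcp_cubic.c (beta = 717,
BICTCP_BETA_SCALE = 1024, bic_scale = 41); illustrative only, not part of the
patch:

#include <stdio.h>

int main(void)
{
	/* Defaults from tcp_cubic.c: beta = 717, BICTCP_BETA_SCALE = 1024,
	 * bic_scale = 41. */
	const int beta = 717, BICTCP_BETA_SCALE = 1024, bic_scale = 41;
	int beta_scale, cube_rtt_scale;

	beta_scale = 8 * (BICTCP_BETA_SCALE + beta) / 3
		/ (BICTCP_BETA_SCALE - beta);
	cube_rtt_scale = bic_scale * 10;

	printf("beta_scale = %d\n", beta_scale);         /* prints 15  */
	printf("cube_rtt_scale = %d\n", cube_rtt_scale); /* prints 410 */
	return 0;
}

The split expression evaluates exactly as the original one-liner did; only the
line wrapping changed.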