author	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-08 21:40:54 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-08 21:40:54 -0400
commit	35a9ad8af0bb0fa3525e6d0d20e32551d226f38e
tree	15b4b33206818886d9cff371fd2163e073b70568 /net/ipv4/tcp_westwood.c
parent	d5935b07da53f74726e2a65dd4281d0f2c70e5d4
parent	64b1f00a0830e1c53874067273a096b228d83d36
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Most notable changes in here:

  1) By far the biggest accomplishment, thanks to a large range of contributors, is the addition of multi-send for transmit.  This is the result of discussions back in Chicago, and the hard work of several individuals.

     Now, when the ->ndo_start_xmit() method of a driver sees skb->xmit_more as true, it can choose to defer the doorbell telling the hardware to start processing the new TX queue entries.  skb->xmit_more means that the generic networking stack is guaranteed to call the driver immediately with another SKB to send.

     There is logic added to the qdisc layer to dequeue multiple packets at a time, and the handling of mis-predicted offloads in software is now done with no locks held.  Finally, pktgen is extended to have a "burst" parameter that can be used to test a multi-send implementation.

     Several drivers have xmit_more support: i40e, igb, ixgbe, mlx4, virtio_net.  Adding support is almost trivial, so expect more drivers to support this optimization soon (a sketch of the driver-side pattern follows the shortlog below).

     I want to thank, in no particular or implied order, Jesper Dangaard Brouer, Eric Dumazet, Alexander Duyck, Tom Herbert, Jamal Hadi Salim, John Fastabend, Florian Westphal, Daniel Borkmann, David Tat, Hannes Frederic Sowa, and Rusty Russell.

  2) PTP and timestamping support in bnx2x, from Michal Kalderon.

  3) Allow adjusting the rx_copybreak threshold for a driver via ethtool, and add rx_copybreak support to the enic driver.  From Govindarajulu Varadarajan.

  4) Significant enhancements to the generic PHY layer and the bcm7xxx driver in particular (EEE support, auto power down, etc.), from Florian Fainelli.

  5) Allow raw buffers to be used for flow dissection, allowing drivers to determine the optimal "linear pull" size for devices that DMA into pools of pages.  The objective is to get exactly the necessary amount of headers into the linear SKB area pre-pulled, but no more.  The new interface drivers use is eth_get_headlen().  From WANG Cong, with driver conversions (several had their own by-hand duplicated implementations) by Alexander Duyck and Eric Dumazet.

  6) Support checksumming more smoothly and efficiently for encapsulations, and add the "foo over UDP" facility.  From Tom Herbert.

  7) Add the Broadcom SF2 switch driver to the DSA layer, from Florian Fainelli.

  8) eBPF programs can now be loaded via a system call, and an extensive testsuite has been added.  From Alexei Starovoitov and Daniel Borkmann.

  9) Major overhaul of the packet scheduler to use RCU in several major areas such as the classifiers and rate estimators.  From John Fastabend.

 10) Add a driver for the Intel FM10000 Ethernet Switch, from Alexander Duyck.

 11) Rearrange TCP_SKB_CB() to reduce cache line misses, from Eric Dumazet.

 12) Add Datacenter TCP congestion control algorithm support, from Florian Westphal.

 13) Reorganize sk_buff so that __copy_skb_header() is significantly faster.  From Eric Dumazet"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1558 commits)
  netlabel: directly return netlbl_unlabel_genl_init()
  net: add netdev_txq_bql_{enqueue, complete}_prefetchw() helpers
  net: description of dma_cookie cause make xmldocs warning
  cxgb4: clean up a type issue
  cxgb4: potential shift wrapping bug
  i40e: skb->xmit_more support
  net: fs_enet: Add NAPI TX
  net: fs_enet: Remove non NAPI RX
  r8169:add support for RTL8168EP
  net_sched: copy exts->type in tcf_exts_change()
  wimax: convert printk to pr_foo()
  af_unix: remove 0 assignment on static
  ipv6: Do not warn for informational ICMP messages, regardless of type.
  Update Intel Ethernet Driver maintainers list
  bridge: Save frag_max_size between PRE_ROUTING and POST_ROUTING
  tipc: fix bug in multicast congestion handling
  net: better IFF_XMIT_DST_RELEASE support
  net/mlx4_en: remove NETDEV_TX_BUSY
  3c59x: fix bad split of cpu_to_le32(pci_map_single())
  net: bcmgenet: fix Tx ring priority programming
  ...
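As a rough illustration of the driver-side xmit_more pattern described in item 1 above, here is a minimal sketch.  It is not taken from any in-tree driver: the foo_* names and struct foo_priv are invented placeholders, while skb->xmit_more, netdev_priv(), netdev_tx_t and NETDEV_TX_OK are real interfaces of this kernel generation.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical per-device state; a real driver keeps its TX ring here. */
struct foo_priv {
	void __iomem *doorbell;
};

/* Hypothetical helper: place the skb into the hardware TX ring. */
static void foo_post_descriptor(struct foo_priv *priv, struct sk_buff *skb)
{
}

/* Hypothetical helper: write the doorbell register so the NIC starts
 * fetching every descriptor queued so far.
 */
static void foo_kick_hardware(struct foo_priv *priv)
{
}

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	foo_post_descriptor(priv, skb);

	/* skb->xmit_more set means the stack promises to hand us another
	 * skb immediately, so the (expensive) doorbell write can be
	 * deferred and issued once per batch instead of once per packet.
	 */
	if (!skb->xmit_more)
		foo_kick_hardware(priv);

	return NETDEV_TX_OK;
}

The design point is that the per-packet descriptor write stays cheap while the doorbell, typically an uncached MMIO write, is amortized over the whole burst dequeued by the qdisc layer.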
Diffstat (limited to 'net/ipv4/tcp_westwood.c')
-rw-r--r--  net/ipv4/tcp_westwood.c  |  35
1 file changed, 18 insertions(+), 17 deletions(-)
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index b94a04ae2ed5..bb63fba47d47 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -42,7 +42,6 @@ struct westwood {
 	u8    reset_rtt_min;    /* Reset RTT min to next RTT sample*/
 };
 
-
 /* TCP Westwood functions and constants */
 #define TCP_WESTWOOD_RTT_MIN   (HZ/20)	/* 50ms */
 #define TCP_WESTWOOD_INIT_RTT  (20*HZ)	/* maybe too conservative?! */
@@ -153,7 +152,6 @@ static inline void update_rtt_min(struct westwood *w)
 	w->rtt_min = min(w->rtt, w->rtt_min);
 }
 
-
 /*
  * @westwood_fast_bw
  * It is called when we are in fast path. In particular it is called when
@@ -208,7 +206,6 @@ static inline u32 westwood_acked_count(struct sock *sk)
 	return w->cumul_ack;
 }
 
-
 /*
  * TCP Westwood
  * Here limit is evaluated as Bw estimation*RTTmin (for obtaining it
@@ -219,47 +216,51 @@ static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct westwood *w = inet_csk_ca(sk);
+
 	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
 }
 
+static void tcp_westwood_ack(struct sock *sk, u32 ack_flags)
+{
+	if (ack_flags & CA_ACK_SLOWPATH) {
+		struct westwood *w = inet_csk_ca(sk);
+
+		westwood_update_window(sk);
+		w->bk += westwood_acked_count(sk);
+
+		update_rtt_min(w);
+		return;
+	}
+
+	westwood_fast_bw(sk);
+}
+
 static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct westwood *w = inet_csk_ca(sk);
 
 	switch (event) {
-	case CA_EVENT_FAST_ACK:
-		westwood_fast_bw(sk);
-		break;
-
 	case CA_EVENT_COMPLETE_CWR:
 		tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
 		break;
-
 	case CA_EVENT_LOSS:
 		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
 		/* Update RTT_min when next ack arrives */
 		w->reset_rtt_min = 1;
 		break;
-
-	case CA_EVENT_SLOW_ACK:
-		westwood_update_window(sk);
-		w->bk += westwood_acked_count(sk);
-		update_rtt_min(w);
-		break;
-
 	default:
 		/* don't care */
 		break;
 	}
 }
 
-
 /* Extract info for Tcp socket info provided via netlink. */
 static void tcp_westwood_info(struct sock *sk, u32 ext,
 			      struct sk_buff *skb)
 {
 	const struct westwood *ca = inet_csk_ca(sk);
+
 	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
 		struct tcpvegas_info info = {
 			.tcpv_enabled = 1,
@@ -271,12 +272,12 @@ static void tcp_westwood_info(struct sock *sk, u32 ext,
 	}
 }
 
-
 static struct tcp_congestion_ops tcp_westwood __read_mostly = {
 	.init		= tcp_westwood_init,
 	.ssthresh	= tcp_reno_ssthresh,
 	.cong_avoid	= tcp_reno_cong_avoid,
 	.cwnd_event	= tcp_westwood_event,
+	.in_ack_event	= tcp_westwood_ack,
 	.get_info	= tcp_westwood_info,
 	.pkts_acked	= tcp_westwood_pkts_acked,
 
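Side note on the tcp_westwood_bw_rttmin() limit that the CA_EVENT_COMPLETE_CWR and CA_EVENT_LOSS branches above feed into snd_ssthresh: per the comment in the unchanged part of the file, the limit is the bandwidth estimate times the minimum RTT, converted to segments via mss_cache.  The standalone program below only mirrors that arithmetic with invented numbers (assuming HZ=1000, so bw_est is in bytes per jiffy and rtt_min in jiffies); it is not kernel code.

#include <stdio.h>

int main(void)
{
	unsigned int bw_est  = 1500; /* bytes per jiffy: ~12 Mbit/s at HZ=1000 */
	unsigned int rtt_min = 50;   /* jiffies: 50 ms at HZ=1000 */
	unsigned int mss     = 1500; /* bytes per segment */

	/* Bandwidth-delay product in bytes, converted to segments,
	 * with the same floor of two segments that the kernel applies.
	 */
	unsigned int limit = (bw_est * rtt_min) / mss;

	if (limit < 2)
		limit = 2;

	printf("limit = %u segments\n", limit); /* prints: limit = 50 segments */
	return 0;
}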