From 30cfd0baf0a0c4329fff1ef4b622919297969ec8 Mon Sep 17 00:00:00 2001
From: Stephen Hemminger
Date: Wed, 25 Jul 2007 23:49:34 -0700
Subject: [TCP]: congestion control API pass RTT in microseconds

This patch changes the API for the callback that is done after an ACK is
received. It solves a couple of issues:

  * Some congestion controls want a higher-resolution value of RTT
    (controlled by the TCP_CONG_RTT_STAMP flag). These don't really want
    a ktime, but all compute an RTT in microseconds.

  * Other congestion controls could use RTT at jiffies resolution.

To keep the API consistent, the units should be the same for both cases;
just the resolution should change.

Signed-off-by: Stephen Hemminger
Signed-off-by: David S. Miller
---
 net/ipv4/tcp_bic.c      |  2 +-
 net/ipv4/tcp_cubic.c    |  2 +-
 net/ipv4/tcp_htcp.c     |  2 +-
 net/ipv4/tcp_illinois.c |  8 +++-----
 net/ipv4/tcp_input.c    | 21 ++++++++++++++++-----
 net/ipv4/tcp_lp.c       |  6 +++---
 net/ipv4/tcp_vegas.c    |  6 +++---
 net/ipv4/tcp_vegas.h    |  2 +-
 net/ipv4/tcp_veno.c     |  6 +++---
 net/ipv4/tcp_westwood.c |  7 ++++---
 net/ipv4/tcp_yeah.c     |  4 ++--
 11 files changed, 38 insertions(+), 28 deletions(-)
(limited to 'net/ipv4')

diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 519de091a9..4586211e37 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -206,7 +206,7 @@ static void bictcp_state(struct sock *sk, u8 new_state)
 /* Track delayed acknowledgment ratio using sliding window
  * ratio = (15*ratio + sample) / 16
  */
-static void bictcp_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);

diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index d17da30d82..0c44bb67a6 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -334,7 +334,7 @@ static void bictcp_state(struct sock *sk, u8 new_state)
 /* Track delayed acknowledgment ratio using sliding window
  * ratio = (15*ratio + sample) / 16
  */
-static void bictcp_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);

diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 08a02e6045..fa61663ace 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -98,7 +98,7 @@ static inline void measure_rtt(struct sock *sk)
 	}
 }
 
-static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, ktime_t last)
+static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, s32 rtt)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct tcp_sock *tp = tcp_sk(sk);

diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index cc5de6f69d..64f1cbaf96 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -83,18 +83,16 @@ static void tcp_illinois_init(struct sock *sk)
 }
 
 /* Measure RTT for each ack. */
-static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
+static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, s32 rtt)
 {
 	struct illinois *ca = inet_csk_ca(sk);
-	u32 rtt;
 
 	ca->acked = pkts_acked;
 
-	if (ktime_equal(last, net_invalid_timestamp()))
+	/* dup ack, no rtt sample */
+	if (rtt < 0)
 		return;
 
-	rtt = ktime_to_us(net_timedelta(last));
-
 	/* ignore bogus values, this prevents wraparound in alpha math */
 	if (rtt > RTT_MAX)
 		rtt = RTT_MAX;

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index fec8a7a4db..4b255fe999 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2490,12 +2490,23 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 		tcp_ack_update_rtt(sk, acked, seq_rtt);
 		tcp_ack_packets_out(sk);
 
-		/* Is the ACK triggering packet unambiguous? */
-		if (acked & FLAG_RETRANS_DATA_ACKED)
-			last_ackt = net_invalid_timestamp();
+		if (ca_ops->pkts_acked) {
+			s32 rtt_us = -1;
+
+			/* Is the ACK triggering packet unambiguous? */
+			if (!(acked & FLAG_RETRANS_DATA_ACKED)) {
+				/* High resolution needed and available? */
+				if (ca_ops->flags & TCP_CONG_RTT_STAMP &&
+				    !ktime_equal(last_ackt,
+						 net_invalid_timestamp()))
+					rtt_us = ktime_us_delta(ktime_get_real(),
+								last_ackt);
+				else if (seq_rtt > 0)
+					rtt_us = jiffies_to_usecs(seq_rtt);
+			}
 
-		if (ca_ops->pkts_acked)
-			ca_ops->pkts_acked(sk, pkts_acked, last_ackt);
+			ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
+		}
 	}
 
 #if FASTRETRANS_DEBUG > 0

diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 80e140e3ec..e7f5ef92cb 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -260,13 +260,13 @@ static void tcp_lp_rtt_sample(struct sock *sk, u32 rtt)
  * newReno in increase case.
  * We work it out by following the idea from TCP-LP's paper directly
  */
-static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, ktime_t last)
+static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct lp *lp = inet_csk_ca(sk);
 
-	if (!ktime_equal(last, net_invalid_timestamp()))
-		tcp_lp_rtt_sample(sk, ktime_to_us(net_timedelta(last)));
+	if (rtt_us > 0)
+		tcp_lp_rtt_sample(sk, rtt_us);
 
 	/* calc inference */
 	if (tcp_time_stamp > tp->rx_opt.rcv_tsecr)

diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 914e0307f7..b49dedcda5 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -112,16 +112,16 @@ EXPORT_SYMBOL_GPL(tcp_vegas_init);
  *   o min-filter RTT samples from a much longer window (forever for now)
  *     to find the propagation delay (baseRTT)
  */
-void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
+void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us)
 {
 	struct vegas *vegas = inet_csk_ca(sk);
 	u32 vrtt;
 
-	if (ktime_equal(last, net_invalid_timestamp()))
+	if (rtt_us < 0)
 		return;
 
 	/* Never allow zero rtt or baseRTT */
-	vrtt = ktime_to_us(net_timedelta(last)) + 1;
+	vrtt = rtt_us + 1;
 
 	/* Filter to find propagation delay: */
 	if (vrtt < vegas->baseRTT)

diff --git a/net/ipv4/tcp_vegas.h b/net/ipv4/tcp_vegas.h
index 502fa81836..6c0eea2f82 100644
--- a/net/ipv4/tcp_vegas.h
+++ b/net/ipv4/tcp_vegas.h
@@ -17,7 +17,7 @@ struct vegas {
 
 extern void tcp_vegas_init(struct sock *sk);
 extern void tcp_vegas_state(struct sock *sk, u8 ca_state);
-extern void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, ktime_t last);
+extern void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us);
 extern void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
 extern void tcp_vegas_get_info(struct sock *sk, u32 ext,
 			       struct sk_buff *skb);

diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 7a55ddf860..8fb2aee0b1 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -69,16 +69,16 @@ static void tcp_veno_init(struct sock *sk)
 }
 
 /* Do rtt sampling needed for Veno. */
-static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us)
 {
 	struct veno *veno = inet_csk_ca(sk);
 	u32 vrtt;
 
-	if (ktime_equal(last, net_invalid_timestamp()))
+	if (rtt_us < 0)
 		return;
 
 	/* Never allow zero rtt or baseRTT */
-	vrtt = ktime_to_us(net_timedelta(last)) + 1;
+	vrtt = rtt_us + 1;
 
 	/* Filter to find propagation delay: */
 	if (vrtt < veno->basertt)

diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index e61e09dd51..20151d6a62 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -100,11 +100,12 @@ static void westwood_filter(struct westwood *w, u32 delta)
  * Called after processing group of packets.
  * but all westwood needs is the last sample of srtt.
  */
-static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt)
 {
 	struct westwood *w = inet_csk_ca(sk);
-	if (cnt > 0)
-		w->rtt = tcp_sk(sk)->srtt >> 3;
+
+	if (rtt > 0)
+		w->rtt = usecs_to_jiffies(rtt);
 }
 
 /*

diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index c04b7c6ec7..c107fba743 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -58,7 +58,7 @@ static void tcp_yeah_init(struct sock *sk)
 }
 
 
-static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
+static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct yeah *yeah = inet_csk_ca(sk);
@@ -66,7 +66,7 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
 	if (icsk->icsk_ca_state == TCP_CA_Open)
 		yeah->pkts_acked = pkts_acked;
 
-	tcp_vegas_pkts_acked(sk, pkts_acked, last);
+	tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
 }
 
 static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,
--
cgit v1.2.2
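
Example (editor's sketch, not part of the series): a minimal pkts_acked
hook written against the new API. As the tcp_input.c hunk above shows,
rtt_us is an RTT sample in microseconds, or negative when no sample is
available (duplicate ACK, or the ACK covered retransmitted data); setting
TCP_CONG_RTT_STAMP in .flags requests the high-resolution ktime-based
sample. All "demo" names below are hypothetical; the required ssthresh and
cong_avoid handlers reuse the stock Reno ones.

    #include <net/tcp.h>

    struct demo_ca {
            u32 min_rtt_us;     /* smallest RTT sample seen, 0 = none yet */
    };

    static void demo_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
    {
            struct demo_ca *ca = inet_csk_ca(sk);

            /* Negative means "no valid sample" under the new contract */
            if (rtt_us < 0)
                    return;

            if (ca->min_rtt_us == 0 || (u32)rtt_us < ca->min_rtt_us)
                    ca->min_rtt_us = rtt_us;
    }

    static struct tcp_congestion_ops demo_ops = {
            .flags          = TCP_CONG_RTT_STAMP, /* want microsecond samples */
            .ssthresh       = tcp_reno_ssthresh,
            .cong_avoid     = tcp_reno_cong_avoid,
            .pkts_acked     = demo_pkts_acked,
            .owner          = THIS_MODULE,
            .name           = "demo",
    };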
From e7d0c88586a66cf03e70750a8119d984fdedf2aa Mon Sep 17 00:00:00 2001
From: Stephen Hemminger
Date: Wed, 25 Jul 2007 23:50:06 -0700
Subject: [TCP]: cubic - eliminate use of receive time stamp

Remove use of the received timestamp option value from the RTT calculation
in Cubic. A hostile receiver may be returning a larger timestamp option
than the original value. This would cause the sender to believe the
malevolent receiver had a larger RTT, and because Cubic tries to provide
some RTT friendliness, the sender would then favor the liar.

Instead, use the jiffies-resolution RTT value already computed and passed
back after the ack.

Signed-off-by: Stephen Hemminger
Signed-off-by: David S. Miller
---
 net/ipv4/tcp_cubic.c | 46 ++++++++++++++++++----------------------------
 1 file changed, 18 insertions(+), 28 deletions(-)
(limited to 'net/ipv4')

diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 0c44bb67a6..485d7ea35f 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -246,38 +246,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 		ca->cnt = 1;
 }
 
-
-/* Keep track of minimum rtt */
-static inline void measure_delay(struct sock *sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	struct bictcp *ca = inet_csk_ca(sk);
-	u32 delay;
-
-	/* No time stamp */
-	if (!(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) ||
-	    /* Discard delay samples right after fast recovery */
-	    (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
-		return;
-
-	delay = (tcp_time_stamp - tp->rx_opt.rcv_tsecr)<<3;
-	if (delay == 0)
-		delay = 1;
-
-	/* first time call or link delay decreases */
-	if (ca->delay_min == 0 || ca->delay_min > delay)
-		ca->delay_min = delay;
-}
-
 static void bictcp_cong_avoid(struct sock *sk, u32 ack,
 			      u32 in_flight, int data_acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
 
-	if (data_acked)
-		measure_delay(sk);
-
 	if (!tcp_is_cwnd_limited(sk, in_flight))
 		return;
 
@@ -337,14 +311,30 @@ static void bictcp_state(struct sock *sk, u8 new_state)
 static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
+	struct bictcp *ca = inet_csk_ca(sk);
+	u32 delay;
 
 	if (cnt > 0 && icsk->icsk_ca_state == TCP_CA_Open) {
-		struct bictcp *ca = inet_csk_ca(sk);
 		cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT;
 		ca->delayed_ack += cnt;
 	}
-}
 
+	/* Some calls are for duplicates without timestamps */
+	if (rtt_us < 0)
+		return;
+
+	/* Discard delay samples right after fast recovery */
+	if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+		return;
+
+	delay = usecs_to_jiffies(rtt_us) << 3;
+	if (delay == 0)
+		delay = 1;
+
+	/* first time call or link delay decreases */
+	if (ca->delay_min == 0 || ca->delay_min > delay)
+		ca->delay_min = delay;
+}
 
 static struct tcp_congestion_ops cubictcp = {
 	.init		= bictcp_init,
--
cgit v1.2.2
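
Note that the new bictcp_acked() keeps ca->delay_min in the same 8x
fixed-point units as tp->srtt (jiffies << 3), which is why the microsecond
sample is converted with usecs_to_jiffies() and then shifted. A small
userspace illustration of that arithmetic (assuming HZ=1000; the rounding
helper is a stand-in for the kernel's usecs_to_jiffies()):

    #include <stdio.h>

    #define HZ 1000 /* assumed tick rate, for illustration only */

    static unsigned int usecs_to_jiffies_demo(unsigned int us)
    {
            /* round up, like the kernel helper */
            return (us + (1000000 / HZ) - 1) / (1000000 / HZ);
    }

    int main(void)
    {
            unsigned int rtt_us = 24000; /* 24 ms sample from pkts_acked */
            unsigned int delay = usecs_to_jiffies_demo(rtt_us) << 3;

            printf("rtt_us=%u -> delay=%u (%u jiffies in 1/8-jiffy units)\n",
                   rtt_us, delay, delay >> 3);
            return 0;
    }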
From 113bbbd8d2da61b50417a1dd06d8e7c19047e54b Mon Sep 17 00:00:00 2001
From: Stephen Hemminger
Date: Wed, 25 Jul 2007 23:50:28 -0700
Subject: [TCP]: htcp - use measured rtt

Change HTCP to use the measured RTT rather than the smoothed RTT. Srtt is
computed using the TCP receive timestamp options, so it is vulnerable to
hostile receivers. To avoid any problems this might cause, use the
measured RTT instead.

Signed-off-by: Stephen Hemminger
Signed-off-by: David S. Miller
---
 net/ipv4/tcp_htcp.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
(limited to 'net/ipv4')

diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index fa61663ace..b66556c0a5 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -76,12 +76,11 @@ static u32 htcp_cwnd_undo(struct sock *sk)
 	return max(tp->snd_cwnd, (tp->snd_ssthresh << 7) / ca->beta);
 }
 
-static inline void measure_rtt(struct sock *sk)
+static inline void measure_rtt(struct sock *sk, u32 srtt)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct htcp *ca = inet_csk_ca(sk);
-	u32 srtt = tp->srtt >> 3;
 
 	/* keep track of minimum RTT seen so far, minRTT is zero at first */
 	if (ca->minRTT > srtt || !ca->minRTT)
@@ -108,6 +107,9 @@ static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, s32 rtt)
 	if (icsk->icsk_ca_state == TCP_CA_Open)
 		ca->pkts_acked = pkts_acked;
 
+	if (rtt > 0)
+		measure_rtt(sk, usecs_to_jiffies(rtt));
+
 	if (!use_bandwidth_switch)
 		return;
 
@@ -237,8 +239,6 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack,
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
 		tcp_slow_start(tp);
 	else {
-		measure_rtt(sk);
-
 		/* In dangerous area, increase slowly.
 		 * In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd
 		 */
--
cgit v1.2.2
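
For contrast, the smoothed estimate this patch moves away from is an EWMA
in the RFC 2988 style (gain 1/8). A toy run shows how srtt lags raw
samples, one more reason to min-filter on the measured RTT directly
(values are arbitrary milliseconds, userspace illustration only):

    #include <stdio.h>

    int main(void)
    {
            unsigned int srtt = 0;
            unsigned int samples[] = { 100, 100, 400, 100, 100 };
            unsigned int i;

            for (i = 0; i < 5; i++) {
                    unsigned int m = samples[i];

                    /* srtt += (m - srtt) / 8, seeded with the first sample */
                    srtt = srtt ? srtt - (srtt >> 3) + (m >> 3) : m;
                    printf("sample=%3u  srtt=%3u\n", m, srtt);
            }
            return 0;
    }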
From ccc7911fbd1c5af9b60453d3a8cca0a36402fee5 Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Mon, 30 Jul 2007 16:20:12 -0700
Subject: [IPVS]: Use skb_forward_csum

As a path that forwards packets, IPVS should be using skb_forward_csum
instead of directly setting ip_summed to CHECKSUM_NONE.

Signed-off-by: Herbert Xu
Signed-off-by: David S. Miller
---
 net/ipv4/ipvs/ip_vs_xmit.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'net/ipv4')

diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c
index 900ce29db3..666e080a74 100644
--- a/net/ipv4/ipvs/ip_vs_xmit.c
+++ b/net/ipv4/ipvs/ip_vs_xmit.c
@@ -128,7 +128,7 @@ ip_vs_dst_reset(struct ip_vs_dest *dest)
 #define IP_VS_XMIT(skb, rt)				\
 do {							\
 	(skb)->ipvs_property = 1;			\
-	(skb)->ip_summed = CHECKSUM_NONE;		\
+	skb_forward_csum(skb);				\
 	NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, (skb), NULL,	\
 		(rt)->u.dst.dev, dst_output);		\
 } while (0)
--
cgit v1.2.2
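
For reference, skb_forward_csum() in kernels of this vintage was a small
helper along these lines (quoted from memory, abridged, so treat it as a
sketch). The practical difference from the replaced line is that it only
downgrades CHECKSUM_COMPLETE, rather than unconditionally discarding
whatever checksum state the skb carried:

    /* include/linux/skbuff.h (contemporary kernels, sketch) */
    static inline void skb_forward_csum(struct sk_buff *skb)
    {
            /* Unfortunately we don't support this one.  Any brave souls? */
            if (skb->ip_summed == CHECKSUM_COMPLETE)
                    skb->ip_summed = CHECKSUM_NONE;
    }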
From b217d616a15fcfb3caf2a72c1a071c6d3f182f8d Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Mon, 30 Jul 2007 17:04:52 -0700
Subject: [IPV4/IPV6]: Fail registration if inet device construction fails

Now that netdev notifications can fail, we can use this to signal errors
during registration for IPv4/IPv6. In particular, if we fail to allocate
memory for the inet device, we can fail the netdev registration.

Signed-off-by: Herbert Xu
Signed-off-by: David S. Miller
---
 net/ipv4/devinet.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
(limited to 'net/ipv4')

diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index abf6352f99..5b77bdaa57 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1056,10 +1056,9 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
 	if (!in_dev) {
 		if (event == NETDEV_REGISTER) {
 			in_dev = inetdev_init(dev);
+			if (!in_dev)
+				return notifier_from_errno(-ENOMEM);
 			if (dev == &loopback_dev) {
-				if (!in_dev)
-					panic("devinet: "
-					      "Failed to create loopback\n");
 				IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
 				IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
 			}
--
cgit v1.2.2
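
The enabler here is notifier_from_errno(), which packs an errno into a
notifier return value so the notifier chain stops and the caller (here,
the netdev registration path) can recover the error. A hedged sketch of
the general pattern, with hypothetical names:

    #include <linux/netdevice.h>
    #include <linux/notifier.h>
    #include <linux/slab.h>

    static int demo_netdev_event(struct notifier_block *this,
                                 unsigned long event, void *ptr)
    {
            if (event == NETDEV_REGISTER) {
                    void *priv = kzalloc(64, GFP_KERNEL);

                    if (!priv)      /* veto the registration */
                            return notifier_from_errno(-ENOMEM);
                    kfree(priv);    /* demo only; real code would keep it */
            }
            return NOTIFY_DONE;
    }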
From 1e757f9996114f713a79d3fbcd08739efcfc5c34 Mon Sep 17 00:00:00 2001
From: Ilpo Järvinen
Date: Mon, 30 Jul 2007 19:48:37 -0700
Subject: [TCP]: Fix ratehalving with bidirectional flows

Actually, the ratehalving seems to work too well: cwnd is reduced on every
second ACK even though the number of packets in flight remains unchanged.
Recoveries in bidirectional flows suffer quite badly because of this;
both NewReno and SACK are affected.

After this patch, rate halving is performed for an ACK only if the number
of packets in flight was supposedly changed too.

Signed-off-by: Ilpo Järvinen
Signed-off-by: David S. Miller
---
 net/ipv4/tcp_input.c | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)
(limited to 'net/ipv4')

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4b255fe999..41163ddc31 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1851,19 +1851,22 @@ static inline u32 tcp_cwnd_min(const struct sock *sk)
 }
 
 /* Decrease cwnd each second ack. */
-static void tcp_cwnd_down(struct sock *sk)
+static void tcp_cwnd_down(struct sock *sk, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int decr = tp->snd_cwnd_cnt + 1;
 
-	tp->snd_cwnd_cnt = decr&1;
-	decr >>= 1;
+	if ((flag&FLAG_FORWARD_PROGRESS) ||
+	    (IsReno(tp) && !(flag&FLAG_NOT_DUP))) {
+		tp->snd_cwnd_cnt = decr&1;
+		decr >>= 1;
 
-	if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
-		tp->snd_cwnd -= decr;
+		if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
+			tp->snd_cwnd -= decr;
 
-	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
-	tp->snd_cwnd_stamp = tcp_time_stamp;
+		tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
+		tp->snd_cwnd_stamp = tcp_time_stamp;
+	}
 }
 
 /* Nothing was retransmitted or returned timestamp is less
@@ -2060,7 +2063,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
 		}
 		tcp_moderate_cwnd(tp);
 	} else {
-		tcp_cwnd_down(sk);
+		tcp_cwnd_down(sk, flag);
 	}
 }
 
@@ -2260,7 +2263,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 
 	if (is_dupack || tcp_head_timedout(sk))
 		tcp_update_scoreboard(sk);
-	tcp_cwnd_down(sk);
+	tcp_cwnd_down(sk, flag);
 	tcp_xmit_retransmit_queue(sk);
 }
--
cgit v1.2.2
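
The mechanics being gated: rate halving decrements cwnd by one on every
second qualifying ACK, so across roughly one round trip of ACKs cwnd falls
to about half. A toy userspace simulation of the decrement logic that the
patch wraps in the forward-progress check (floor of 2 assumed in place of
tcp_cwnd_min()):

    #include <stdio.h>

    int main(void)
    {
            unsigned int cwnd = 20, cwnd_cnt = 0;
            int ack;

            for (ack = 1; ack <= 20; ack++) {
                    int decr = cwnd_cnt + 1;

                    cwnd_cnt = decr & 1;    /* carry the odd half-step */
                    decr >>= 1;
                    if (decr && cwnd > 2)
                            cwnd -= decr;
                    printf("ack %2d: cwnd=%u\n", ack, cwnd);
            }
            return 0;
    }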
From b8ed601cefe7a4014b93560bd846caf44f25b1c1 Mon Sep 17 00:00:00 2001
From: Ilpo Järvinen
Date: Mon, 30 Jul 2007 19:51:12 -0700
Subject: [TCP]: Bidir flow must not disregard SACK blocks for lost marking

It's possible that new SACK blocks that should trigger new LOST markings
arrive with new data (which previously made is_dupack false). In addition,
I think this fixes a case where we get a cumulative ACK with enough SACK
blocks to trigger fast recovery (is_dupack would be false there too).

I'm not completely pleased with this solution because readability of the
code is somewhat questionable, as 'is_dupack' in the SACK case is no
longer about dupacks only but would mean something like
'lost_marker_work_todo' too... But because of the Eifel stuff done in
CA_Recovery, the FLAG_DATA_SACKED check cannot be placed in the if
statement, which would otherwise seem the attractive solution.
Nevertheless, I didn't like adding another variable just for that
either... :-)

Signed-off-by: Ilpo Järvinen
Signed-off-by: David S. Miller
---
 net/ipv4/tcp_input.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
(limited to 'net/ipv4')

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 41163ddc31..378ca8a086 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2112,7 +2112,10 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	int is_dupack = (tp->snd_una == prior_snd_una && !(flag&FLAG_NOT_DUP));
+	int is_dupack = (tp->snd_una == prior_snd_una &&
+			 (!(flag&FLAG_NOT_DUP) ||
+			  ((flag&FLAG_DATA_SACKED) &&
+			   (tp->fackets_out > tp->reordering))));
 
 	/* Some technical things:
 	 * 1. Reno does not count dupacks (sacked_out) automatically. */
--
cgit v1.2.2

From 376407039c26caacc3e433437d25516ba8f3adc9 Mon Sep 17 00:00:00 2001
From: Mariusz Kozlowski
Date: Tue, 31 Jul 2007 14:06:45 -0700
Subject: [IPV4] ip_options.c: kmalloc + memset conversion to kzalloc

Signed-off-by: Mariusz Kozlowski
Signed-off-by: David S. Miller
---
 net/ipv4/ip_options.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)
(limited to 'net/ipv4')

diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 251346828c..2f14745a9e 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -513,11 +513,8 @@ void ip_options_undo(struct ip_options * opt)
 
 static struct ip_options *ip_options_get_alloc(const int optlen)
 {
-	struct ip_options *opt = kmalloc(sizeof(*opt) + ((optlen + 3) & ~3),
-					 GFP_KERNEL);
-	if (opt)
-		memset(opt, 0, sizeof(*opt));
-	return opt;
+	return kzalloc(sizeof(struct ip_options) + ((optlen + 3) & ~3),
+		       GFP_KERNEL);
 }
 
 static int ip_options_get_finish(struct ip_options **optp,
--
cgit v1.2.2
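
The conversion pattern in general form, with a hypothetical struct. Note
one behavioural nuance visible in this particular hunk: the old code
zeroed only sizeof(*opt), while kzalloc() zeroes the entire allocation,
including the option bytes that follow the struct.

    struct foo *p;

    /* before */
    p = kmalloc(sizeof(*p) + extra, GFP_KERNEL);
    if (p)
            memset(p, 0, sizeof(*p));

    /* after: one call, every byte zeroed */
    p = kzalloc(sizeof(*p) + extra, GFP_KERNEL);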