Diffstat (limited to 'net/ipv4')
 net/ipv4/arp.c        |  2 +-
 net/ipv4/cipso_ipv4.c |  4 ++--
 net/ipv4/igmp.c       |  4 ++--
 net/ipv4/ip_input.c   |  2 +-
 net/ipv4/ipconfig.c   |  6 +++---
 net/ipv4/raw.c        | 10 ++++------
 net/ipv4/route.c      | 18 +++++++++---------
 net/ipv4/tcp_input.c  | 45 ++++++++++++++++++++++++++++-----------------
 8 files changed, 50 insertions(+), 41 deletions(-)
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 68b72a7a1806..418862f1bf22 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -570,7 +570,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
 	 *	Allocate a buffer
 	 */
 
-	skb = alloc_skb(arp_hdr_len(dev) + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
+	skb = alloc_skb(arp_hdr_len(dev) + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
 	if (skb == NULL)
 		return NULL;
 
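
The same allocate/reserve split recurs in the igmp.c, ipconfig.c and raw.c hunks below: the buffer is sized with LL_ALLOCATED_SPACE() while the headroom is still reserved with LL_RESERVED_SPACE(). A minimal sketch of the pattern, assuming LL_ALLOCATED_SPACE(dev) covers the link-layer headroom plus any tailroom the device requests, whereas LL_RESERVED_SPACE(dev) is the headroom alone; example_alloc() and len are illustrative names, not part of the patch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_alloc(struct net_device *dev, unsigned int len)
{
	struct sk_buff *skb;

	/* Size for headroom + payload + tailroom... */
	skb = alloc_skb(len + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return NULL;
	/* ...but advance the data pointer past the headroom only, so the
	 * device's requested tailroom stays at the end of the buffer. */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_put(skb, len);
	return skb;
}
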
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 05afb576d935..2c0e4572cc90 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -338,7 +338,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
 		return -ENOENT;
 
 	hash = cipso_v4_map_cache_hash(key, key_len);
-	bkt = hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
+	bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1);
 	spin_lock_bh(&cipso_v4_cache[bkt].lock);
 	list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) {
 		if (entry->hash == hash &&
@@ -417,7 +417,7 @@ int cipso_v4_cache_add(const struct sk_buff *skb,
 	atomic_inc(&secattr->cache->refcount);
 	entry->lsm_data = secattr->cache;
 
-	bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
+	bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1);
 	spin_lock_bh(&cipso_v4_cache[bkt].lock);
 	if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) {
 		list_add(&entry->list, &cipso_v4_cache[bkt].list);
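
Why the mask change matters: CIPSO_V4_CACHE_BUCKETS is assumed to be the power-of-two bucket count, 1 << CIPSO_V4_CACHE_BUCKETBITS, so masking the hash with BUCKETS - 1 spreads entries over the whole table, while the old BUCKETBITS - 1 mask could only ever pick a few low buckets. A standalone sketch with illustrative values (the real constants are private to cipso_ipv4.c):

#include <stdio.h>

#define CIPSO_V4_CACHE_BUCKETBITS 7
#define CIPSO_V4_CACHE_BUCKETS    (1 << CIPSO_V4_CACHE_BUCKETBITS)

int main(void)
{
	unsigned int hash = 0xdeadbeef;

	/* Old mask: BUCKETBITS - 1 == 6 (binary 110), so only buckets
	 * 0, 2, 4 and 6 can ever be selected. */
	printf("old bucket: %u\n", hash & (CIPSO_V4_CACHE_BUCKETBITS - 1));

	/* New mask: BUCKETS - 1 == 127, so the hash reaches all 128 buckets. */
	printf("new bucket: %u\n", hash & (CIPSO_V4_CACHE_BUCKETS - 1));
	return 0;
}
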
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6250f4239b61..2769dc4a4c84 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -292,7 +292,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 	struct iphdr *pip;
 	struct igmpv3_report *pig;
 
-	skb = alloc_skb(size + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
+	skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
 	if (skb == NULL)
 		return NULL;
 
@@ -653,7 +653,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 		return -1;
 	}
 
-	skb=alloc_skb(IGMP_SIZE+LL_RESERVED_SPACE(dev), GFP_ATOMIC);
+	skb=alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
 	if (skb == NULL) {
 		ip_rt_put(rt);
 		return -1;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 7b4bad6d572f..ff77a4a7f9ec 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -397,7 +397,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 	iph = ip_hdr(skb);
 
 	/*
-	 *	RFC1122: 3.1.2.2 MUST silently discard any IP frame that fails the checksum.
+	 *	RFC1122: 3.2.1.2 MUST silently discard any IP frame that fails the checksum.
 	 *
 	 *	Is the datagram acceptable?
 	 *
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 89dee4346f60..ed45037ce9be 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -710,14 +710,14 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
 	struct net_device *dev = d->dev;
 	struct sk_buff *skb;
 	struct bootp_pkt *b;
-	int hh_len = LL_RESERVED_SPACE(dev);
 	struct iphdr *h;
 
 	/* Allocate packet */
-	skb = alloc_skb(sizeof(struct bootp_pkt) + hh_len + 15, GFP_KERNEL);
+	skb = alloc_skb(sizeof(struct bootp_pkt) + LL_ALLOCATED_SPACE(dev) + 15,
+			GFP_KERNEL);
 	if (!skb)
 		return;
-	skb_reserve(skb, hh_len);
+	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 	b = (struct bootp_pkt *) skb_put(skb, sizeof(struct bootp_pkt));
 	memset(b, 0, sizeof(struct bootp_pkt));
 
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 11d7f753a820..fead049daf43 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -322,7 +322,6 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
 			  unsigned int flags)
 {
 	struct inet_sock *inet = inet_sk(sk);
-	int hh_len;
 	struct iphdr *iph;
 	struct sk_buff *skb;
 	unsigned int iphlen;
@@ -336,13 +335,12 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
 	if (flags&MSG_PROBE)
 		goto out;
 
-	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
-
-	skb = sock_alloc_send_skb(sk, length+hh_len+15,
-				  flags&MSG_DONTWAIT, &err);
+	skb = sock_alloc_send_skb(sk,
+				  length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15,
+				  flags & MSG_DONTWAIT, &err);
 	if (skb == NULL)
 		goto error;
-	skb_reserve(skb, hh_len);
+	skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev));
 
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 5e3685c5c407..92f90ae46f4a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1468,14 +1468,14 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
 
 					/* BSD 4.2 compatibility hack :-( */
 					if (mtu == 0 &&
-					    old_mtu >= rth->u.dst.metrics[RTAX_MTU-1] &&
+					    old_mtu >= dst_metric(&rth->u.dst, RTAX_MTU) &&
 					    old_mtu >= 68 + (iph->ihl << 2))
 						old_mtu -= iph->ihl << 2;
 
 					mtu = guess_mtu(old_mtu);
 				}
-				if (mtu <= rth->u.dst.metrics[RTAX_MTU-1]) {
-					if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) {
+				if (mtu <= dst_metric(&rth->u.dst, RTAX_MTU)) {
+					if (mtu < dst_metric(&rth->u.dst, RTAX_MTU)) {
 						dst_confirm(&rth->u.dst);
 						if (mtu < ip_rt_min_pmtu) {
 							mtu = ip_rt_min_pmtu;
@@ -1497,7 +1497,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
 
 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
-	if (dst->metrics[RTAX_MTU-1] > mtu && mtu >= 68 &&
+	if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= 68 &&
 	    !(dst_metric_locked(dst, RTAX_MTU))) {
 		if (mtu < ip_rt_min_pmtu) {
 			mtu = ip_rt_min_pmtu;
@@ -1613,7 +1613,7 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
 		       sizeof(rt->u.dst.metrics));
 		if (fi->fib_mtu == 0) {
 			rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
-			if (rt->u.dst.metrics[RTAX_LOCK-1] & (1 << RTAX_MTU) &&
+			if (dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
 			    rt->rt_gateway != rt->rt_dst &&
 			    rt->u.dst.dev->mtu > 576)
 				rt->u.dst.metrics[RTAX_MTU-1] = 576;
@@ -1624,14 +1624,14 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
 	} else
 		rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;
 
-	if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
+	if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0)
 		rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
-	if (rt->u.dst.metrics[RTAX_MTU-1] > IP_MAX_MTU)
+	if (dst_metric(&rt->u.dst, RTAX_MTU) > IP_MAX_MTU)
 		rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
-	if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0)
+	if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0)
 		rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
 				       ip_rt_min_advmss);
-	if (rt->u.dst.metrics[RTAX_ADVMSS-1] > 65535 - 40)
+	if (dst_metric(&rt->u.dst, RTAX_ADVMSS) > 65535 - 40)
 		rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
 
 #ifdef CONFIG_NET_CLS_ROUTE
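
The route.c hunks above (and the tcp_input.c hunks below) convert only the read side: loads from the metrics array go through dst_metric(), while stores keep the explicit RTAX_*-1 indexing. A sketch of how the accessor is assumed to be shaped in include/net/dst.h of this kernel, with a condensed read/write example; example_dst_metric(), example_clamp_mtu() and EXAMPLE_MAX_MTU are illustrative names only:

#include <net/dst.h>
#include <net/route.h>

/* Assumed shape of the accessor in net/dst.h. */
static inline u32 example_dst_metric(const struct dst_entry *dst, int metric)
{
	return dst->metrics[metric - 1];
}

#define EXAMPLE_MAX_MTU 0xFFF0

static void example_clamp_mtu(struct rtable *rt)
{
	/* Read through the helper, write the raw slot, as in the hunks above. */
	if (dst_metric(&rt->u.dst, RTAX_MTU) > EXAMPLE_MAX_MTU)
		rt->u.dst.metrics[RTAX_MTU - 1] = EXAMPLE_MAX_MTU;
}
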
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eda4f4a233f3..b54d9d37b636 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -66,6 +66,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sysctl.h>
+#include <net/dst.h>
 #include <net/tcp.h>
 #include <net/inet_common.h>
 #include <linux/ipsec.h>
@@ -113,8 +114,6 @@ int sysctl_tcp_abc __read_mostly;
 #define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)
 #define FLAG_ANY_PROGRESS	(FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED)
 
-#define IsSackFrto() (sysctl_tcp_frto == 0x2)
-
 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
 #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
 
@@ -605,7 +604,7 @@ static u32 tcp_rto_min(struct sock *sk)
 	u32 rto_min = TCP_RTO_MIN;
 
 	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
-		rto_min = dst->metrics[RTAX_RTO_MIN - 1];
+		rto_min = dst_metric(dst, RTAX_RTO_MIN);
 	return rto_min;
 }
 
@@ -769,7 +768,7 @@ void tcp_update_metrics(struct sock *sk)
 			dst->metrics[RTAX_RTTVAR - 1] = m;
 		else
 			dst->metrics[RTAX_RTTVAR-1] -=
-				(dst->metrics[RTAX_RTTVAR-1] - m)>>2;
+				(dst_metric(dst, RTAX_RTTVAR) - m)>>2;
 	}
 
 	if (tp->snd_ssthresh >= 0xFFFF) {
@@ -788,21 +787,21 @@ void tcp_update_metrics(struct sock *sk)
 			dst->metrics[RTAX_SSTHRESH-1] =
 				max(tp->snd_cwnd >> 1, tp->snd_ssthresh);
 			if (!dst_metric_locked(dst, RTAX_CWND))
-				dst->metrics[RTAX_CWND-1] = (dst->metrics[RTAX_CWND-1] + tp->snd_cwnd) >> 1;
+				dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_cwnd) >> 1;
 		} else {
 			/* Else slow start did not finish, cwnd is non-sense,
 			   ssthresh may be also invalid.
 			 */
 			if (!dst_metric_locked(dst, RTAX_CWND))
-				dst->metrics[RTAX_CWND-1] = (dst->metrics[RTAX_CWND-1] + tp->snd_ssthresh) >> 1;
-			if (dst->metrics[RTAX_SSTHRESH-1] &&
+				dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_ssthresh) >> 1;
+			if (dst_metric(dst, RTAX_SSTHRESH) &&
 			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
-			    tp->snd_ssthresh > dst->metrics[RTAX_SSTHRESH-1])
+			    tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
 				dst->metrics[RTAX_SSTHRESH-1] = tp->snd_ssthresh;
 		}
 
 		if (!dst_metric_locked(dst, RTAX_REORDERING)) {
-			if (dst->metrics[RTAX_REORDERING-1] < tp->reordering &&
+			if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
 			    tp->reordering != sysctl_tcp_reordering)
 				dst->metrics[RTAX_REORDERING-1] = tp->reordering;
 		}
@@ -1685,6 +1684,11 @@ static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
 	tp->sacked_out = 0;
 }
 
+static int tcp_is_sackfrto(const struct tcp_sock *tp)
+{
+	return (sysctl_tcp_frto == 0x2) && !tcp_is_reno(tp);
+}
+
 /* F-RTO can only be used if TCP has never retransmitted anything other than
  * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here)
  */
@@ -1701,7 +1705,7 @@ int tcp_use_frto(struct sock *sk)
 	if (icsk->icsk_mtup.probe_size)
 		return 0;
 
-	if (IsSackFrto())
+	if (tcp_is_sackfrto(tp))
 		return 1;
 
 	/* Avoid expensive walking of rexmit queue if possible */
@@ -1791,7 +1795,7 @@ void tcp_enter_frto(struct sock *sk)
 	/* Earlier loss recovery underway (see RFC4138; Appendix B).
 	 * The last condition is necessary at least in tp->frto_counter case.
 	 */
-	if (IsSackFrto() && (tp->frto_counter ||
+	if (tcp_is_sackfrto(tp) && (tp->frto_counter ||
 	    ((1 << icsk->icsk_ca_state) & (TCPF_CA_Recovery|TCPF_CA_Loss))) &&
 	    after(tp->high_seq, tp->snd_una)) {
 		tp->frto_highmark = tp->high_seq;
@@ -1838,9 +1842,16 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
 		}
 
-		/* Don't lost mark skbs that were fwd transmitted after RTO */
-		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) &&
-		    !after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) {
+		/* Marking forward transmissions that were made after RTO lost
+		 * can cause unnecessary retransmissions in some scenarios,
+		 * SACK blocks will mitigate that in some but not in all cases.
+		 * We used to not mark them but it was causing break-ups with
+		 * receivers that do only in-order receival.
+		 *
+		 * TODO: we could detect presence of such receiver and select
+		 * different behavior per flow.
+		 */
+		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
 			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 			tp->lost_out += tcp_skb_pcount(skb);
 		}
@@ -1856,7 +1867,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 	tp->reordering = min_t(unsigned int, tp->reordering,
 			       sysctl_tcp_reordering);
 	tcp_set_ca_state(sk, TCP_CA_Loss);
-	tp->high_seq = tp->frto_highmark;
+	tp->high_seq = tp->snd_nxt;
 	TCP_ECN_queue_cwr(tp);
 
 	tcp_clear_retrans_hints_partial(tp);
@@ -2478,7 +2489,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
 
 	tcp_verify_left_out(tp);
 
-	if (tp->retrans_out == 0)
+	if (!tp->frto_counter && tp->retrans_out == 0)
 		tp->retrans_stamp = 0;
 
 	if (flag & FLAG_ECE)
@@ -3123,7 +3134,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
 		return 1;
 	}
 
-	if (!IsSackFrto() || tcp_is_reno(tp)) {
+	if (!tcp_is_sackfrto(tp)) {
 		/* RFC4138 shortcoming in step 2; should also have case c):
 		 * ACK isn't duplicate nor advances window, e.g., opposite dir
 		 * data, winupdate