Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/esp4.c	2
-rw-r--r--	net/ipv4/route.c	12
-rw-r--r--	net/ipv4/tcp_ipv4.c	4
-rw-r--r--	net/ipv4/tcp_minisocks.c	140
-rw-r--r--	net/ipv4/xfrm4_mode_beet.c	6
5 files changed, 81 insertions, 83 deletions
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 4e73e5708e70..21515d4c49eb 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -575,7 +575,7 @@ static int esp_init_state(struct xfrm_state *x)
 			      crypto_aead_ivsize(aead);
 	if (x->props.mode == XFRM_MODE_TUNNEL)
 		x->props.header_len += sizeof(struct iphdr);
-	else if (x->props.mode == XFRM_MODE_BEET)
+	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
 		x->props.header_len += IPV4_BEET_PHMAXLEN;
 	if (x->encap) {
 		struct xfrm_encap_tmpl *encap = x->encap;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 1bfa078ddbd0..16fc6f454a31 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1509,14 +1509,14 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
 
 					/* BSD 4.2 compatibility hack :-( */
 					if (mtu == 0 &&
-					    old_mtu >= dst_metric(&rth->u.dst, RTAX_MTU) &&
+					    old_mtu >= dst_mtu(&rth->u.dst) &&
 					    old_mtu >= 68 + (iph->ihl << 2))
 						old_mtu -= iph->ihl << 2;
 
 					mtu = guess_mtu(old_mtu);
 				}
-				if (mtu <= dst_metric(&rth->u.dst, RTAX_MTU)) {
-					if (mtu < dst_metric(&rth->u.dst, RTAX_MTU)) {
+				if (mtu <= dst_mtu(&rth->u.dst)) {
+					if (mtu < dst_mtu(&rth->u.dst)) {
 						dst_confirm(&rth->u.dst);
 						if (mtu < ip_rt_min_pmtu) {
 							mtu = ip_rt_min_pmtu;
@@ -1538,7 +1538,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
 
 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
-	if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= 68 &&
+	if (dst_mtu(dst) > mtu && mtu >= 68 &&
 	    !(dst_metric_locked(dst, RTAX_MTU))) {
 		if (mtu < ip_rt_min_pmtu) {
 			mtu = ip_rt_min_pmtu;
@@ -1667,7 +1667,7 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
 
 	if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0)
 		rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
-	if (dst_metric(&rt->u.dst, RTAX_MTU) > IP_MAX_MTU)
+	if (dst_mtu(&rt->u.dst) > IP_MAX_MTU)
 		rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
 	if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0)
 		rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
@@ -3223,9 +3223,7 @@ int __init ip_rt_init(void)
  */
 void __init ip_static_sysctl_init(void)
 {
-#ifdef CONFIG_SYSCTL
 	register_sysctl_paths(ipv4_route_path, ipv4_route_table);
-#endif
 }
 #endif
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 91a8cfddf1c4..44c1e934824b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -687,14 +687,14 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 	inet_twsk_put(tw);
 }
 
-static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
+static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req)
 {
 	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
 			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
 			req->ts_recent,
 			0,
-			tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr));
+			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr));
 }
 
 /*
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 204c42162660..f976fc57892c 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -609,7 +609,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 					  tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
 		/* Out of window: send ACK and drop. */
 		if (!(flg & TCP_FLAG_RST))
-			req->rsk_ops->send_ack(skb, req);
+			req->rsk_ops->send_ack(sk, skb, req);
 		if (paws_reject)
 			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 		return NULL;
@@ -618,89 +618,87 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 	/* In sequence, PAWS is OK. */
 
 	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
 		req->ts_recent = tmp_opt.rcv_tsval;
 
 	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
 		/* Truncate SYN, it is out of window starting
 		   at tcp_rsk(req)->rcv_isn + 1. */
 		flg &= ~TCP_FLAG_SYN;
 	}
 
 	/* RFC793: "second check the RST bit" and
 	 * "fourth, check the SYN bit"
 	 */
 	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
 		goto embryonic_reset;
 	}
 
 	/* ACK sequence verified above, just make sure ACK is
 	 * set. If ACK not set, just silently drop the packet.
 	 */
 	if (!(flg & TCP_FLAG_ACK))
 		return NULL;
-
-	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
-	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
-	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
-		inet_rsk(req)->acked = 1;
-		return NULL;
-	}
-
-	/* OK, ACK is valid, create big socket and
-	 * feed this segment to it. It will repeat all
-	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
-	 * ESTABLISHED STATE. If it will be dropped after
-	 * socket is created, wait for troubles.
-	 */
-	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb,
-							 req, NULL);
-	if (child == NULL)
-		goto listen_overflow;
+
+	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
+	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
+	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
+		inet_rsk(req)->acked = 1;
+		return NULL;
+	}
+
+	/* OK, ACK is valid, create big socket and
+	 * feed this segment to it. It will repeat all
+	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
+	 * ESTABLISHED STATE. If it will be dropped after
+	 * socket is created, wait for troubles.
+	 */
+	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
+	if (child == NULL)
+		goto listen_overflow;
 #ifdef CONFIG_TCP_MD5SIG
 	else {
 		/* Copy over the MD5 key from the original socket */
 		struct tcp_md5sig_key *key;
 		struct tcp_sock *tp = tcp_sk(sk);
 		key = tp->af_specific->md5_lookup(sk, child);
 		if (key != NULL) {
 			/*
 			 * We're using one, so create a matching key on the
 			 * newsk structure. If we fail to get memory then we
 			 * end up not copying the key across. Shucks.
 			 */
 			char *newkey = kmemdup(key->key, key->keylen,
 					       GFP_ATOMIC);
 			if (newkey) {
 				if (!tcp_alloc_md5sig_pool())
 					BUG();
-				tp->af_specific->md5_add(child, child,
-							 newkey,
-							 key->keylen);
+				tp->af_specific->md5_add(child, child, newkey,
+							 key->keylen);
 			}
 		}
 	}
 #endif
 
 	inet_csk_reqsk_queue_unlink(sk, req, prev);
 	inet_csk_reqsk_queue_removed(sk, req);
 
 	inet_csk_reqsk_queue_add(sk, req, child);
 	return child;
 
- listen_overflow:
+listen_overflow:
 	if (!sysctl_tcp_abort_on_overflow) {
 		inet_rsk(req)->acked = 1;
 		return NULL;
 	}
 
- embryonic_reset:
+embryonic_reset:
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
 	if (!(flg & TCP_FLAG_RST))
 		req->rsk_ops->send_reset(sk, skb);
 
 	inet_csk_reqsk_queue_drop(sk, req, prev);
 	return NULL;
 }
 
 /*
diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c
index 9c798abce736..63418185f524 100644
--- a/net/ipv4/xfrm4_mode_beet.c
+++ b/net/ipv4/xfrm4_mode_beet.c
@@ -47,8 +47,10 @@ static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb)
 	if (unlikely(optlen))
 		hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4);
 
-	skb_set_network_header(skb, IPV4_BEET_PHMAXLEN - x->props.header_len -
-			       hdrlen);
+	skb_set_network_header(skb, -x->props.header_len -
+			       hdrlen + (XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph)));
+	if (x->sel.family != AF_INET6)
+		skb->network_header += IPV4_BEET_PHMAXLEN;
 	skb->mac_header = skb->network_header +
 			  offsetof(struct iphdr, protocol);
 	skb->transport_header = skb->network_header + sizeof(*top_iph);