Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/devinet.c              |  2
-rw-r--r--  net/ipv4/inet_connection_sock.c | 14
-rw-r--r--  net/ipv4/ip_gre.c               |  5
-rw-r--r--  net/ipv4/ip_sockglue.c          |  2
-rw-r--r--  net/ipv4/ping.c                 |  4
-rw-r--r--  net/ipv4/raw.c                  |  4
-rw-r--r--  net/ipv4/route.c                | 77
-rw-r--r--  net/ipv4/tcp.c                  |  2
-rw-r--r--  net/ipv4/tcp_input.c            |  5
-rw-r--r--  net/ipv4/tcp_ipv4.c             | 20
-rw-r--r--  net/ipv4/udp.c                  |  4
11 files changed, 101 insertions, 38 deletions
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index cebd9d31e65a..f6303b17546b 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1847,7 +1847,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
 	if (err < 0)
 		goto errout;
 
-	err = EINVAL;
+	err = -EINVAL;
 	if (!tb[NETCONFA_IFINDEX])
 		goto errout;
 
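
The devinet.c hunk is a one-character fix: kernel-internal functions report failure as negative errno values, so assigning the positive EINVAL would not be recognized as an error by callers that test for err < 0. A minimal userspace sketch of that convention (parse_ifindex is a hypothetical stand-in, not a kernel function):

#include <errno.h>
#include <stdio.h>

/* Hypothetical helper mirroring the kernel convention:
 * 0 on success, a negative errno code on failure. */
static int parse_ifindex(const char *attr)
{
	if (!attr)
		return -EINVAL;	/* not EINVAL: callers test for < 0 */
	return 0;
}

int main(void)
{
	int err = parse_ifindex(NULL);

	if (err < 0)
		printf("failed: %d (errno code %d)\n", err, -err);
	return 0;
}
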
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 46b9c887bede..64148914803a 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -789,14 +789,16 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
 	reqsk_put(req);
 }
 
-void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
-			      struct sock *child)
+struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
+				      struct request_sock *req,
+				      struct sock *child)
 {
 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 
 	spin_lock(&queue->rskq_lock);
 	if (unlikely(sk->sk_state != TCP_LISTEN)) {
 		inet_child_forget(sk, req, child);
+		child = NULL;
 	} else {
 		req->sk = child;
 		req->dl_next = NULL;
@@ -808,6 +810,7 @@ void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
 		sk_acceptq_added(sk);
 	}
 	spin_unlock(&queue->rskq_lock);
+	return child;
 }
 EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
 
@@ -817,11 +820,8 @@ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
 	if (own_req) {
 		inet_csk_reqsk_queue_drop(sk, req);
 		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
-		inet_csk_reqsk_queue_add(sk, req, child);
-		/* Warning: caller must not call reqsk_put(req);
-		 * child stole last reference on it.
-		 */
-		return child;
+		if (inet_csk_reqsk_queue_add(sk, req, child))
+			return child;
 	}
 	/* Too bad, another child took ownership of the request, undo. */
 	bh_unlock_sock(child);
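
With this change inet_csk_reqsk_queue_add() returns the child socket when it was actually queued and NULL when the listener had already left TCP_LISTEN and the child was forgotten, so inet_csk_complete_hashdance() no longer has to assume the queueing succeeded. A standalone sketch of that return-the-queued-object-or-NULL contract (the struct names here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures. */
struct child { int id; };
struct queue { bool listening; struct child *accepted; };

/* Mirrors the new contract: return the child if it was queued,
 * NULL if it was dropped because the listener went away. */
static struct child *queue_add(struct queue *q, struct child *c)
{
	if (!q->listening)
		return NULL;	/* caller must not assume c was queued */
	q->accepted = c;
	return c;
}

int main(void)
{
	struct queue q = { .listening = false };
	struct child c = { .id = 1 };

	if (queue_add(&q, &c))
		printf("child %d queued\n", c.id);
	else
		printf("listener gone, child dropped\n");
	return 0;
}
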
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 56fdf4e0dce4..41ba68de46d8 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1054,8 +1054,9 @@ static const struct net_device_ops gre_tap_netdev_ops = {
 static void ipgre_tap_setup(struct net_device *dev)
 {
 	ether_setup(dev);
 	dev->netdev_ops = &gre_tap_netdev_ops;
+	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 	ip_tunnel_setup(dev, gre_tap_net_id);
 }
 
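
ether_setup() enables IFF_TX_SKB_SHARING by default, but a GRE tap device rewrites the packets it transmits, so the flag is cleared before IFF_LIVE_ADDR_CHANGE is set. A small sketch of the clear-then-set bitmask idiom (the flag values below are made up for illustration; the real IFF_* private flags live in netdevice.h):

#include <stdio.h>

/* Hypothetical flag values, for illustration only. */
#define FLAG_TX_SKB_SHARING	0x1
#define FLAG_LIVE_ADDR_CHANGE	0x2

int main(void)
{
	/* as a generic setup helper might leave it */
	unsigned int priv_flags = FLAG_TX_SKB_SHARING;

	priv_flags &= ~FLAG_TX_SKB_SHARING;  /* device rewrites headers, cannot share TX skbs */
	priv_flags |= FLAG_LIVE_ADDR_CHANGE; /* MAC address may change while up */

	printf("priv_flags = 0x%x\n", priv_flags);
	return 0;
}
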
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 5f73a7c03e27..a50124260f5a 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -249,6 +249,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
 		switch (cmsg->cmsg_type) {
 		case IP_RETOPTS:
 			err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
+
+			/* Our caller is responsible for freeing ipc->opt */
 			err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
 					     err < 40 ? err : 40);
 			if (err)
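
The new comment spells out the ownership rule that the ping, raw and udp hunks below depend on: ip_cmsg_send() may have already allocated ipc->opt through ip_options_get() by the time a later control message makes it fail, so the caller must free ipc.opt on its error path. A self-contained sketch of that contract, with parse_cmsgs() as a hypothetical stand-in for ip_cmsg_send():

#include <errno.h>
#include <stdlib.h>

struct opts { char data[40]; };

/* Hypothetical parser mirroring ip_cmsg_send(): it may allocate
 * *opt and still fail on a later control message. */
static int parse_cmsgs(struct opts **opt, int fail_late)
{
	*opt = calloc(1, sizeof(**opt));
	if (!*opt)
		return -ENOMEM;
	if (fail_late)
		return -EINVAL;	/* *opt is already allocated here */
	return 0;
}

static int sendmsg_like(int fail_late)
{
	struct opts *opt = NULL;
	int err = parse_cmsgs(&opt, fail_late);

	if (err) {
		free(opt);	/* the caller owns opt, even on error */
		return err;
	}
	/* ... use opt ... */
	free(opt);
	return 0;
}

int main(void)
{
	return sendmsg_like(1) == -EINVAL ? 0 : 1;
}
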
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index c117b21b937d..d3a27165f9cc 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -746,8 +746,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
 	if (msg->msg_controllen) {
 		err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
-		if (err)
+		if (unlikely(err)) {
+			kfree(ipc.opt);
 			return err;
+		}
 		if (ipc.opt)
 			free = 1;
 	}
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bc35f1842512..7113bae4e6a0 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -547,8 +547,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
 	if (msg->msg_controllen) {
 		err = ip_cmsg_send(net, msg, &ipc, false);
-		if (err)
+		if (unlikely(err)) {
+			kfree(ipc.opt);
 			goto out;
+		}
 		if (ipc.opt)
 			free = 1;
 	}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 85f184e429c6..02c62299d717 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -129,6 +129,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
 static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly = 256;
 
+static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
 /*
  * Interface to generic destination cache.
  */
@@ -755,7 +756,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
 			struct fib_nh *nh = &FIB_RES_NH(res);
 
 			update_or_create_fnhe(nh, fl4->daddr, new_gw,
-					      0, 0);
+					      0, jiffies + ip_rt_gc_timeout);
 		}
 		if (kill_route)
 			rt->dst.obsolete = DST_OBSOLETE_KILL;
@@ -1556,6 +1557,36 @@ static void ip_handle_martian_source(struct net_device *dev,
 #endif
 }
 
+static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
+{
+	struct fnhe_hash_bucket *hash;
+	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
+	u32 hval = fnhe_hashfun(daddr);
+
+	spin_lock_bh(&fnhe_lock);
+
+	hash = rcu_dereference_protected(nh->nh_exceptions,
+					 lockdep_is_held(&fnhe_lock));
+	hash += hval;
+
+	fnhe_p = &hash->chain;
+	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
+	while (fnhe) {
+		if (fnhe->fnhe_daddr == daddr) {
+			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
+				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
+			fnhe_flush_routes(fnhe);
+			kfree_rcu(fnhe, rcu);
+			break;
+		}
+		fnhe_p = &fnhe->fnhe_next;
+		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
+						 lockdep_is_held(&fnhe_lock));
+	}
+
+	spin_unlock_bh(&fnhe_lock);
+}
+
 /* called in rcu_read_lock() section */
 static int __mkroute_input(struct sk_buff *skb,
 			   const struct fib_result *res,
@@ -1609,11 +1640,20 @@ static int __mkroute_input(struct sk_buff *skb,
 
 	fnhe = find_exception(&FIB_RES_NH(*res), daddr);
 	if (do_cache) {
-		if (fnhe)
+		if (fnhe) {
 			rth = rcu_dereference(fnhe->fnhe_rth_input);
-		else
-			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+			if (rth && rth->dst.expires &&
+			    time_after(jiffies, rth->dst.expires)) {
+				ip_del_fnhe(&FIB_RES_NH(*res), daddr);
+				fnhe = NULL;
+			} else {
+				goto rt_cache;
+			}
+		}
+
+		rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
 
+rt_cache:
 		if (rt_cache_valid(rth)) {
 			skb_dst_set_noref(skb, &rth->dst);
 			goto out;
@@ -2014,19 +2054,29 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 		struct fib_nh *nh = &FIB_RES_NH(*res);
 
 		fnhe = find_exception(nh, fl4->daddr);
-		if (fnhe)
+		if (fnhe) {
 			prth = &fnhe->fnhe_rth_output;
-		else {
-			if (unlikely(fl4->flowi4_flags &
-				     FLOWI_FLAG_KNOWN_NH &&
-				     !(nh->nh_gw &&
-				       nh->nh_scope == RT_SCOPE_LINK))) {
-				do_cache = false;
-				goto add;
+			rth = rcu_dereference(*prth);
+			if (rth && rth->dst.expires &&
+			    time_after(jiffies, rth->dst.expires)) {
+				ip_del_fnhe(nh, fl4->daddr);
+				fnhe = NULL;
+			} else {
+				goto rt_cache;
 			}
-			prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
 		}
+
+		if (unlikely(fl4->flowi4_flags &
+			     FLOWI_FLAG_KNOWN_NH &&
+			     !(nh->nh_gw &&
+			       nh->nh_scope == RT_SCOPE_LINK))) {
+			do_cache = false;
+			goto add;
+		}
+		prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
 		rth = rcu_dereference(*prth);
+
+rt_cache:
 		if (rt_cache_valid(rth)) {
 			dst_hold(&rth->dst);
 			return rth;
@@ -2569,7 +2619,6 @@ void ip_rt_multicast_event(struct in_device *in_dev)
 }
 
 #ifdef CONFIG_SYSCTL
-static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
 static int ip_rt_gc_interval __read_mostly = 60 * HZ;
 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
 static int ip_rt_gc_elasticity __read_mostly = 8;
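
Taken together, the route.c hunks give redirect-created exceptions an expiry (jiffies + ip_rt_gc_timeout, which is why the ip_rt_gc_timeout definition moves out of the CONFIG_SYSCTL block) and make both __mkroute_input() and __mkroute_output() delete an exception whose cached route has expired, via the new ip_del_fnhe(), instead of keeping it around. A simplified, lock-free userspace sketch of that evict-on-lookup pattern (the real fnhe chain is per-nexthop, hashed, and protected by fnhe_lock plus RCU):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Hypothetical exception entry standing in for fib_nh_exception. */
struct exception {
	unsigned int daddr;
	time_t expires;		/* 0 means "never expires" */
	struct exception *next;
};

/* Unlink and free the entry for daddr, like ip_del_fnhe() does
 * under fnhe_lock (locking omitted in this sketch). */
static void del_exception(struct exception **chain, unsigned int daddr)
{
	struct exception **p = chain, *e;

	while ((e = *p) != NULL) {
		if (e->daddr == daddr) {
			*p = e->next;
			free(e);
			return;
		}
		p = &e->next;
	}
}

/* Lookup that evicts an expired entry instead of returning it. */
static struct exception *lookup(struct exception **chain, unsigned int daddr)
{
	struct exception *e;

	for (e = *chain; e; e = e->next)
		if (e->daddr == daddr)
			break;
	if (e && e->expires && time(NULL) > e->expires) {
		del_exception(chain, daddr);
		return NULL;	/* fall back to the regular cached route */
	}
	return e;
}

int main(void)
{
	struct exception *chain = calloc(1, sizeof(*chain));

	if (!chain)
		return 1;
	chain->daddr = 1;
	chain->expires = time(NULL) - 1;	/* already expired */
	printf("hit: %p\n", (void *)lookup(&chain, 1));
	free(chain);	/* chain is NULL after eviction; free(NULL) is a no-op */
	return 0;
}
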
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0c36ef4a3f86..483ffdf5aa4d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2950,7 +2950,7 @@ static void __tcp_alloc_md5sig_pool(void)
 		struct crypto_hash *hash;
 
 		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
-		if (IS_ERR_OR_NULL(hash))
+		if (IS_ERR(hash))
 			return;
 		per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
 	}
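
crypto_alloc_hash() reports failure with an ERR_PTR()-encoded errno rather than NULL, so IS_ERR() is the precise check and the _OR_NULL variant was only obscuring that. A userspace re-implementation of the ERR_PTR idiom, for illustration only (alloc_hash() is a hypothetical allocator with the same never-NULL convention):

#include <errno.h>
#include <stdio.h>

/* Userspace re-implementation of the kernel's ERR_PTR idiom. */
#define MAX_ERRNO	4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical allocator that, like crypto_alloc_hash(), returns
 * either a valid object or an ERR_PTR() code -- never NULL. */
static void *alloc_hash(int available)
{
	static int dummy;

	return available ? (void *)&dummy : ERR_PTR(-ENOENT);
}

int main(void)
{
	void *h = alloc_hash(0);

	if (IS_ERR(h))	/* an extra NULL check would be dead code */
		printf("allocation failed: %ld\n", PTR_ERR(h));
	return 0;
}
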
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1c2a73406261..3b2c8e90a475 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2896,7 +2896,10 @@ static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us)
 {
 	const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ;
 	struct rtt_meas *m = tcp_sk(sk)->rtt_min;
-	struct rtt_meas rttm = { .rtt = (rtt_us ? : 1), .ts = now };
+	struct rtt_meas rttm = {
+		.rtt = likely(rtt_us) ? rtt_us : jiffies_to_usecs(1),
+		.ts = now,
+	};
 	u32 elapsed;
 
 	/* Check if the new measurement updates the 1st, 2nd, or 3rd choices */
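
rtt_min is tracked in microseconds, so the old fallback of 1 for a zero measurement meant a 1 us floor, which is unrealistically small and could presumably pin the windowed minimum; the new code uses one jiffy expressed in microseconds instead. A tiny sketch of the difference (HZ and jiffies_to_usecs() below are stand-ins; the real ones come from the kernel's timer code):

#include <stdio.h>

/* Illustrative stand-ins; HZ=250 is just a common configuration. */
#define HZ	250
static unsigned int jiffies_to_usecs(unsigned int j)
{
	return j * (1000000 / HZ);
}

int main(void)
{
	unsigned int rtt_us = 0;	/* degenerate measurement */

	/* Old floor: 1 usec. New floor: one tick worth of usecs. */
	unsigned int old_floor = rtt_us ? rtt_us : 1;
	unsigned int new_floor = rtt_us ? rtt_us : jiffies_to_usecs(1);

	printf("old=%u us, new=%u us\n", old_floor, new_floor);
	return 0;
}
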
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 7f6ff037adaf..487ac67059e2 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1597,28 +1597,30 @@ process:
 
 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
 		struct request_sock *req = inet_reqsk(sk);
-		struct sock *nsk = NULL;
+		struct sock *nsk;
 
 		sk = req->rsk_listener;
-		if (tcp_v4_inbound_md5_hash(sk, skb))
-			goto discard_and_relse;
-		if (likely(sk->sk_state == TCP_LISTEN)) {
-			nsk = tcp_check_req(sk, skb, req, false);
-		} else {
+		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
+			reqsk_put(req);
+			goto discard_it;
+		}
+		if (unlikely(sk->sk_state != TCP_LISTEN)) {
 			inet_csk_reqsk_queue_drop_and_put(sk, req);
 			goto lookup;
 		}
+		sock_hold(sk);
+		nsk = tcp_check_req(sk, skb, req, false);
 		if (!nsk) {
 			reqsk_put(req);
-			goto discard_it;
+			goto discard_and_relse;
 		}
 		if (nsk == sk) {
-			sock_hold(sk);
 			reqsk_put(req);
 		} else if (tcp_child_process(sk, nsk, skb)) {
 			tcp_v4_send_reset(nsk, skb);
-			goto discard_it;
+			goto discard_and_relse;
 		} else {
+			sock_put(sk);
 			return 0;
 		}
 	}
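
The tcp_ipv4.c hunk takes an explicit hold on the listener (sock_hold()) before calling tcp_check_req() and balances it on every exit: failure paths now go through discard_and_relse, which drops the reference, and the successful-handoff path adds a sock_put(). A standalone sketch of keeping hold/put pairs balanced across all return paths (struct obj is a made-up stand-in for a refcounted socket):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical refcounted object standing in for a listener socket. */
struct obj { int refs; };

static void hold(struct obj *o) { o->refs++; }
static void put(struct obj *o)  { o->refs--; }

/* Every exit from this function must release the extra reference
 * taken by hold(), whichever path is taken. */
static int process(struct obj *listener, bool handoff_ok)
{
	hold(listener);			/* pin across the risky section */

	if (!handoff_ok) {
		put(listener);		/* error path: drop our reference */
		return -1;
	}

	put(listener);			/* handoff done: child keeps its own refs */
	return 0;
}

int main(void)
{
	struct obj l = { .refs = 1 };

	process(&l, true);
	process(&l, false);
	printf("refs = %d\n", l.refs);	/* must still be 1 */
	return 0;
}
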
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index be0b21852b13..95d2f198017e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1048,8 +1048,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	if (msg->msg_controllen) {
 		err = ip_cmsg_send(sock_net(sk), msg, &ipc,
 				   sk->sk_family == AF_INET6);
-		if (err)
+		if (unlikely(err)) {
+			kfree(ipc.opt);
 			return err;
+		}
 		if (ipc.opt)
 			free = 1;
 		connected = 0;