author     Linus Torvalds <torvalds@linux-foundation.org>    2014-06-15 22:37:03 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-06-15 22:37:03 -0400
commit     a9be22425e767d936105679fdc9f568b97bd47cf
tree       37a63136da83dcf272668462f96eed1e96f37de3
parent     dd1845af24a47b70cf84c29126698884f740ff9c
parent     b58537a1f5629bdc98a8b9dc2051ce0e952f6b4b
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) Fix checksumming regressions, from Tom Herbert.
2) Undo unintentional permissions changes for SCTP rto_alpha and
   rto_beta sysctl knobs, from Daniel Borkmann.
3) VXLAN, like other IP tunnels, should advertise its encapsulation
   size using dev->needed_headroom instead of dev->hard_header_len
   (see the sketch following this list). From Cong Wang.
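
To illustrate point 3, here is a minimal sketch of the convention; the driver name and headroom macro are hypothetical and not taken from this series. hard_header_len describes the link-layer header the device itself builds, while needed_headroom only asks upper layers to reserve extra room for the outer headers the tunnel will push later.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical outer overhead: IPv4 (20) + UDP (8) + tunnel header (8). */
#define MY_TUNNEL_HEADROOM	(20 + 8 + 8)

static void my_tunnel_setup(struct net_device *dev)
{
	ether_setup(dev);	/* inner frames are Ethernet */

	/* Reserve room for the encapsulation instead of inflating
	 * hard_header_len, which would misdescribe the device's own
	 * link-layer header.
	 */
	dev->needed_headroom = ETH_HLEN + MY_TUNNEL_HEADROOM;
}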
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
net: sctp: fix permissions for rto_alpha and rto_beta knobs
vxlan: Checksum fixes
net: add skb_pop_rcv_encapsulation
udp: call __skb_checksum_complete when doing full checksum
net: Fix save software checksum complete
net: Fix GSO constants to match NETIF flags
udp: ipv4: do not waste time in __udp4_lib_mcast_demux_lookup
vxlan: use dev->needed_headroom instead of dev->hard_header_len
MAINTAINERS: update cxgb4 maintainer
-rw-r--r--   MAINTAINERS                       |  2
-rw-r--r--   drivers/net/vxlan.c               | 18
-rw-r--r--   include/linux/netdev_features.h   |  1
-rw-r--r--   include/linux/netdevice.h         |  7
-rw-r--r--   include/linux/skbuff.h            | 23
-rw-r--r--   include/net/udp.h                 |  4
-rw-r--r--   net/core/datagram.c               | 36
-rw-r--r--   net/core/skbuff.c                 |  3
-rw-r--r--   net/ipv4/udp.c                    |  4
-rw-r--r--   net/sctp/sysctl.c                 | 32
10 files changed, 96 insertions, 34 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 055f95238d88..134483f206e4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2594,7 +2594,7 @@ S:	Supported
 F:	drivers/infiniband/hw/cxgb3/
 
 CXGB4 ETHERNET DRIVER (CXGB4)
-M:	Dimitris Michailidis <dm@chelsio.com>
+M:	Hariprasad S <hariprasad@chelsio.com>
 L:	netdev@vger.kernel.org
 W:	http://www.chelsio.com
 S:	Supported
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1610d51dbb5c..ade33ef82823 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1156,15 +1156,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 	if (!vs)
 		goto drop;
 
-	/* If the NIC driver gave us an encapsulated packet
-	 * with the encapsulation mark, the device checksummed it
-	 * for us. Otherwise force the upper layers to verify it.
-	 */
-	if ((skb->ip_summed != CHECKSUM_UNNECESSARY && skb->ip_summed != CHECKSUM_PARTIAL) ||
-	    !skb->encapsulation)
-		skb->ip_summed = CHECKSUM_NONE;
-
-	skb->encapsulation = 0;
+	skb_pop_rcv_encapsulation(skb);
 
 	vs->rcv(vs, skb, vxh->vx_vni);
 	return 0;
@@ -1201,6 +1193,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
 	skb_reset_mac_header(skb);
 	skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
 	skb->protocol = eth_type_trans(skb, vxlan->dev);
+	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 
 	/* Ignore packet loops (and multicast echo) */
 	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
@@ -2247,9 +2240,9 @@ static void vxlan_setup(struct net_device *dev)
 	eth_hw_addr_random(dev);
 	ether_setup(dev);
 	if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
-		dev->hard_header_len = ETH_HLEN + VXLAN6_HEADROOM;
+		dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
 	else
-		dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
+		dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
 
 	dev->netdev_ops = &vxlan_netdev_ops;
 	dev->destructor = free_netdev;
@@ -2646,8 +2639,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
 		if (!tb[IFLA_MTU])
 			dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
 
-		/* update header length based on lower device */
-		dev->hard_header_len = lowerdev->hard_header_len +
+		dev->needed_headroom = lowerdev->hard_header_len +
 				       (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
 	} else if (use_ipv6)
 		vxlan->flags |= VXLAN_F_IPV6;
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index e5a589435e2b..d99800cbdcf3 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -117,6 +117,7 @@ enum {
 #define NETIF_F_GSO_IPIP	__NETIF_F(GSO_IPIP)
 #define NETIF_F_GSO_SIT		__NETIF_F(GSO_SIT)
 #define NETIF_F_GSO_UDP_TUNNEL	__NETIF_F(GSO_UDP_TUNNEL)
+#define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM)
 #define NETIF_F_GSO_MPLS	__NETIF_F(GSO_MPLS)
 #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
 #define NETIF_F_HW_VLAN_STAG_RX	__NETIF_F(HW_VLAN_STAG_RX)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index abe3de1db932..66f9a04ec270 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3305,6 +3305,13 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_IPIP != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT));
 
 	return (features & feature) == feature;
 }
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5b5cd3189c98..ec89301ada41 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -338,17 +338,18 @@ enum {
 
 	SKB_GSO_GRE = 1 << 6,
 
-	SKB_GSO_IPIP = 1 << 7,
+	SKB_GSO_GRE_CSUM = 1 << 7,
 
-	SKB_GSO_SIT = 1 << 8,
+	SKB_GSO_IPIP = 1 << 8,
 
-	SKB_GSO_UDP_TUNNEL = 1 << 9,
+	SKB_GSO_SIT = 1 << 9,
 
-	SKB_GSO_MPLS = 1 << 10,
+	SKB_GSO_UDP_TUNNEL = 1 << 10,
 
 	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
 
-	SKB_GSO_GRE_CSUM = 1 << 12,
+	SKB_GSO_MPLS = 1 << 12,
+
 };
 
 #if BITS_PER_LONG > 32
@@ -1853,6 +1854,18 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
 	return pskb_may_pull(skb, skb_network_offset(skb) + len);
 }
 
+static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
+{
+	/* Only continue with checksum unnecessary if device indicated
+	 * it is valid across encapsulation (skb->encapsulation was set).
+	 */
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY && !skb->encapsulation)
+		skb->ip_summed = CHECKSUM_NONE;
+
+	skb->encapsulation = 0;
+	skb->csum_valid = 0;
+}
+
 /*
  * CPUs often take a performance hit when accessing unaligned memory
  * locations. The actual performance hit varies, it can be small if the
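
For illustration, a usage sketch of the new helper in a UDP-tunnel receive path; the handler name is hypothetical, but the call mirrors what vxlan_udp_encap_recv() now does after pulling the outer headers.

#include <linux/skbuff.h>
#include <net/sock.h>

/* Hypothetical encap_rcv handler for a UDP-based tunnel. */
static int my_tunnel_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	/* ... outer UDP and tunnel headers already pulled here ... */

	/* Keep CHECKSUM_UNNECESSARY only if the device validated the
	 * checksum across the encapsulation (skb->encapsulation set);
	 * otherwise the inner protocols must re-verify. Also clears
	 * skb->encapsulation and any stale csum_valid state.
	 */
	skb_pop_rcv_encapsulation(skb);

	/* ... hand the decapsulated frame to the tunnel net_device ... */
	return 0;
}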
diff --git a/include/net/udp.h b/include/net/udp.h
index 2ecfc6e15609..68a1fefe3dfe 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -111,7 +111,9 @@ struct sk_buff;
  */
 static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
 {
-	return __skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov);
+	return (UDP_SKB_CB(skb)->cscov == skb->len ?
+		__skb_checksum_complete(skb) :
+		__skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
 }
 
 static inline int udp_lib_checksum_complete(struct sk_buff *skb)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 6b1c04ca1d50..488dd1a825c0 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -739,22 +739,38 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
 	__sum16 sum;
 
 	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
-	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && !sum &&
-	    !skb->csum_complete_sw)
-		netdev_rx_csum_fault(skb->dev);
-
-	/* Save checksum complete for later use */
-	skb->csum = sum;
-	skb->ip_summed = CHECKSUM_COMPLETE;
-	skb->csum_complete_sw = 1;
-
+	if (likely(!sum)) {
+		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
+		    !skb->csum_complete_sw)
+			netdev_rx_csum_fault(skb->dev);
+	}
+	skb->csum_valid = !sum;
 	return sum;
 }
 EXPORT_SYMBOL(__skb_checksum_complete_head);
 
 __sum16 __skb_checksum_complete(struct sk_buff *skb)
 {
-	return __skb_checksum_complete_head(skb, skb->len);
+	__wsum csum;
+	__sum16 sum;
+
+	csum = skb_checksum(skb, 0, skb->len, 0);
+
+	/* skb->csum holds pseudo checksum */
+	sum = csum_fold(csum_add(skb->csum, csum));
+	if (likely(!sum)) {
+		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
+		    !skb->csum_complete_sw)
+			netdev_rx_csum_fault(skb->dev);
+	}
+
+	/* Save full packet checksum */
+	skb->csum = csum;
+	skb->ip_summed = CHECKSUM_COMPLETE;
+	skb->csum_complete_sw = 1;
+	skb->csum_valid = !sum;
+
+	return sum;
 }
 EXPORT_SYMBOL(__skb_checksum_complete);
 
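
For illustration, a consumer-side sketch (hypothetical helper, not part of this series) of the state saved above: __skb_checksum_complete() records whether the software fold succeeded in skb->csum_valid and sets skb->csum_complete_sw, so a later check can reuse that result instead of walking the packet again.

#include <linux/skbuff.h>

/* Hypothetical helper; assumes skb->csum already holds the pseudo-header
 * checksum, as UDP/TCP arrange before calling __skb_checksum_complete().
 */
static bool my_skb_csum_already_valid(struct sk_buff *skb)
{
	/* Reuse the result of an earlier software verification. */
	if (skb->csum_complete_sw)
		return skb->csum_valid;

	/* No cached software result yet: verify now; this also fills in
	 * csum_valid and csum_complete_sw for subsequent callers.
	 */
	return __skb_checksum_complete(skb) == 0;
}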
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bf92824af3f7..9cd5344fad73 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -689,6 +689,9 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->ooo_okay = old->ooo_okay;
 	new->no_fcs = old->no_fcs;
 	new->encapsulation = old->encapsulation;
+	new->encap_hdr_csum = old->encap_hdr_csum;
+	new->csum_valid = old->csum_valid;
+	new->csum_complete_sw = old->csum_complete_sw;
 #ifdef CONFIG_XFRM
 	new->sp = secpath_get(old->sp);
 #endif
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 185ed3e59802..d92f94b7e402 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1861,6 +1861,10 @@ static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
 	unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask);
 	struct udp_hslot *hslot = &udp_table.hash[slot];
 
+	/* Do not bother scanning a too big list */
+	if (hslot->count > 10)
+		return NULL;
+
 	rcu_read_lock();
 begin:
 	count = 0;
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 7e5eb7554990..dcb19592761e 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -34,6 +34,8 @@
  *    Sridhar Samudrala <sri@us.ibm.com>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <net/sctp/structs.h>
 #include <net/sctp/sctp.h>
 #include <linux/sysctl.h>
@@ -46,6 +48,11 @@ static int sack_timer_min = 1;
 static int sack_timer_max = 500;
 static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
 static int rwnd_scale_max = 16;
+static int rto_alpha_min = 0;
+static int rto_beta_min = 0;
+static int rto_alpha_max = 1000;
+static int rto_beta_max = 1000;
+
 static unsigned long max_autoclose_min = 0;
 static unsigned long max_autoclose_max =
 	(MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
@@ -64,6 +71,9 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
 static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
 				void __user *buffer, size_t *lenp,
 				loff_t *ppos);
+static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
+				   void __user *buffer, size_t *lenp,
+				   loff_t *ppos);
 static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
 			     void __user *buffer, size_t *lenp,
 			     loff_t *ppos);
@@ -126,15 +136,19 @@ static struct ctl_table sctp_net_table[] = {
 		.procname	= "rto_alpha_exp_divisor",
 		.data		= &init_net.sctp.rto_alpha,
 		.maxlen		= sizeof(int),
-		.mode		= 0444,
-		.proc_handler	= proc_dointvec,
+		.mode		= 0644,
+		.proc_handler	= proc_sctp_do_alpha_beta,
+		.extra1		= &rto_alpha_min,
+		.extra2		= &rto_alpha_max,
 	},
 	{
 		.procname	= "rto_beta_exp_divisor",
 		.data		= &init_net.sctp.rto_beta,
 		.maxlen		= sizeof(int),
-		.mode		= 0444,
-		.proc_handler	= proc_dointvec,
+		.mode		= 0644,
+		.proc_handler	= proc_sctp_do_alpha_beta,
+		.extra1		= &rto_beta_min,
+		.extra2		= &rto_beta_max,
 	},
 	{
 		.procname	= "max_burst",
@@ -403,6 +417,16 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
 	return ret;
 }
 
+static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
+				   void __user *buffer, size_t *lenp,
+				   loff_t *ppos)
+{
+	pr_warn_once("Changing rto_alpha or rto_beta may lead to "
+		     "suboptimal rtt/srtt estimations!\n");
+
+	return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
+}
+
 static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
 			     void __user *buffer, size_t *lenp,
 			     loff_t *ppos)