Diffstat (limited to 'net/ipv4')
51 files changed, 874 insertions, 616 deletions
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1fbff5fa4241..e3286814c8d9 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1070,11 +1070,8 @@ static int inet_sk_reselect_saddr(struct sock *sk) | |||
1070 | return 0; | 1070 | return 0; |
1071 | 1071 | ||
1072 | if (sysctl_ip_dynaddr > 1) { | 1072 | if (sysctl_ip_dynaddr > 1) { |
1073 | printk(KERN_INFO "%s(): shifting inet->" | 1073 | printk(KERN_INFO "%s(): shifting inet->saddr from %pI4 to %pI4\n", |
1074 | "saddr from " NIPQUAD_FMT " to " NIPQUAD_FMT "\n", | 1074 | __func__, &old_saddr, &new_saddr); |
1075 | __func__, | ||
1076 | NIPQUAD(old_saddr), | ||
1077 | NIPQUAD(new_saddr)); | ||
1078 | } | 1075 | } |
1079 | 1076 | ||
1080 | inet->saddr = inet->rcv_saddr = new_saddr; | 1077 | inet->saddr = inet->rcv_saddr = new_saddr; |
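The af_inet.c hunk above is part of the tree-wide move from the NIPQUAD()/NIPQUAD_FMT pair to the %pI4 printk extension, which takes a pointer to a 4-byte, network-byte-order address and prints it as a dotted quad. A minimal sketch of the before/after usage (hypothetical helper name, not code from this patch):

#include <linux/kernel.h>
#include <linux/types.h>

static void print_saddr(__be32 saddr)
{
        /*
         * Old style (being removed): NIPQUAD() expanded the address into
         * four byte-sized arguments for a "%u.%u.%u.%u" format string.
         *
         * printk(KERN_INFO "saddr " NIPQUAD_FMT "\n", NIPQUAD(saddr));
         */

        /* New style: hand vsprintf a pointer and let %pI4 format it. */
        printk(KERN_INFO "saddr %pI4\n", &saddr);
}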
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 8219b7e0968d..3f205181712d 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -201,8 +201,8 @@ out: | |||
201 | 201 | ||
202 | static void ah4_err(struct sk_buff *skb, u32 info) | 202 | static void ah4_err(struct sk_buff *skb, u32 info) |
203 | { | 203 | { |
204 | struct iphdr *iph = (struct iphdr*)skb->data; | 204 | struct iphdr *iph = (struct iphdr *)skb->data; |
205 | struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+(iph->ihl<<2)); | 205 | struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2)); |
206 | struct xfrm_state *x; | 206 | struct xfrm_state *x; |
207 | 207 | ||
208 | if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH || | 208 | if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH || |
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 1a9dd66511fc..957c87dc8e16 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -506,7 +506,7 @@ int arp_bind_neighbour(struct dst_entry *dst) | |||
506 | if (dev == NULL) | 506 | if (dev == NULL) |
507 | return -EINVAL; | 507 | return -EINVAL; |
508 | if (n == NULL) { | 508 | if (n == NULL) { |
509 | __be32 nexthop = ((struct rtable*)dst)->rt_gateway; | 509 | __be32 nexthop = ((struct rtable *)dst)->rt_gateway; |
510 | if (dev->flags&(IFF_LOOPBACK|IFF_POINTOPOINT)) | 510 | if (dev->flags&(IFF_LOOPBACK|IFF_POINTOPOINT)) |
511 | nexthop = 0; | 511 | nexthop = 0; |
512 | n = __neigh_lookup_errno( | 512 | n = __neigh_lookup_errno( |
@@ -640,14 +640,14 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, | |||
640 | arp_ptr=(unsigned char *)(arp+1); | 640 | arp_ptr=(unsigned char *)(arp+1); |
641 | 641 | ||
642 | memcpy(arp_ptr, src_hw, dev->addr_len); | 642 | memcpy(arp_ptr, src_hw, dev->addr_len); |
643 | arp_ptr+=dev->addr_len; | 643 | arp_ptr += dev->addr_len; |
644 | memcpy(arp_ptr, &src_ip,4); | 644 | memcpy(arp_ptr, &src_ip, 4); |
645 | arp_ptr+=4; | 645 | arp_ptr += 4; |
646 | if (target_hw != NULL) | 646 | if (target_hw != NULL) |
647 | memcpy(arp_ptr, target_hw, dev->addr_len); | 647 | memcpy(arp_ptr, target_hw, dev->addr_len); |
648 | else | 648 | else |
649 | memset(arp_ptr, 0, dev->addr_len); | 649 | memset(arp_ptr, 0, dev->addr_len); |
650 | arp_ptr+=dev->addr_len; | 650 | arp_ptr += dev->addr_len; |
651 | memcpy(arp_ptr, &dest_ip, 4); | 651 | memcpy(arp_ptr, &dest_ip, 4); |
652 | 652 | ||
653 | return skb; | 653 | return skb; |
@@ -823,9 +823,9 @@ static int arp_process(struct sk_buff *skb) | |||
823 | int dont_send = 0; | 823 | int dont_send = 0; |
824 | 824 | ||
825 | if (!dont_send) | 825 | if (!dont_send) |
826 | dont_send |= arp_ignore(in_dev,sip,tip); | 826 | dont_send |= arp_ignore(in_dev, sip, tip); |
827 | if (!dont_send && IN_DEV_ARPFILTER(in_dev)) | 827 | if (!dont_send && IN_DEV_ARPFILTER(in_dev)) |
828 | dont_send |= arp_filter(sip,tip,dev); | 828 | dont_send |= arp_filter(sip, tip, dev); |
829 | if (!dont_send) | 829 | if (!dont_send) |
830 | arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha); | 830 | arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha); |
831 | 831 | ||
@@ -1308,7 +1308,7 @@ static void arp_format_neigh_entry(struct seq_file *seq, | |||
1308 | #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) | 1308 | #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) |
1309 | } | 1309 | } |
1310 | #endif | 1310 | #endif |
1311 | sprintf(tbuf, NIPQUAD_FMT, NIPQUAD(*(u32*)n->primary_key)); | 1311 | sprintf(tbuf, "%pI4", n->primary_key); |
1312 | seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n", | 1312 | seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n", |
1313 | tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name); | 1313 | tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name); |
1314 | read_unlock(&n->lock); | 1314 | read_unlock(&n->lock); |
@@ -1321,7 +1321,7 @@ static void arp_format_pneigh_entry(struct seq_file *seq, | |||
1321 | int hatype = dev ? dev->type : 0; | 1321 | int hatype = dev ? dev->type : 0; |
1322 | char tbuf[16]; | 1322 | char tbuf[16]; |
1323 | 1323 | ||
1324 | sprintf(tbuf, NIPQUAD_FMT, NIPQUAD(*(u32*)n->key)); | 1324 | sprintf(tbuf, "%pI4", n->key); |
1325 | seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n", | 1325 | seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n", |
1326 | tbuf, hatype, ATF_PUBL | ATF_PERM, "00:00:00:00:00:00", | 1326 | tbuf, hatype, ATF_PUBL | ATF_PERM, "00:00:00:00:00:00", |
1327 | dev ? dev->name : "*"); | 1327 | dev ? dev->name : "*"); |
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 2e78f6bd9775..e52799047a5f 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -490,7 +490,6 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def) | |||
490 | } | 490 | } |
491 | 491 | ||
492 | atomic_set(&doi_def->refcount, 1); | 492 | atomic_set(&doi_def->refcount, 1); |
493 | INIT_RCU_HEAD(&doi_def->rcu); | ||
494 | 493 | ||
495 | spin_lock(&cipso_v4_doi_list_lock); | 494 | spin_lock(&cipso_v4_doi_list_lock); |
496 | if (cipso_v4_doi_search(doi_def->doi) != NULL) | 495 | if (cipso_v4_doi_search(doi_def->doi) != NULL) |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 56fce3ab6c55..309997edc8a5 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -112,13 +112,7 @@ static inline void devinet_sysctl_unregister(struct in_device *idev) | |||
112 | 112 | ||
113 | static struct in_ifaddr *inet_alloc_ifa(void) | 113 | static struct in_ifaddr *inet_alloc_ifa(void) |
114 | { | 114 | { |
115 | struct in_ifaddr *ifa = kzalloc(sizeof(*ifa), GFP_KERNEL); | 115 | return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL); |
116 | |||
117 | if (ifa) { | ||
118 | INIT_RCU_HEAD(&ifa->rcu_head); | ||
119 | } | ||
120 | |||
121 | return ifa; | ||
122 | } | 116 | } |
123 | 117 | ||
124 | static void inet_rcu_free_ifa(struct rcu_head *head) | 118 | static void inet_rcu_free_ifa(struct rcu_head *head) |
@@ -161,7 +155,6 @@ static struct in_device *inetdev_init(struct net_device *dev) | |||
161 | in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL); | 155 | in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL); |
162 | if (!in_dev) | 156 | if (!in_dev) |
163 | goto out; | 157 | goto out; |
164 | INIT_RCU_HEAD(&in_dev->rcu_head); | ||
165 | memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt, | 158 | memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt, |
166 | sizeof(in_dev->cnf)); | 159 | sizeof(in_dev->cnf)); |
167 | in_dev->cnf.sysctl = NULL; | 160 | in_dev->cnf.sysctl = NULL; |
@@ -1108,7 +1101,7 @@ out: | |||
1108 | } | 1101 | } |
1109 | 1102 | ||
1110 | static struct notifier_block ip_netdev_notifier = { | 1103 | static struct notifier_block ip_netdev_notifier = { |
1111 | .notifier_call =inetdev_event, | 1104 | .notifier_call = inetdev_event, |
1112 | }; | 1105 | }; |
1113 | 1106 | ||
1114 | static inline size_t inet_nlmsg_size(void) | 1107 | static inline size_t inet_nlmsg_size(void) |
@@ -1195,7 +1188,7 @@ done: | |||
1195 | return skb->len; | 1188 | return skb->len; |
1196 | } | 1189 | } |
1197 | 1190 | ||
1198 | static void rtmsg_ifa(int event, struct in_ifaddr* ifa, struct nlmsghdr *nlh, | 1191 | static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh, |
1199 | u32 pid) | 1192 | u32 pid) |
1200 | { | 1193 | { |
1201 | struct sk_buff *skb; | 1194 | struct sk_buff *skb; |
@@ -1262,7 +1255,7 @@ static void inet_forward_change(struct net *net) | |||
1262 | } | 1255 | } |
1263 | 1256 | ||
1264 | static int devinet_conf_proc(ctl_table *ctl, int write, | 1257 | static int devinet_conf_proc(ctl_table *ctl, int write, |
1265 | struct file* filp, void __user *buffer, | 1258 | struct file *filp, void __user *buffer, |
1266 | size_t *lenp, loff_t *ppos) | 1259 | size_t *lenp, loff_t *ppos) |
1267 | { | 1260 | { |
1268 | int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos); | 1261 | int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos); |
@@ -1334,7 +1327,7 @@ static int devinet_conf_sysctl(ctl_table *table, | |||
1334 | } | 1327 | } |
1335 | 1328 | ||
1336 | static int devinet_sysctl_forward(ctl_table *ctl, int write, | 1329 | static int devinet_sysctl_forward(ctl_table *ctl, int write, |
1337 | struct file* filp, void __user *buffer, | 1330 | struct file *filp, void __user *buffer, |
1338 | size_t *lenp, loff_t *ppos) | 1331 | size_t *lenp, loff_t *ppos) |
1339 | { | 1332 | { |
1340 | int *valp = ctl->data; | 1333 | int *valp = ctl->data; |
@@ -1363,7 +1356,7 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write, | |||
1363 | } | 1356 | } |
1364 | 1357 | ||
1365 | int ipv4_doint_and_flush(ctl_table *ctl, int write, | 1358 | int ipv4_doint_and_flush(ctl_table *ctl, int write, |
1366 | struct file* filp, void __user *buffer, | 1359 | struct file *filp, void __user *buffer, |
1367 | size_t *lenp, loff_t *ppos) | 1360 | size_t *lenp, loff_t *ppos) |
1368 | { | 1361 | { |
1369 | int *valp = ctl->data; | 1362 | int *valp = ctl->data; |
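The devinet.c hunks above (and the cipso_ipv4.c one) drop INIT_RCU_HEAD() because a struct rcu_head needs no initialization before it is handed to call_rcu(); zeroed memory from kzalloc() is already enough. A sketch of the resulting allocate/free pattern, using hypothetical demo_* names rather than the real in_ifaddr code:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct demo_ifa {
        struct rcu_head rcu_head;
        __be32 addr;
};

static void demo_ifa_free(struct rcu_head *head)
{
        /* Recover the enclosing object from the embedded rcu_head. */
        kfree(container_of(head, struct demo_ifa, rcu_head));
}

static struct demo_ifa *demo_ifa_alloc(void)
{
        /* No INIT_RCU_HEAD() needed; zeroed memory is sufficient. */
        return kzalloc(sizeof(struct demo_ifa), GFP_KERNEL);
}

static void demo_ifa_release(struct demo_ifa *ifa)
{
        /* Defer the kfree() until a grace period has elapsed. */
        call_rcu(&ifa->rcu_head, demo_ifa_free);
}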
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 21515d4c49eb..95a9c65003f8 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -413,8 +413,8 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu) | |||
413 | 413 | ||
414 | static void esp4_err(struct sk_buff *skb, u32 info) | 414 | static void esp4_err(struct sk_buff *skb, u32 info) |
415 | { | 415 | { |
416 | struct iphdr *iph = (struct iphdr*)skb->data; | 416 | struct iphdr *iph = (struct iphdr *)skb->data; |
417 | struct ip_esp_hdr *esph = (struct ip_esp_hdr*)(skb->data+(iph->ihl<<2)); | 417 | struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2)); |
418 | struct xfrm_state *x; | 418 | struct xfrm_state *x; |
419 | 419 | ||
420 | if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH || | 420 | if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH || |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 65c1503f8cc8..741e4fa3e474 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -578,7 +578,7 @@ errout: | |||
578 | return err; | 578 | return err; |
579 | } | 579 | } |
580 | 580 | ||
581 | static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | 581 | static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) |
582 | { | 582 | { |
583 | struct net *net = sock_net(skb->sk); | 583 | struct net *net = sock_net(skb->sk); |
584 | struct fib_config cfg; | 584 | struct fib_config cfg; |
@@ -600,7 +600,7 @@ errout: | |||
600 | return err; | 600 | return err; |
601 | } | 601 | } |
602 | 602 | ||
603 | static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | 603 | static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) |
604 | { | 604 | { |
605 | struct net *net = sock_net(skb->sk); | 605 | struct net *net = sock_net(skb->sk); |
606 | struct fib_config cfg; | 606 | struct fib_config cfg; |
@@ -903,7 +903,7 @@ static void fib_disable_ip(struct net_device *dev, int force) | |||
903 | 903 | ||
904 | static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) | 904 | static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) |
905 | { | 905 | { |
906 | struct in_ifaddr *ifa = (struct in_ifaddr*)ptr; | 906 | struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; |
907 | struct net_device *dev = ifa->ifa_dev->dev; | 907 | struct net_device *dev = ifa->ifa_dev->dev; |
908 | 908 | ||
909 | switch (event) { | 909 | switch (event) { |
@@ -964,11 +964,11 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo | |||
964 | } | 964 | } |
965 | 965 | ||
966 | static struct notifier_block fib_inetaddr_notifier = { | 966 | static struct notifier_block fib_inetaddr_notifier = { |
967 | .notifier_call =fib_inetaddr_event, | 967 | .notifier_call = fib_inetaddr_event, |
968 | }; | 968 | }; |
969 | 969 | ||
970 | static struct notifier_block fib_netdev_notifier = { | 970 | static struct notifier_block fib_netdev_notifier = { |
971 | .notifier_call =fib_netdev_event, | 971 | .notifier_call = fib_netdev_event, |
972 | }; | 972 | }; |
973 | 973 | ||
974 | static int __net_init ip_fib_net_init(struct net *net) | 974 | static int __net_init ip_fib_net_init(struct net *net) |
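The fib_frontend.c notifier blocks above change only whitespace: ".notifier_call =fn" becomes ".notifier_call = fn", matching kernel coding style. For context, a sketch of the notifier pattern being touched, with hypothetical demo_* names:

#include <linux/notifier.h>
#include <linux/inetdevice.h>

static int demo_inetaddr_event(struct notifier_block *this,
                               unsigned long event, void *ptr)
{
        struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;

        /* React here to address add/remove events (NETDEV_UP/NETDEV_DOWN). */
        (void)ifa;
        return NOTIFY_DONE;
}

static struct notifier_block demo_inetaddr_notifier = {
        .notifier_call = demo_inetaddr_event,
};

/* Registered with register_inetaddr_notifier(&demo_inetaddr_notifier). */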
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index c8cac6c7f881..ded8c44fb848 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -247,7 +247,7 @@ fn_hash_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result | |||
247 | { | 247 | { |
248 | int err; | 248 | int err; |
249 | struct fn_zone *fz; | 249 | struct fn_zone *fz; |
250 | struct fn_hash *t = (struct fn_hash*)tb->tb_data; | 250 | struct fn_hash *t = (struct fn_hash *)tb->tb_data; |
251 | 251 | ||
252 | read_lock(&fib_hash_lock); | 252 | read_lock(&fib_hash_lock); |
253 | for (fz = t->fn_zone_list; fz; fz = fz->fz_next) { | 253 | for (fz = t->fn_zone_list; fz; fz = fz->fz_next) { |
@@ -283,7 +283,7 @@ fn_hash_select_default(struct fib_table *tb, const struct flowi *flp, struct fib | |||
283 | struct fib_node *f; | 283 | struct fib_node *f; |
284 | struct fib_info *fi = NULL; | 284 | struct fib_info *fi = NULL; |
285 | struct fib_info *last_resort; | 285 | struct fib_info *last_resort; |
286 | struct fn_hash *t = (struct fn_hash*)tb->tb_data; | 286 | struct fn_hash *t = (struct fn_hash *)tb->tb_data; |
287 | struct fn_zone *fz = t->fn_zones[0]; | 287 | struct fn_zone *fz = t->fn_zones[0]; |
288 | 288 | ||
289 | if (fz == NULL) | 289 | if (fz == NULL) |
@@ -548,7 +548,7 @@ out: | |||
548 | 548 | ||
549 | static int fn_hash_delete(struct fib_table *tb, struct fib_config *cfg) | 549 | static int fn_hash_delete(struct fib_table *tb, struct fib_config *cfg) |
550 | { | 550 | { |
551 | struct fn_hash *table = (struct fn_hash*)tb->tb_data; | 551 | struct fn_hash *table = (struct fn_hash *)tb->tb_data; |
552 | struct fib_node *f; | 552 | struct fib_node *f; |
553 | struct fib_alias *fa, *fa_to_delete; | 553 | struct fib_alias *fa, *fa_to_delete; |
554 | struct fn_zone *fz; | 554 | struct fn_zone *fz; |
@@ -748,7 +748,7 @@ static int fn_hash_dump(struct fib_table *tb, struct sk_buff *skb, struct netlin | |||
748 | { | 748 | { |
749 | int m, s_m; | 749 | int m, s_m; |
750 | struct fn_zone *fz; | 750 | struct fn_zone *fz; |
751 | struct fn_hash *table = (struct fn_hash*)tb->tb_data; | 751 | struct fn_hash *table = (struct fn_hash *)tb->tb_data; |
752 | 752 | ||
753 | s_m = cb->args[2]; | 753 | s_m = cb->args[2]; |
754 | read_lock(&fib_hash_lock); | 754 | read_lock(&fib_hash_lock); |
@@ -845,10 +845,10 @@ static struct fib_alias *fib_get_first(struct seq_file *seq) | |||
845 | struct hlist_node *node; | 845 | struct hlist_node *node; |
846 | struct fib_node *fn; | 846 | struct fib_node *fn; |
847 | 847 | ||
848 | hlist_for_each_entry(fn,node,iter->hash_head,fn_hash) { | 848 | hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) { |
849 | struct fib_alias *fa; | 849 | struct fib_alias *fa; |
850 | 850 | ||
851 | list_for_each_entry(fa,&fn->fn_alias,fa_list) { | 851 | list_for_each_entry(fa, &fn->fn_alias, fa_list) { |
852 | iter->fn = fn; | 852 | iter->fn = fn; |
853 | iter->fa = fa; | 853 | iter->fa = fa; |
854 | goto out; | 854 | goto out; |
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index ded2ae34eab1..4817dea3bc73 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -63,16 +63,16 @@ static DEFINE_SPINLOCK(fib_multipath_lock); | |||
63 | for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++) | 63 | for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++) |
64 | 64 | ||
65 | #define change_nexthops(fi) { int nhsel; struct fib_nh * nh; \ | 65 | #define change_nexthops(fi) { int nhsel; struct fib_nh * nh; \ |
66 | for (nhsel=0, nh = (struct fib_nh*)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++) | 66 | for (nhsel=0, nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++) |
67 | 67 | ||
68 | #else /* CONFIG_IP_ROUTE_MULTIPATH */ | 68 | #else /* CONFIG_IP_ROUTE_MULTIPATH */ |
69 | 69 | ||
70 | /* Hope, that gcc will optimize it to get rid of dummy loop */ | 70 | /* Hope, that gcc will optimize it to get rid of dummy loop */ |
71 | 71 | ||
72 | #define for_nexthops(fi) { int nhsel=0; const struct fib_nh * nh = (fi)->fib_nh; \ | 72 | #define for_nexthops(fi) { int nhsel = 0; const struct fib_nh * nh = (fi)->fib_nh; \ |
73 | for (nhsel=0; nhsel < 1; nhsel++) | 73 | for (nhsel=0; nhsel < 1; nhsel++) |
74 | 74 | ||
75 | #define change_nexthops(fi) { int nhsel=0; struct fib_nh * nh = (struct fib_nh*)((fi)->fib_nh); \ | 75 | #define change_nexthops(fi) { int nhsel = 0; struct fib_nh * nh = (struct fib_nh *)((fi)->fib_nh); \ |
76 | for (nhsel=0; nhsel < 1; nhsel++) | 76 | for (nhsel=0; nhsel < 1; nhsel++) |
77 | 77 | ||
78 | #endif /* CONFIG_IP_ROUTE_MULTIPATH */ | 78 | #endif /* CONFIG_IP_ROUTE_MULTIPATH */ |
@@ -358,7 +358,7 @@ int fib_detect_death(struct fib_info *fi, int order, | |||
358 | state = n->nud_state; | 358 | state = n->nud_state; |
359 | neigh_release(n); | 359 | neigh_release(n); |
360 | } | 360 | } |
361 | if (state==NUD_REACHABLE) | 361 | if (state == NUD_REACHABLE) |
362 | return 0; | 362 | return 0; |
363 | if ((state&NUD_VALID) && order != dflt) | 363 | if ((state&NUD_VALID) && order != dflt) |
364 | return 0; | 364 | return 0; |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 5cb72786a8af..ec0ae490f0b6 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2399,8 +2399,8 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v) | |||
2399 | __be32 prf = htonl(mask_pfx(tn->key, tn->pos)); | 2399 | __be32 prf = htonl(mask_pfx(tn->key, tn->pos)); |
2400 | 2400 | ||
2401 | seq_indent(seq, iter->depth-1); | 2401 | seq_indent(seq, iter->depth-1); |
2402 | seq_printf(seq, " +-- " NIPQUAD_FMT "/%d %d %d %d\n", | 2402 | seq_printf(seq, " +-- %pI4/%d %d %d %d\n", |
2403 | NIPQUAD(prf), tn->pos, tn->bits, tn->full_children, | 2403 | &prf, tn->pos, tn->bits, tn->full_children, |
2404 | tn->empty_children); | 2404 | tn->empty_children); |
2405 | 2405 | ||
2406 | } else { | 2406 | } else { |
@@ -2410,7 +2410,7 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v) | |||
2410 | __be32 val = htonl(l->key); | 2410 | __be32 val = htonl(l->key); |
2411 | 2411 | ||
2412 | seq_indent(seq, iter->depth); | 2412 | seq_indent(seq, iter->depth); |
2413 | seq_printf(seq, " |-- " NIPQUAD_FMT "\n", NIPQUAD(val)); | 2413 | seq_printf(seq, " |-- %pI4\n", &val); |
2414 | 2414 | ||
2415 | hlist_for_each_entry_rcu(li, node, &l->list, hlist) { | 2415 | hlist_for_each_entry_rcu(li, node, &l->list, hlist) { |
2416 | struct fib_alias *fa; | 2416 | struct fib_alias *fa; |
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 72b2de76f1cd..21e497efbd7f 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -683,10 +683,8 @@ static void icmp_unreach(struct sk_buff *skb) | |||
683 | break; | 683 | break; |
684 | case ICMP_FRAG_NEEDED: | 684 | case ICMP_FRAG_NEEDED: |
685 | if (ipv4_config.no_pmtu_disc) { | 685 | if (ipv4_config.no_pmtu_disc) { |
686 | LIMIT_NETDEBUG(KERN_INFO "ICMP: " NIPQUAD_FMT ": " | 686 | LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: fragmentation needed and DF set.\n", |
687 | "fragmentation needed " | 687 | &iph->daddr); |
688 | "and DF set.\n", | ||
689 | NIPQUAD(iph->daddr)); | ||
690 | } else { | 688 | } else { |
691 | info = ip_rt_frag_needed(net, iph, | 689 | info = ip_rt_frag_needed(net, iph, |
692 | ntohs(icmph->un.frag.mtu), | 690 | ntohs(icmph->un.frag.mtu), |
@@ -696,9 +694,8 @@ static void icmp_unreach(struct sk_buff *skb) | |||
696 | } | 694 | } |
697 | break; | 695 | break; |
698 | case ICMP_SR_FAILED: | 696 | case ICMP_SR_FAILED: |
699 | LIMIT_NETDEBUG(KERN_INFO "ICMP: " NIPQUAD_FMT ": Source " | 697 | LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: Source Route Failed.\n", |
700 | "Route Failed.\n", | 698 | &iph->daddr); |
701 | NIPQUAD(iph->daddr)); | ||
702 | break; | 699 | break; |
703 | default: | 700 | default: |
704 | break; | 701 | break; |
@@ -729,12 +726,12 @@ static void icmp_unreach(struct sk_buff *skb) | |||
729 | if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses && | 726 | if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses && |
730 | inet_addr_type(net, iph->daddr) == RTN_BROADCAST) { | 727 | inet_addr_type(net, iph->daddr) == RTN_BROADCAST) { |
731 | if (net_ratelimit()) | 728 | if (net_ratelimit()) |
732 | printk(KERN_WARNING NIPQUAD_FMT " sent an invalid ICMP " | 729 | printk(KERN_WARNING "%pI4 sent an invalid ICMP " |
733 | "type %u, code %u " | 730 | "type %u, code %u " |
734 | "error to a broadcast: " NIPQUAD_FMT " on %s\n", | 731 | "error to a broadcast: %pI4 on %s\n", |
735 | NIPQUAD(ip_hdr(skb)->saddr), | 732 | &ip_hdr(skb)->saddr, |
736 | icmph->type, icmph->code, | 733 | icmph->type, icmph->code, |
737 | NIPQUAD(iph->daddr), | 734 | &iph->daddr, |
738 | skb->dev->name); | 735 | skb->dev->name); |
739 | goto out; | 736 | goto out; |
740 | } | 737 | } |
@@ -952,9 +949,8 @@ static void icmp_address_reply(struct sk_buff *skb) | |||
952 | break; | 949 | break; |
953 | } | 950 | } |
954 | if (!ifa && net_ratelimit()) { | 951 | if (!ifa && net_ratelimit()) { |
955 | printk(KERN_INFO "Wrong address mask " NIPQUAD_FMT " from " | 952 | printk(KERN_INFO "Wrong address mask %pI4 from %s/%pI4\n", |
956 | "%s/" NIPQUAD_FMT "\n", | 953 | mp, dev->name, &rt->rt_src); |
957 | NIPQUAD(*mp), dev->name, NIPQUAD(rt->rt_src)); | ||
958 | } | 954 | } |
959 | } | 955 | } |
960 | rcu_read_unlock(); | 956 | rcu_read_unlock(); |
@@ -976,9 +972,10 @@ int icmp_rcv(struct sk_buff *skb) | |||
976 | struct net *net = dev_net(rt->u.dst.dev); | 972 | struct net *net = dev_net(rt->u.dst.dev); |
977 | 973 | ||
978 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { | 974 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { |
975 | struct sec_path *sp = skb_sec_path(skb); | ||
979 | int nh; | 976 | int nh; |
980 | 977 | ||
981 | if (!(skb->sp && skb->sp->xvec[skb->sp->len - 1]->props.flags & | 978 | if (!(sp && sp->xvec[sp->len - 1]->props.flags & |
982 | XFRM_STATE_ICMP)) | 979 | XFRM_STATE_ICMP)) |
983 | goto drop; | 980 | goto drop; |
984 | 981 | ||
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index a0d86455c53e..f92733e15c9f 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -167,7 +167,7 @@ static __inline__ void igmp_stop_timer(struct ip_mc_list *im) | |||
167 | spin_lock_bh(&im->lock); | 167 | spin_lock_bh(&im->lock); |
168 | if (del_timer(&im->timer)) | 168 | if (del_timer(&im->timer)) |
169 | atomic_dec(&im->refcnt); | 169 | atomic_dec(&im->refcnt); |
170 | im->tm_running=0; | 170 | im->tm_running = 0; |
171 | im->reporter = 0; | 171 | im->reporter = 0; |
172 | im->unsolicit_count = 0; | 172 | im->unsolicit_count = 0; |
173 | spin_unlock_bh(&im->lock); | 173 | spin_unlock_bh(&im->lock); |
@@ -176,9 +176,9 @@ static __inline__ void igmp_stop_timer(struct ip_mc_list *im) | |||
176 | /* It must be called with locked im->lock */ | 176 | /* It must be called with locked im->lock */ |
177 | static void igmp_start_timer(struct ip_mc_list *im, int max_delay) | 177 | static void igmp_start_timer(struct ip_mc_list *im, int max_delay) |
178 | { | 178 | { |
179 | int tv=net_random() % max_delay; | 179 | int tv = net_random() % max_delay; |
180 | 180 | ||
181 | im->tm_running=1; | 181 | im->tm_running = 1; |
182 | if (!mod_timer(&im->timer, jiffies+tv+2)) | 182 | if (!mod_timer(&im->timer, jiffies+tv+2)) |
183 | atomic_inc(&im->refcnt); | 183 | atomic_inc(&im->refcnt); |
184 | } | 184 | } |
@@ -207,7 +207,7 @@ static void igmp_mod_timer(struct ip_mc_list *im, int max_delay) | |||
207 | if (del_timer(&im->timer)) { | 207 | if (del_timer(&im->timer)) { |
208 | if ((long)(im->timer.expires-jiffies) < max_delay) { | 208 | if ((long)(im->timer.expires-jiffies) < max_delay) { |
209 | add_timer(&im->timer); | 209 | add_timer(&im->timer); |
210 | im->tm_running=1; | 210 | im->tm_running = 1; |
211 | spin_unlock_bh(&im->lock); | 211 | spin_unlock_bh(&im->lock); |
212 | return; | 212 | return; |
213 | } | 213 | } |
@@ -358,7 +358,7 @@ static int igmpv3_sendpack(struct sk_buff *skb) | |||
358 | 358 | ||
359 | static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel) | 359 | static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel) |
360 | { | 360 | { |
361 | return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc,type,gdel,sdel); | 361 | return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc, type, gdel, sdel); |
362 | } | 362 | } |
363 | 363 | ||
364 | static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc, | 364 | static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc, |
@@ -653,7 +653,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, | |||
653 | return -1; | 653 | return -1; |
654 | } | 654 | } |
655 | 655 | ||
656 | skb=alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); | 656 | skb = alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); |
657 | if (skb == NULL) { | 657 | if (skb == NULL) { |
658 | ip_rt_put(rt); | 658 | ip_rt_put(rt); |
659 | return -1; | 659 | return -1; |
@@ -682,11 +682,11 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, | |||
682 | ((u8*)&iph[1])[3] = 0; | 682 | ((u8*)&iph[1])[3] = 0; |
683 | 683 | ||
684 | ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr)); | 684 | ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr)); |
685 | ih->type=type; | 685 | ih->type = type; |
686 | ih->code=0; | 686 | ih->code = 0; |
687 | ih->csum=0; | 687 | ih->csum = 0; |
688 | ih->group=group; | 688 | ih->group = group; |
689 | ih->csum=ip_compute_csum((void *)ih, sizeof(struct igmphdr)); | 689 | ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr)); |
690 | 690 | ||
691 | return ip_local_out(skb); | 691 | return ip_local_out(skb); |
692 | } | 692 | } |
@@ -728,7 +728,7 @@ static void igmp_timer_expire(unsigned long data) | |||
728 | struct in_device *in_dev = im->interface; | 728 | struct in_device *in_dev = im->interface; |
729 | 729 | ||
730 | spin_lock(&im->lock); | 730 | spin_lock(&im->lock); |
731 | im->tm_running=0; | 731 | im->tm_running = 0; |
732 | 732 | ||
733 | if (im->unsolicit_count) { | 733 | if (im->unsolicit_count) { |
734 | im->unsolicit_count--; | 734 | im->unsolicit_count--; |
@@ -997,7 +997,7 @@ static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr) | |||
997 | --ANK | 997 | --ANK |
998 | */ | 998 | */ |
999 | if (arp_mc_map(addr, buf, dev, 0) == 0) | 999 | if (arp_mc_map(addr, buf, dev, 0) == 0) |
1000 | dev_mc_add(dev,buf,dev->addr_len,0); | 1000 | dev_mc_add(dev, buf, dev->addr_len, 0); |
1001 | } | 1001 | } |
1002 | 1002 | ||
1003 | /* | 1003 | /* |
@@ -1010,7 +1010,7 @@ static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr) | |||
1010 | struct net_device *dev = in_dev->dev; | 1010 | struct net_device *dev = in_dev->dev; |
1011 | 1011 | ||
1012 | if (arp_mc_map(addr, buf, dev, 0) == 0) | 1012 | if (arp_mc_map(addr, buf, dev, 0) == 0) |
1013 | dev_mc_delete(dev,buf,dev->addr_len,0); | 1013 | dev_mc_delete(dev, buf, dev->addr_len, 0); |
1014 | } | 1014 | } |
1015 | 1015 | ||
1016 | #ifdef CONFIG_IP_MULTICAST | 1016 | #ifdef CONFIG_IP_MULTICAST |
@@ -1210,10 +1210,10 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) | |||
1210 | if (!im) | 1210 | if (!im) |
1211 | goto out; | 1211 | goto out; |
1212 | 1212 | ||
1213 | im->users=1; | 1213 | im->users = 1; |
1214 | im->interface=in_dev; | 1214 | im->interface = in_dev; |
1215 | in_dev_hold(in_dev); | 1215 | in_dev_hold(in_dev); |
1216 | im->multiaddr=addr; | 1216 | im->multiaddr = addr; |
1217 | /* initial mode is (EX, empty) */ | 1217 | /* initial mode is (EX, empty) */ |
1218 | im->sfmode = MCAST_EXCLUDE; | 1218 | im->sfmode = MCAST_EXCLUDE; |
1219 | im->sfcount[MCAST_INCLUDE] = 0; | 1219 | im->sfcount[MCAST_INCLUDE] = 0; |
@@ -1224,7 +1224,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) | |||
1224 | atomic_set(&im->refcnt, 1); | 1224 | atomic_set(&im->refcnt, 1); |
1225 | spin_lock_init(&im->lock); | 1225 | spin_lock_init(&im->lock); |
1226 | #ifdef CONFIG_IP_MULTICAST | 1226 | #ifdef CONFIG_IP_MULTICAST |
1227 | im->tm_running=0; | 1227 | im->tm_running = 0; |
1228 | setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im); | 1228 | setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im); |
1229 | im->unsolicit_count = IGMP_Unsolicited_Report_Count; | 1229 | im->unsolicit_count = IGMP_Unsolicited_Report_Count; |
1230 | im->reporter = 0; | 1230 | im->reporter = 0; |
@@ -1232,8 +1232,8 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) | |||
1232 | #endif | 1232 | #endif |
1233 | im->loaded = 0; | 1233 | im->loaded = 0; |
1234 | write_lock_bh(&in_dev->mc_list_lock); | 1234 | write_lock_bh(&in_dev->mc_list_lock); |
1235 | im->next=in_dev->mc_list; | 1235 | im->next = in_dev->mc_list; |
1236 | in_dev->mc_list=im; | 1236 | in_dev->mc_list = im; |
1237 | in_dev->mc_count++; | 1237 | in_dev->mc_count++; |
1238 | write_unlock_bh(&in_dev->mc_list_lock); | 1238 | write_unlock_bh(&in_dev->mc_list_lock); |
1239 | #ifdef CONFIG_IP_MULTICAST | 1239 | #ifdef CONFIG_IP_MULTICAST |
@@ -1279,7 +1279,7 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) | |||
1279 | ASSERT_RTNL(); | 1279 | ASSERT_RTNL(); |
1280 | 1280 | ||
1281 | for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) { | 1281 | for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) { |
1282 | if (i->multiaddr==addr) { | 1282 | if (i->multiaddr == addr) { |
1283 | if (--i->users == 0) { | 1283 | if (--i->users == 0) { |
1284 | write_lock_bh(&in_dev->mc_list_lock); | 1284 | write_lock_bh(&in_dev->mc_list_lock); |
1285 | *ip = i->next; | 1285 | *ip = i->next; |
@@ -1738,7 +1738,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr) | |||
1738 | { | 1738 | { |
1739 | int err; | 1739 | int err; |
1740 | __be32 addr = imr->imr_multiaddr.s_addr; | 1740 | __be32 addr = imr->imr_multiaddr.s_addr; |
1741 | struct ip_mc_socklist *iml=NULL, *i; | 1741 | struct ip_mc_socklist *iml = NULL, *i; |
1742 | struct in_device *in_dev; | 1742 | struct in_device *in_dev; |
1743 | struct inet_sock *inet = inet_sk(sk); | 1743 | struct inet_sock *inet = inet_sk(sk); |
1744 | struct net *net = sock_net(sk); | 1744 | struct net *net = sock_net(sk); |
@@ -1769,7 +1769,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr) | |||
1769 | err = -ENOBUFS; | 1769 | err = -ENOBUFS; |
1770 | if (count >= sysctl_igmp_max_memberships) | 1770 | if (count >= sysctl_igmp_max_memberships) |
1771 | goto done; | 1771 | goto done; |
1772 | iml = sock_kmalloc(sk,sizeof(*iml),GFP_KERNEL); | 1772 | iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL); |
1773 | if (iml == NULL) | 1773 | if (iml == NULL) |
1774 | goto done; | 1774 | goto done; |
1775 | 1775 | ||
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index bd1278a2d828..36f4cbc7da3a 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -323,7 +323,7 @@ void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len) | |||
323 | 323 | ||
324 | EXPORT_SYMBOL(inet_csk_reset_keepalive_timer); | 324 | EXPORT_SYMBOL(inet_csk_reset_keepalive_timer); |
325 | 325 | ||
326 | struct dst_entry* inet_csk_route_req(struct sock *sk, | 326 | struct dst_entry *inet_csk_route_req(struct sock *sk, |
327 | const struct request_sock *req) | 327 | const struct request_sock *req) |
328 | { | 328 | { |
329 | struct rtable *rt; | 329 | struct rtable *rt; |
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index a456ceeac3f2..b1fbe18feb5a 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -144,7 +144,7 @@ static void unlink_from_unused(struct inet_peer *p) | |||
144 | * _stack is known to be NULL or not at compile time, | 144 | * _stack is known to be NULL or not at compile time, |
145 | * so compiler will optimize the if (_stack) tests. | 145 | * so compiler will optimize the if (_stack) tests. |
146 | */ | 146 | */ |
147 | #define lookup(_daddr,_stack) \ | 147 | #define lookup(_daddr, _stack) \ |
148 | ({ \ | 148 | ({ \ |
149 | struct inet_peer *u, **v; \ | 149 | struct inet_peer *u, **v; \ |
150 | if (_stack != NULL) { \ | 150 | if (_stack != NULL) { \ |
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 450016b89a18..df3fe50bbf0d 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -106,7 +106,7 @@ int ip_forward(struct sk_buff *skb) | |||
106 | * We now generate an ICMP HOST REDIRECT giving the route | 106 | * We now generate an ICMP HOST REDIRECT giving the route |
107 | * we calculated. | 107 | * we calculated. |
108 | */ | 108 | */ |
109 | if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb->sp) | 109 | if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb)) |
110 | ip_rt_send_redirect(skb); | 110 | ip_rt_send_redirect(skb); |
111 | 111 | ||
112 | skb->priority = rt_tos2priority(iph->tos); | 112 | skb->priority = rt_tos2priority(iph->tos); |
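The ip_forward.c hunk above, like the icmp.c one earlier, replaces a direct skb->sp dereference with the skb_sec_path() accessor, which returns the packet's IPsec sec_path or NULL when none is attached (or when XFRM support is not built in). A sketch of the accessor in use, with a hypothetical helper name:

#include <linux/skbuff.h>
#include <net/xfrm.h>

static bool demo_skb_was_ipsec_protected(struct sk_buff *skb)
{
        struct sec_path *sp = skb_sec_path(skb);

        /*
         * A non-empty sec_path means the packet arrived through at least
         * one IPsec transform.
         */
        return sp && sp->len > 0;
}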
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index e4f81f54befe..6659ac000eeb 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -56,7 +56,7 @@ struct ipfrag_skb_cb | |||
56 | int offset; | 56 | int offset; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | #define FRAG_CB(skb) ((struct ipfrag_skb_cb*)((skb)->cb)) | 59 | #define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb)) |
60 | 60 | ||
61 | /* Describe an entry in the "incomplete datagrams" queue. */ | 61 | /* Describe an entry in the "incomplete datagrams" queue. */ |
62 | struct ipq { | 62 | struct ipq { |
@@ -559,9 +559,8 @@ out_nomem: | |||
559 | goto out_fail; | 559 | goto out_fail; |
560 | out_oversize: | 560 | out_oversize: |
561 | if (net_ratelimit()) | 561 | if (net_ratelimit()) |
562 | printk(KERN_INFO | 562 | printk(KERN_INFO "Oversized IP packet from %pI4.\n", |
563 | "Oversized IP packet from " NIPQUAD_FMT ".\n", | 563 | &qp->saddr); |
564 | NIPQUAD(qp->saddr)); | ||
565 | out_fail: | 564 | out_fail: |
566 | IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMFAILS); | 565 | IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMFAILS); |
567 | return err; | 566 | return err; |
@@ -608,7 +607,7 @@ static struct ctl_table ip4_frags_ns_ctl_table[] = { | |||
608 | .data = &init_net.ipv4.frags.high_thresh, | 607 | .data = &init_net.ipv4.frags.high_thresh, |
609 | .maxlen = sizeof(int), | 608 | .maxlen = sizeof(int), |
610 | .mode = 0644, | 609 | .mode = 0644, |
611 | .proc_handler = &proc_dointvec | 610 | .proc_handler = proc_dointvec |
612 | }, | 611 | }, |
613 | { | 612 | { |
614 | .ctl_name = NET_IPV4_IPFRAG_LOW_THRESH, | 613 | .ctl_name = NET_IPV4_IPFRAG_LOW_THRESH, |
@@ -616,7 +615,7 @@ static struct ctl_table ip4_frags_ns_ctl_table[] = { | |||
616 | .data = &init_net.ipv4.frags.low_thresh, | 615 | .data = &init_net.ipv4.frags.low_thresh, |
617 | .maxlen = sizeof(int), | 616 | .maxlen = sizeof(int), |
618 | .mode = 0644, | 617 | .mode = 0644, |
619 | .proc_handler = &proc_dointvec | 618 | .proc_handler = proc_dointvec |
620 | }, | 619 | }, |
621 | { | 620 | { |
622 | .ctl_name = NET_IPV4_IPFRAG_TIME, | 621 | .ctl_name = NET_IPV4_IPFRAG_TIME, |
@@ -624,8 +623,8 @@ static struct ctl_table ip4_frags_ns_ctl_table[] = { | |||
624 | .data = &init_net.ipv4.frags.timeout, | 623 | .data = &init_net.ipv4.frags.timeout, |
625 | .maxlen = sizeof(int), | 624 | .maxlen = sizeof(int), |
626 | .mode = 0644, | 625 | .mode = 0644, |
627 | .proc_handler = &proc_dointvec_jiffies, | 626 | .proc_handler = proc_dointvec_jiffies, |
628 | .strategy = &sysctl_jiffies | 627 | .strategy = sysctl_jiffies |
629 | }, | 628 | }, |
630 | { } | 629 | { } |
631 | }; | 630 | }; |
@@ -637,15 +636,15 @@ static struct ctl_table ip4_frags_ctl_table[] = { | |||
637 | .data = &ip4_frags.secret_interval, | 636 | .data = &ip4_frags.secret_interval, |
638 | .maxlen = sizeof(int), | 637 | .maxlen = sizeof(int), |
639 | .mode = 0644, | 638 | .mode = 0644, |
640 | .proc_handler = &proc_dointvec_jiffies, | 639 | .proc_handler = proc_dointvec_jiffies, |
641 | .strategy = &sysctl_jiffies | 640 | .strategy = sysctl_jiffies |
642 | }, | 641 | }, |
643 | { | 642 | { |
644 | .procname = "ipfrag_max_dist", | 643 | .procname = "ipfrag_max_dist", |
645 | .data = &sysctl_ipfrag_max_dist, | 644 | .data = &sysctl_ipfrag_max_dist, |
646 | .maxlen = sizeof(int), | 645 | .maxlen = sizeof(int), |
647 | .mode = 0644, | 646 | .mode = 0644, |
648 | .proc_handler = &proc_dointvec_minmax, | 647 | .proc_handler = proc_dointvec_minmax, |
649 | .extra1 = &zero | 648 | .extra1 = &zero |
650 | }, | 649 | }, |
651 | { } | 650 | { } |
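The ip_fragment.c sysctl tables above drop the "&" in front of proc_dointvec and friends; a function designator already decays to a function pointer, so both spellings store the same value and the shorter form is the one being standardized on. A sketch of an entry in the preferred style (hypothetical "demo_value" knob):

#include <linux/sysctl.h>

static int demo_value;

static struct ctl_table demo_table[] = {
        {
                .procname       = "demo_value",
                .data           = &demo_value,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,        /* not &proc_dointvec */
        },
        { }
};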
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 85c487b8572b..191ef7588134 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -371,7 +371,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info) | |||
371 | by themself??? | 371 | by themself??? |
372 | */ | 372 | */ |
373 | 373 | ||
374 | struct iphdr *iph = (struct iphdr*)skb->data; | 374 | struct iphdr *iph = (struct iphdr *)skb->data; |
375 | __be16 *p = (__be16*)(skb->data+(iph->ihl<<2)); | 375 | __be16 *p = (__be16*)(skb->data+(iph->ihl<<2)); |
376 | int grehlen = (iph->ihl<<2) + 4; | 376 | int grehlen = (iph->ihl<<2) + 4; |
377 | const int type = icmp_hdr(skb)->type; | 377 | const int type = icmp_hdr(skb)->type; |
@@ -632,7 +632,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
632 | 632 | ||
633 | if (dev->header_ops && dev->type == ARPHRD_IPGRE) { | 633 | if (dev->header_ops && dev->type == ARPHRD_IPGRE) { |
634 | gre_hlen = 0; | 634 | gre_hlen = 0; |
635 | tiph = (struct iphdr*)skb->data; | 635 | tiph = (struct iphdr *)skb->data; |
636 | } else { | 636 | } else { |
637 | gre_hlen = tunnel->hlen; | 637 | gre_hlen = tunnel->hlen; |
638 | tiph = &tunnel->parms.iph; | 638 | tiph = &tunnel->parms.iph; |
@@ -660,7 +660,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
660 | if (neigh == NULL) | 660 | if (neigh == NULL) |
661 | goto tx_error; | 661 | goto tx_error; |
662 | 662 | ||
663 | addr6 = (struct in6_addr*)&neigh->primary_key; | 663 | addr6 = (struct in6_addr *)&neigh->primary_key; |
664 | addr_type = ipv6_addr_type(addr6); | 664 | addr_type = ipv6_addr_type(addr6); |
665 | 665 | ||
666 | if (addr_type == IPV6_ADDR_ANY) { | 666 | if (addr_type == IPV6_ADDR_ANY) { |
@@ -726,7 +726,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
726 | } | 726 | } |
727 | #ifdef CONFIG_IPV6 | 727 | #ifdef CONFIG_IPV6 |
728 | else if (skb->protocol == htons(ETH_P_IPV6)) { | 728 | else if (skb->protocol == htons(ETH_P_IPV6)) { |
729 | struct rt6_info *rt6 = (struct rt6_info*)skb->dst; | 729 | struct rt6_info *rt6 = (struct rt6_info *)skb->dst; |
730 | 730 | ||
731 | if (rt6 && mtu < dst_mtu(skb->dst) && mtu >= IPV6_MIN_MTU) { | 731 | if (rt6 && mtu < dst_mtu(skb->dst) && mtu >= IPV6_MIN_MTU) { |
732 | if ((tunnel->parms.iph.daddr && | 732 | if ((tunnel->parms.iph.daddr && |
@@ -800,7 +800,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
800 | iph->ttl = old_iph->ttl; | 800 | iph->ttl = old_iph->ttl; |
801 | #ifdef CONFIG_IPV6 | 801 | #ifdef CONFIG_IPV6 |
802 | else if (skb->protocol == htons(ETH_P_IPV6)) | 802 | else if (skb->protocol == htons(ETH_P_IPV6)) |
803 | iph->ttl = ((struct ipv6hdr*)old_iph)->hop_limit; | 803 | iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit; |
804 | #endif | 804 | #endif |
805 | else | 805 | else |
806 | iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT); | 806 | iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT); |
@@ -962,7 +962,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
962 | break; | 962 | break; |
963 | } | 963 | } |
964 | } else { | 964 | } else { |
965 | unsigned nflags=0; | 965 | unsigned nflags = 0; |
966 | 966 | ||
967 | t = netdev_priv(dev); | 967 | t = netdev_priv(dev); |
968 | 968 | ||
@@ -1104,7 +1104,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, | |||
1104 | 1104 | ||
1105 | static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr) | 1105 | static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr) |
1106 | { | 1106 | { |
1107 | struct iphdr *iph = (struct iphdr*) skb_mac_header(skb); | 1107 | struct iphdr *iph = (struct iphdr *) skb_mac_header(skb); |
1108 | memcpy(haddr, &iph->saddr, 4); | 1108 | memcpy(haddr, &iph->saddr, 4); |
1109 | return 4; | 1109 | return 4; |
1110 | } | 1110 | } |
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 861978a4f1a8..70bedab03b09 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -294,10 +294,8 @@ static inline int ip_rcv_options(struct sk_buff *skb) | |||
294 | if (!IN_DEV_SOURCE_ROUTE(in_dev)) { | 294 | if (!IN_DEV_SOURCE_ROUTE(in_dev)) { |
295 | if (IN_DEV_LOG_MARTIANS(in_dev) && | 295 | if (IN_DEV_LOG_MARTIANS(in_dev) && |
296 | net_ratelimit()) | 296 | net_ratelimit()) |
297 | printk(KERN_INFO "source route option " | 297 | printk(KERN_INFO "source route option %pI4 -> %pI4\n", |
298 | NIPQUAD_FMT " -> " NIPQUAD_FMT "\n", | 298 | &iph->saddr, &iph->daddr); |
299 | NIPQUAD(iph->saddr), | ||
300 | NIPQUAD(iph->daddr)); | ||
301 | in_dev_put(in_dev); | 299 | in_dev_put(in_dev); |
302 | goto drop; | 300 | goto drop; |
303 | } | 301 | } |
@@ -342,9 +340,9 @@ static int ip_rcv_finish(struct sk_buff *skb) | |||
342 | struct ip_rt_acct *st = per_cpu_ptr(ip_rt_acct, smp_processor_id()); | 340 | struct ip_rt_acct *st = per_cpu_ptr(ip_rt_acct, smp_processor_id()); |
343 | u32 idx = skb->dst->tclassid; | 341 | u32 idx = skb->dst->tclassid; |
344 | st[idx&0xFF].o_packets++; | 342 | st[idx&0xFF].o_packets++; |
345 | st[idx&0xFF].o_bytes+=skb->len; | 343 | st[idx&0xFF].o_bytes += skb->len; |
346 | st[(idx>>16)&0xFF].i_packets++; | 344 | st[(idx>>16)&0xFF].i_packets++; |
347 | st[(idx>>16)&0xFF].i_bytes+=skb->len; | 345 | st[(idx>>16)&0xFF].i_bytes += skb->len; |
348 | } | 346 | } |
349 | #endif | 347 | #endif |
350 | 348 | ||
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d2a8f8bb78a6..46d7be233eac 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -430,7 +430,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) | |||
430 | * single device frame, and queue such a frame for sending. | 430 | * single device frame, and queue such a frame for sending. |
431 | */ | 431 | */ |
432 | 432 | ||
433 | int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*)) | 433 | int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) |
434 | { | 434 | { |
435 | struct iphdr *iph; | 435 | struct iphdr *iph; |
436 | int raw = 0; | 436 | int raw = 0; |
@@ -720,7 +720,7 @@ static inline int ip_ufo_append_data(struct sock *sk, | |||
720 | int getfrag(void *from, char *to, int offset, int len, | 720 | int getfrag(void *from, char *to, int offset, int len, |
721 | int odd, struct sk_buff *skb), | 721 | int odd, struct sk_buff *skb), |
722 | void *from, int length, int hh_len, int fragheaderlen, | 722 | void *from, int length, int hh_len, int fragheaderlen, |
723 | int transhdrlen, int mtu,unsigned int flags) | 723 | int transhdrlen, int mtu, unsigned int flags) |
724 | { | 724 | { |
725 | struct sk_buff *skb; | 725 | struct sk_buff *skb; |
726 | int err; | 726 | int err; |
@@ -741,7 +741,7 @@ static inline int ip_ufo_append_data(struct sock *sk, | |||
741 | skb_reserve(skb, hh_len); | 741 | skb_reserve(skb, hh_len); |
742 | 742 | ||
743 | /* create space for UDP/IP header */ | 743 | /* create space for UDP/IP header */ |
744 | skb_put(skb,fragheaderlen + transhdrlen); | 744 | skb_put(skb, fragheaderlen + transhdrlen); |
745 | 745 | ||
746 | /* initialize network header pointer */ | 746 | /* initialize network header pointer */ |
747 | skb_reset_network_header(skb); | 747 | skb_reset_network_header(skb); |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 465abf0a9869..e976efeb1456 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -94,7 +94,7 @@ static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb) | |||
94 | static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb) | 94 | static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb) |
95 | { | 95 | { |
96 | unsigned char optbuf[sizeof(struct ip_options) + 40]; | 96 | unsigned char optbuf[sizeof(struct ip_options) + 40]; |
97 | struct ip_options * opt = (struct ip_options*)optbuf; | 97 | struct ip_options * opt = (struct ip_options *)optbuf; |
98 | 98 | ||
99 | if (IPCB(skb)->opt.optlen == 0) | 99 | if (IPCB(skb)->opt.optlen == 0) |
100 | return; | 100 | return; |
@@ -411,7 +411,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
411 | int optname, char __user *optval, int optlen) | 411 | int optname, char __user *optval, int optlen) |
412 | { | 412 | { |
413 | struct inet_sock *inet = inet_sk(sk); | 413 | struct inet_sock *inet = inet_sk(sk); |
414 | int val=0,err; | 414 | int val = 0, err; |
415 | 415 | ||
416 | if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) | | 416 | if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) | |
417 | (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) | | 417 | (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) | |
@@ -437,7 +437,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
437 | /* If optlen==0, it is equivalent to val == 0 */ | 437 | /* If optlen==0, it is equivalent to val == 0 */ |
438 | 438 | ||
439 | if (ip_mroute_opt(optname)) | 439 | if (ip_mroute_opt(optname)) |
440 | return ip_mroute_setsockopt(sk,optname,optval,optlen); | 440 | return ip_mroute_setsockopt(sk, optname, optval, optlen); |
441 | 441 | ||
442 | err = 0; | 442 | err = 0; |
443 | lock_sock(sk); | 443 | lock_sock(sk); |
@@ -549,7 +549,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
549 | goto e_inval; | 549 | goto e_inval; |
550 | if (optlen<1) | 550 | if (optlen<1) |
551 | goto e_inval; | 551 | goto e_inval; |
552 | if (val==-1) | 552 | if (val == -1) |
553 | val = 1; | 553 | val = 1; |
554 | if (val < 0 || val > 255) | 554 | if (val < 0 || val > 255) |
555 | goto e_inval; | 555 | goto e_inval; |
@@ -573,12 +573,12 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
573 | 573 | ||
574 | err = -EFAULT; | 574 | err = -EFAULT; |
575 | if (optlen >= sizeof(struct ip_mreqn)) { | 575 | if (optlen >= sizeof(struct ip_mreqn)) { |
576 | if (copy_from_user(&mreq,optval,sizeof(mreq))) | 576 | if (copy_from_user(&mreq, optval, sizeof(mreq))) |
577 | break; | 577 | break; |
578 | } else { | 578 | } else { |
579 | memset(&mreq, 0, sizeof(mreq)); | 579 | memset(&mreq, 0, sizeof(mreq)); |
580 | if (optlen >= sizeof(struct in_addr) && | 580 | if (optlen >= sizeof(struct in_addr) && |
581 | copy_from_user(&mreq.imr_address,optval,sizeof(struct in_addr))) | 581 | copy_from_user(&mreq.imr_address, optval, sizeof(struct in_addr))) |
582 | break; | 582 | break; |
583 | } | 583 | } |
584 | 584 | ||
@@ -626,11 +626,11 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
626 | goto e_inval; | 626 | goto e_inval; |
627 | err = -EFAULT; | 627 | err = -EFAULT; |
628 | if (optlen >= sizeof(struct ip_mreqn)) { | 628 | if (optlen >= sizeof(struct ip_mreqn)) { |
629 | if (copy_from_user(&mreq,optval,sizeof(mreq))) | 629 | if (copy_from_user(&mreq, optval, sizeof(mreq))) |
630 | break; | 630 | break; |
631 | } else { | 631 | } else { |
632 | memset(&mreq, 0, sizeof(mreq)); | 632 | memset(&mreq, 0, sizeof(mreq)); |
633 | if (copy_from_user(&mreq,optval,sizeof(struct ip_mreq))) | 633 | if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq))) |
634 | break; | 634 | break; |
635 | } | 635 | } |
636 | 636 | ||
@@ -808,7 +808,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
808 | err = -ENOBUFS; | 808 | err = -ENOBUFS; |
809 | break; | 809 | break; |
810 | } | 810 | } |
811 | gsf = kmalloc(optlen,GFP_KERNEL); | 811 | gsf = kmalloc(optlen, GFP_KERNEL); |
812 | if (!gsf) { | 812 | if (!gsf) { |
813 | err = -ENOBUFS; | 813 | err = -ENOBUFS; |
814 | break; | 814 | break; |
@@ -828,7 +828,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
828 | goto mc_msf_out; | 828 | goto mc_msf_out; |
829 | } | 829 | } |
830 | msize = IP_MSFILTER_SIZE(gsf->gf_numsrc); | 830 | msize = IP_MSFILTER_SIZE(gsf->gf_numsrc); |
831 | msf = kmalloc(msize,GFP_KERNEL); | 831 | msf = kmalloc(msize, GFP_KERNEL); |
832 | if (!msf) { | 832 | if (!msf) { |
833 | err = -ENOBUFS; | 833 | err = -ENOBUFS; |
834 | goto mc_msf_out; | 834 | goto mc_msf_out; |
@@ -971,9 +971,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, | |||
971 | return -EOPNOTSUPP; | 971 | return -EOPNOTSUPP; |
972 | 972 | ||
973 | if (ip_mroute_opt(optname)) | 973 | if (ip_mroute_opt(optname)) |
974 | return ip_mroute_getsockopt(sk,optname,optval,optlen); | 974 | return ip_mroute_getsockopt(sk, optname, optval, optlen); |
975 | 975 | ||
976 | if (get_user(len,optlen)) | 976 | if (get_user(len, optlen)) |
977 | return -EFAULT; | 977 | return -EFAULT; |
978 | if (len < 0) | 978 | if (len < 0) |
979 | return -EINVAL; | 979 | return -EINVAL; |
@@ -984,7 +984,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, | |||
984 | case IP_OPTIONS: | 984 | case IP_OPTIONS: |
985 | { | 985 | { |
986 | unsigned char optbuf[sizeof(struct ip_options)+40]; | 986 | unsigned char optbuf[sizeof(struct ip_options)+40]; |
987 | struct ip_options * opt = (struct ip_options*)optbuf; | 987 | struct ip_options * opt = (struct ip_options *)optbuf; |
988 | opt->optlen = 0; | 988 | opt->optlen = 0; |
989 | if (inet->opt) | 989 | if (inet->opt) |
990 | memcpy(optbuf, inet->opt, | 990 | memcpy(optbuf, inet->opt, |
@@ -1154,13 +1154,13 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, | |||
1154 | len = 1; | 1154 | len = 1; |
1155 | if (put_user(len, optlen)) | 1155 | if (put_user(len, optlen)) |
1156 | return -EFAULT; | 1156 | return -EFAULT; |
1157 | if (copy_to_user(optval,&ucval,1)) | 1157 | if (copy_to_user(optval, &ucval, 1)) |
1158 | return -EFAULT; | 1158 | return -EFAULT; |
1159 | } else { | 1159 | } else { |
1160 | len = min_t(unsigned int, sizeof(int), len); | 1160 | len = min_t(unsigned int, sizeof(int), len); |
1161 | if (put_user(len, optlen)) | 1161 | if (put_user(len, optlen)) |
1162 | return -EFAULT; | 1162 | return -EFAULT; |
1163 | if (copy_to_user(optval,&val,len)) | 1163 | if (copy_to_user(optval, &val, len)) |
1164 | return -EFAULT; | 1164 | return -EFAULT; |
1165 | } | 1165 | } |
1166 | return 0; | 1166 | return 0; |
@@ -1178,7 +1178,7 @@ int ip_getsockopt(struct sock *sk, int level, | |||
1178 | !ip_mroute_opt(optname)) { | 1178 | !ip_mroute_opt(optname)) { |
1179 | int len; | 1179 | int len; |
1180 | 1180 | ||
1181 | if (get_user(len,optlen)) | 1181 | if (get_user(len, optlen)) |
1182 | return -EFAULT; | 1182 | return -EFAULT; |
1183 | 1183 | ||
1184 | lock_sock(sk); | 1184 | lock_sock(sk); |
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 38ccb6dfb02e..ec8264ae45c2 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -39,8 +39,8 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info) | |||
39 | spi, IPPROTO_COMP, AF_INET); | 39 | spi, IPPROTO_COMP, AF_INET); |
40 | if (!x) | 40 | if (!x) |
41 | return; | 41 | return; |
42 | NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/" NIPQUAD_FMT "\n", | 42 | NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%pI4\n", |
43 | spi, NIPQUAD(iph->daddr)); | 43 | spi, &iph->daddr); |
44 | xfrm_state_put(x); | 44 | xfrm_state_put(x); |
45 | } | 45 | } |
46 | 46 | ||
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 42065fff46c4..42a0f3dd3fd6 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -374,7 +374,7 @@ static int __init ic_defaults(void) | |||
374 | */ | 374 | */ |
375 | 375 | ||
376 | if (!ic_host_name_set) | 376 | if (!ic_host_name_set) |
377 | sprintf(init_utsname()->nodename, NIPQUAD_FMT, NIPQUAD(ic_myaddr)); | 377 | sprintf(init_utsname()->nodename, "%pI4", &ic_myaddr); |
378 | 378 | ||
379 | if (root_server_addr == NONE) | 379 | if (root_server_addr == NONE) |
380 | root_server_addr = ic_servaddr; | 380 | root_server_addr = ic_servaddr; |
@@ -387,11 +387,11 @@ static int __init ic_defaults(void) | |||
387 | else if (IN_CLASSC(ntohl(ic_myaddr))) | 387 | else if (IN_CLASSC(ntohl(ic_myaddr))) |
388 | ic_netmask = htonl(IN_CLASSC_NET); | 388 | ic_netmask = htonl(IN_CLASSC_NET); |
389 | else { | 389 | else { |
390 | printk(KERN_ERR "IP-Config: Unable to guess netmask for address " NIPQUAD_FMT "\n", | 390 | printk(KERN_ERR "IP-Config: Unable to guess netmask for address %pI4\n", |
391 | NIPQUAD(ic_myaddr)); | 391 | &ic_myaddr); |
392 | return -1; | 392 | return -1; |
393 | } | 393 | } |
394 | printk("IP-Config: Guessing netmask " NIPQUAD_FMT "\n", NIPQUAD(ic_netmask)); | 394 | printk("IP-Config: Guessing netmask %pI4\n", &ic_netmask); |
395 | } | 395 | } |
396 | 396 | ||
397 | return 0; | 397 | return 0; |
@@ -979,10 +979,8 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str | |||
979 | ic_myaddr = b->your_ip; | 979 | ic_myaddr = b->your_ip; |
980 | ic_servaddr = server_id; | 980 | ic_servaddr = server_id; |
981 | #ifdef IPCONFIG_DEBUG | 981 | #ifdef IPCONFIG_DEBUG |
982 | printk("DHCP: Offered address " NIPQUAD_FMT, | 982 | printk("DHCP: Offered address %pI4 by server %pI4\n", |
983 | NIPQUAD(ic_myaddr)); | 983 | &ic_myaddr, &ic_servaddr); |
984 | printk(" by server " NIPQUAD_FMT "\n", | ||
985 | NIPQUAD(ic_servaddr)); | ||
986 | #endif | 984 | #endif |
987 | /* The DHCP indicated server address takes | 985 | /* The DHCP indicated server address takes |
988 | * precedence over the bootp header one if | 986 | * precedence over the bootp header one if |
@@ -1177,11 +1175,11 @@ static int __init ic_dynamic(void) | |||
1177 | return -1; | 1175 | return -1; |
1178 | } | 1176 | } |
1179 | 1177 | ||
1180 | printk("IP-Config: Got %s answer from " NIPQUAD_FMT ", ", | 1178 | printk("IP-Config: Got %s answer from %pI4, ", |
1181 | ((ic_got_reply & IC_RARP) ? "RARP" | 1179 | ((ic_got_reply & IC_RARP) ? "RARP" |
1182 | : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"), | 1180 | : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"), |
1183 | NIPQUAD(ic_servaddr)); | 1181 | &ic_servaddr); |
1184 | printk("my address is " NIPQUAD_FMT "\n", NIPQUAD(ic_myaddr)); | 1182 | printk("my address is %pI4\n", &ic_myaddr); |
1185 | 1183 | ||
1186 | return 0; | 1184 | return 0; |
1187 | } | 1185 | } |
@@ -1206,14 +1204,12 @@ static int pnp_seq_show(struct seq_file *seq, void *v) | |||
1206 | "domain %s\n", ic_domain); | 1204 | "domain %s\n", ic_domain); |
1207 | for (i = 0; i < CONF_NAMESERVERS_MAX; i++) { | 1205 | for (i = 0; i < CONF_NAMESERVERS_MAX; i++) { |
1208 | if (ic_nameservers[i] != NONE) | 1206 | if (ic_nameservers[i] != NONE) |
1209 | seq_printf(seq, | 1207 | seq_printf(seq, "nameserver %pI4\n", |
1210 | "nameserver " NIPQUAD_FMT "\n", | 1208 | &ic_nameservers[i]); |
1211 | NIPQUAD(ic_nameservers[i])); | ||
1212 | } | 1209 | } |
1213 | if (ic_servaddr != NONE) | 1210 | if (ic_servaddr != NONE) |
1214 | seq_printf(seq, | 1211 | seq_printf(seq, "bootserver %pI4\n", |
1215 | "bootserver " NIPQUAD_FMT "\n", | 1212 | &ic_servaddr); |
1216 | NIPQUAD(ic_servaddr)); | ||
1217 | return 0; | 1213 | return 0; |
1218 | } | 1214 | } |
1219 | 1215 | ||
@@ -1387,13 +1383,13 @@ static int __init ip_auto_config(void) | |||
1387 | */ | 1383 | */ |
1388 | printk("IP-Config: Complete:"); | 1384 | printk("IP-Config: Complete:"); |
1389 | printk("\n device=%s", ic_dev->name); | 1385 | printk("\n device=%s", ic_dev->name); |
1390 | printk(", addr=" NIPQUAD_FMT, NIPQUAD(ic_myaddr)); | 1386 | printk(", addr=%pI4", &ic_myaddr); |
1391 | printk(", mask=" NIPQUAD_FMT, NIPQUAD(ic_netmask)); | 1387 | printk(", mask=%pI4", &ic_netmask); |
1392 | printk(", gw=" NIPQUAD_FMT, NIPQUAD(ic_gateway)); | 1388 | printk(", gw=%pI4", &ic_gateway); |
1393 | printk(",\n host=%s, domain=%s, nis-domain=%s", | 1389 | printk(",\n host=%s, domain=%s, nis-domain=%s", |
1394 | utsname()->nodename, ic_domain, utsname()->domainname); | 1390 | utsname()->nodename, ic_domain, utsname()->domainname); |
1395 | printk(",\n bootserver=" NIPQUAD_FMT, NIPQUAD(ic_servaddr)); | 1391 | printk(",\n bootserver=%pI4", &ic_servaddr); |
1396 | printk(", rootserver=" NIPQUAD_FMT, NIPQUAD(root_server_addr)); | 1392 | printk(", rootserver=%pI4", &root_server_addr); |
1397 | printk(", rootpath=%s", root_server_path); | 1393 | printk(", rootpath=%s", root_server_path); |
1398 | printk("\n"); | 1394 | printk("\n"); |
1399 | #endif /* !SILENT */ | 1395 | #endif /* !SILENT */ |
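The ipconfig.c hunks above show the conversion pattern this whole series applies: the old NIPQUAD() macro expanded an address into four separate byte arguments for a "%u.%u.%u.%u" format, whereas %pI4 takes a single pointer to the 32-bit address in network byte order and lets the printk core render the dotted quad. A minimal userspace sketch of the semantics follows; print_ipv4() is only a stand-in for what the kernel's vsnprintf() does for %pI4, not a real API.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static void print_ipv4(const void *p)           /* stand-in for %pI4 */
{
        const unsigned char *b = p;
        printf("%u.%u.%u.%u", b[0], b[1], b[2], b[3]);
}

int main(void)
{
        uint32_t addr = htonl(0xc0a80001);      /* 192.168.0.1, network byte order */

        print_ipv4(&addr);                      /* prints the same dotted quad     */
        putchar('\n');                          /* that "%pI4", &addr would emit   */
        return 0;
}

Note that the converted call sites pass &ic_myaddr rather than ic_myaddr: %pI4 dereferences its argument, so passing the value itself would hand printk a bogus pointer.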
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 29609d29df76..b3c3d7b0d116 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -281,7 +281,7 @@ static int ipip_err(struct sk_buff *skb, u32 info) | |||
281 | 8 bytes of packet payload. It means that precise relaying of | 281 | 8 bytes of packet payload. It means that precise relaying of |
282 | ICMP in the real Internet is absolutely infeasible. | 282 | ICMP in the real Internet is absolutely infeasible. |
283 | */ | 283 | */ |
284 | struct iphdr *iph = (struct iphdr*)skb->data; | 284 | struct iphdr *iph = (struct iphdr *)skb->data; |
285 | const int type = icmp_hdr(skb)->type; | 285 | const int type = icmp_hdr(skb)->type; |
286 | const int code = icmp_hdr(skb)->code; | 286 | const int code = icmp_hdr(skb)->code; |
287 | struct ip_tunnel *t; | 287 | struct ip_tunnel *t; |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index b42e082cc170..05ed336f798a 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -331,7 +331,7 @@ static void ipmr_destroy_unres(struct mfc_cache *c) | |||
331 | 331 | ||
332 | atomic_dec(&cache_resolve_queue_len); | 332 | atomic_dec(&cache_resolve_queue_len); |
333 | 333 | ||
334 | while ((skb=skb_dequeue(&c->mfc_un.unres.unresolved))) { | 334 | while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) { |
335 | if (ip_hdr(skb)->version == 0) { | 335 | if (ip_hdr(skb)->version == 0) { |
336 | struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); | 336 | struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); |
337 | nlh->nlmsg_type = NLMSG_ERROR; | 337 | nlh->nlmsg_type = NLMSG_ERROR; |
@@ -477,13 +477,13 @@ static int vif_add(struct vifctl *vifc, int mrtsock) | |||
477 | /* | 477 | /* |
478 | * Fill in the VIF structures | 478 | * Fill in the VIF structures |
479 | */ | 479 | */ |
480 | v->rate_limit=vifc->vifc_rate_limit; | 480 | v->rate_limit = vifc->vifc_rate_limit; |
481 | v->local=vifc->vifc_lcl_addr.s_addr; | 481 | v->local = vifc->vifc_lcl_addr.s_addr; |
482 | v->remote=vifc->vifc_rmt_addr.s_addr; | 482 | v->remote = vifc->vifc_rmt_addr.s_addr; |
483 | v->flags=vifc->vifc_flags; | 483 | v->flags = vifc->vifc_flags; |
484 | if (!mrtsock) | 484 | if (!mrtsock) |
485 | v->flags |= VIFF_STATIC; | 485 | v->flags |= VIFF_STATIC; |
486 | v->threshold=vifc->vifc_threshold; | 486 | v->threshold = vifc->vifc_threshold; |
487 | v->bytes_in = 0; | 487 | v->bytes_in = 0; |
488 | v->bytes_out = 0; | 488 | v->bytes_out = 0; |
489 | v->pkt_in = 0; | 489 | v->pkt_in = 0; |
@@ -494,7 +494,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock) | |||
494 | 494 | ||
495 | /* And finish update writing critical data */ | 495 | /* And finish update writing critical data */ |
496 | write_lock_bh(&mrt_lock); | 496 | write_lock_bh(&mrt_lock); |
497 | v->dev=dev; | 497 | v->dev = dev; |
498 | #ifdef CONFIG_IP_PIMSM | 498 | #ifdef CONFIG_IP_PIMSM |
499 | if (v->flags&VIFF_REGISTER) | 499 | if (v->flags&VIFF_REGISTER) |
500 | reg_vif_num = vifi; | 500 | reg_vif_num = vifi; |
@@ -507,7 +507,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock) | |||
507 | 507 | ||
508 | static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp) | 508 | static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp) |
509 | { | 509 | { |
510 | int line=MFC_HASH(mcastgrp,origin); | 510 | int line = MFC_HASH(mcastgrp, origin); |
511 | struct mfc_cache *c; | 511 | struct mfc_cache *c; |
512 | 512 | ||
513 | for (c=mfc_cache_array[line]; c; c = c->next) { | 513 | for (c=mfc_cache_array[line]; c; c = c->next) { |
@@ -522,8 +522,8 @@ static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp) | |||
522 | */ | 522 | */ |
523 | static struct mfc_cache *ipmr_cache_alloc(void) | 523 | static struct mfc_cache *ipmr_cache_alloc(void) |
524 | { | 524 | { |
525 | struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); | 525 | struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); |
526 | if (c==NULL) | 526 | if (c == NULL) |
527 | return NULL; | 527 | return NULL; |
528 | c->mfc_un.res.minvif = MAXVIFS; | 528 | c->mfc_un.res.minvif = MAXVIFS; |
529 | return c; | 529 | return c; |
@@ -531,8 +531,8 @@ static struct mfc_cache *ipmr_cache_alloc(void) | |||
531 | 531 | ||
532 | static struct mfc_cache *ipmr_cache_alloc_unres(void) | 532 | static struct mfc_cache *ipmr_cache_alloc_unres(void) |
533 | { | 533 | { |
534 | struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); | 534 | struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); |
535 | if (c==NULL) | 535 | if (c == NULL) |
536 | return NULL; | 536 | return NULL; |
537 | skb_queue_head_init(&c->mfc_un.unres.unresolved); | 537 | skb_queue_head_init(&c->mfc_un.unres.unresolved); |
538 | c->mfc_un.unres.expires = jiffies + 10*HZ; | 538 | c->mfc_un.unres.expires = jiffies + 10*HZ; |
@@ -552,7 +552,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) | |||
552 | * Play the pending entries through our router | 552 | * Play the pending entries through our router |
553 | */ | 553 | */ |
554 | 554 | ||
555 | while ((skb=__skb_dequeue(&uc->mfc_un.unres.unresolved))) { | 555 | while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) { |
556 | if (ip_hdr(skb)->version == 0) { | 556 | if (ip_hdr(skb)->version == 0) { |
557 | struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); | 557 | struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); |
558 | 558 | ||
@@ -637,7 +637,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) | |||
637 | * Add our header | 637 | * Add our header |
638 | */ | 638 | */ |
639 | 639 | ||
640 | igmp=(struct igmphdr *)skb_put(skb,sizeof(struct igmphdr)); | 640 | igmp=(struct igmphdr *)skb_put(skb, sizeof(struct igmphdr)); |
641 | igmp->type = | 641 | igmp->type = |
642 | msg->im_msgtype = assert; | 642 | msg->im_msgtype = assert; |
643 | igmp->code = 0; | 643 | igmp->code = 0; |
@@ -653,7 +653,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) | |||
653 | /* | 653 | /* |
654 | * Deliver to mrouted | 654 | * Deliver to mrouted |
655 | */ | 655 | */ |
656 | if ((ret=sock_queue_rcv_skb(mroute_socket,skb))<0) { | 656 | if ((ret = sock_queue_rcv_skb(mroute_socket, skb))<0) { |
657 | if (net_ratelimit()) | 657 | if (net_ratelimit()) |
658 | printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n"); | 658 | printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n"); |
659 | kfree_skb(skb); | 659 | kfree_skb(skb); |
@@ -685,7 +685,7 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) | |||
685 | * Create a new entry if allowable | 685 | * Create a new entry if allowable |
686 | */ | 686 | */ |
687 | 687 | ||
688 | if (atomic_read(&cache_resolve_queue_len)>=10 || | 688 | if (atomic_read(&cache_resolve_queue_len) >= 10 || |
689 | (c=ipmr_cache_alloc_unres())==NULL) { | 689 | (c=ipmr_cache_alloc_unres())==NULL) { |
690 | spin_unlock_bh(&mfc_unres_lock); | 690 | spin_unlock_bh(&mfc_unres_lock); |
691 | 691 | ||
@@ -728,7 +728,7 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) | |||
728 | kfree_skb(skb); | 728 | kfree_skb(skb); |
729 | err = -ENOBUFS; | 729 | err = -ENOBUFS; |
730 | } else { | 730 | } else { |
731 | skb_queue_tail(&c->mfc_un.unres.unresolved,skb); | 731 | skb_queue_tail(&c->mfc_un.unres.unresolved, skb); |
732 | err = 0; | 732 | err = 0; |
733 | } | 733 | } |
734 | 734 | ||
@@ -745,7 +745,7 @@ static int ipmr_mfc_delete(struct mfcctl *mfc) | |||
745 | int line; | 745 | int line; |
746 | struct mfc_cache *c, **cp; | 746 | struct mfc_cache *c, **cp; |
747 | 747 | ||
748 | line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); | 748 | line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); |
749 | 749 | ||
750 | for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) { | 750 | for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) { |
751 | if (c->mfc_origin == mfc->mfcc_origin.s_addr && | 751 | if (c->mfc_origin == mfc->mfcc_origin.s_addr && |
@@ -766,7 +766,7 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock) | |||
766 | int line; | 766 | int line; |
767 | struct mfc_cache *uc, *c, **cp; | 767 | struct mfc_cache *uc, *c, **cp; |
768 | 768 | ||
769 | line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); | 769 | line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); |
770 | 770 | ||
771 | for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) { | 771 | for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) { |
772 | if (c->mfc_origin == mfc->mfcc_origin.s_addr && | 772 | if (c->mfc_origin == mfc->mfcc_origin.s_addr && |
@@ -787,13 +787,13 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock) | |||
787 | if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) | 787 | if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) |
788 | return -EINVAL; | 788 | return -EINVAL; |
789 | 789 | ||
790 | c=ipmr_cache_alloc(); | 790 | c = ipmr_cache_alloc(); |
791 | if (c==NULL) | 791 | if (c == NULL) |
792 | return -ENOMEM; | 792 | return -ENOMEM; |
793 | 793 | ||
794 | c->mfc_origin=mfc->mfcc_origin.s_addr; | 794 | c->mfc_origin = mfc->mfcc_origin.s_addr; |
795 | c->mfc_mcastgrp=mfc->mfcc_mcastgrp.s_addr; | 795 | c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr; |
796 | c->mfc_parent=mfc->mfcc_parent; | 796 | c->mfc_parent = mfc->mfcc_parent; |
797 | ipmr_update_thresholds(c, mfc->mfcc_ttls); | 797 | ipmr_update_thresholds(c, mfc->mfcc_ttls); |
798 | if (!mrtsock) | 798 | if (!mrtsock) |
799 | c->mfc_flags |= MFC_STATIC; | 799 | c->mfc_flags |= MFC_STATIC; |
@@ -846,7 +846,7 @@ static void mroute_clean_tables(struct sock *sk) | |||
846 | /* | 846 | /* |
847 | * Wipe the cache | 847 | * Wipe the cache |
848 | */ | 848 | */ |
849 | for (i=0;i<MFC_LINES;i++) { | 849 | for (i=0; i<MFC_LINES; i++) { |
850 | struct mfc_cache *c, **cp; | 850 | struct mfc_cache *c, **cp; |
851 | 851 | ||
852 | cp = &mfc_cache_array[i]; | 852 | cp = &mfc_cache_array[i]; |
@@ -887,7 +887,7 @@ static void mrtsock_destruct(struct sock *sk) | |||
887 | IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--; | 887 | IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--; |
888 | 888 | ||
889 | write_lock_bh(&mrt_lock); | 889 | write_lock_bh(&mrt_lock); |
890 | mroute_socket=NULL; | 890 | mroute_socket = NULL; |
891 | write_unlock_bh(&mrt_lock); | 891 | write_unlock_bh(&mrt_lock); |
892 | 892 | ||
893 | mroute_clean_tables(sk); | 893 | mroute_clean_tables(sk); |
@@ -902,7 +902,7 @@ static void mrtsock_destruct(struct sock *sk) | |||
902 | * MOSPF/PIM router set up we can clean this up. | 902 | * MOSPF/PIM router set up we can clean this up. |
903 | */ | 903 | */ |
904 | 904 | ||
905 | int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int optlen) | 905 | int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen) |
906 | { | 906 | { |
907 | int ret; | 907 | int ret; |
908 | struct vifctl vif; | 908 | struct vifctl vif; |
@@ -918,7 +918,7 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt | |||
918 | if (sk->sk_type != SOCK_RAW || | 918 | if (sk->sk_type != SOCK_RAW || |
919 | inet_sk(sk)->num != IPPROTO_IGMP) | 919 | inet_sk(sk)->num != IPPROTO_IGMP) |
920 | return -EOPNOTSUPP; | 920 | return -EOPNOTSUPP; |
921 | if (optlen!=sizeof(int)) | 921 | if (optlen != sizeof(int)) |
922 | return -ENOPROTOOPT; | 922 | return -ENOPROTOOPT; |
923 | 923 | ||
924 | rtnl_lock(); | 924 | rtnl_lock(); |
@@ -930,7 +930,7 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt | |||
930 | ret = ip_ra_control(sk, 1, mrtsock_destruct); | 930 | ret = ip_ra_control(sk, 1, mrtsock_destruct); |
931 | if (ret == 0) { | 931 | if (ret == 0) { |
932 | write_lock_bh(&mrt_lock); | 932 | write_lock_bh(&mrt_lock); |
933 | mroute_socket=sk; | 933 | mroute_socket = sk; |
934 | write_unlock_bh(&mrt_lock); | 934 | write_unlock_bh(&mrt_lock); |
935 | 935 | ||
936 | IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++; | 936 | IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++; |
@@ -938,19 +938,19 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt | |||
938 | rtnl_unlock(); | 938 | rtnl_unlock(); |
939 | return ret; | 939 | return ret; |
940 | case MRT_DONE: | 940 | case MRT_DONE: |
941 | if (sk!=mroute_socket) | 941 | if (sk != mroute_socket) |
942 | return -EACCES; | 942 | return -EACCES; |
943 | return ip_ra_control(sk, 0, NULL); | 943 | return ip_ra_control(sk, 0, NULL); |
944 | case MRT_ADD_VIF: | 944 | case MRT_ADD_VIF: |
945 | case MRT_DEL_VIF: | 945 | case MRT_DEL_VIF: |
946 | if (optlen!=sizeof(vif)) | 946 | if (optlen != sizeof(vif)) |
947 | return -EINVAL; | 947 | return -EINVAL; |
948 | if (copy_from_user(&vif,optval,sizeof(vif))) | 948 | if (copy_from_user(&vif, optval, sizeof(vif))) |
949 | return -EFAULT; | 949 | return -EFAULT; |
950 | if (vif.vifc_vifi >= MAXVIFS) | 950 | if (vif.vifc_vifi >= MAXVIFS) |
951 | return -ENFILE; | 951 | return -ENFILE; |
952 | rtnl_lock(); | 952 | rtnl_lock(); |
953 | if (optname==MRT_ADD_VIF) { | 953 | if (optname == MRT_ADD_VIF) { |
954 | ret = vif_add(&vif, sk==mroute_socket); | 954 | ret = vif_add(&vif, sk==mroute_socket); |
955 | } else { | 955 | } else { |
956 | ret = vif_delete(vif.vifc_vifi, 0); | 956 | ret = vif_delete(vif.vifc_vifi, 0); |
@@ -964,12 +964,12 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt | |||
964 | */ | 964 | */ |
965 | case MRT_ADD_MFC: | 965 | case MRT_ADD_MFC: |
966 | case MRT_DEL_MFC: | 966 | case MRT_DEL_MFC: |
967 | if (optlen!=sizeof(mfc)) | 967 | if (optlen != sizeof(mfc)) |
968 | return -EINVAL; | 968 | return -EINVAL; |
969 | if (copy_from_user(&mfc,optval, sizeof(mfc))) | 969 | if (copy_from_user(&mfc, optval, sizeof(mfc))) |
970 | return -EFAULT; | 970 | return -EFAULT; |
971 | rtnl_lock(); | 971 | rtnl_lock(); |
972 | if (optname==MRT_DEL_MFC) | 972 | if (optname == MRT_DEL_MFC) |
973 | ret = ipmr_mfc_delete(&mfc); | 973 | ret = ipmr_mfc_delete(&mfc); |
974 | else | 974 | else |
975 | ret = ipmr_mfc_add(&mfc, sk==mroute_socket); | 975 | ret = ipmr_mfc_add(&mfc, sk==mroute_socket); |
@@ -1028,12 +1028,12 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt | |||
1028 | * Getsock opt support for the multicast routing system. | 1028 | * Getsock opt support for the multicast routing system. |
1029 | */ | 1029 | */ |
1030 | 1030 | ||
1031 | int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __user *optlen) | 1031 | int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen) |
1032 | { | 1032 | { |
1033 | int olr; | 1033 | int olr; |
1034 | int val; | 1034 | int val; |
1035 | 1035 | ||
1036 | if (optname!=MRT_VERSION && | 1036 | if (optname != MRT_VERSION && |
1037 | #ifdef CONFIG_IP_PIMSM | 1037 | #ifdef CONFIG_IP_PIMSM |
1038 | optname!=MRT_PIM && | 1038 | optname!=MRT_PIM && |
1039 | #endif | 1039 | #endif |
@@ -1047,17 +1047,17 @@ int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __u | |||
1047 | if (olr < 0) | 1047 | if (olr < 0) |
1048 | return -EINVAL; | 1048 | return -EINVAL; |
1049 | 1049 | ||
1050 | if (put_user(olr,optlen)) | 1050 | if (put_user(olr, optlen)) |
1051 | return -EFAULT; | 1051 | return -EFAULT; |
1052 | if (optname==MRT_VERSION) | 1052 | if (optname == MRT_VERSION) |
1053 | val=0x0305; | 1053 | val = 0x0305; |
1054 | #ifdef CONFIG_IP_PIMSM | 1054 | #ifdef CONFIG_IP_PIMSM |
1055 | else if (optname==MRT_PIM) | 1055 | else if (optname == MRT_PIM) |
1056 | val=mroute_do_pim; | 1056 | val = mroute_do_pim; |
1057 | #endif | 1057 | #endif |
1058 | else | 1058 | else |
1059 | val=mroute_do_assert; | 1059 | val = mroute_do_assert; |
1060 | if (copy_to_user(optval,&val,olr)) | 1060 | if (copy_to_user(optval, &val, olr)) |
1061 | return -EFAULT; | 1061 | return -EFAULT; |
1062 | return 0; | 1062 | return 0; |
1063 | } | 1063 | } |
@@ -1075,27 +1075,27 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) | |||
1075 | 1075 | ||
1076 | switch (cmd) { | 1076 | switch (cmd) { |
1077 | case SIOCGETVIFCNT: | 1077 | case SIOCGETVIFCNT: |
1078 | if (copy_from_user(&vr,arg,sizeof(vr))) | 1078 | if (copy_from_user(&vr, arg, sizeof(vr))) |
1079 | return -EFAULT; | 1079 | return -EFAULT; |
1080 | if (vr.vifi>=maxvif) | 1080 | if (vr.vifi >= maxvif) |
1081 | return -EINVAL; | 1081 | return -EINVAL; |
1082 | read_lock(&mrt_lock); | 1082 | read_lock(&mrt_lock); |
1083 | vif=&vif_table[vr.vifi]; | 1083 | vif=&vif_table[vr.vifi]; |
1084 | if (VIF_EXISTS(vr.vifi)) { | 1084 | if (VIF_EXISTS(vr.vifi)) { |
1085 | vr.icount=vif->pkt_in; | 1085 | vr.icount = vif->pkt_in; |
1086 | vr.ocount=vif->pkt_out; | 1086 | vr.ocount = vif->pkt_out; |
1087 | vr.ibytes=vif->bytes_in; | 1087 | vr.ibytes = vif->bytes_in; |
1088 | vr.obytes=vif->bytes_out; | 1088 | vr.obytes = vif->bytes_out; |
1089 | read_unlock(&mrt_lock); | 1089 | read_unlock(&mrt_lock); |
1090 | 1090 | ||
1091 | if (copy_to_user(arg,&vr,sizeof(vr))) | 1091 | if (copy_to_user(arg, &vr, sizeof(vr))) |
1092 | return -EFAULT; | 1092 | return -EFAULT; |
1093 | return 0; | 1093 | return 0; |
1094 | } | 1094 | } |
1095 | read_unlock(&mrt_lock); | 1095 | read_unlock(&mrt_lock); |
1096 | return -EADDRNOTAVAIL; | 1096 | return -EADDRNOTAVAIL; |
1097 | case SIOCGETSGCNT: | 1097 | case SIOCGETSGCNT: |
1098 | if (copy_from_user(&sr,arg,sizeof(sr))) | 1098 | if (copy_from_user(&sr, arg, sizeof(sr))) |
1099 | return -EFAULT; | 1099 | return -EFAULT; |
1100 | 1100 | ||
1101 | read_lock(&mrt_lock); | 1101 | read_lock(&mrt_lock); |
@@ -1106,7 +1106,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) | |||
1106 | sr.wrong_if = c->mfc_un.res.wrong_if; | 1106 | sr.wrong_if = c->mfc_un.res.wrong_if; |
1107 | read_unlock(&mrt_lock); | 1107 | read_unlock(&mrt_lock); |
1108 | 1108 | ||
1109 | if (copy_to_user(arg,&sr,sizeof(sr))) | 1109 | if (copy_to_user(arg, &sr, sizeof(sr))) |
1110 | return -EFAULT; | 1110 | return -EFAULT; |
1111 | return 0; | 1111 | return 0; |
1112 | } | 1112 | } |
@@ -1130,15 +1130,15 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v | |||
1130 | if (event != NETDEV_UNREGISTER) | 1130 | if (event != NETDEV_UNREGISTER) |
1131 | return NOTIFY_DONE; | 1131 | return NOTIFY_DONE; |
1132 | v=&vif_table[0]; | 1132 | v=&vif_table[0]; |
1133 | for (ct=0;ct<maxvif;ct++,v++) { | 1133 | for (ct=0; ct<maxvif; ct++,v++) { |
1134 | if (v->dev==dev) | 1134 | if (v->dev == dev) |
1135 | vif_delete(ct, 1); | 1135 | vif_delete(ct, 1); |
1136 | } | 1136 | } |
1137 | return NOTIFY_DONE; | 1137 | return NOTIFY_DONE; |
1138 | } | 1138 | } |
1139 | 1139 | ||
1140 | 1140 | ||
1141 | static struct notifier_block ip_mr_notifier={ | 1141 | static struct notifier_block ip_mr_notifier = { |
1142 | .notifier_call = ipmr_device_event, | 1142 | .notifier_call = ipmr_device_event, |
1143 | }; | 1143 | }; |
1144 | 1144 | ||
@@ -1204,7 +1204,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) | |||
1204 | #ifdef CONFIG_IP_PIMSM | 1204 | #ifdef CONFIG_IP_PIMSM |
1205 | if (vif->flags & VIFF_REGISTER) { | 1205 | if (vif->flags & VIFF_REGISTER) { |
1206 | vif->pkt_out++; | 1206 | vif->pkt_out++; |
1207 | vif->bytes_out+=skb->len; | 1207 | vif->bytes_out += skb->len; |
1208 | vif->dev->stats.tx_bytes += skb->len; | 1208 | vif->dev->stats.tx_bytes += skb->len; |
1209 | vif->dev->stats.tx_packets++; | 1209 | vif->dev->stats.tx_packets++; |
1210 | ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT); | 1210 | ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT); |
@@ -1254,7 +1254,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) | |||
1254 | } | 1254 | } |
1255 | 1255 | ||
1256 | vif->pkt_out++; | 1256 | vif->pkt_out++; |
1257 | vif->bytes_out+=skb->len; | 1257 | vif->bytes_out += skb->len; |
1258 | 1258 | ||
1259 | dst_release(skb->dst); | 1259 | dst_release(skb->dst); |
1260 | skb->dst = &rt->u.dst; | 1260 | skb->dst = &rt->u.dst; |
@@ -1352,7 +1352,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local | |||
1352 | } | 1352 | } |
1353 | 1353 | ||
1354 | vif_table[vif].pkt_in++; | 1354 | vif_table[vif].pkt_in++; |
1355 | vif_table[vif].bytes_in+=skb->len; | 1355 | vif_table[vif].bytes_in += skb->len; |
1356 | 1356 | ||
1357 | /* | 1357 | /* |
1358 | * Forward the frame | 1358 | * Forward the frame |
@@ -1364,7 +1364,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local | |||
1364 | if (skb2) | 1364 | if (skb2) |
1365 | ipmr_queue_xmit(skb2, cache, psend); | 1365 | ipmr_queue_xmit(skb2, cache, psend); |
1366 | } | 1366 | } |
1367 | psend=ct; | 1367 | psend = ct; |
1368 | } | 1368 | } |
1369 | } | 1369 | } |
1370 | if (psend != -1) { | 1370 | if (psend != -1) { |
@@ -1428,7 +1428,7 @@ int ip_mr_input(struct sk_buff *skb) | |||
1428 | /* | 1428 | /* |
1429 | * No usable cache entry | 1429 | * No usable cache entry |
1430 | */ | 1430 | */ |
1431 | if (cache==NULL) { | 1431 | if (cache == NULL) { |
1432 | int vif; | 1432 | int vif; |
1433 | 1433 | ||
1434 | if (local) { | 1434 | if (local) { |
@@ -1602,13 +1602,13 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) | |||
1602 | if (dev) | 1602 | if (dev) |
1603 | RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex); | 1603 | RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex); |
1604 | 1604 | ||
1605 | mp_head = (struct rtattr*)skb_put(skb, RTA_LENGTH(0)); | 1605 | mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); |
1606 | 1606 | ||
1607 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { | 1607 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { |
1608 | if (c->mfc_un.res.ttls[ct] < 255) { | 1608 | if (c->mfc_un.res.ttls[ct] < 255) { |
1609 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) | 1609 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) |
1610 | goto rtattr_failure; | 1610 | goto rtattr_failure; |
1611 | nhp = (struct rtnexthop*)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); | 1611 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); |
1612 | nhp->rtnh_flags = 0; | 1612 | nhp->rtnh_flags = 0; |
1613 | nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; | 1613 | nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; |
1614 | nhp->rtnh_ifindex = vif_table[ct].dev->ifindex; | 1614 | nhp->rtnh_ifindex = vif_table[ct].dev->ifindex; |
@@ -1634,7 +1634,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait) | |||
1634 | read_lock(&mrt_lock); | 1634 | read_lock(&mrt_lock); |
1635 | cache = ipmr_cache_find(rt->rt_src, rt->rt_dst); | 1635 | cache = ipmr_cache_find(rt->rt_src, rt->rt_dst); |
1636 | 1636 | ||
1637 | if (cache==NULL) { | 1637 | if (cache == NULL) { |
1638 | struct sk_buff *skb2; | 1638 | struct sk_buff *skb2; |
1639 | struct iphdr *iph; | 1639 | struct iphdr *iph; |
1640 | struct net_device *dev; | 1640 | struct net_device *dev; |
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 8d70d29f1ccf..7ea88b61cb0d 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
@@ -142,15 +142,15 @@ static inline int arp_packet_match(const struct arphdr *arphdr, | |||
142 | ARPT_INV_TGTIP)) { | 142 | ARPT_INV_TGTIP)) { |
143 | dprintf("Source or target IP address mismatch.\n"); | 143 | dprintf("Source or target IP address mismatch.\n"); |
144 | 144 | ||
145 | dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n", | 145 | dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n", |
146 | NIPQUAD(src_ipaddr), | 146 | &src_ipaddr, |
147 | NIPQUAD(arpinfo->smsk.s_addr), | 147 | &arpinfo->smsk.s_addr, |
148 | NIPQUAD(arpinfo->src.s_addr), | 148 | &arpinfo->src.s_addr, |
149 | arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : ""); | 149 | arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : ""); |
150 | dprintf("TGT: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n", | 150 | dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n", |
151 | NIPQUAD(tgt_ipaddr), | 151 | &tgt_ipaddr, |
152 | NIPQUAD(arpinfo->tmsk.s_addr), | 152 | &arpinfo->tmsk.s_addr, |
153 | NIPQUAD(arpinfo->tgt.s_addr), | 153 | &arpinfo->tgt.s_addr, |
154 | arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : ""); | 154 | arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : ""); |
155 | return 0; | 155 | return 0; |
156 | } | 156 | } |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 213fb27debc1..ef8b6ca068b2 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -94,15 +94,11 @@ ip_packet_match(const struct iphdr *ip, | |||
94 | IPT_INV_DSTIP)) { | 94 | IPT_INV_DSTIP)) { |
95 | dprintf("Source or dest mismatch.\n"); | 95 | dprintf("Source or dest mismatch.\n"); |
96 | 96 | ||
97 | dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n", | 97 | dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n", |
98 | NIPQUAD(ip->saddr), | 98 | &ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr, |
99 | NIPQUAD(ipinfo->smsk.s_addr), | ||
100 | NIPQUAD(ipinfo->src.s_addr), | ||
101 | ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : ""); | 99 | ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : ""); |
102 | dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n", | 100 | dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n", |
103 | NIPQUAD(ip->daddr), | 101 | &ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr, |
104 | NIPQUAD(ipinfo->dmsk.s_addr), | ||
105 | NIPQUAD(ipinfo->dst.s_addr), | ||
106 | ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : ""); | 102 | ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : ""); |
107 | return false; | 103 | return false; |
108 | } | 104 | } |
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 7ac1677419a9..2e4f98b85524 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c | |||
@@ -168,7 +168,7 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip, | |||
168 | char buffer[16]; | 168 | char buffer[16]; |
169 | 169 | ||
170 | /* create proc dir entry */ | 170 | /* create proc dir entry */ |
171 | sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(ip)); | 171 | sprintf(buffer, "%pI4", &ip); |
172 | c->pde = proc_create_data(buffer, S_IWUSR|S_IRUSR, | 172 | c->pde = proc_create_data(buffer, S_IWUSR|S_IRUSR, |
173 | clusterip_procdir, | 173 | clusterip_procdir, |
174 | &clusterip_proc_fops, c); | 174 | &clusterip_proc_fops, c); |
@@ -373,7 +373,7 @@ static bool clusterip_tg_check(const struct xt_tgchk_param *par) | |||
373 | config = clusterip_config_find_get(e->ip.dst.s_addr, 1); | 373 | config = clusterip_config_find_get(e->ip.dst.s_addr, 1); |
374 | if (!config) { | 374 | if (!config) { |
375 | if (!(cipinfo->flags & CLUSTERIP_FLAG_NEW)) { | 375 | if (!(cipinfo->flags & CLUSTERIP_FLAG_NEW)) { |
376 | printk(KERN_WARNING "CLUSTERIP: no config found for %u.%u.%u.%u, need 'new'\n", NIPQUAD(e->ip.dst.s_addr)); | 376 | printk(KERN_WARNING "CLUSTERIP: no config found for %pI4, need 'new'\n", &e->ip.dst.s_addr); |
377 | return false; | 377 | return false; |
378 | } else { | 378 | } else { |
379 | struct net_device *dev; | 379 | struct net_device *dev; |
@@ -478,9 +478,8 @@ static void arp_print(struct arp_payload *payload) | |||
478 | } | 478 | } |
479 | hbuffer[--k]='\0'; | 479 | hbuffer[--k]='\0'; |
480 | 480 | ||
481 | printk("src %u.%u.%u.%u@%s, dst %u.%u.%u.%u\n", | 481 | printk("src %pI4@%s, dst %pI4\n", |
482 | NIPQUAD(payload->src_ip), hbuffer, | 482 | &payload->src_ip, hbuffer, &payload->dst_ip); |
483 | NIPQUAD(payload->dst_ip)); | ||
484 | } | 483 | } |
485 | #endif | 484 | #endif |
486 | 485 | ||
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c index fc6ce04a3e35..4614a696f1b0 100644 --- a/net/ipv4/netfilter/ipt_LOG.c +++ b/net/ipv4/netfilter/ipt_LOG.c | |||
@@ -54,8 +54,8 @@ static void dump_packet(const struct nf_loginfo *info, | |||
54 | /* Important fields: | 54 | /* Important fields: |
55 | * TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */ | 55 | * TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */ |
56 | /* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */ | 56 | /* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */ |
57 | printk("SRC=%u.%u.%u.%u DST=%u.%u.%u.%u ", | 57 | printk("SRC=%pI4 DST=%pI4 ", |
58 | NIPQUAD(ih->saddr), NIPQUAD(ih->daddr)); | 58 | &ih->saddr, &ih->daddr); |
59 | 59 | ||
60 | /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */ | 60 | /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */ |
61 | printk("LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ", | 61 | printk("LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ", |
@@ -262,8 +262,7 @@ static void dump_packet(const struct nf_loginfo *info, | |||
262 | break; | 262 | break; |
263 | case ICMP_REDIRECT: | 263 | case ICMP_REDIRECT: |
264 | /* Max length: 24 "GATEWAY=255.255.255.255 " */ | 264 | /* Max length: 24 "GATEWAY=255.255.255.255 " */ |
265 | printk("GATEWAY=%u.%u.%u.%u ", | 265 | printk("GATEWAY=%pI4 ", &ich->un.gateway); |
266 | NIPQUAD(ich->un.gateway)); | ||
267 | /* Fall through */ | 266 | /* Fall through */ |
268 | case ICMP_DEST_UNREACH: | 267 | case ICMP_DEST_UNREACH: |
269 | case ICMP_SOURCE_QUENCH: | 268 | case ICMP_SOURCE_QUENCH: |
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 4a7c35275396..b2141e11575e 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | |||
@@ -60,9 +60,8 @@ static bool ipv4_invert_tuple(struct nf_conntrack_tuple *tuple, | |||
60 | static int ipv4_print_tuple(struct seq_file *s, | 60 | static int ipv4_print_tuple(struct seq_file *s, |
61 | const struct nf_conntrack_tuple *tuple) | 61 | const struct nf_conntrack_tuple *tuple) |
62 | { | 62 | { |
63 | return seq_printf(s, "src=%u.%u.%u.%u dst=%u.%u.%u.%u ", | 63 | return seq_printf(s, "src=%pI4 dst=%pI4 ", |
64 | NIPQUAD(tuple->src.u3.ip), | 64 | &tuple->src.u3.ip, &tuple->dst.u3.ip); |
65 | NIPQUAD(tuple->dst.u3.ip)); | ||
66 | } | 65 | } |
67 | 66 | ||
68 | static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, | 67 | static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, |
@@ -198,7 +197,7 @@ static ctl_table ip_ct_sysctl_table[] = { | |||
198 | .data = &nf_conntrack_max, | 197 | .data = &nf_conntrack_max, |
199 | .maxlen = sizeof(int), | 198 | .maxlen = sizeof(int), |
200 | .mode = 0644, | 199 | .mode = 0644, |
201 | .proc_handler = &proc_dointvec, | 200 | .proc_handler = proc_dointvec, |
202 | }, | 201 | }, |
203 | { | 202 | { |
204 | .ctl_name = NET_IPV4_NF_CONNTRACK_COUNT, | 203 | .ctl_name = NET_IPV4_NF_CONNTRACK_COUNT, |
@@ -206,7 +205,7 @@ static ctl_table ip_ct_sysctl_table[] = { | |||
206 | .data = &init_net.ct.count, | 205 | .data = &init_net.ct.count, |
207 | .maxlen = sizeof(int), | 206 | .maxlen = sizeof(int), |
208 | .mode = 0444, | 207 | .mode = 0444, |
209 | .proc_handler = &proc_dointvec, | 208 | .proc_handler = proc_dointvec, |
210 | }, | 209 | }, |
211 | { | 210 | { |
212 | .ctl_name = NET_IPV4_NF_CONNTRACK_BUCKETS, | 211 | .ctl_name = NET_IPV4_NF_CONNTRACK_BUCKETS, |
@@ -214,7 +213,7 @@ static ctl_table ip_ct_sysctl_table[] = { | |||
214 | .data = &nf_conntrack_htable_size, | 213 | .data = &nf_conntrack_htable_size, |
215 | .maxlen = sizeof(unsigned int), | 214 | .maxlen = sizeof(unsigned int), |
216 | .mode = 0444, | 215 | .mode = 0444, |
217 | .proc_handler = &proc_dointvec, | 216 | .proc_handler = proc_dointvec, |
218 | }, | 217 | }, |
219 | { | 218 | { |
220 | .ctl_name = NET_IPV4_NF_CONNTRACK_CHECKSUM, | 219 | .ctl_name = NET_IPV4_NF_CONNTRACK_CHECKSUM, |
@@ -222,7 +221,7 @@ static ctl_table ip_ct_sysctl_table[] = { | |||
222 | .data = &init_net.ct.sysctl_checksum, | 221 | .data = &init_net.ct.sysctl_checksum, |
223 | .maxlen = sizeof(int), | 222 | .maxlen = sizeof(int), |
224 | .mode = 0644, | 223 | .mode = 0644, |
225 | .proc_handler = &proc_dointvec, | 224 | .proc_handler = proc_dointvec, |
226 | }, | 225 | }, |
227 | { | 226 | { |
228 | .ctl_name = NET_IPV4_NF_CONNTRACK_LOG_INVALID, | 227 | .ctl_name = NET_IPV4_NF_CONNTRACK_LOG_INVALID, |
@@ -230,8 +229,8 @@ static ctl_table ip_ct_sysctl_table[] = { | |||
230 | .data = &init_net.ct.sysctl_log_invalid, | 229 | .data = &init_net.ct.sysctl_log_invalid, |
231 | .maxlen = sizeof(unsigned int), | 230 | .maxlen = sizeof(unsigned int), |
232 | .mode = 0644, | 231 | .mode = 0644, |
233 | .proc_handler = &proc_dointvec_minmax, | 232 | .proc_handler = proc_dointvec_minmax, |
234 | .strategy = &sysctl_intvec, | 233 | .strategy = sysctl_intvec, |
235 | .extra1 = &log_invalid_proto_min, | 234 | .extra1 = &log_invalid_proto_min, |
236 | .extra2 = &log_invalid_proto_max, | 235 | .extra2 = &log_invalid_proto_max, |
237 | }, | 236 | }, |
@@ -284,17 +283,17 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len) | |||
284 | .tuple.dst.u3.ip; | 283 | .tuple.dst.u3.ip; |
285 | memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); | 284 | memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); |
286 | 285 | ||
287 | pr_debug("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n", | 286 | pr_debug("SO_ORIGINAL_DST: %pI4 %u\n", |
288 | NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port)); | 287 | &sin.sin_addr.s_addr, ntohs(sin.sin_port)); |
289 | nf_ct_put(ct); | 288 | nf_ct_put(ct); |
290 | if (copy_to_user(user, &sin, sizeof(sin)) != 0) | 289 | if (copy_to_user(user, &sin, sizeof(sin)) != 0) |
291 | return -EFAULT; | 290 | return -EFAULT; |
292 | else | 291 | else |
293 | return 0; | 292 | return 0; |
294 | } | 293 | } |
295 | pr_debug("SO_ORIGINAL_DST: Can't find %u.%u.%u.%u/%u-%u.%u.%u.%u/%u.\n", | 294 | pr_debug("SO_ORIGINAL_DST: Can't find %pI4/%u-%pI4/%u.\n", |
296 | NIPQUAD(tuple.src.u3.ip), ntohs(tuple.src.u.tcp.port), | 295 | &tuple.src.u3.ip, ntohs(tuple.src.u.tcp.port), |
297 | NIPQUAD(tuple.dst.u3.ip), ntohs(tuple.dst.u.tcp.port)); | 296 | &tuple.dst.u3.ip, ntohs(tuple.dst.u.tcp.port)); |
298 | return -ENOENT; | 297 | return -ENOENT; |
299 | } | 298 | } |
300 | 299 | ||
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index 4e8879220222..1fd3ef7718b6 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c | |||
@@ -272,7 +272,7 @@ static struct ctl_table icmp_sysctl_table[] = { | |||
272 | .data = &nf_ct_icmp_timeout, | 272 | .data = &nf_ct_icmp_timeout, |
273 | .maxlen = sizeof(unsigned int), | 273 | .maxlen = sizeof(unsigned int), |
274 | .mode = 0644, | 274 | .mode = 0644, |
275 | .proc_handler = &proc_dointvec_jiffies, | 275 | .proc_handler = proc_dointvec_jiffies, |
276 | }, | 276 | }, |
277 | { | 277 | { |
278 | .ctl_name = 0 | 278 | .ctl_name = 0 |
@@ -285,7 +285,7 @@ static struct ctl_table icmp_compat_sysctl_table[] = { | |||
285 | .data = &nf_ct_icmp_timeout, | 285 | .data = &nf_ct_icmp_timeout, |
286 | .maxlen = sizeof(unsigned int), | 286 | .maxlen = sizeof(unsigned int), |
287 | .mode = 0644, | 287 | .mode = 0644, |
288 | .proc_handler = &proc_dointvec_jiffies, | 288 | .proc_handler = proc_dointvec_jiffies, |
289 | }, | 289 | }, |
290 | { | 290 | { |
291 | .ctl_name = 0 | 291 | .ctl_name = 0 |
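The sysctl-table hunks in the two conntrack files above only drop a redundant '&' in front of the handler functions. In C a function name used as a value already decays to a pointer to that function, so proc_dointvec and &proc_dointvec denote the same address; the shorter spelling is purely a style cleanup. A small standalone check of that rule, using a made-up dummy_handler rather than a kernel symbol:

#include <assert.h>
#include <stdio.h>

static int dummy_handler(int v)
{
        return v + 1;
}

int main(void)
{
        int (*h1)(int) = dummy_handler;         /* without & */
        int (*h2)(int) = &dummy_handler;        /* with &    */

        assert(h1 == h2);                       /* same function address */
        printf("%d\n", h1(41));                 /* 42 */
        return 0;
}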
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c index ee47bf28c825..7e8e6fc75413 100644 --- a/net/ipv4/netfilter/nf_nat_h323.c +++ b/net/ipv4/netfilter/nf_nat_h323.c | |||
@@ -119,10 +119,9 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct, | |||
119 | (ntohl(addr.ip) & 0xff000000) == 0x7f000000) | 119 | (ntohl(addr.ip) & 0xff000000) == 0x7f000000) |
120 | i = 0; | 120 | i = 0; |
121 | 121 | ||
122 | pr_debug("nf_nat_ras: set signal address " | 122 | pr_debug("nf_nat_ras: set signal address %pI4:%hu->%pI4:%hu\n", |
123 | "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | 123 | &addr.ip, port, |
124 | NIPQUAD(addr.ip), port, | 124 | &ct->tuplehash[!dir].tuple.dst.u3.ip, |
125 | NIPQUAD(ct->tuplehash[!dir].tuple.dst.u3.ip), | ||
126 | info->sig_port[!dir]); | 125 | info->sig_port[!dir]); |
127 | return set_h225_addr(skb, data, 0, &taddr[i], | 126 | return set_h225_addr(skb, data, 0, &taddr[i], |
128 | &ct->tuplehash[!dir]. | 127 | &ct->tuplehash[!dir]. |
@@ -131,10 +130,9 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct, | |||
131 | } else if (addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && | 130 | } else if (addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && |
132 | port == info->sig_port[dir]) { | 131 | port == info->sig_port[dir]) { |
133 | /* GK->GW */ | 132 | /* GK->GW */ |
134 | pr_debug("nf_nat_ras: set signal address " | 133 | pr_debug("nf_nat_ras: set signal address %pI4:%hu->%pI4:%hu\n", |
135 | "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | 134 | &addr.ip, port, |
136 | NIPQUAD(addr.ip), port, | 135 | &ct->tuplehash[!dir].tuple.src.u3.ip, |
137 | NIPQUAD(ct->tuplehash[!dir].tuple.src.u3.ip), | ||
138 | info->sig_port[!dir]); | 136 | info->sig_port[!dir]); |
139 | return set_h225_addr(skb, data, 0, &taddr[i], | 137 | return set_h225_addr(skb, data, 0, &taddr[i], |
140 | &ct->tuplehash[!dir]. | 138 | &ct->tuplehash[!dir]. |
@@ -162,10 +160,9 @@ static int set_ras_addr(struct sk_buff *skb, struct nf_conn *ct, | |||
162 | if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) && | 160 | if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) && |
163 | addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && | 161 | addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && |
164 | port == ct->tuplehash[dir].tuple.src.u.udp.port) { | 162 | port == ct->tuplehash[dir].tuple.src.u.udp.port) { |
165 | pr_debug("nf_nat_ras: set rasAddress " | 163 | pr_debug("nf_nat_ras: set rasAddress %pI4:%hu->%pI4:%hu\n", |
166 | "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | 164 | &addr.ip, ntohs(port), |
167 | NIPQUAD(addr.ip), ntohs(port), | 165 | &ct->tuplehash[!dir].tuple.dst.u3.ip, |
168 | NIPQUAD(ct->tuplehash[!dir].tuple.dst.u3.ip), | ||
169 | ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port)); | 166 | ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port)); |
170 | return set_h225_addr(skb, data, 0, &taddr[i], | 167 | return set_h225_addr(skb, data, 0, &taddr[i], |
171 | &ct->tuplehash[!dir].tuple.dst.u3, | 168 | &ct->tuplehash[!dir].tuple.dst.u3, |
@@ -257,15 +254,15 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, | |||
257 | } | 254 | } |
258 | 255 | ||
259 | /* Success */ | 256 | /* Success */ |
260 | pr_debug("nf_nat_h323: expect RTP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | 257 | pr_debug("nf_nat_h323: expect RTP %pI4:%hu->%pI4:%hu\n", |
261 | NIPQUAD(rtp_exp->tuple.src.u3.ip), | 258 | &rtp_exp->tuple.src.u3.ip, |
262 | ntohs(rtp_exp->tuple.src.u.udp.port), | 259 | ntohs(rtp_exp->tuple.src.u.udp.port), |
263 | NIPQUAD(rtp_exp->tuple.dst.u3.ip), | 260 | &rtp_exp->tuple.dst.u3.ip, |
264 | ntohs(rtp_exp->tuple.dst.u.udp.port)); | 261 | ntohs(rtp_exp->tuple.dst.u.udp.port)); |
265 | pr_debug("nf_nat_h323: expect RTCP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | 262 | pr_debug("nf_nat_h323: expect RTCP %pI4:%hu->%pI4:%hu\n", |
266 | NIPQUAD(rtcp_exp->tuple.src.u3.ip), | 263 | &rtcp_exp->tuple.src.u3.ip, |
267 | ntohs(rtcp_exp->tuple.src.u.udp.port), | 264 | ntohs(rtcp_exp->tuple.src.u.udp.port), |
268 | NIPQUAD(rtcp_exp->tuple.dst.u3.ip), | 265 | &rtcp_exp->tuple.dst.u3.ip, |
269 | ntohs(rtcp_exp->tuple.dst.u.udp.port)); | 266 | ntohs(rtcp_exp->tuple.dst.u.udp.port)); |
270 | 267 | ||
271 | return 0; | 268 | return 0; |
@@ -307,10 +304,10 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct, | |||
307 | return -1; | 304 | return -1; |
308 | } | 305 | } |
309 | 306 | ||
310 | pr_debug("nf_nat_h323: expect T.120 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | 307 | pr_debug("nf_nat_h323: expect T.120 %pI4:%hu->%pI4:%hu\n", |
311 | NIPQUAD(exp->tuple.src.u3.ip), | 308 | &exp->tuple.src.u3.ip, |
312 | ntohs(exp->tuple.src.u.tcp.port), | 309 | ntohs(exp->tuple.src.u.tcp.port), |
313 | NIPQUAD(exp->tuple.dst.u3.ip), | 310 | &exp->tuple.dst.u3.ip, |
314 | ntohs(exp->tuple.dst.u.tcp.port)); | 311 | ntohs(exp->tuple.dst.u.tcp.port)); |
315 | 312 | ||
316 | return 0; | 313 | return 0; |
@@ -361,10 +358,10 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct, | |||
361 | return -1; | 358 | return -1; |
362 | } | 359 | } |
363 | 360 | ||
364 | pr_debug("nf_nat_q931: expect H.245 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | 361 | pr_debug("nf_nat_q931: expect H.245 %pI4:%hu->%pI4:%hu\n", |
365 | NIPQUAD(exp->tuple.src.u3.ip), | 362 | &exp->tuple.src.u3.ip, |
366 | ntohs(exp->tuple.src.u.tcp.port), | 363 | ntohs(exp->tuple.src.u.tcp.port), |
367 | NIPQUAD(exp->tuple.dst.u3.ip), | 364 | &exp->tuple.dst.u3.ip, |
368 | ntohs(exp->tuple.dst.u.tcp.port)); | 365 | ntohs(exp->tuple.dst.u.tcp.port)); |
369 | 366 | ||
370 | return 0; | 367 | return 0; |
@@ -455,10 +452,10 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct, | |||
455 | } | 452 | } |
456 | 453 | ||
457 | /* Success */ | 454 | /* Success */ |
458 | pr_debug("nf_nat_ras: expect Q.931 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | 455 | pr_debug("nf_nat_ras: expect Q.931 %pI4:%hu->%pI4:%hu\n", |
459 | NIPQUAD(exp->tuple.src.u3.ip), | 456 | &exp->tuple.src.u3.ip, |
460 | ntohs(exp->tuple.src.u.tcp.port), | 457 | ntohs(exp->tuple.src.u.tcp.port), |
461 | NIPQUAD(exp->tuple.dst.u3.ip), | 458 | &exp->tuple.dst.u3.ip, |
462 | ntohs(exp->tuple.dst.u.tcp.port)); | 459 | ntohs(exp->tuple.dst.u.tcp.port)); |
463 | 460 | ||
464 | return 0; | 461 | return 0; |
@@ -524,11 +521,10 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct, | |||
524 | } | 521 | } |
525 | 522 | ||
526 | /* Success */ | 523 | /* Success */ |
527 | pr_debug("nf_nat_q931: expect Call Forwarding " | 524 | pr_debug("nf_nat_q931: expect Call Forwarding %pI4:%hu->%pI4:%hu\n", |
528 | "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | 525 | &exp->tuple.src.u3.ip, |
529 | NIPQUAD(exp->tuple.src.u3.ip), | ||
530 | ntohs(exp->tuple.src.u.tcp.port), | 526 | ntohs(exp->tuple.src.u.tcp.port), |
531 | NIPQUAD(exp->tuple.dst.u3.ip), | 527 | &exp->tuple.dst.u3.ip, |
532 | ntohs(exp->tuple.dst.u.tcp.port)); | 528 | ntohs(exp->tuple.dst.u.tcp.port)); |
533 | 529 | ||
534 | return 0; | 530 | return 0; |
diff --git a/net/ipv4/netfilter/nf_nat_irc.c b/net/ipv4/netfilter/nf_nat_irc.c index fe6f9cef6c85..ea83a886b03e 100644 --- a/net/ipv4/netfilter/nf_nat_irc.c +++ b/net/ipv4/netfilter/nf_nat_irc.c | |||
@@ -55,8 +55,8 @@ static unsigned int help(struct sk_buff *skb, | |||
55 | 55 | ||
56 | ip = ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip); | 56 | ip = ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip); |
57 | sprintf(buffer, "%u %u", ip, port); | 57 | sprintf(buffer, "%u %u", ip, port); |
58 | pr_debug("nf_nat_irc: inserting '%s' == %u.%u.%u.%u, port %u\n", | 58 | pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n", |
59 | buffer, NIPQUAD(ip), port); | 59 | buffer, &ip, port); |
60 | 60 | ||
61 | ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo, | 61 | ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo, |
62 | matchoff, matchlen, buffer, | 62 | matchoff, matchlen, buffer, |
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c index bea54a685109..a4f1c3479e23 100644 --- a/net/ipv4/netfilter/nf_nat_rule.c +++ b/net/ipv4/netfilter/nf_nat_rule.c | |||
@@ -98,8 +98,7 @@ static void warn_if_extra_mangle(struct net *net, __be32 dstip, __be32 srcip) | |||
98 | 98 | ||
99 | if (rt->rt_src != srcip && !warned) { | 99 | if (rt->rt_src != srcip && !warned) { |
100 | printk("NAT: no longer support implicit source local NAT\n"); | 100 | printk("NAT: no longer support implicit source local NAT\n"); |
101 | printk("NAT: packet src %u.%u.%u.%u -> dst %u.%u.%u.%u\n", | 101 | printk("NAT: packet src %pI4 -> dst %pI4\n", &srcip, &dstip); |
102 | NIPQUAD(srcip), NIPQUAD(dstip)); | ||
103 | warned = 1; | 102 | warned = 1; |
104 | } | 103 | } |
105 | ip_rt_put(rt); | 104 | ip_rt_put(rt); |
@@ -166,8 +165,7 @@ alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) | |||
166 | struct nf_nat_range range | 165 | struct nf_nat_range range |
167 | = { IP_NAT_RANGE_MAP_IPS, ip, ip, { 0 }, { 0 } }; | 166 | = { IP_NAT_RANGE_MAP_IPS, ip, ip, { 0 }, { 0 } }; |
168 | 167 | ||
169 | pr_debug("Allocating NULL binding for %p (%u.%u.%u.%u)\n", | 168 | pr_debug("Allocating NULL binding for %p (%pI4)\n", ct, &ip); |
170 | ct, NIPQUAD(ip)); | ||
171 | return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum)); | 169 | return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum)); |
172 | } | 170 | } |
173 | 171 | ||
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c index 14544320c545..07d61a57613c 100644 --- a/net/ipv4/netfilter/nf_nat_sip.c +++ b/net/ipv4/netfilter/nf_nat_sip.c | |||
@@ -74,8 +74,7 @@ static int map_addr(struct sk_buff *skb, | |||
74 | if (newaddr == addr->ip && newport == port) | 74 | if (newaddr == addr->ip && newport == port) |
75 | return 1; | 75 | return 1; |
76 | 76 | ||
77 | buflen = sprintf(buffer, "%u.%u.%u.%u:%u", | 77 | buflen = sprintf(buffer, "%pI4:%u", &newaddr, ntohs(newport)); |
78 | NIPQUAD(newaddr), ntohs(newport)); | ||
79 | 78 | ||
80 | return mangle_packet(skb, dptr, datalen, matchoff, matchlen, | 79 | return mangle_packet(skb, dptr, datalen, matchoff, matchlen, |
81 | buffer, buflen); | 80 | buffer, buflen); |
@@ -152,8 +151,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, | |||
152 | &addr) > 0 && | 151 | &addr) > 0 && |
153 | addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && | 152 | addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && |
154 | addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) { | 153 | addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) { |
155 | __be32 ip = ct->tuplehash[!dir].tuple.dst.u3.ip; | 154 | buflen = sprintf(buffer, "%pI4", |
156 | buflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(ip)); | 155 | &ct->tuplehash[!dir].tuple.dst.u3.ip); |
157 | if (!mangle_packet(skb, dptr, datalen, poff, plen, | 156 | if (!mangle_packet(skb, dptr, datalen, poff, plen, |
158 | buffer, buflen)) | 157 | buffer, buflen)) |
159 | return NF_DROP; | 158 | return NF_DROP; |
@@ -166,8 +165,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, | |||
166 | &addr) > 0 && | 165 | &addr) > 0 && |
167 | addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && | 166 | addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && |
168 | addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) { | 167 | addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) { |
169 | __be32 ip = ct->tuplehash[!dir].tuple.src.u3.ip; | 168 | buflen = sprintf(buffer, "%pI4", |
170 | buflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(ip)); | 169 | &ct->tuplehash[!dir].tuple.src.u3.ip); |
171 | if (!mangle_packet(skb, dptr, datalen, poff, plen, | 170 | if (!mangle_packet(skb, dptr, datalen, poff, plen, |
172 | buffer, buflen)) | 171 | buffer, buflen)) |
173 | return NF_DROP; | 172 | return NF_DROP; |
@@ -279,8 +278,7 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb, | |||
279 | 278 | ||
280 | if (exp->tuple.dst.u3.ip != exp->saved_ip || | 279 | if (exp->tuple.dst.u3.ip != exp->saved_ip || |
281 | exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) { | 280 | exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) { |
282 | buflen = sprintf(buffer, "%u.%u.%u.%u:%u", | 281 | buflen = sprintf(buffer, "%pI4:%u", &newip, port); |
283 | NIPQUAD(newip), port); | ||
284 | if (!mangle_packet(skb, dptr, datalen, matchoff, matchlen, | 282 | if (!mangle_packet(skb, dptr, datalen, matchoff, matchlen, |
285 | buffer, buflen)) | 283 | buffer, buflen)) |
286 | goto err; | 284 | goto err; |
@@ -345,7 +343,7 @@ static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr, | |||
345 | char buffer[sizeof("nnn.nnn.nnn.nnn")]; | 343 | char buffer[sizeof("nnn.nnn.nnn.nnn")]; |
346 | unsigned int buflen; | 344 | unsigned int buflen; |
347 | 345 | ||
348 | buflen = sprintf(buffer, NIPQUAD_FMT, NIPQUAD(addr->ip)); | 346 | buflen = sprintf(buffer, "%pI4", &addr->ip); |
349 | if (mangle_sdp_packet(skb, dptr, dataoff, datalen, type, term, | 347 | if (mangle_sdp_packet(skb, dptr, dataoff, datalen, type, term, |
350 | buffer, buflen)) | 348 | buffer, buflen)) |
351 | return 0; | 349 | return 0; |
@@ -380,7 +378,7 @@ static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr, | |||
380 | unsigned int buflen; | 378 | unsigned int buflen; |
381 | 379 | ||
382 | /* Mangle session description owner and contact addresses */ | 380 | /* Mangle session description owner and contact addresses */ |
383 | buflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(addr->ip)); | 381 | buflen = sprintf(buffer, "%pI4", &addr->ip); |
384 | if (mangle_sdp_packet(skb, dptr, dataoff, datalen, | 382 | if (mangle_sdp_packet(skb, dptr, dataoff, datalen, |
385 | SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA, | 383 | SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA, |
386 | buffer, buflen)) | 384 | buffer, buflen)) |
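The ip_nat_sdp_addr() hunk above keeps formatting into char buffer[sizeof("nnn.nnn.nnn.nnn")], i.e. 16 bytes. The widest text %pI4 can produce is 255.255.255.255 at 15 characters, so the existing buffer still holds the worst case plus the terminating NUL. A quick standalone check of that sizing assumption, with plain snprintf standing in for the kernel formatting:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        char buffer[sizeof("nnn.nnn.nnn.nnn")];         /* 16 bytes */
        unsigned char b[4] = { 255, 255, 255, 255 };
        int len = snprintf(buffer, sizeof(buffer), "%u.%u.%u.%u",
                           b[0], b[1], b[2], b[3]);

        assert(sizeof(buffer) == 16 && len == 15);      /* worst case fits */
        puts(buffer);                                   /* 255.255.255.255 */
        return 0;
}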
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index 8303e4b406c0..182f845de92f 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c | |||
@@ -930,8 +930,8 @@ static inline void mangle_address(unsigned char *begin, | |||
930 | } | 930 | } |
931 | 931 | ||
932 | if (debug) | 932 | if (debug) |
933 | printk(KERN_DEBUG "bsalg: mapped %u.%u.%u.%u to " | 933 | printk(KERN_DEBUG "bsalg: mapped %pI4 to %pI4\n", |
934 | "%u.%u.%u.%u\n", NIPQUAD(old), NIPQUAD(*addr)); | 934 | &old, addr); |
935 | } | 935 | } |
936 | } | 936 | } |
937 | 937 | ||
@@ -1267,9 +1267,8 @@ static int help(struct sk_buff *skb, unsigned int protoff, | |||
1267 | */ | 1267 | */ |
1268 | if (ntohs(udph->len) != skb->len - (iph->ihl << 2)) { | 1268 | if (ntohs(udph->len) != skb->len - (iph->ihl << 2)) { |
1269 | if (net_ratelimit()) | 1269 | if (net_ratelimit()) |
1270 | printk(KERN_WARNING "SNMP: dropping malformed packet " | 1270 | printk(KERN_WARNING "SNMP: dropping malformed packet src=%pI4 dst=%pI4\n", |
1271 | "src=%u.%u.%u.%u dst=%u.%u.%u.%u\n", | 1271 | &iph->saddr, &iph->daddr); |
1272 | NIPQUAD(iph->saddr), NIPQUAD(iph->daddr)); | ||
1273 | return NF_DROP; | 1272 | return NF_DROP; |
1274 | } | 1273 | } |
1275 | 1274 | ||
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index cd975743bcd2..998fcffc9e15 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -247,7 +247,7 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info) | |||
247 | } | 247 | } |
248 | 248 | ||
249 | if (inet->recverr) { | 249 | if (inet->recverr) { |
250 | struct iphdr *iph = (struct iphdr*)skb->data; | 250 | struct iphdr *iph = (struct iphdr *)skb->data; |
251 | u8 *payload = skb->data + (iph->ihl << 2); | 251 | u8 *payload = skb->data + (iph->ihl << 2); |
252 | 252 | ||
253 | if (inet->hdrincl) | 253 | if (inet->hdrincl) |
@@ -465,7 +465,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
465 | */ | 465 | */ |
466 | 466 | ||
467 | if (msg->msg_namelen) { | 467 | if (msg->msg_namelen) { |
468 | struct sockaddr_in *usin = (struct sockaddr_in*)msg->msg_name; | 468 | struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name; |
469 | err = -EINVAL; | 469 | err = -EINVAL; |
470 | if (msg->msg_namelen < sizeof(*usin)) | 470 | if (msg->msg_namelen < sizeof(*usin)) |
471 | goto out; | 471 | goto out; |
@@ -851,7 +851,7 @@ struct proto raw_prot = { | |||
851 | static struct sock *raw_get_first(struct seq_file *seq) | 851 | static struct sock *raw_get_first(struct seq_file *seq) |
852 | { | 852 | { |
853 | struct sock *sk; | 853 | struct sock *sk; |
854 | struct raw_iter_state* state = raw_seq_private(seq); | 854 | struct raw_iter_state *state = raw_seq_private(seq); |
855 | 855 | ||
856 | for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE; | 856 | for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE; |
857 | ++state->bucket) { | 857 | ++state->bucket) { |
@@ -868,7 +868,7 @@ found: | |||
868 | 868 | ||
869 | static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk) | 869 | static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk) |
870 | { | 870 | { |
871 | struct raw_iter_state* state = raw_seq_private(seq); | 871 | struct raw_iter_state *state = raw_seq_private(seq); |
872 | 872 | ||
873 | do { | 873 | do { |
874 | sk = sk_next(sk); | 874 | sk = sk_next(sk); |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 2ea6dcc3e2cc..0dc0c3826763 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -129,6 +129,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; | |||
129 | static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; | 129 | static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; |
130 | static int ip_rt_min_advmss __read_mostly = 256; | 130 | static int ip_rt_min_advmss __read_mostly = 256; |
131 | static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ; | 131 | static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ; |
132 | static int rt_chain_length_max __read_mostly = 20; | ||
132 | 133 | ||
133 | static void rt_worker_func(struct work_struct *work); | 134 | static void rt_worker_func(struct work_struct *work); |
134 | static DECLARE_DELAYED_WORK(expires_work, rt_worker_func); | 135 | static DECLARE_DELAYED_WORK(expires_work, rt_worker_func); |
@@ -145,6 +146,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst); | |||
145 | static void ipv4_link_failure(struct sk_buff *skb); | 146 | static void ipv4_link_failure(struct sk_buff *skb); |
146 | static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu); | 147 | static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu); |
147 | static int rt_garbage_collect(struct dst_ops *ops); | 148 | static int rt_garbage_collect(struct dst_ops *ops); |
149 | static void rt_emergency_hash_rebuild(struct net *net); | ||
148 | 150 | ||
149 | 151 | ||
150 | static struct dst_ops ipv4_dst_ops = { | 152 | static struct dst_ops ipv4_dst_ops = { |
@@ -201,6 +203,7 @@ const __u8 ip_tos2prio[16] = { | |||
201 | struct rt_hash_bucket { | 203 | struct rt_hash_bucket { |
202 | struct rtable *chain; | 204 | struct rtable *chain; |
203 | }; | 205 | }; |
206 | |||
204 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \ | 207 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \ |
205 | defined(CONFIG_PROVE_LOCKING) | 208 | defined(CONFIG_PROVE_LOCKING) |
206 | /* | 209 | /* |
@@ -674,6 +677,20 @@ static inline u32 rt_score(struct rtable *rt) | |||
674 | return score; | 677 | return score; |
675 | } | 678 | } |
676 | 679 | ||
680 | static inline bool rt_caching(const struct net *net) | ||
681 | { | ||
682 | return net->ipv4.current_rt_cache_rebuild_count <= | ||
683 | net->ipv4.sysctl_rt_cache_rebuild_count; | ||
684 | } | ||
685 | |||
686 | static inline bool compare_hash_inputs(const struct flowi *fl1, | ||
687 | const struct flowi *fl2) | ||
688 | { | ||
689 | return (__force u32)(((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | | ||
690 | (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) | | ||
691 | (fl1->iif ^ fl2->iif)) == 0); | ||
692 | } | ||
693 | |||
677 | static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) | 694 | static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) |
678 | { | 695 | { |
679 | return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | | 696 | return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | |
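The new compare_hash_inputs() above folds the three inputs of the route hash (daddr, saddr, iif) into one branch-free test: OR-ing the per-field XORs is zero only when every pair of fields is equal. Unlike compare_keys() it deliberately ignores the non-hash fields, which is what later lets rt_check_expire() count a run of same-hash entries as a single chain slot. A standalone sketch of the XOR trick, with illustrative types rather than the kernel's struct flowi:

#include <assert.h>
#include <stdint.h>

struct hash_inputs {
        uint32_t daddr;
        uint32_t saddr;
        int iif;
};

static int same_hash_inputs(const struct hash_inputs *a,
                            const struct hash_inputs *b)
{
        return ((a->daddr ^ b->daddr) |
                (a->saddr ^ b->saddr) |
                (uint32_t)(a->iif ^ b->iif)) == 0;
}

int main(void)
{
        struct hash_inputs x = { 0x0100007f, 0x0200007f, 2 };
        struct hash_inputs y = x;
        struct hash_inputs z = x;

        z.iif = 3;
        assert(same_hash_inputs(&x, &y));       /* all three fields match */
        assert(!same_hash_inputs(&x, &z));      /* one differing field is enough */
        return 0;
}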
@@ -753,11 +770,24 @@ static void rt_do_flush(int process_context) | |||
753 | } | 770 | } |
754 | } | 771 | } |
755 | 772 | ||
773 | /* | ||
774 | * While freeing expired entries, we compute average chain length | ||
775 | * and standard deviation, using fixed-point arithmetic. | ||
776 | * This is to get an estimate of rt_chain_length_max | ||
777 | * rt_chain_length_max = max(elasticity, AVG + 4*SD) | ||
778 | * We use 3 bits for the fractional part, and 29 (or 61) for magnitude. | ||
779 | */ | ||
780 | |||
781 | #define FRACT_BITS 3 | ||
782 | #define ONE (1UL << FRACT_BITS) | ||
783 | |||
756 | static void rt_check_expire(void) | 784 | static void rt_check_expire(void) |
757 | { | 785 | { |
758 | static unsigned int rover; | 786 | static unsigned int rover; |
759 | unsigned int i = rover, goal; | 787 | unsigned int i = rover, goal; |
760 | struct rtable *rth, **rthp; | 788 | struct rtable *rth, **rthp; |
789 | unsigned long length = 0, samples = 0; | ||
790 | unsigned long sum = 0, sum2 = 0; | ||
761 | u64 mult; | 791 | u64 mult; |
762 | 792 | ||
763 | mult = ((u64)ip_rt_gc_interval) << rt_hash_log; | 793 | mult = ((u64)ip_rt_gc_interval) << rt_hash_log; |
@@ -766,6 +796,7 @@ static void rt_check_expire(void) | |||
766 | goal = (unsigned int)mult; | 796 | goal = (unsigned int)mult; |
767 | if (goal > rt_hash_mask) | 797 | if (goal > rt_hash_mask) |
768 | goal = rt_hash_mask + 1; | 798 | goal = rt_hash_mask + 1; |
799 | length = 0; | ||
769 | for (; goal > 0; goal--) { | 800 | for (; goal > 0; goal--) { |
770 | unsigned long tmo = ip_rt_gc_timeout; | 801 | unsigned long tmo = ip_rt_gc_timeout; |
771 | 802 | ||
@@ -775,6 +806,8 @@ static void rt_check_expire(void) | |||
775 | if (need_resched()) | 806 | if (need_resched()) |
776 | cond_resched(); | 807 | cond_resched(); |
777 | 808 | ||
809 | samples++; | ||
810 | |||
778 | if (*rthp == NULL) | 811 | if (*rthp == NULL) |
779 | continue; | 812 | continue; |
780 | spin_lock_bh(rt_hash_lock_addr(i)); | 813 | spin_lock_bh(rt_hash_lock_addr(i)); |
@@ -789,11 +822,29 @@ static void rt_check_expire(void) | |||
789 | if (time_before_eq(jiffies, rth->u.dst.expires)) { | 822 | if (time_before_eq(jiffies, rth->u.dst.expires)) { |
790 | tmo >>= 1; | 823 | tmo >>= 1; |
791 | rthp = &rth->u.dst.rt_next; | 824 | rthp = &rth->u.dst.rt_next; |
825 | /* | ||
826 | * Only bump our length if the hash | ||
827 | * inputs on entries n and n+1 are not | ||
828 | * the same; we only count entries on | ||
829 | * a chain with equal hash inputs once | ||
830 | * so that entries for different QOS | ||
831 | * levels, and other non-hash input | ||
832 | * attributes don't unfairly skew | ||
833 | * the length computation | ||
834 | */ | ||
835 | if ((*rthp == NULL) || | ||
836 | !compare_hash_inputs(&(*rthp)->fl, | ||
837 | &rth->fl)) | ||
838 | length += ONE; | ||
792 | continue; | 839 | continue; |
793 | } | 840 | } |
794 | } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) { | 841 | } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) { |
795 | tmo >>= 1; | 842 | tmo >>= 1; |
796 | rthp = &rth->u.dst.rt_next; | 843 | rthp = &rth->u.dst.rt_next; |
844 | if ((*rthp == NULL) || | ||
845 | !compare_hash_inputs(&(*rthp)->fl, | ||
846 | &rth->fl)) | ||
847 | length += ONE; | ||
797 | continue; | 848 | continue; |
798 | } | 849 | } |
799 | 850 | ||
@@ -802,6 +853,15 @@ static void rt_check_expire(void) | |||
802 | rt_free(rth); | 853 | rt_free(rth); |
803 | } | 854 | } |
804 | spin_unlock_bh(rt_hash_lock_addr(i)); | 855 | spin_unlock_bh(rt_hash_lock_addr(i)); |
856 | sum += length; | ||
857 | sum2 += length*length; | ||
858 | } | ||
859 | if (samples) { | ||
860 | unsigned long avg = sum / samples; | ||
861 | unsigned long sd = int_sqrt(sum2 / samples - avg*avg); | ||
862 | rt_chain_length_max = max_t(unsigned long, | ||
863 | ip_rt_gc_elasticity, | ||
864 | (avg + 4*sd) >> FRACT_BITS); | ||
805 | } | 865 | } |
806 | rover = i; | 866 | rover = i; |
807 | } | 867 | } |
@@ -851,6 +911,26 @@ static void rt_secret_rebuild(unsigned long __net) | |||
851 | mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval); | 911 | mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval); |
852 | } | 912 | } |
853 | 913 | ||
914 | static void rt_secret_rebuild_oneshot(struct net *net) | ||
915 | { | ||
916 | del_timer_sync(&net->ipv4.rt_secret_timer); | ||
917 | rt_cache_invalidate(net); | ||
918 | if (ip_rt_secret_interval) { | ||
919 | net->ipv4.rt_secret_timer.expires += ip_rt_secret_interval; | ||
920 | add_timer(&net->ipv4.rt_secret_timer); | ||
921 | } | ||
922 | } | ||
923 | |||
924 | static void rt_emergency_hash_rebuild(struct net *net) | ||
925 | { | ||
926 | if (net_ratelimit()) { | ||
927 | printk(KERN_WARNING "Route hash chain too long!\n"); | ||
928 | printk(KERN_WARNING "Adjust your secret_interval!\n"); | ||
929 | } | ||
930 | |||
931 | rt_secret_rebuild_oneshot(net); | ||
932 | } | ||
933 | |||
854 | /* | 934 | /* |
855 | Short description of GC goals. | 935 | Short description of GC goals. |
856 | 936 | ||
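Taken together, the pieces above gate the cache per namespace: each emergency rebuild increments current_rt_cache_rebuild_count, and once that passes sysctl_rt_cache_rebuild_count, rt_caching() returns false and rt_intern_hash(), ip_route_input() and __ip_route_output_key() bypass the cache entirely. A condensed sketch of that state machine, using a stand-in struct instead of struct net:

#include <stdbool.h>
#include <stdio.h>

/* stand-in for the two per-namespace counters this patch relies on */
struct fake_netns {
    int current_rt_cache_rebuild_count;
    int sysctl_rt_cache_rebuild_count;
};

static bool rt_caching(const struct fake_netns *net)
{
    return net->current_rt_cache_rebuild_count <=
           net->sysctl_rt_cache_rebuild_count;
}

/* called when a chain exceeds rt_chain_length_max */
static void emergency_rebuild(struct fake_netns *net)
{
    int num = ++net->current_rt_cache_rebuild_count;

    if (!rt_caching(net))
        printf("%d rebuilds is over the limit, route caching disabled\n", num);
    /* the real code also restarts the secret-rehash timer here */
}

int main(void)
{
    struct fake_netns net = { .sysctl_rt_cache_rebuild_count = 2 };

    for (int i = 0; i < 4; i++) {
        emergency_rebuild(&net);
        printf("rebuild %d: caching %s\n", i + 1,
               rt_caching(&net) ? "on" : "off");
    }
    return 0;
}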
@@ -989,6 +1069,7 @@ out: return 0; | |||
989 | static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp) | 1069 | static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp) |
990 | { | 1070 | { |
991 | struct rtable *rth, **rthp; | 1071 | struct rtable *rth, **rthp; |
1072 | struct rtable *rthi; | ||
992 | unsigned long now; | 1073 | unsigned long now; |
993 | struct rtable *cand, **candp; | 1074 | struct rtable *cand, **candp; |
994 | u32 min_score; | 1075 | u32 min_score; |
@@ -1002,7 +1083,13 @@ restart: | |||
1002 | candp = NULL; | 1083 | candp = NULL; |
1003 | now = jiffies; | 1084 | now = jiffies; |
1004 | 1085 | ||
1086 | if (!rt_caching(dev_net(rt->u.dst.dev))) { | ||
1087 | rt_drop(rt); | ||
1088 | return 0; | ||
1089 | } | ||
1090 | |||
1005 | rthp = &rt_hash_table[hash].chain; | 1091 | rthp = &rt_hash_table[hash].chain; |
1092 | rthi = NULL; | ||
1006 | 1093 | ||
1007 | spin_lock_bh(rt_hash_lock_addr(hash)); | 1094 | spin_lock_bh(rt_hash_lock_addr(hash)); |
1008 | while ((rth = *rthp) != NULL) { | 1095 | while ((rth = *rthp) != NULL) { |
@@ -1048,6 +1135,17 @@ restart: | |||
1048 | chain_length++; | 1135 | chain_length++; |
1049 | 1136 | ||
1050 | rthp = &rth->u.dst.rt_next; | 1137 | rthp = &rth->u.dst.rt_next; |
1138 | |||
1139 | /* | ||
1140 | * check to see if the next entry in the chain | ||
1141 | * contains the same hash input values as rt. If it does, | ||
1142 | * this is where we will insert into the list, instead of | ||
1143 | * at the head. This groups entries that differ by aspects not | ||
1144 | * relevant to the hash function together, which we use to adjust | ||
1145 | * our chain length | ||
1146 | */ | ||
1147 | if (*rthp && compare_hash_inputs(&(*rthp)->fl, &rt->fl)) | ||
1148 | rthi = rth; | ||
1051 | } | 1149 | } |
1052 | 1150 | ||
1053 | if (cand) { | 1151 | if (cand) { |
@@ -1061,6 +1159,16 @@ restart: | |||
1061 | *candp = cand->u.dst.rt_next; | 1159 | *candp = cand->u.dst.rt_next; |
1062 | rt_free(cand); | 1160 | rt_free(cand); |
1063 | } | 1161 | } |
1162 | } else { | ||
1163 | if (chain_length > rt_chain_length_max) { | ||
1164 | struct net *net = dev_net(rt->u.dst.dev); | ||
1165 | int num = ++net->ipv4.current_rt_cache_rebuild_count; | ||
1166 | if (!rt_caching(dev_net(rt->u.dst.dev))) { | ||
1167 | printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n", | ||
1168 | rt->u.dst.dev->name, num); | ||
1169 | } | ||
1170 | rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev)); | ||
1171 | } | ||
1064 | } | 1172 | } |
1065 | 1173 | ||
1066 | /* Try to bind route to arp only if it is output | 1174 | /* Try to bind route to arp only if it is output |
@@ -1098,14 +1206,17 @@ restart: | |||
1098 | } | 1206 | } |
1099 | } | 1207 | } |
1100 | 1208 | ||
1101 | rt->u.dst.rt_next = rt_hash_table[hash].chain; | 1209 | if (rthi) |
1210 | rt->u.dst.rt_next = rthi->u.dst.rt_next; | ||
1211 | else | ||
1212 | rt->u.dst.rt_next = rt_hash_table[hash].chain; | ||
1213 | |||
1102 | #if RT_CACHE_DEBUG >= 2 | 1214 | #if RT_CACHE_DEBUG >= 2 |
1103 | if (rt->u.dst.rt_next) { | 1215 | if (rt->u.dst.rt_next) { |
1104 | struct rtable *trt; | 1216 | struct rtable *trt; |
1105 | printk(KERN_DEBUG "rt_cache @%02x: " NIPQUAD_FMT, hash, | 1217 | printk(KERN_DEBUG "rt_cache @%02x: %pI4", hash, &rt->rt_dst); |
1106 | NIPQUAD(rt->rt_dst)); | ||
1107 | for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next) | 1218 | for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next) |
1108 | printk(" . " NIPQUAD_FMT, NIPQUAD(trt->rt_dst)); | 1219 | printk(" . %pI4", &trt->rt_dst); |
1109 | printk("\n"); | 1220 | printk("\n"); |
1110 | } | 1221 | } |
1111 | #endif | 1222 | #endif |
@@ -1114,7 +1225,11 @@ restart: | |||
1114 | * previous writes to rt are comitted to memory | 1225 | * previous writes to rt are comitted to memory |
1115 | * before making rt visible to other CPUS. | 1226 | * before making rt visible to other CPUS. |
1116 | */ | 1227 | */ |
1117 | rcu_assign_pointer(rt_hash_table[hash].chain, rt); | 1228 | if (rthi) |
1229 | rcu_assign_pointer(rthi->u.dst.rt_next, rt); | ||
1230 | else | ||
1231 | rcu_assign_pointer(rt_hash_table[hash].chain, rt); | ||
1232 | |||
1118 | spin_unlock_bh(rt_hash_lock_addr(hash)); | 1233 | spin_unlock_bh(rt_hash_lock_addr(hash)); |
1119 | *rp = rt; | 1234 | *rp = rt; |
1120 | return 0; | 1235 | return 0; |
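The rthi bookkeeping in this hunk switches rt_intern_hash() from always inserting at the chain head to inserting next to existing entries with the same hash inputs, so routes that differ only in non-hashed attributes (TOS and the like) stay grouped and are counted once by rt_check_expire(). A simplified, non-RCU list sketch of that placement rule; it inserts after the last matching entry, whereas the kernel tracks a predecessor pointer (rthi) while it walks the chain:

#include <stddef.h>

struct entry {
    int key;                /* stands in for the hash inputs */
    struct entry *next;
};

/* Insert e after the last entry whose key matches, or at the head
 * if nothing matches, so equal-key entries stay contiguous. */
static void insert_grouped(struct entry **head, struct entry *e)
{
    struct entry *last_match = NULL;

    for (struct entry *p = *head; p; p = p->next)
        if (p->key == e->key)
            last_match = p;

    if (last_match) {
        e->next = last_match->next;
        last_match->next = e;
    } else {
        e->next = *head;
        *head = e;
    }
}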
@@ -1217,6 +1332,9 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1217 | || ipv4_is_zeronet(new_gw)) | 1332 | || ipv4_is_zeronet(new_gw)) |
1218 | goto reject_redirect; | 1333 | goto reject_redirect; |
1219 | 1334 | ||
1335 | if (!rt_caching(net)) | ||
1336 | goto reject_redirect; | ||
1337 | |||
1220 | if (!IN_DEV_SHARED_MEDIA(in_dev)) { | 1338 | if (!IN_DEV_SHARED_MEDIA(in_dev)) { |
1221 | if (!inet_addr_onlink(in_dev, new_gw, old_gw)) | 1339 | if (!inet_addr_onlink(in_dev, new_gw, old_gw)) |
1222 | goto reject_redirect; | 1340 | goto reject_redirect; |
@@ -1267,7 +1385,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1267 | 1385 | ||
1268 | /* Copy all the information. */ | 1386 | /* Copy all the information. */ |
1269 | *rt = *rth; | 1387 | *rt = *rth; |
1270 | INIT_RCU_HEAD(&rt->u.dst.rcu_head); | ||
1271 | rt->u.dst.__use = 1; | 1388 | rt->u.dst.__use = 1; |
1272 | atomic_set(&rt->u.dst.__refcnt, 1); | 1389 | atomic_set(&rt->u.dst.__refcnt, 1); |
1273 | rt->u.dst.child = NULL; | 1390 | rt->u.dst.child = NULL; |
@@ -1280,7 +1397,9 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1280 | rt->u.dst.path = &rt->u.dst; | 1397 | rt->u.dst.path = &rt->u.dst; |
1281 | rt->u.dst.neighbour = NULL; | 1398 | rt->u.dst.neighbour = NULL; |
1282 | rt->u.dst.hh = NULL; | 1399 | rt->u.dst.hh = NULL; |
1400 | #ifdef CONFIG_XFRM | ||
1283 | rt->u.dst.xfrm = NULL; | 1401 | rt->u.dst.xfrm = NULL; |
1402 | #endif | ||
1284 | rt->rt_genid = rt_genid(net); | 1403 | rt->rt_genid = rt_genid(net); |
1285 | rt->rt_flags |= RTCF_REDIRECTED; | 1404 | rt->rt_flags |= RTCF_REDIRECTED; |
1286 | 1405 | ||
@@ -1324,11 +1443,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1324 | reject_redirect: | 1443 | reject_redirect: |
1325 | #ifdef CONFIG_IP_ROUTE_VERBOSE | 1444 | #ifdef CONFIG_IP_ROUTE_VERBOSE |
1326 | if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) | 1445 | if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) |
1327 | printk(KERN_INFO "Redirect from " NIPQUAD_FMT " on %s about " | 1446 | printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n" |
1328 | NIPQUAD_FMT " ignored.\n" | 1447 | " Advised path = %pI4 -> %pI4\n", |
1329 | " Advised path = " NIPQUAD_FMT " -> " NIPQUAD_FMT "\n", | 1448 | &old_gw, dev->name, &new_gw, |
1330 | NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw), | 1449 | &saddr, &daddr); |
1331 | NIPQUAD(saddr), NIPQUAD(daddr)); | ||
1332 | #endif | 1450 | #endif |
1333 | in_dev_put(in_dev); | 1451 | in_dev_put(in_dev); |
1334 | } | 1452 | } |
@@ -1348,9 +1466,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) | |||
1348 | rt->fl.oif, | 1466 | rt->fl.oif, |
1349 | rt_genid(dev_net(dst->dev))); | 1467 | rt_genid(dev_net(dst->dev))); |
1350 | #if RT_CACHE_DEBUG >= 1 | 1468 | #if RT_CACHE_DEBUG >= 1 |
1351 | printk(KERN_DEBUG "ipv4_negative_advice: redirect to " | 1469 | printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n", |
1352 | NIPQUAD_FMT "/%02x dropped\n", | 1470 | &rt->rt_dst, rt->fl.fl4_tos); |
1353 | NIPQUAD(rt->rt_dst), rt->fl.fl4_tos); | ||
1354 | #endif | 1471 | #endif |
1355 | rt_del(hash, rt); | 1472 | rt_del(hash, rt); |
1356 | ret = NULL; | 1473 | ret = NULL; |
@@ -1414,10 +1531,9 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
1414 | if (IN_DEV_LOG_MARTIANS(in_dev) && | 1531 | if (IN_DEV_LOG_MARTIANS(in_dev) && |
1415 | rt->u.dst.rate_tokens == ip_rt_redirect_number && | 1532 | rt->u.dst.rate_tokens == ip_rt_redirect_number && |
1416 | net_ratelimit()) | 1533 | net_ratelimit()) |
1417 | printk(KERN_WARNING "host " NIPQUAD_FMT "/if%d ignores " | 1534 | printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n", |
1418 | "redirects for " NIPQUAD_FMT " to " NIPQUAD_FMT ".\n", | 1535 | &rt->rt_src, rt->rt_iif, |
1419 | NIPQUAD(rt->rt_src), rt->rt_iif, | 1536 | &rt->rt_dst, &rt->rt_gateway); |
1420 | NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway)); | ||
1421 | #endif | 1537 | #endif |
1422 | } | 1538 | } |
1423 | out: | 1539 | out: |
@@ -1610,8 +1726,8 @@ static void ipv4_link_failure(struct sk_buff *skb) | |||
1610 | 1726 | ||
1611 | static int ip_rt_bug(struct sk_buff *skb) | 1727 | static int ip_rt_bug(struct sk_buff *skb) |
1612 | { | 1728 | { |
1613 | printk(KERN_DEBUG "ip_rt_bug: " NIPQUAD_FMT " -> " NIPQUAD_FMT ", %s\n", | 1729 | printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n", |
1614 | NIPQUAD(ip_hdr(skb)->saddr), NIPQUAD(ip_hdr(skb)->daddr), | 1730 | &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, |
1615 | skb->dev ? skb->dev->name : "?"); | 1731 | skb->dev ? skb->dev->name : "?"); |
1616 | kfree_skb(skb); | 1732 | kfree_skb(skb); |
1617 | return 0; | 1733 | return 0; |
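All the NIPQUAD_FMT/NIPQUAD conversions in this file follow the pattern visible in the ip_rt_bug() hunk above: the %pI4 printk extension takes a pointer to the network-byte-order 32-bit address, so the format string shrinks and each address is passed as &addr instead of four expanded octets. A userspace illustration of what the specifier prints (ordinary printf has no %pI4, so the helper below does the expansion by hand):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Equivalent of the kernel's %pI4: print the four octets of a
 * network-byte-order IPv4 address, in order. */
static void print_ipv4(const uint32_t *addr)
{
    const unsigned char *p = (const unsigned char *)addr;

    printf("%u.%u.%u.%u", p[0], p[1], p[2], p[3]);
}

int main(void)
{
    unsigned char wire[4] = { 192, 0, 2, 1 };  /* 192.0.2.1 on the wire */
    uint32_t saddr;

    memcpy(&saddr, wire, sizeof(saddr));       /* like a __be32 field */
    printf("src = ");
    print_ipv4(&saddr);
    printf("\n");                              /* "src = 192.0.2.1" */
    return 0;
}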
@@ -1788,9 +1904,8 @@ static void ip_handle_martian_source(struct net_device *dev, | |||
1788 | * RFC1812 recommendation, if source is martian, | 1904 | * RFC1812 recommendation, if source is martian, |
1789 | * the only hint is MAC header. | 1905 | * the only hint is MAC header. |
1790 | */ | 1906 | */ |
1791 | printk(KERN_WARNING "martian source " NIPQUAD_FMT " from " | 1907 | printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n", |
1792 | NIPQUAD_FMT", on dev %s\n", | 1908 | &daddr, &saddr, dev->name); |
1793 | NIPQUAD(daddr), NIPQUAD(saddr), dev->name); | ||
1794 | if (dev->hard_header_len && skb_mac_header_was_set(skb)) { | 1909 | if (dev->hard_header_len && skb_mac_header_was_set(skb)) { |
1795 | int i; | 1910 | int i; |
1796 | const unsigned char *p = skb_mac_header(skb); | 1911 | const unsigned char *p = skb_mac_header(skb); |
@@ -2099,9 +2214,8 @@ martian_destination: | |||
2099 | RT_CACHE_STAT_INC(in_martian_dst); | 2214 | RT_CACHE_STAT_INC(in_martian_dst); |
2100 | #ifdef CONFIG_IP_ROUTE_VERBOSE | 2215 | #ifdef CONFIG_IP_ROUTE_VERBOSE |
2101 | if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) | 2216 | if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) |
2102 | printk(KERN_WARNING "martian destination " NIPQUAD_FMT " from " | 2217 | printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n", |
2103 | NIPQUAD_FMT ", dev %s\n", | 2218 | &daddr, &saddr, dev->name); |
2104 | NIPQUAD(daddr), NIPQUAD(saddr), dev->name); | ||
2105 | #endif | 2219 | #endif |
2106 | 2220 | ||
2107 | e_hostunreach: | 2221 | e_hostunreach: |
@@ -2130,6 +2244,10 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2130 | struct net *net; | 2244 | struct net *net; |
2131 | 2245 | ||
2132 | net = dev_net(dev); | 2246 | net = dev_net(dev); |
2247 | |||
2248 | if (!rt_caching(net)) | ||
2249 | goto skip_cache; | ||
2250 | |||
2133 | tos &= IPTOS_RT_MASK; | 2251 | tos &= IPTOS_RT_MASK; |
2134 | hash = rt_hash(daddr, saddr, iif, rt_genid(net)); | 2252 | hash = rt_hash(daddr, saddr, iif, rt_genid(net)); |
2135 | 2253 | ||
@@ -2154,6 +2272,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2154 | } | 2272 | } |
2155 | rcu_read_unlock(); | 2273 | rcu_read_unlock(); |
2156 | 2274 | ||
2275 | skip_cache: | ||
2157 | /* Multicast recognition logic is moved from route cache to here. | 2276 | /* Multicast recognition logic is moved from route cache to here. |
2158 | The problem was that too many Ethernet cards have broken/missing | 2277 | The problem was that too many Ethernet cards have broken/missing |
2159 | hardware multicast filters :-( As result the host on multicasting | 2278 | hardware multicast filters :-( As result the host on multicasting |
@@ -2539,6 +2658,9 @@ int __ip_route_output_key(struct net *net, struct rtable **rp, | |||
2539 | unsigned hash; | 2658 | unsigned hash; |
2540 | struct rtable *rth; | 2659 | struct rtable *rth; |
2541 | 2660 | ||
2661 | if (!rt_caching(net)) | ||
2662 | goto slow_output; | ||
2663 | |||
2542 | hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net)); | 2664 | hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net)); |
2543 | 2665 | ||
2544 | rcu_read_lock_bh(); | 2666 | rcu_read_lock_bh(); |
@@ -2563,6 +2685,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp, | |||
2563 | } | 2685 | } |
2564 | rcu_read_unlock_bh(); | 2686 | rcu_read_unlock_bh(); |
2565 | 2687 | ||
2688 | slow_output: | ||
2566 | return ip_route_output_slow(net, rp, flp); | 2689 | return ip_route_output_slow(net, rp, flp); |
2567 | } | 2690 | } |
2568 | 2691 | ||
@@ -2995,7 +3118,7 @@ static ctl_table ipv4_route_table[] = { | |||
2995 | .data = &ipv4_dst_ops.gc_thresh, | 3118 | .data = &ipv4_dst_ops.gc_thresh, |
2996 | .maxlen = sizeof(int), | 3119 | .maxlen = sizeof(int), |
2997 | .mode = 0644, | 3120 | .mode = 0644, |
2998 | .proc_handler = &proc_dointvec, | 3121 | .proc_handler = proc_dointvec, |
2999 | }, | 3122 | }, |
3000 | { | 3123 | { |
3001 | .ctl_name = NET_IPV4_ROUTE_MAX_SIZE, | 3124 | .ctl_name = NET_IPV4_ROUTE_MAX_SIZE, |
@@ -3003,7 +3126,7 @@ static ctl_table ipv4_route_table[] = { | |||
3003 | .data = &ip_rt_max_size, | 3126 | .data = &ip_rt_max_size, |
3004 | .maxlen = sizeof(int), | 3127 | .maxlen = sizeof(int), |
3005 | .mode = 0644, | 3128 | .mode = 0644, |
3006 | .proc_handler = &proc_dointvec, | 3129 | .proc_handler = proc_dointvec, |
3007 | }, | 3130 | }, |
3008 | { | 3131 | { |
3009 | /* Deprecated. Use gc_min_interval_ms */ | 3132 | /* Deprecated. Use gc_min_interval_ms */ |
@@ -3013,8 +3136,8 @@ static ctl_table ipv4_route_table[] = { | |||
3013 | .data = &ip_rt_gc_min_interval, | 3136 | .data = &ip_rt_gc_min_interval, |
3014 | .maxlen = sizeof(int), | 3137 | .maxlen = sizeof(int), |
3015 | .mode = 0644, | 3138 | .mode = 0644, |
3016 | .proc_handler = &proc_dointvec_jiffies, | 3139 | .proc_handler = proc_dointvec_jiffies, |
3017 | .strategy = &sysctl_jiffies, | 3140 | .strategy = sysctl_jiffies, |
3018 | }, | 3141 | }, |
3019 | { | 3142 | { |
3020 | .ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS, | 3143 | .ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS, |
@@ -3022,8 +3145,8 @@ static ctl_table ipv4_route_table[] = { | |||
3022 | .data = &ip_rt_gc_min_interval, | 3145 | .data = &ip_rt_gc_min_interval, |
3023 | .maxlen = sizeof(int), | 3146 | .maxlen = sizeof(int), |
3024 | .mode = 0644, | 3147 | .mode = 0644, |
3025 | .proc_handler = &proc_dointvec_ms_jiffies, | 3148 | .proc_handler = proc_dointvec_ms_jiffies, |
3026 | .strategy = &sysctl_ms_jiffies, | 3149 | .strategy = sysctl_ms_jiffies, |
3027 | }, | 3150 | }, |
3028 | { | 3151 | { |
3029 | .ctl_name = NET_IPV4_ROUTE_GC_TIMEOUT, | 3152 | .ctl_name = NET_IPV4_ROUTE_GC_TIMEOUT, |
@@ -3031,8 +3154,8 @@ static ctl_table ipv4_route_table[] = { | |||
3031 | .data = &ip_rt_gc_timeout, | 3154 | .data = &ip_rt_gc_timeout, |
3032 | .maxlen = sizeof(int), | 3155 | .maxlen = sizeof(int), |
3033 | .mode = 0644, | 3156 | .mode = 0644, |
3034 | .proc_handler = &proc_dointvec_jiffies, | 3157 | .proc_handler = proc_dointvec_jiffies, |
3035 | .strategy = &sysctl_jiffies, | 3158 | .strategy = sysctl_jiffies, |
3036 | }, | 3159 | }, |
3037 | { | 3160 | { |
3038 | .ctl_name = NET_IPV4_ROUTE_GC_INTERVAL, | 3161 | .ctl_name = NET_IPV4_ROUTE_GC_INTERVAL, |
@@ -3040,8 +3163,8 @@ static ctl_table ipv4_route_table[] = { | |||
3040 | .data = &ip_rt_gc_interval, | 3163 | .data = &ip_rt_gc_interval, |
3041 | .maxlen = sizeof(int), | 3164 | .maxlen = sizeof(int), |
3042 | .mode = 0644, | 3165 | .mode = 0644, |
3043 | .proc_handler = &proc_dointvec_jiffies, | 3166 | .proc_handler = proc_dointvec_jiffies, |
3044 | .strategy = &sysctl_jiffies, | 3167 | .strategy = sysctl_jiffies, |
3045 | }, | 3168 | }, |
3046 | { | 3169 | { |
3047 | .ctl_name = NET_IPV4_ROUTE_REDIRECT_LOAD, | 3170 | .ctl_name = NET_IPV4_ROUTE_REDIRECT_LOAD, |
@@ -3049,7 +3172,7 @@ static ctl_table ipv4_route_table[] = { | |||
3049 | .data = &ip_rt_redirect_load, | 3172 | .data = &ip_rt_redirect_load, |
3050 | .maxlen = sizeof(int), | 3173 | .maxlen = sizeof(int), |
3051 | .mode = 0644, | 3174 | .mode = 0644, |
3052 | .proc_handler = &proc_dointvec, | 3175 | .proc_handler = proc_dointvec, |
3053 | }, | 3176 | }, |
3054 | { | 3177 | { |
3055 | .ctl_name = NET_IPV4_ROUTE_REDIRECT_NUMBER, | 3178 | .ctl_name = NET_IPV4_ROUTE_REDIRECT_NUMBER, |
@@ -3057,7 +3180,7 @@ static ctl_table ipv4_route_table[] = { | |||
3057 | .data = &ip_rt_redirect_number, | 3180 | .data = &ip_rt_redirect_number, |
3058 | .maxlen = sizeof(int), | 3181 | .maxlen = sizeof(int), |
3059 | .mode = 0644, | 3182 | .mode = 0644, |
3060 | .proc_handler = &proc_dointvec, | 3183 | .proc_handler = proc_dointvec, |
3061 | }, | 3184 | }, |
3062 | { | 3185 | { |
3063 | .ctl_name = NET_IPV4_ROUTE_REDIRECT_SILENCE, | 3186 | .ctl_name = NET_IPV4_ROUTE_REDIRECT_SILENCE, |
@@ -3065,7 +3188,7 @@ static ctl_table ipv4_route_table[] = { | |||
3065 | .data = &ip_rt_redirect_silence, | 3188 | .data = &ip_rt_redirect_silence, |
3066 | .maxlen = sizeof(int), | 3189 | .maxlen = sizeof(int), |
3067 | .mode = 0644, | 3190 | .mode = 0644, |
3068 | .proc_handler = &proc_dointvec, | 3191 | .proc_handler = proc_dointvec, |
3069 | }, | 3192 | }, |
3070 | { | 3193 | { |
3071 | .ctl_name = NET_IPV4_ROUTE_ERROR_COST, | 3194 | .ctl_name = NET_IPV4_ROUTE_ERROR_COST, |
@@ -3073,7 +3196,7 @@ static ctl_table ipv4_route_table[] = { | |||
3073 | .data = &ip_rt_error_cost, | 3196 | .data = &ip_rt_error_cost, |
3074 | .maxlen = sizeof(int), | 3197 | .maxlen = sizeof(int), |
3075 | .mode = 0644, | 3198 | .mode = 0644, |
3076 | .proc_handler = &proc_dointvec, | 3199 | .proc_handler = proc_dointvec, |
3077 | }, | 3200 | }, |
3078 | { | 3201 | { |
3079 | .ctl_name = NET_IPV4_ROUTE_ERROR_BURST, | 3202 | .ctl_name = NET_IPV4_ROUTE_ERROR_BURST, |
@@ -3081,7 +3204,7 @@ static ctl_table ipv4_route_table[] = { | |||
3081 | .data = &ip_rt_error_burst, | 3204 | .data = &ip_rt_error_burst, |
3082 | .maxlen = sizeof(int), | 3205 | .maxlen = sizeof(int), |
3083 | .mode = 0644, | 3206 | .mode = 0644, |
3084 | .proc_handler = &proc_dointvec, | 3207 | .proc_handler = proc_dointvec, |
3085 | }, | 3208 | }, |
3086 | { | 3209 | { |
3087 | .ctl_name = NET_IPV4_ROUTE_GC_ELASTICITY, | 3210 | .ctl_name = NET_IPV4_ROUTE_GC_ELASTICITY, |
@@ -3089,7 +3212,7 @@ static ctl_table ipv4_route_table[] = { | |||
3089 | .data = &ip_rt_gc_elasticity, | 3212 | .data = &ip_rt_gc_elasticity, |
3090 | .maxlen = sizeof(int), | 3213 | .maxlen = sizeof(int), |
3091 | .mode = 0644, | 3214 | .mode = 0644, |
3092 | .proc_handler = &proc_dointvec, | 3215 | .proc_handler = proc_dointvec, |
3093 | }, | 3216 | }, |
3094 | { | 3217 | { |
3095 | .ctl_name = NET_IPV4_ROUTE_MTU_EXPIRES, | 3218 | .ctl_name = NET_IPV4_ROUTE_MTU_EXPIRES, |
@@ -3097,8 +3220,8 @@ static ctl_table ipv4_route_table[] = { | |||
3097 | .data = &ip_rt_mtu_expires, | 3220 | .data = &ip_rt_mtu_expires, |
3098 | .maxlen = sizeof(int), | 3221 | .maxlen = sizeof(int), |
3099 | .mode = 0644, | 3222 | .mode = 0644, |
3100 | .proc_handler = &proc_dointvec_jiffies, | 3223 | .proc_handler = proc_dointvec_jiffies, |
3101 | .strategy = &sysctl_jiffies, | 3224 | .strategy = sysctl_jiffies, |
3102 | }, | 3225 | }, |
3103 | { | 3226 | { |
3104 | .ctl_name = NET_IPV4_ROUTE_MIN_PMTU, | 3227 | .ctl_name = NET_IPV4_ROUTE_MIN_PMTU, |
@@ -3106,7 +3229,7 @@ static ctl_table ipv4_route_table[] = { | |||
3106 | .data = &ip_rt_min_pmtu, | 3229 | .data = &ip_rt_min_pmtu, |
3107 | .maxlen = sizeof(int), | 3230 | .maxlen = sizeof(int), |
3108 | .mode = 0644, | 3231 | .mode = 0644, |
3109 | .proc_handler = &proc_dointvec, | 3232 | .proc_handler = proc_dointvec, |
3110 | }, | 3233 | }, |
3111 | { | 3234 | { |
3112 | .ctl_name = NET_IPV4_ROUTE_MIN_ADVMSS, | 3235 | .ctl_name = NET_IPV4_ROUTE_MIN_ADVMSS, |
@@ -3114,7 +3237,7 @@ static ctl_table ipv4_route_table[] = { | |||
3114 | .data = &ip_rt_min_advmss, | 3237 | .data = &ip_rt_min_advmss, |
3115 | .maxlen = sizeof(int), | 3238 | .maxlen = sizeof(int), |
3116 | .mode = 0644, | 3239 | .mode = 0644, |
3117 | .proc_handler = &proc_dointvec, | 3240 | .proc_handler = proc_dointvec, |
3118 | }, | 3241 | }, |
3119 | { | 3242 | { |
3120 | .ctl_name = NET_IPV4_ROUTE_SECRET_INTERVAL, | 3243 | .ctl_name = NET_IPV4_ROUTE_SECRET_INTERVAL, |
@@ -3122,8 +3245,8 @@ static ctl_table ipv4_route_table[] = { | |||
3122 | .data = &ip_rt_secret_interval, | 3245 | .data = &ip_rt_secret_interval, |
3123 | .maxlen = sizeof(int), | 3246 | .maxlen = sizeof(int), |
3124 | .mode = 0644, | 3247 | .mode = 0644, |
3125 | .proc_handler = &ipv4_sysctl_rt_secret_interval, | 3248 | .proc_handler = ipv4_sysctl_rt_secret_interval, |
3126 | .strategy = &ipv4_sysctl_rt_secret_interval_strategy, | 3249 | .strategy = ipv4_sysctl_rt_secret_interval_strategy, |
3127 | }, | 3250 | }, |
3128 | { .ctl_name = 0 } | 3251 | { .ctl_name = 0 } |
3129 | }; | 3252 | }; |
@@ -3151,8 +3274,8 @@ static struct ctl_table ipv4_route_flush_table[] = { | |||
3151 | .procname = "flush", | 3274 | .procname = "flush", |
3152 | .maxlen = sizeof(int), | 3275 | .maxlen = sizeof(int), |
3153 | .mode = 0200, | 3276 | .mode = 0200, |
3154 | .proc_handler = &ipv4_sysctl_rtcache_flush, | 3277 | .proc_handler = ipv4_sysctl_rtcache_flush, |
3155 | .strategy = &ipv4_sysctl_rtcache_flush_strategy, | 3278 | .strategy = ipv4_sysctl_rtcache_flush_strategy, |
3156 | }, | 3279 | }, |
3157 | { .ctl_name = 0 }, | 3280 | { .ctl_name = 0 }, |
3158 | }; | 3281 | }; |
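The remaining hunks in this table, and the sysctl_net_ipv4.c changes that follow, are a mechanical cleanup: in an initializer a function name already decays to a pointer, so .proc_handler = proc_dointvec and .proc_handler = &proc_dointvec store exactly the same address and the ampersand is redundant. A minimal illustration with a hypothetical handler rather than the real ctl_table:

#include <stdio.h>

static int handler(void *data)
{
    (void)data;
    return 0;
}

struct table_entry {
    int (*proc_handler)(void *data);
};

int main(void)
{
    /* both initializers store the same function address */
    struct table_entry a = { .proc_handler = handler };
    struct table_entry b = { .proc_handler = &handler };

    printf("%s\n", a.proc_handler == b.proc_handler ? "identical" : "different");
    return 0;
}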
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 1bb10df8ce7d..4710d219f06a 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -195,7 +195,7 @@ static struct ctl_table ipv4_table[] = { | |||
195 | .data = &sysctl_tcp_timestamps, | 195 | .data = &sysctl_tcp_timestamps, |
196 | .maxlen = sizeof(int), | 196 | .maxlen = sizeof(int), |
197 | .mode = 0644, | 197 | .mode = 0644, |
198 | .proc_handler = &proc_dointvec | 198 | .proc_handler = proc_dointvec |
199 | }, | 199 | }, |
200 | { | 200 | { |
201 | .ctl_name = NET_IPV4_TCP_WINDOW_SCALING, | 201 | .ctl_name = NET_IPV4_TCP_WINDOW_SCALING, |
@@ -203,7 +203,7 @@ static struct ctl_table ipv4_table[] = { | |||
203 | .data = &sysctl_tcp_window_scaling, | 203 | .data = &sysctl_tcp_window_scaling, |
204 | .maxlen = sizeof(int), | 204 | .maxlen = sizeof(int), |
205 | .mode = 0644, | 205 | .mode = 0644, |
206 | .proc_handler = &proc_dointvec | 206 | .proc_handler = proc_dointvec |
207 | }, | 207 | }, |
208 | { | 208 | { |
209 | .ctl_name = NET_IPV4_TCP_SACK, | 209 | .ctl_name = NET_IPV4_TCP_SACK, |
@@ -211,7 +211,7 @@ static struct ctl_table ipv4_table[] = { | |||
211 | .data = &sysctl_tcp_sack, | 211 | .data = &sysctl_tcp_sack, |
212 | .maxlen = sizeof(int), | 212 | .maxlen = sizeof(int), |
213 | .mode = 0644, | 213 | .mode = 0644, |
214 | .proc_handler = &proc_dointvec | 214 | .proc_handler = proc_dointvec |
215 | }, | 215 | }, |
216 | { | 216 | { |
217 | .ctl_name = NET_IPV4_TCP_RETRANS_COLLAPSE, | 217 | .ctl_name = NET_IPV4_TCP_RETRANS_COLLAPSE, |
@@ -219,7 +219,7 @@ static struct ctl_table ipv4_table[] = { | |||
219 | .data = &sysctl_tcp_retrans_collapse, | 219 | .data = &sysctl_tcp_retrans_collapse, |
220 | .maxlen = sizeof(int), | 220 | .maxlen = sizeof(int), |
221 | .mode = 0644, | 221 | .mode = 0644, |
222 | .proc_handler = &proc_dointvec | 222 | .proc_handler = proc_dointvec |
223 | }, | 223 | }, |
224 | { | 224 | { |
225 | .ctl_name = NET_IPV4_DEFAULT_TTL, | 225 | .ctl_name = NET_IPV4_DEFAULT_TTL, |
@@ -227,8 +227,8 @@ static struct ctl_table ipv4_table[] = { | |||
227 | .data = &sysctl_ip_default_ttl, | 227 | .data = &sysctl_ip_default_ttl, |
228 | .maxlen = sizeof(int), | 228 | .maxlen = sizeof(int), |
229 | .mode = 0644, | 229 | .mode = 0644, |
230 | .proc_handler = &ipv4_doint_and_flush, | 230 | .proc_handler = ipv4_doint_and_flush, |
231 | .strategy = &ipv4_doint_and_flush_strategy, | 231 | .strategy = ipv4_doint_and_flush_strategy, |
232 | .extra2 = &init_net, | 232 | .extra2 = &init_net, |
233 | }, | 233 | }, |
234 | { | 234 | { |
@@ -237,7 +237,7 @@ static struct ctl_table ipv4_table[] = { | |||
237 | .data = &ipv4_config.no_pmtu_disc, | 237 | .data = &ipv4_config.no_pmtu_disc, |
238 | .maxlen = sizeof(int), | 238 | .maxlen = sizeof(int), |
239 | .mode = 0644, | 239 | .mode = 0644, |
240 | .proc_handler = &proc_dointvec | 240 | .proc_handler = proc_dointvec |
241 | }, | 241 | }, |
242 | { | 242 | { |
243 | .ctl_name = NET_IPV4_NONLOCAL_BIND, | 243 | .ctl_name = NET_IPV4_NONLOCAL_BIND, |
@@ -245,7 +245,7 @@ static struct ctl_table ipv4_table[] = { | |||
245 | .data = &sysctl_ip_nonlocal_bind, | 245 | .data = &sysctl_ip_nonlocal_bind, |
246 | .maxlen = sizeof(int), | 246 | .maxlen = sizeof(int), |
247 | .mode = 0644, | 247 | .mode = 0644, |
248 | .proc_handler = &proc_dointvec | 248 | .proc_handler = proc_dointvec |
249 | }, | 249 | }, |
250 | { | 250 | { |
251 | .ctl_name = NET_IPV4_TCP_SYN_RETRIES, | 251 | .ctl_name = NET_IPV4_TCP_SYN_RETRIES, |
@@ -253,7 +253,7 @@ static struct ctl_table ipv4_table[] = { | |||
253 | .data = &sysctl_tcp_syn_retries, | 253 | .data = &sysctl_tcp_syn_retries, |
254 | .maxlen = sizeof(int), | 254 | .maxlen = sizeof(int), |
255 | .mode = 0644, | 255 | .mode = 0644, |
256 | .proc_handler = &proc_dointvec | 256 | .proc_handler = proc_dointvec |
257 | }, | 257 | }, |
258 | { | 258 | { |
259 | .ctl_name = NET_TCP_SYNACK_RETRIES, | 259 | .ctl_name = NET_TCP_SYNACK_RETRIES, |
@@ -261,7 +261,7 @@ static struct ctl_table ipv4_table[] = { | |||
261 | .data = &sysctl_tcp_synack_retries, | 261 | .data = &sysctl_tcp_synack_retries, |
262 | .maxlen = sizeof(int), | 262 | .maxlen = sizeof(int), |
263 | .mode = 0644, | 263 | .mode = 0644, |
264 | .proc_handler = &proc_dointvec | 264 | .proc_handler = proc_dointvec |
265 | }, | 265 | }, |
266 | { | 266 | { |
267 | .ctl_name = NET_TCP_MAX_ORPHANS, | 267 | .ctl_name = NET_TCP_MAX_ORPHANS, |
@@ -269,7 +269,7 @@ static struct ctl_table ipv4_table[] = { | |||
269 | .data = &sysctl_tcp_max_orphans, | 269 | .data = &sysctl_tcp_max_orphans, |
270 | .maxlen = sizeof(int), | 270 | .maxlen = sizeof(int), |
271 | .mode = 0644, | 271 | .mode = 0644, |
272 | .proc_handler = &proc_dointvec | 272 | .proc_handler = proc_dointvec |
273 | }, | 273 | }, |
274 | { | 274 | { |
275 | .ctl_name = NET_TCP_MAX_TW_BUCKETS, | 275 | .ctl_name = NET_TCP_MAX_TW_BUCKETS, |
@@ -277,7 +277,7 @@ static struct ctl_table ipv4_table[] = { | |||
277 | .data = &tcp_death_row.sysctl_max_tw_buckets, | 277 | .data = &tcp_death_row.sysctl_max_tw_buckets, |
278 | .maxlen = sizeof(int), | 278 | .maxlen = sizeof(int), |
279 | .mode = 0644, | 279 | .mode = 0644, |
280 | .proc_handler = &proc_dointvec | 280 | .proc_handler = proc_dointvec |
281 | }, | 281 | }, |
282 | { | 282 | { |
283 | .ctl_name = NET_IPV4_DYNADDR, | 283 | .ctl_name = NET_IPV4_DYNADDR, |
@@ -285,7 +285,7 @@ static struct ctl_table ipv4_table[] = { | |||
285 | .data = &sysctl_ip_dynaddr, | 285 | .data = &sysctl_ip_dynaddr, |
286 | .maxlen = sizeof(int), | 286 | .maxlen = sizeof(int), |
287 | .mode = 0644, | 287 | .mode = 0644, |
288 | .proc_handler = &proc_dointvec | 288 | .proc_handler = proc_dointvec |
289 | }, | 289 | }, |
290 | { | 290 | { |
291 | .ctl_name = NET_IPV4_TCP_KEEPALIVE_TIME, | 291 | .ctl_name = NET_IPV4_TCP_KEEPALIVE_TIME, |
@@ -293,8 +293,8 @@ static struct ctl_table ipv4_table[] = { | |||
293 | .data = &sysctl_tcp_keepalive_time, | 293 | .data = &sysctl_tcp_keepalive_time, |
294 | .maxlen = sizeof(int), | 294 | .maxlen = sizeof(int), |
295 | .mode = 0644, | 295 | .mode = 0644, |
296 | .proc_handler = &proc_dointvec_jiffies, | 296 | .proc_handler = proc_dointvec_jiffies, |
297 | .strategy = &sysctl_jiffies | 297 | .strategy = sysctl_jiffies |
298 | }, | 298 | }, |
299 | { | 299 | { |
300 | .ctl_name = NET_IPV4_TCP_KEEPALIVE_PROBES, | 300 | .ctl_name = NET_IPV4_TCP_KEEPALIVE_PROBES, |
@@ -302,7 +302,7 @@ static struct ctl_table ipv4_table[] = { | |||
302 | .data = &sysctl_tcp_keepalive_probes, | 302 | .data = &sysctl_tcp_keepalive_probes, |
303 | .maxlen = sizeof(int), | 303 | .maxlen = sizeof(int), |
304 | .mode = 0644, | 304 | .mode = 0644, |
305 | .proc_handler = &proc_dointvec | 305 | .proc_handler = proc_dointvec |
306 | }, | 306 | }, |
307 | { | 307 | { |
308 | .ctl_name = NET_IPV4_TCP_KEEPALIVE_INTVL, | 308 | .ctl_name = NET_IPV4_TCP_KEEPALIVE_INTVL, |
@@ -310,8 +310,8 @@ static struct ctl_table ipv4_table[] = { | |||
310 | .data = &sysctl_tcp_keepalive_intvl, | 310 | .data = &sysctl_tcp_keepalive_intvl, |
311 | .maxlen = sizeof(int), | 311 | .maxlen = sizeof(int), |
312 | .mode = 0644, | 312 | .mode = 0644, |
313 | .proc_handler = &proc_dointvec_jiffies, | 313 | .proc_handler = proc_dointvec_jiffies, |
314 | .strategy = &sysctl_jiffies | 314 | .strategy = sysctl_jiffies |
315 | }, | 315 | }, |
316 | { | 316 | { |
317 | .ctl_name = NET_IPV4_TCP_RETRIES1, | 317 | .ctl_name = NET_IPV4_TCP_RETRIES1, |
@@ -319,8 +319,8 @@ static struct ctl_table ipv4_table[] = { | |||
319 | .data = &sysctl_tcp_retries1, | 319 | .data = &sysctl_tcp_retries1, |
320 | .maxlen = sizeof(int), | 320 | .maxlen = sizeof(int), |
321 | .mode = 0644, | 321 | .mode = 0644, |
322 | .proc_handler = &proc_dointvec_minmax, | 322 | .proc_handler = proc_dointvec_minmax, |
323 | .strategy = &sysctl_intvec, | 323 | .strategy = sysctl_intvec, |
324 | .extra2 = &tcp_retr1_max | 324 | .extra2 = &tcp_retr1_max |
325 | }, | 325 | }, |
326 | { | 326 | { |
@@ -329,7 +329,7 @@ static struct ctl_table ipv4_table[] = { | |||
329 | .data = &sysctl_tcp_retries2, | 329 | .data = &sysctl_tcp_retries2, |
330 | .maxlen = sizeof(int), | 330 | .maxlen = sizeof(int), |
331 | .mode = 0644, | 331 | .mode = 0644, |
332 | .proc_handler = &proc_dointvec | 332 | .proc_handler = proc_dointvec |
333 | }, | 333 | }, |
334 | { | 334 | { |
335 | .ctl_name = NET_IPV4_TCP_FIN_TIMEOUT, | 335 | .ctl_name = NET_IPV4_TCP_FIN_TIMEOUT, |
@@ -337,8 +337,8 @@ static struct ctl_table ipv4_table[] = { | |||
337 | .data = &sysctl_tcp_fin_timeout, | 337 | .data = &sysctl_tcp_fin_timeout, |
338 | .maxlen = sizeof(int), | 338 | .maxlen = sizeof(int), |
339 | .mode = 0644, | 339 | .mode = 0644, |
340 | .proc_handler = &proc_dointvec_jiffies, | 340 | .proc_handler = proc_dointvec_jiffies, |
341 | .strategy = &sysctl_jiffies | 341 | .strategy = sysctl_jiffies |
342 | }, | 342 | }, |
343 | #ifdef CONFIG_SYN_COOKIES | 343 | #ifdef CONFIG_SYN_COOKIES |
344 | { | 344 | { |
@@ -347,7 +347,7 @@ static struct ctl_table ipv4_table[] = { | |||
347 | .data = &sysctl_tcp_syncookies, | 347 | .data = &sysctl_tcp_syncookies, |
348 | .maxlen = sizeof(int), | 348 | .maxlen = sizeof(int), |
349 | .mode = 0644, | 349 | .mode = 0644, |
350 | .proc_handler = &proc_dointvec | 350 | .proc_handler = proc_dointvec |
351 | }, | 351 | }, |
352 | #endif | 352 | #endif |
353 | { | 353 | { |
@@ -356,7 +356,7 @@ static struct ctl_table ipv4_table[] = { | |||
356 | .data = &tcp_death_row.sysctl_tw_recycle, | 356 | .data = &tcp_death_row.sysctl_tw_recycle, |
357 | .maxlen = sizeof(int), | 357 | .maxlen = sizeof(int), |
358 | .mode = 0644, | 358 | .mode = 0644, |
359 | .proc_handler = &proc_dointvec | 359 | .proc_handler = proc_dointvec |
360 | }, | 360 | }, |
361 | { | 361 | { |
362 | .ctl_name = NET_TCP_ABORT_ON_OVERFLOW, | 362 | .ctl_name = NET_TCP_ABORT_ON_OVERFLOW, |
@@ -364,7 +364,7 @@ static struct ctl_table ipv4_table[] = { | |||
364 | .data = &sysctl_tcp_abort_on_overflow, | 364 | .data = &sysctl_tcp_abort_on_overflow, |
365 | .maxlen = sizeof(int), | 365 | .maxlen = sizeof(int), |
366 | .mode = 0644, | 366 | .mode = 0644, |
367 | .proc_handler = &proc_dointvec | 367 | .proc_handler = proc_dointvec |
368 | }, | 368 | }, |
369 | { | 369 | { |
370 | .ctl_name = NET_TCP_STDURG, | 370 | .ctl_name = NET_TCP_STDURG, |
@@ -372,7 +372,7 @@ static struct ctl_table ipv4_table[] = { | |||
372 | .data = &sysctl_tcp_stdurg, | 372 | .data = &sysctl_tcp_stdurg, |
373 | .maxlen = sizeof(int), | 373 | .maxlen = sizeof(int), |
374 | .mode = 0644, | 374 | .mode = 0644, |
375 | .proc_handler = &proc_dointvec | 375 | .proc_handler = proc_dointvec |
376 | }, | 376 | }, |
377 | { | 377 | { |
378 | .ctl_name = NET_TCP_RFC1337, | 378 | .ctl_name = NET_TCP_RFC1337, |
@@ -380,7 +380,7 @@ static struct ctl_table ipv4_table[] = { | |||
380 | .data = &sysctl_tcp_rfc1337, | 380 | .data = &sysctl_tcp_rfc1337, |
381 | .maxlen = sizeof(int), | 381 | .maxlen = sizeof(int), |
382 | .mode = 0644, | 382 | .mode = 0644, |
383 | .proc_handler = &proc_dointvec | 383 | .proc_handler = proc_dointvec |
384 | }, | 384 | }, |
385 | { | 385 | { |
386 | .ctl_name = NET_TCP_MAX_SYN_BACKLOG, | 386 | .ctl_name = NET_TCP_MAX_SYN_BACKLOG, |
@@ -388,7 +388,7 @@ static struct ctl_table ipv4_table[] = { | |||
388 | .data = &sysctl_max_syn_backlog, | 388 | .data = &sysctl_max_syn_backlog, |
389 | .maxlen = sizeof(int), | 389 | .maxlen = sizeof(int), |
390 | .mode = 0644, | 390 | .mode = 0644, |
391 | .proc_handler = &proc_dointvec | 391 | .proc_handler = proc_dointvec |
392 | }, | 392 | }, |
393 | { | 393 | { |
394 | .ctl_name = NET_IPV4_LOCAL_PORT_RANGE, | 394 | .ctl_name = NET_IPV4_LOCAL_PORT_RANGE, |
@@ -396,8 +396,8 @@ static struct ctl_table ipv4_table[] = { | |||
396 | .data = &sysctl_local_ports.range, | 396 | .data = &sysctl_local_ports.range, |
397 | .maxlen = sizeof(sysctl_local_ports.range), | 397 | .maxlen = sizeof(sysctl_local_ports.range), |
398 | .mode = 0644, | 398 | .mode = 0644, |
399 | .proc_handler = &ipv4_local_port_range, | 399 | .proc_handler = ipv4_local_port_range, |
400 | .strategy = &ipv4_sysctl_local_port_range, | 400 | .strategy = ipv4_sysctl_local_port_range, |
401 | }, | 401 | }, |
402 | #ifdef CONFIG_IP_MULTICAST | 402 | #ifdef CONFIG_IP_MULTICAST |
403 | { | 403 | { |
@@ -406,7 +406,7 @@ static struct ctl_table ipv4_table[] = { | |||
406 | .data = &sysctl_igmp_max_memberships, | 406 | .data = &sysctl_igmp_max_memberships, |
407 | .maxlen = sizeof(int), | 407 | .maxlen = sizeof(int), |
408 | .mode = 0644, | 408 | .mode = 0644, |
409 | .proc_handler = &proc_dointvec | 409 | .proc_handler = proc_dointvec |
410 | }, | 410 | }, |
411 | 411 | ||
412 | #endif | 412 | #endif |
@@ -416,7 +416,7 @@ static struct ctl_table ipv4_table[] = { | |||
416 | .data = &sysctl_igmp_max_msf, | 416 | .data = &sysctl_igmp_max_msf, |
417 | .maxlen = sizeof(int), | 417 | .maxlen = sizeof(int), |
418 | .mode = 0644, | 418 | .mode = 0644, |
419 | .proc_handler = &proc_dointvec | 419 | .proc_handler = proc_dointvec |
420 | }, | 420 | }, |
421 | { | 421 | { |
422 | .ctl_name = NET_IPV4_INET_PEER_THRESHOLD, | 422 | .ctl_name = NET_IPV4_INET_PEER_THRESHOLD, |
@@ -424,7 +424,7 @@ static struct ctl_table ipv4_table[] = { | |||
424 | .data = &inet_peer_threshold, | 424 | .data = &inet_peer_threshold, |
425 | .maxlen = sizeof(int), | 425 | .maxlen = sizeof(int), |
426 | .mode = 0644, | 426 | .mode = 0644, |
427 | .proc_handler = &proc_dointvec | 427 | .proc_handler = proc_dointvec |
428 | }, | 428 | }, |
429 | { | 429 | { |
430 | .ctl_name = NET_IPV4_INET_PEER_MINTTL, | 430 | .ctl_name = NET_IPV4_INET_PEER_MINTTL, |
@@ -432,8 +432,8 @@ static struct ctl_table ipv4_table[] = { | |||
432 | .data = &inet_peer_minttl, | 432 | .data = &inet_peer_minttl, |
433 | .maxlen = sizeof(int), | 433 | .maxlen = sizeof(int), |
434 | .mode = 0644, | 434 | .mode = 0644, |
435 | .proc_handler = &proc_dointvec_jiffies, | 435 | .proc_handler = proc_dointvec_jiffies, |
436 | .strategy = &sysctl_jiffies | 436 | .strategy = sysctl_jiffies |
437 | }, | 437 | }, |
438 | { | 438 | { |
439 | .ctl_name = NET_IPV4_INET_PEER_MAXTTL, | 439 | .ctl_name = NET_IPV4_INET_PEER_MAXTTL, |
@@ -441,8 +441,8 @@ static struct ctl_table ipv4_table[] = { | |||
441 | .data = &inet_peer_maxttl, | 441 | .data = &inet_peer_maxttl, |
442 | .maxlen = sizeof(int), | 442 | .maxlen = sizeof(int), |
443 | .mode = 0644, | 443 | .mode = 0644, |
444 | .proc_handler = &proc_dointvec_jiffies, | 444 | .proc_handler = proc_dointvec_jiffies, |
445 | .strategy = &sysctl_jiffies | 445 | .strategy = sysctl_jiffies |
446 | }, | 446 | }, |
447 | { | 447 | { |
448 | .ctl_name = NET_IPV4_INET_PEER_GC_MINTIME, | 448 | .ctl_name = NET_IPV4_INET_PEER_GC_MINTIME, |
@@ -450,8 +450,8 @@ static struct ctl_table ipv4_table[] = { | |||
450 | .data = &inet_peer_gc_mintime, | 450 | .data = &inet_peer_gc_mintime, |
451 | .maxlen = sizeof(int), | 451 | .maxlen = sizeof(int), |
452 | .mode = 0644, | 452 | .mode = 0644, |
453 | .proc_handler = &proc_dointvec_jiffies, | 453 | .proc_handler = proc_dointvec_jiffies, |
454 | .strategy = &sysctl_jiffies | 454 | .strategy = sysctl_jiffies |
455 | }, | 455 | }, |
456 | { | 456 | { |
457 | .ctl_name = NET_IPV4_INET_PEER_GC_MAXTIME, | 457 | .ctl_name = NET_IPV4_INET_PEER_GC_MAXTIME, |
@@ -459,8 +459,8 @@ static struct ctl_table ipv4_table[] = { | |||
459 | .data = &inet_peer_gc_maxtime, | 459 | .data = &inet_peer_gc_maxtime, |
460 | .maxlen = sizeof(int), | 460 | .maxlen = sizeof(int), |
461 | .mode = 0644, | 461 | .mode = 0644, |
462 | .proc_handler = &proc_dointvec_jiffies, | 462 | .proc_handler = proc_dointvec_jiffies, |
463 | .strategy = &sysctl_jiffies | 463 | .strategy = sysctl_jiffies |
464 | }, | 464 | }, |
465 | { | 465 | { |
466 | .ctl_name = NET_TCP_ORPHAN_RETRIES, | 466 | .ctl_name = NET_TCP_ORPHAN_RETRIES, |
@@ -468,7 +468,7 @@ static struct ctl_table ipv4_table[] = { | |||
468 | .data = &sysctl_tcp_orphan_retries, | 468 | .data = &sysctl_tcp_orphan_retries, |
469 | .maxlen = sizeof(int), | 469 | .maxlen = sizeof(int), |
470 | .mode = 0644, | 470 | .mode = 0644, |
471 | .proc_handler = &proc_dointvec | 471 | .proc_handler = proc_dointvec |
472 | }, | 472 | }, |
473 | { | 473 | { |
474 | .ctl_name = NET_TCP_FACK, | 474 | .ctl_name = NET_TCP_FACK, |
@@ -476,7 +476,7 @@ static struct ctl_table ipv4_table[] = { | |||
476 | .data = &sysctl_tcp_fack, | 476 | .data = &sysctl_tcp_fack, |
477 | .maxlen = sizeof(int), | 477 | .maxlen = sizeof(int), |
478 | .mode = 0644, | 478 | .mode = 0644, |
479 | .proc_handler = &proc_dointvec | 479 | .proc_handler = proc_dointvec |
480 | }, | 480 | }, |
481 | { | 481 | { |
482 | .ctl_name = NET_TCP_REORDERING, | 482 | .ctl_name = NET_TCP_REORDERING, |
@@ -484,7 +484,7 @@ static struct ctl_table ipv4_table[] = { | |||
484 | .data = &sysctl_tcp_reordering, | 484 | .data = &sysctl_tcp_reordering, |
485 | .maxlen = sizeof(int), | 485 | .maxlen = sizeof(int), |
486 | .mode = 0644, | 486 | .mode = 0644, |
487 | .proc_handler = &proc_dointvec | 487 | .proc_handler = proc_dointvec |
488 | }, | 488 | }, |
489 | { | 489 | { |
490 | .ctl_name = NET_TCP_ECN, | 490 | .ctl_name = NET_TCP_ECN, |
@@ -492,7 +492,7 @@ static struct ctl_table ipv4_table[] = { | |||
492 | .data = &sysctl_tcp_ecn, | 492 | .data = &sysctl_tcp_ecn, |
493 | .maxlen = sizeof(int), | 493 | .maxlen = sizeof(int), |
494 | .mode = 0644, | 494 | .mode = 0644, |
495 | .proc_handler = &proc_dointvec | 495 | .proc_handler = proc_dointvec |
496 | }, | 496 | }, |
497 | { | 497 | { |
498 | .ctl_name = NET_TCP_DSACK, | 498 | .ctl_name = NET_TCP_DSACK, |
@@ -500,7 +500,7 @@ static struct ctl_table ipv4_table[] = { | |||
500 | .data = &sysctl_tcp_dsack, | 500 | .data = &sysctl_tcp_dsack, |
501 | .maxlen = sizeof(int), | 501 | .maxlen = sizeof(int), |
502 | .mode = 0644, | 502 | .mode = 0644, |
503 | .proc_handler = &proc_dointvec | 503 | .proc_handler = proc_dointvec |
504 | }, | 504 | }, |
505 | { | 505 | { |
506 | .ctl_name = NET_TCP_MEM, | 506 | .ctl_name = NET_TCP_MEM, |
@@ -508,7 +508,7 @@ static struct ctl_table ipv4_table[] = { | |||
508 | .data = &sysctl_tcp_mem, | 508 | .data = &sysctl_tcp_mem, |
509 | .maxlen = sizeof(sysctl_tcp_mem), | 509 | .maxlen = sizeof(sysctl_tcp_mem), |
510 | .mode = 0644, | 510 | .mode = 0644, |
511 | .proc_handler = &proc_dointvec | 511 | .proc_handler = proc_dointvec |
512 | }, | 512 | }, |
513 | { | 513 | { |
514 | .ctl_name = NET_TCP_WMEM, | 514 | .ctl_name = NET_TCP_WMEM, |
@@ -516,7 +516,7 @@ static struct ctl_table ipv4_table[] = { | |||
516 | .data = &sysctl_tcp_wmem, | 516 | .data = &sysctl_tcp_wmem, |
517 | .maxlen = sizeof(sysctl_tcp_wmem), | 517 | .maxlen = sizeof(sysctl_tcp_wmem), |
518 | .mode = 0644, | 518 | .mode = 0644, |
519 | .proc_handler = &proc_dointvec | 519 | .proc_handler = proc_dointvec |
520 | }, | 520 | }, |
521 | { | 521 | { |
522 | .ctl_name = NET_TCP_RMEM, | 522 | .ctl_name = NET_TCP_RMEM, |
@@ -524,7 +524,7 @@ static struct ctl_table ipv4_table[] = { | |||
524 | .data = &sysctl_tcp_rmem, | 524 | .data = &sysctl_tcp_rmem, |
525 | .maxlen = sizeof(sysctl_tcp_rmem), | 525 | .maxlen = sizeof(sysctl_tcp_rmem), |
526 | .mode = 0644, | 526 | .mode = 0644, |
527 | .proc_handler = &proc_dointvec | 527 | .proc_handler = proc_dointvec |
528 | }, | 528 | }, |
529 | { | 529 | { |
530 | .ctl_name = NET_TCP_APP_WIN, | 530 | .ctl_name = NET_TCP_APP_WIN, |
@@ -532,7 +532,7 @@ static struct ctl_table ipv4_table[] = { | |||
532 | .data = &sysctl_tcp_app_win, | 532 | .data = &sysctl_tcp_app_win, |
533 | .maxlen = sizeof(int), | 533 | .maxlen = sizeof(int), |
534 | .mode = 0644, | 534 | .mode = 0644, |
535 | .proc_handler = &proc_dointvec | 535 | .proc_handler = proc_dointvec |
536 | }, | 536 | }, |
537 | { | 537 | { |
538 | .ctl_name = NET_TCP_ADV_WIN_SCALE, | 538 | .ctl_name = NET_TCP_ADV_WIN_SCALE, |
@@ -540,7 +540,7 @@ static struct ctl_table ipv4_table[] = { | |||
540 | .data = &sysctl_tcp_adv_win_scale, | 540 | .data = &sysctl_tcp_adv_win_scale, |
541 | .maxlen = sizeof(int), | 541 | .maxlen = sizeof(int), |
542 | .mode = 0644, | 542 | .mode = 0644, |
543 | .proc_handler = &proc_dointvec | 543 | .proc_handler = proc_dointvec |
544 | }, | 544 | }, |
545 | { | 545 | { |
546 | .ctl_name = NET_TCP_TW_REUSE, | 546 | .ctl_name = NET_TCP_TW_REUSE, |
@@ -548,7 +548,7 @@ static struct ctl_table ipv4_table[] = { | |||
548 | .data = &sysctl_tcp_tw_reuse, | 548 | .data = &sysctl_tcp_tw_reuse, |
549 | .maxlen = sizeof(int), | 549 | .maxlen = sizeof(int), |
550 | .mode = 0644, | 550 | .mode = 0644, |
551 | .proc_handler = &proc_dointvec | 551 | .proc_handler = proc_dointvec |
552 | }, | 552 | }, |
553 | { | 553 | { |
554 | .ctl_name = NET_TCP_FRTO, | 554 | .ctl_name = NET_TCP_FRTO, |
@@ -556,7 +556,7 @@ static struct ctl_table ipv4_table[] = { | |||
556 | .data = &sysctl_tcp_frto, | 556 | .data = &sysctl_tcp_frto, |
557 | .maxlen = sizeof(int), | 557 | .maxlen = sizeof(int), |
558 | .mode = 0644, | 558 | .mode = 0644, |
559 | .proc_handler = &proc_dointvec | 559 | .proc_handler = proc_dointvec |
560 | }, | 560 | }, |
561 | { | 561 | { |
562 | .ctl_name = NET_TCP_FRTO_RESPONSE, | 562 | .ctl_name = NET_TCP_FRTO_RESPONSE, |
@@ -564,7 +564,7 @@ static struct ctl_table ipv4_table[] = { | |||
564 | .data = &sysctl_tcp_frto_response, | 564 | .data = &sysctl_tcp_frto_response, |
565 | .maxlen = sizeof(int), | 565 | .maxlen = sizeof(int), |
566 | .mode = 0644, | 566 | .mode = 0644, |
567 | .proc_handler = &proc_dointvec | 567 | .proc_handler = proc_dointvec |
568 | }, | 568 | }, |
569 | { | 569 | { |
570 | .ctl_name = NET_TCP_LOW_LATENCY, | 570 | .ctl_name = NET_TCP_LOW_LATENCY, |
@@ -572,7 +572,7 @@ static struct ctl_table ipv4_table[] = { | |||
572 | .data = &sysctl_tcp_low_latency, | 572 | .data = &sysctl_tcp_low_latency, |
573 | .maxlen = sizeof(int), | 573 | .maxlen = sizeof(int), |
574 | .mode = 0644, | 574 | .mode = 0644, |
575 | .proc_handler = &proc_dointvec | 575 | .proc_handler = proc_dointvec |
576 | }, | 576 | }, |
577 | { | 577 | { |
578 | .ctl_name = NET_TCP_NO_METRICS_SAVE, | 578 | .ctl_name = NET_TCP_NO_METRICS_SAVE, |
@@ -580,7 +580,7 @@ static struct ctl_table ipv4_table[] = { | |||
580 | .data = &sysctl_tcp_nometrics_save, | 580 | .data = &sysctl_tcp_nometrics_save, |
581 | .maxlen = sizeof(int), | 581 | .maxlen = sizeof(int), |
582 | .mode = 0644, | 582 | .mode = 0644, |
583 | .proc_handler = &proc_dointvec, | 583 | .proc_handler = proc_dointvec, |
584 | }, | 584 | }, |
585 | { | 585 | { |
586 | .ctl_name = NET_TCP_MODERATE_RCVBUF, | 586 | .ctl_name = NET_TCP_MODERATE_RCVBUF, |
@@ -588,7 +588,7 @@ static struct ctl_table ipv4_table[] = { | |||
588 | .data = &sysctl_tcp_moderate_rcvbuf, | 588 | .data = &sysctl_tcp_moderate_rcvbuf, |
589 | .maxlen = sizeof(int), | 589 | .maxlen = sizeof(int), |
590 | .mode = 0644, | 590 | .mode = 0644, |
591 | .proc_handler = &proc_dointvec, | 591 | .proc_handler = proc_dointvec, |
592 | }, | 592 | }, |
593 | { | 593 | { |
594 | .ctl_name = NET_TCP_TSO_WIN_DIVISOR, | 594 | .ctl_name = NET_TCP_TSO_WIN_DIVISOR, |
@@ -596,15 +596,15 @@ static struct ctl_table ipv4_table[] = { | |||
596 | .data = &sysctl_tcp_tso_win_divisor, | 596 | .data = &sysctl_tcp_tso_win_divisor, |
597 | .maxlen = sizeof(int), | 597 | .maxlen = sizeof(int), |
598 | .mode = 0644, | 598 | .mode = 0644, |
599 | .proc_handler = &proc_dointvec, | 599 | .proc_handler = proc_dointvec, |
600 | }, | 600 | }, |
601 | { | 601 | { |
602 | .ctl_name = NET_TCP_CONG_CONTROL, | 602 | .ctl_name = NET_TCP_CONG_CONTROL, |
603 | .procname = "tcp_congestion_control", | 603 | .procname = "tcp_congestion_control", |
604 | .mode = 0644, | 604 | .mode = 0644, |
605 | .maxlen = TCP_CA_NAME_MAX, | 605 | .maxlen = TCP_CA_NAME_MAX, |
606 | .proc_handler = &proc_tcp_congestion_control, | 606 | .proc_handler = proc_tcp_congestion_control, |
607 | .strategy = &sysctl_tcp_congestion_control, | 607 | .strategy = sysctl_tcp_congestion_control, |
608 | }, | 608 | }, |
609 | { | 609 | { |
610 | .ctl_name = NET_TCP_ABC, | 610 | .ctl_name = NET_TCP_ABC, |
@@ -612,7 +612,7 @@ static struct ctl_table ipv4_table[] = { | |||
612 | .data = &sysctl_tcp_abc, | 612 | .data = &sysctl_tcp_abc, |
613 | .maxlen = sizeof(int), | 613 | .maxlen = sizeof(int), |
614 | .mode = 0644, | 614 | .mode = 0644, |
615 | .proc_handler = &proc_dointvec, | 615 | .proc_handler = proc_dointvec, |
616 | }, | 616 | }, |
617 | { | 617 | { |
618 | .ctl_name = NET_TCP_MTU_PROBING, | 618 | .ctl_name = NET_TCP_MTU_PROBING, |
@@ -620,7 +620,7 @@ static struct ctl_table ipv4_table[] = { | |||
620 | .data = &sysctl_tcp_mtu_probing, | 620 | .data = &sysctl_tcp_mtu_probing, |
621 | .maxlen = sizeof(int), | 621 | .maxlen = sizeof(int), |
622 | .mode = 0644, | 622 | .mode = 0644, |
623 | .proc_handler = &proc_dointvec, | 623 | .proc_handler = proc_dointvec, |
624 | }, | 624 | }, |
625 | { | 625 | { |
626 | .ctl_name = NET_TCP_BASE_MSS, | 626 | .ctl_name = NET_TCP_BASE_MSS, |
@@ -628,7 +628,7 @@ static struct ctl_table ipv4_table[] = { | |||
628 | .data = &sysctl_tcp_base_mss, | 628 | .data = &sysctl_tcp_base_mss, |
629 | .maxlen = sizeof(int), | 629 | .maxlen = sizeof(int), |
630 | .mode = 0644, | 630 | .mode = 0644, |
631 | .proc_handler = &proc_dointvec, | 631 | .proc_handler = proc_dointvec, |
632 | }, | 632 | }, |
633 | { | 633 | { |
634 | .ctl_name = NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, | 634 | .ctl_name = NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, |
@@ -636,7 +636,7 @@ static struct ctl_table ipv4_table[] = { | |||
636 | .data = &sysctl_tcp_workaround_signed_windows, | 636 | .data = &sysctl_tcp_workaround_signed_windows, |
637 | .maxlen = sizeof(int), | 637 | .maxlen = sizeof(int), |
638 | .mode = 0644, | 638 | .mode = 0644, |
639 | .proc_handler = &proc_dointvec | 639 | .proc_handler = proc_dointvec |
640 | }, | 640 | }, |
641 | #ifdef CONFIG_NET_DMA | 641 | #ifdef CONFIG_NET_DMA |
642 | { | 642 | { |
@@ -645,7 +645,7 @@ static struct ctl_table ipv4_table[] = { | |||
645 | .data = &sysctl_tcp_dma_copybreak, | 645 | .data = &sysctl_tcp_dma_copybreak, |
646 | .maxlen = sizeof(int), | 646 | .maxlen = sizeof(int), |
647 | .mode = 0644, | 647 | .mode = 0644, |
648 | .proc_handler = &proc_dointvec | 648 | .proc_handler = proc_dointvec |
649 | }, | 649 | }, |
650 | #endif | 650 | #endif |
651 | { | 651 | { |
@@ -654,7 +654,7 @@ static struct ctl_table ipv4_table[] = { | |||
654 | .data = &sysctl_tcp_slow_start_after_idle, | 654 | .data = &sysctl_tcp_slow_start_after_idle, |
655 | .maxlen = sizeof(int), | 655 | .maxlen = sizeof(int), |
656 | .mode = 0644, | 656 | .mode = 0644, |
657 | .proc_handler = &proc_dointvec | 657 | .proc_handler = proc_dointvec |
658 | }, | 658 | }, |
659 | #ifdef CONFIG_NETLABEL | 659 | #ifdef CONFIG_NETLABEL |
660 | { | 660 | { |
@@ -663,7 +663,7 @@ static struct ctl_table ipv4_table[] = { | |||
663 | .data = &cipso_v4_cache_enabled, | 663 | .data = &cipso_v4_cache_enabled, |
664 | .maxlen = sizeof(int), | 664 | .maxlen = sizeof(int), |
665 | .mode = 0644, | 665 | .mode = 0644, |
666 | .proc_handler = &proc_dointvec, | 666 | .proc_handler = proc_dointvec, |
667 | }, | 667 | }, |
668 | { | 668 | { |
669 | .ctl_name = NET_CIPSOV4_CACHE_BUCKET_SIZE, | 669 | .ctl_name = NET_CIPSOV4_CACHE_BUCKET_SIZE, |
@@ -671,7 +671,7 @@ static struct ctl_table ipv4_table[] = { | |||
671 | .data = &cipso_v4_cache_bucketsize, | 671 | .data = &cipso_v4_cache_bucketsize, |
672 | .maxlen = sizeof(int), | 672 | .maxlen = sizeof(int), |
673 | .mode = 0644, | 673 | .mode = 0644, |
674 | .proc_handler = &proc_dointvec, | 674 | .proc_handler = proc_dointvec, |
675 | }, | 675 | }, |
676 | { | 676 | { |
677 | .ctl_name = NET_CIPSOV4_RBM_OPTFMT, | 677 | .ctl_name = NET_CIPSOV4_RBM_OPTFMT, |
@@ -679,7 +679,7 @@ static struct ctl_table ipv4_table[] = { | |||
679 | .data = &cipso_v4_rbm_optfmt, | 679 | .data = &cipso_v4_rbm_optfmt, |
680 | .maxlen = sizeof(int), | 680 | .maxlen = sizeof(int), |
681 | .mode = 0644, | 681 | .mode = 0644, |
682 | .proc_handler = &proc_dointvec, | 682 | .proc_handler = proc_dointvec, |
683 | }, | 683 | }, |
684 | { | 684 | { |
685 | .ctl_name = NET_CIPSOV4_RBM_STRICTVALID, | 685 | .ctl_name = NET_CIPSOV4_RBM_STRICTVALID, |
@@ -687,22 +687,22 @@ static struct ctl_table ipv4_table[] = { | |||
687 | .data = &cipso_v4_rbm_strictvalid, | 687 | .data = &cipso_v4_rbm_strictvalid, |
688 | .maxlen = sizeof(int), | 688 | .maxlen = sizeof(int), |
689 | .mode = 0644, | 689 | .mode = 0644, |
690 | .proc_handler = &proc_dointvec, | 690 | .proc_handler = proc_dointvec, |
691 | }, | 691 | }, |
692 | #endif /* CONFIG_NETLABEL */ | 692 | #endif /* CONFIG_NETLABEL */ |
693 | { | 693 | { |
694 | .procname = "tcp_available_congestion_control", | 694 | .procname = "tcp_available_congestion_control", |
695 | .maxlen = TCP_CA_BUF_MAX, | 695 | .maxlen = TCP_CA_BUF_MAX, |
696 | .mode = 0444, | 696 | .mode = 0444, |
697 | .proc_handler = &proc_tcp_available_congestion_control, | 697 | .proc_handler = proc_tcp_available_congestion_control, |
698 | }, | 698 | }, |
699 | { | 699 | { |
700 | .ctl_name = NET_TCP_ALLOWED_CONG_CONTROL, | 700 | .ctl_name = NET_TCP_ALLOWED_CONG_CONTROL, |
701 | .procname = "tcp_allowed_congestion_control", | 701 | .procname = "tcp_allowed_congestion_control", |
702 | .maxlen = TCP_CA_BUF_MAX, | 702 | .maxlen = TCP_CA_BUF_MAX, |
703 | .mode = 0644, | 703 | .mode = 0644, |
704 | .proc_handler = &proc_allowed_congestion_control, | 704 | .proc_handler = proc_allowed_congestion_control, |
705 | .strategy = &strategy_allowed_congestion_control, | 705 | .strategy = strategy_allowed_congestion_control, |
706 | }, | 706 | }, |
707 | { | 707 | { |
708 | .ctl_name = NET_TCP_MAX_SSTHRESH, | 708 | .ctl_name = NET_TCP_MAX_SSTHRESH, |
@@ -710,7 +710,7 @@ static struct ctl_table ipv4_table[] = { | |||
710 | .data = &sysctl_tcp_max_ssthresh, | 710 | .data = &sysctl_tcp_max_ssthresh, |
711 | .maxlen = sizeof(int), | 711 | .maxlen = sizeof(int), |
712 | .mode = 0644, | 712 | .mode = 0644, |
713 | .proc_handler = &proc_dointvec, | 713 | .proc_handler = proc_dointvec, |
714 | }, | 714 | }, |
715 | { | 715 | { |
716 | .ctl_name = CTL_UNNUMBERED, | 716 | .ctl_name = CTL_UNNUMBERED, |
@@ -718,8 +718,8 @@ static struct ctl_table ipv4_table[] = { | |||
718 | .data = &sysctl_udp_mem, | 718 | .data = &sysctl_udp_mem, |
719 | .maxlen = sizeof(sysctl_udp_mem), | 719 | .maxlen = sizeof(sysctl_udp_mem), |
720 | .mode = 0644, | 720 | .mode = 0644, |
721 | .proc_handler = &proc_dointvec_minmax, | 721 | .proc_handler = proc_dointvec_minmax, |
722 | .strategy = &sysctl_intvec, | 722 | .strategy = sysctl_intvec, |
723 | .extra1 = &zero | 723 | .extra1 = &zero |
724 | }, | 724 | }, |
725 | { | 725 | { |
@@ -728,8 +728,8 @@ static struct ctl_table ipv4_table[] = { | |||
728 | .data = &sysctl_udp_rmem_min, | 728 | .data = &sysctl_udp_rmem_min, |
729 | .maxlen = sizeof(sysctl_udp_rmem_min), | 729 | .maxlen = sizeof(sysctl_udp_rmem_min), |
730 | .mode = 0644, | 730 | .mode = 0644, |
731 | .proc_handler = &proc_dointvec_minmax, | 731 | .proc_handler = proc_dointvec_minmax, |
732 | .strategy = &sysctl_intvec, | 732 | .strategy = sysctl_intvec, |
733 | .extra1 = &zero | 733 | .extra1 = &zero |
734 | }, | 734 | }, |
735 | { | 735 | { |
@@ -738,8 +738,8 @@ static struct ctl_table ipv4_table[] = { | |||
738 | .data = &sysctl_udp_wmem_min, | 738 | .data = &sysctl_udp_wmem_min, |
739 | .maxlen = sizeof(sysctl_udp_wmem_min), | 739 | .maxlen = sizeof(sysctl_udp_wmem_min), |
740 | .mode = 0644, | 740 | .mode = 0644, |
741 | .proc_handler = &proc_dointvec_minmax, | 741 | .proc_handler = proc_dointvec_minmax, |
742 | .strategy = &sysctl_intvec, | 742 | .strategy = sysctl_intvec, |
743 | .extra1 = &zero | 743 | .extra1 = &zero |
744 | }, | 744 | }, |
745 | { .ctl_name = 0 } | 745 | { .ctl_name = 0 } |
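The recurring change in these sysctl tables, dropping the `&` from `.proc_handler = &proc_dointvec` and friends, is purely cosmetic: in C a function name used in an expression already decays to a pointer to that function, so `&proc_dointvec` and `proc_dointvec` store the same value. A minimal standalone sketch of that equivalence (the handler name and signature below are simplified stand-ins, not the kernel's):

```c
#include <assert.h>
#include <stdio.h>

/* Simplified stand-in for a sysctl handler table entry. */
struct ctl_entry {
	const char *procname;
	int (*proc_handler)(int *value);
};

static int handle_int(int *value)
{
	return *value;                 /* trivially "handle" the value */
}

int main(void)
{
	/* Both initializers store the same function pointer. */
	struct ctl_entry with_amp = { "foo", &handle_int };
	struct ctl_entry without  = { "foo", handle_int  };

	assert(with_amp.proc_handler == without.proc_handler);
	printf("identical: %d\n", with_amp.proc_handler == without.proc_handler);
	return 0;
}
```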
@@ -752,7 +752,7 @@ static struct ctl_table ipv4_net_table[] = { | |||
752 | .data = &init_net.ipv4.sysctl_icmp_echo_ignore_all, | 752 | .data = &init_net.ipv4.sysctl_icmp_echo_ignore_all, |
753 | .maxlen = sizeof(int), | 753 | .maxlen = sizeof(int), |
754 | .mode = 0644, | 754 | .mode = 0644, |
755 | .proc_handler = &proc_dointvec | 755 | .proc_handler = proc_dointvec |
756 | }, | 756 | }, |
757 | { | 757 | { |
758 | .ctl_name = NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS, | 758 | .ctl_name = NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS, |
@@ -760,7 +760,7 @@ static struct ctl_table ipv4_net_table[] = { | |||
760 | .data = &init_net.ipv4.sysctl_icmp_echo_ignore_broadcasts, | 760 | .data = &init_net.ipv4.sysctl_icmp_echo_ignore_broadcasts, |
761 | .maxlen = sizeof(int), | 761 | .maxlen = sizeof(int), |
762 | .mode = 0644, | 762 | .mode = 0644, |
763 | .proc_handler = &proc_dointvec | 763 | .proc_handler = proc_dointvec |
764 | }, | 764 | }, |
765 | { | 765 | { |
766 | .ctl_name = NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES, | 766 | .ctl_name = NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES, |
@@ -768,7 +768,7 @@ static struct ctl_table ipv4_net_table[] = { | |||
768 | .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses, | 768 | .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses, |
769 | .maxlen = sizeof(int), | 769 | .maxlen = sizeof(int), |
770 | .mode = 0644, | 770 | .mode = 0644, |
771 | .proc_handler = &proc_dointvec | 771 | .proc_handler = proc_dointvec |
772 | }, | 772 | }, |
773 | { | 773 | { |
774 | .ctl_name = NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR, | 774 | .ctl_name = NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR, |
@@ -776,7 +776,7 @@ static struct ctl_table ipv4_net_table[] = { | |||
776 | .data = &init_net.ipv4.sysctl_icmp_errors_use_inbound_ifaddr, | 776 | .data = &init_net.ipv4.sysctl_icmp_errors_use_inbound_ifaddr, |
777 | .maxlen = sizeof(int), | 777 | .maxlen = sizeof(int), |
778 | .mode = 0644, | 778 | .mode = 0644, |
779 | .proc_handler = &proc_dointvec | 779 | .proc_handler = proc_dointvec |
780 | }, | 780 | }, |
781 | { | 781 | { |
782 | .ctl_name = NET_IPV4_ICMP_RATELIMIT, | 782 | .ctl_name = NET_IPV4_ICMP_RATELIMIT, |
@@ -784,8 +784,8 @@ static struct ctl_table ipv4_net_table[] = { | |||
784 | .data = &init_net.ipv4.sysctl_icmp_ratelimit, | 784 | .data = &init_net.ipv4.sysctl_icmp_ratelimit, |
785 | .maxlen = sizeof(int), | 785 | .maxlen = sizeof(int), |
786 | .mode = 0644, | 786 | .mode = 0644, |
787 | .proc_handler = &proc_dointvec_ms_jiffies, | 787 | .proc_handler = proc_dointvec_ms_jiffies, |
788 | .strategy = &sysctl_ms_jiffies | 788 | .strategy = sysctl_ms_jiffies |
789 | }, | 789 | }, |
790 | { | 790 | { |
791 | .ctl_name = NET_IPV4_ICMP_RATEMASK, | 791 | .ctl_name = NET_IPV4_ICMP_RATEMASK, |
@@ -793,7 +793,15 @@ static struct ctl_table ipv4_net_table[] = { | |||
793 | .data = &init_net.ipv4.sysctl_icmp_ratemask, | 793 | .data = &init_net.ipv4.sysctl_icmp_ratemask, |
794 | .maxlen = sizeof(int), | 794 | .maxlen = sizeof(int), |
795 | .mode = 0644, | 795 | .mode = 0644, |
796 | .proc_handler = &proc_dointvec | 796 | .proc_handler = proc_dointvec |
797 | }, | ||
798 | { | ||
799 | .ctl_name = CTL_UNNUMBERED, | ||
800 | .procname = "rt_cache_rebuild_count", | ||
801 | .data = &init_net.ipv4.sysctl_rt_cache_rebuild_count, | ||
802 | .maxlen = sizeof(int), | ||
803 | .mode = 0644, | ||
804 | .proc_handler = proc_dointvec | ||
797 | }, | 805 | }, |
798 | { } | 806 | { } |
799 | }; | 807 | }; |
@@ -827,8 +835,12 @@ static __net_init int ipv4_sysctl_init_net(struct net *net) | |||
827 | &net->ipv4.sysctl_icmp_ratelimit; | 835 | &net->ipv4.sysctl_icmp_ratelimit; |
828 | table[5].data = | 836 | table[5].data = |
829 | &net->ipv4.sysctl_icmp_ratemask; | 837 | &net->ipv4.sysctl_icmp_ratemask; |
838 | table[6].data = | ||
839 | &net->ipv4.sysctl_rt_cache_rebuild_count; | ||
830 | } | 840 | } |
831 | 841 | ||
842 | net->ipv4.sysctl_rt_cache_rebuild_count = 4; | ||
843 | |||
832 | net->ipv4.ipv4_hdr = register_net_sysctl_table(net, | 844 | net->ipv4.ipv4_hdr = register_net_sysctl_table(net, |
833 | net_ipv4_ctl_path, table); | 845 | net_ipv4_ctl_path, table); |
834 | if (net->ipv4.ipv4_hdr == NULL) | 846 | if (net->ipv4.ipv4_hdr == NULL) |
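The new `rt_cache_rebuild_count` entry is added to `ipv4_net_table` with `CTL_UNNUMBERED`, and `ipv4_sysctl_init_net()` points `table[6].data` at the per-namespace field and seeds a default of 4 before registering the table, so each network namespace gets its own `/proc/sys/net/ipv4/rt_cache_rebuild_count`. A hedged, self-contained model of this "template table plus per-instance data pointer" pattern; the struct names and the single-entry table are illustrative, not the kernel's:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-ins for struct ctl_table and struct net. */
struct ctl_entry {
	const char *procname;
	int *data;                      /* points at the backing variable */
};

struct fake_net {
	int rt_cache_rebuild_count;     /* per-namespace sysctl backing store */
};

/* Template table; the entry initially points at the default instance. */
static struct fake_net init_ns = { .rt_cache_rebuild_count = 4 };
static struct ctl_entry template_table[] = {
	{ "rt_cache_rebuild_count", &init_ns.rt_cache_rebuild_count },
	{ NULL, NULL },
};

/* Mimics ipv4_sysctl_init_net(): copy the template, retarget .data. */
static struct ctl_entry *register_for_ns(struct fake_net *ns)
{
	struct ctl_entry *t = malloc(sizeof(template_table));

	memcpy(t, template_table, sizeof(template_table));
	t[0].data = &ns->rt_cache_rebuild_count;  /* like table[6].data = ... */
	ns->rt_cache_rebuild_count = 4;           /* default seeded per namespace */
	return t;
}

int main(void)
{
	struct fake_net other_ns;
	struct ctl_entry *t = register_for_ns(&other_ns);

	*t[0].data = 8;                           /* "write" via the sysctl */
	printf("init=%d other=%d\n",
	       init_ns.rt_cache_rebuild_count, other_ns.rt_cache_rebuild_count);
	free(t);
	return 0;
}
```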
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index c5aca0bb116a..f60a5917e54d 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1680,7 +1680,7 @@ void tcp_set_state(struct sock *sk, int state) | |||
1680 | inet_put_port(sk); | 1680 | inet_put_port(sk); |
1681 | /* fall through */ | 1681 | /* fall through */ |
1682 | default: | 1682 | default: |
1683 | if (oldstate==TCP_ESTABLISHED) | 1683 | if (oldstate == TCP_ESTABLISHED) |
1684 | TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); | 1684 | TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); |
1685 | } | 1685 | } |
1686 | 1686 | ||
@@ -1690,7 +1690,7 @@ void tcp_set_state(struct sock *sk, int state) | |||
1690 | sk->sk_state = state; | 1690 | sk->sk_state = state; |
1691 | 1691 | ||
1692 | #ifdef STATE_TRACE | 1692 | #ifdef STATE_TRACE |
1693 | SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]); | 1693 | SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); |
1694 | #endif | 1694 | #endif |
1695 | } | 1695 | } |
1696 | EXPORT_SYMBOL_GPL(tcp_set_state); | 1696 | EXPORT_SYMBOL_GPL(tcp_set_state); |
@@ -2650,7 +2650,7 @@ EXPORT_SYMBOL(tcp_md5_hash_key); | |||
2650 | 2650 | ||
2651 | void tcp_done(struct sock *sk) | 2651 | void tcp_done(struct sock *sk) |
2652 | { | 2652 | { |
2653 | if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) | 2653 | if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) |
2654 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); | 2654 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); |
2655 | 2655 | ||
2656 | tcp_set_state(sk, TCP_CLOSE); | 2656 | tcp_set_state(sk, TCP_CLOSE); |
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 4a1221e5e8ee..ee467ec40c4f 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c | |||
@@ -1,13 +1,23 @@ | |||
1 | /* | 1 | /* |
2 | * TCP CUBIC: Binary Increase Congestion control for TCP v2.2 | 2 | * TCP CUBIC: Binary Increase Congestion control for TCP v2.3 |
3 | * Home page: | 3 | * Home page: |
4 | * http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC | 4 | * http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC |
5 | * This is from the implementation of CUBIC TCP in | 5 | * This is from the implementation of CUBIC TCP in |
6 | * Injong Rhee, Lisong Xu. | 6 | * Sangtae Ha, Injong Rhee and Lisong Xu, |
7 | * "CUBIC: A New TCP-Friendly High-Speed TCP Variant | 7 | * "CUBIC: A New TCP-Friendly High-Speed TCP Variant" |
8 | * in PFLDnet 2005 | 8 | * in ACM SIGOPS Operating System Review, July 2008. |
9 | * Available from: | 9 | * Available from: |
10 | * http://netsrv.csc.ncsu.edu/export/cubic-paper.pdf | 10 | * http://netsrv.csc.ncsu.edu/export/cubic_a_new_tcp_2008.pdf |
11 | * | ||
12 | * CUBIC integrates a new slow start algorithm, called HyStart. | ||
13 | * The details of HyStart are presented in | ||
14 | * Sangtae Ha and Injong Rhee, | ||
15 | * "Taming the Elephants: New TCP Slow Start", NCSU TechReport 2008. | ||
16 | * Available from: | ||
17 | * http://netsrv.csc.ncsu.edu/export/hystart_techreport_2008.pdf | ||
18 | * | ||
19 | * All testing results are available from: | ||
20 | * http://netsrv.csc.ncsu.edu/wiki/index.php/TCP_Testing | ||
11 | * | 21 | * |
12 | * Unless CUBIC is enabled and congestion window is large | 22 | * Unless CUBIC is enabled and congestion window is large |
13 | * this behaves the same as the original Reno. | 23 | * this behaves the same as the original Reno. |
@@ -23,12 +33,26 @@ | |||
23 | */ | 33 | */ |
24 | #define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */ | 34 | #define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */ |
25 | 35 | ||
36 | /* Two methods of hybrid slow start */ | ||
37 | #define HYSTART_ACK_TRAIN 0x1 | ||
38 | #define HYSTART_DELAY 0x2 | ||
39 | |||
40 | /* Number of delay samples for detecting the increase of delay */ | ||
41 | #define HYSTART_MIN_SAMPLES 8 | ||
42 | #define HYSTART_DELAY_MIN (2U<<3) | ||
43 | #define HYSTART_DELAY_MAX (16U<<3) | ||
44 | #define HYSTART_DELAY_THRESH(x) clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX) | ||
45 | |||
26 | static int fast_convergence __read_mostly = 1; | 46 | static int fast_convergence __read_mostly = 1; |
27 | static int beta __read_mostly = 717; /* = 717/1024 (BICTCP_BETA_SCALE) */ | 47 | static int beta __read_mostly = 717; /* = 717/1024 (BICTCP_BETA_SCALE) */ |
28 | static int initial_ssthresh __read_mostly; | 48 | static int initial_ssthresh __read_mostly; |
29 | static int bic_scale __read_mostly = 41; | 49 | static int bic_scale __read_mostly = 41; |
30 | static int tcp_friendliness __read_mostly = 1; | 50 | static int tcp_friendliness __read_mostly = 1; |
31 | 51 | ||
52 | static int hystart __read_mostly = 1; | ||
53 | static int hystart_detect __read_mostly = HYSTART_ACK_TRAIN | HYSTART_DELAY; | ||
54 | static int hystart_low_window __read_mostly = 16; | ||
55 | |||
32 | static u32 cube_rtt_scale __read_mostly; | 56 | static u32 cube_rtt_scale __read_mostly; |
33 | static u32 beta_scale __read_mostly; | 57 | static u32 beta_scale __read_mostly; |
34 | static u64 cube_factor __read_mostly; | 58 | static u64 cube_factor __read_mostly; |
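The delay-based HyStart exit compares the current round's minimum RTT against `delay_min` plus a threshold derived from `delay_min >> 4` (one sixteenth of the smallest RTT seen), clamped between `HYSTART_DELAY_MIN` (2U<<3) and `HYSTART_DELAY_MAX` (16U<<3). A small standalone sketch of that clamp; the sample inputs are made up and assumed to be in the same fixed-point units the module keeps `delay_min` in:

```c
#include <stdio.h>

#define HYSTART_DELAY_MIN (2U << 3)
#define HYSTART_DELAY_MAX (16U << 3)

/* Same behaviour as the kernel's clamp() for this use. */
static unsigned int delay_thresh(unsigned int x)
{
	if (x < HYSTART_DELAY_MIN)
		return HYSTART_DELAY_MIN;
	if (x > HYSTART_DELAY_MAX)
		return HYSTART_DELAY_MAX;
	return x;
}

int main(void)
{
	/* Hypothetical delay_min samples, already in the module's units. */
	unsigned int samples[] = { 40, 800, 4000 };

	for (int i = 0; i < 3; i++) {
		unsigned int dmin = samples[i];
		/* threshold fed with delay_min/16, as in hystart_update() */
		printf("delay_min=%u -> exit if curr_rtt > %u\n",
		       dmin, dmin + delay_thresh(dmin >> 4));
	}
	return 0;
}
```

Small `delay_min` values hit the lower clamp (16) and very large ones the upper clamp (128), so the exit condition never becomes either trivially easy or impossibly strict.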
@@ -44,6 +68,13 @@ module_param(bic_scale, int, 0444); | |||
44 | MODULE_PARM_DESC(bic_scale, "scale (scaled by 1024) value for bic function (bic_scale/1024)"); | 68 | MODULE_PARM_DESC(bic_scale, "scale (scaled by 1024) value for bic function (bic_scale/1024)"); |
45 | module_param(tcp_friendliness, int, 0644); | 69 | module_param(tcp_friendliness, int, 0644); |
46 | MODULE_PARM_DESC(tcp_friendliness, "turn on/off tcp friendliness"); | 70 | MODULE_PARM_DESC(tcp_friendliness, "turn on/off tcp friendliness"); |
71 | module_param(hystart, int, 0644); | ||
72 | MODULE_PARM_DESC(hystart, "turn on/off hybrid slow start algorithm"); | ||
73 | module_param(hystart_detect, int, 0644); | ||
74 | MODULE_PARM_DESC(hystart_detect, "hybrid slow start detection mechanisms" | ||
75 | " 1: packet-train 2: delay 3: both packet-train and delay"); | ||
76 | module_param(hystart_low_window, int, 0644); | ||
77 | MODULE_PARM_DESC(hystart_low_window, "lower bound cwnd for hybrid slow start"); | ||
47 | 78 | ||
48 | /* BIC TCP Parameters */ | 79 | /* BIC TCP Parameters */ |
49 | struct bictcp { | 80 | struct bictcp { |
@@ -59,7 +90,13 @@ struct bictcp { | |||
59 | u32 ack_cnt; /* number of acks */ | 90 | u32 ack_cnt; /* number of acks */ |
60 | u32 tcp_cwnd; /* estimated tcp cwnd */ | 91 | u32 tcp_cwnd; /* estimated tcp cwnd */ |
61 | #define ACK_RATIO_SHIFT 4 | 92 | #define ACK_RATIO_SHIFT 4 |
62 | u32 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */ | 93 | u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */ |
94 | u8 sample_cnt; /* number of samples to decide curr_rtt */ | ||
95 | u8 found; /* the exit point is found? */ | ||
96 | u32 round_start; /* beginning of each round */ | ||
97 | u32 end_seq; /* end_seq of the round */ | ||
98 | u32 last_jiffies; /* last time when the ACK spacing is close */ | ||
99 | u32 curr_rtt; /* the minimum rtt of current round */ | ||
63 | }; | 100 | }; |
64 | 101 | ||
65 | static inline void bictcp_reset(struct bictcp *ca) | 102 | static inline void bictcp_reset(struct bictcp *ca) |
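`delayed_ack` shrinks from `u32` to `u16` and the new `sample_cnt`/`found` fields are `u8`, so the three together occupy the same four bytes the old `u32` did; a plausible motivation (not stated in the patch, so treat it as an assumption) is keeping the grown `struct bictcp` within the fixed per-socket congestion-control scratch area returned by `inet_csk_ca()`. A standalone illustration of the packing itself:

```c
#include <stdint.h>
#include <stdio.h>

struct old_tail {                /* before the patch */
	uint32_t delayed_ack;
};

struct new_tail {                /* after the patch */
	uint16_t delayed_ack;
	uint8_t  sample_cnt;
	uint8_t  found;
};

int main(void)
{
	/* Both layouts take four bytes on common ABIs. */
	printf("old=%zu new=%zu\n", sizeof(struct old_tail),
	       sizeof(struct new_tail));
	return 0;
}
```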
@@ -76,12 +113,28 @@ static inline void bictcp_reset(struct bictcp *ca) | |||
76 | ca->delayed_ack = 2 << ACK_RATIO_SHIFT; | 113 | ca->delayed_ack = 2 << ACK_RATIO_SHIFT; |
77 | ca->ack_cnt = 0; | 114 | ca->ack_cnt = 0; |
78 | ca->tcp_cwnd = 0; | 115 | ca->tcp_cwnd = 0; |
116 | ca->found = 0; | ||
117 | } | ||
118 | |||
119 | static inline void bictcp_hystart_reset(struct sock *sk) | ||
120 | { | ||
121 | struct tcp_sock *tp = tcp_sk(sk); | ||
122 | struct bictcp *ca = inet_csk_ca(sk); | ||
123 | |||
124 | ca->round_start = ca->last_jiffies = jiffies; | ||
125 | ca->end_seq = tp->snd_nxt; | ||
126 | ca->curr_rtt = 0; | ||
127 | ca->sample_cnt = 0; | ||
79 | } | 128 | } |
80 | 129 | ||
81 | static void bictcp_init(struct sock *sk) | 130 | static void bictcp_init(struct sock *sk) |
82 | { | 131 | { |
83 | bictcp_reset(inet_csk_ca(sk)); | 132 | bictcp_reset(inet_csk_ca(sk)); |
84 | if (initial_ssthresh) | 133 | |
134 | if (hystart) | ||
135 | bictcp_hystart_reset(sk); | ||
136 | |||
137 | if (!hystart && initial_ssthresh) | ||
85 | tcp_sk(sk)->snd_ssthresh = initial_ssthresh; | 138 | tcp_sk(sk)->snd_ssthresh = initial_ssthresh; |
86 | } | 139 | } |
87 | 140 | ||
@@ -235,9 +288,11 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) | |||
235 | if (!tcp_is_cwnd_limited(sk, in_flight)) | 288 | if (!tcp_is_cwnd_limited(sk, in_flight)) |
236 | return; | 289 | return; |
237 | 290 | ||
238 | if (tp->snd_cwnd <= tp->snd_ssthresh) | 291 | if (tp->snd_cwnd <= tp->snd_ssthresh) { |
292 | if (hystart && after(ack, ca->end_seq)) | ||
293 | bictcp_hystart_reset(sk); | ||
239 | tcp_slow_start(tp); | 294 | tcp_slow_start(tp); |
240 | else { | 295 | } else { |
241 | bictcp_update(ca, tp->snd_cwnd); | 296 | bictcp_update(ca, tp->snd_cwnd); |
242 | 297 | ||
243 | /* In dangerous area, increase slowly. | 298 | /* In dangerous area, increase slowly. |
@@ -281,8 +336,45 @@ static u32 bictcp_undo_cwnd(struct sock *sk) | |||
281 | 336 | ||
282 | static void bictcp_state(struct sock *sk, u8 new_state) | 337 | static void bictcp_state(struct sock *sk, u8 new_state) |
283 | { | 338 | { |
284 | if (new_state == TCP_CA_Loss) | 339 | if (new_state == TCP_CA_Loss) { |
285 | bictcp_reset(inet_csk_ca(sk)); | 340 | bictcp_reset(inet_csk_ca(sk)); |
341 | bictcp_hystart_reset(sk); | ||
342 | } | ||
343 | } | ||
344 | |||
345 | static void hystart_update(struct sock *sk, u32 delay) | ||
346 | { | ||
347 | struct tcp_sock *tp = tcp_sk(sk); | ||
348 | struct bictcp *ca = inet_csk_ca(sk); | ||
349 | |||
350 | if (!(ca->found & hystart_detect)) { | ||
351 | u32 curr_jiffies = jiffies; | ||
352 | |||
353 | /* first detection parameter - ack-train detection */ | ||
354 | if (curr_jiffies - ca->last_jiffies <= msecs_to_jiffies(2)) { | ||
355 | ca->last_jiffies = curr_jiffies; | ||
356 | if (curr_jiffies - ca->round_start >= ca->delay_min>>4) | ||
357 | ca->found |= HYSTART_ACK_TRAIN; | ||
358 | } | ||
359 | |||
360 | /* obtain the minimum delay over the first HYSTART_MIN_SAMPLES packets */ | ||
361 | if (ca->sample_cnt < HYSTART_MIN_SAMPLES) { | ||
362 | if (ca->curr_rtt == 0 || ca->curr_rtt > delay) | ||
363 | ca->curr_rtt = delay; | ||
364 | |||
365 | ca->sample_cnt++; | ||
366 | } else { | ||
367 | if (ca->curr_rtt > ca->delay_min + | ||
368 | HYSTART_DELAY_THRESH(ca->delay_min>>4)) | ||
369 | ca->found |= HYSTART_DELAY; | ||
370 | } | ||
371 | /* | ||
372 | * If either of the two conditions is met, | ||
373 | * we exit from slow start immediately. | ||
374 | */ | ||
375 | if (ca->found & hystart_detect) | ||
376 | tp->snd_ssthresh = tp->snd_cwnd; | ||
377 | } | ||
286 | } | 378 | } |
287 | 379 | ||
288 | /* Track delayed acknowledgment ratio using sliding window | 380 | /* Track delayed acknowledgment ratio using sliding window |
@@ -291,6 +383,7 @@ static void bictcp_state(struct sock *sk, u8 new_state) | |||
291 | static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) | 383 | static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) |
292 | { | 384 | { |
293 | const struct inet_connection_sock *icsk = inet_csk(sk); | 385 | const struct inet_connection_sock *icsk = inet_csk(sk); |
386 | const struct tcp_sock *tp = tcp_sk(sk); | ||
294 | struct bictcp *ca = inet_csk_ca(sk); | 387 | struct bictcp *ca = inet_csk_ca(sk); |
295 | u32 delay; | 388 | u32 delay; |
296 | 389 | ||
@@ -314,6 +407,11 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) | |||
314 | /* first time call or link delay decreases */ | 407 | /* first time call or link delay decreases */ |
315 | if (ca->delay_min == 0 || ca->delay_min > delay) | 408 | if (ca->delay_min == 0 || ca->delay_min > delay) |
316 | ca->delay_min = delay; | 409 | ca->delay_min = delay; |
410 | |||
411 | /* hystart triggers when cwnd is larger than some threshold */ | ||
412 | if (hystart && tp->snd_cwnd <= tp->snd_ssthresh && | ||
413 | tp->snd_cwnd >= hystart_low_window) | ||
414 | hystart_update(sk, delay); | ||
317 | } | 415 | } |
318 | 416 | ||
319 | static struct tcp_congestion_ops cubictcp = { | 417 | static struct tcp_congestion_ops cubictcp = { |
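Taken together, `hystart_update()` runs on each ACK while the connection is still in slow start and `snd_cwnd` is at least `hystart_low_window` (16 by default), and it has two independent exit detectors: an ACK-train test, where ACKs spaced no more than 2 ms apart extend a train and the train having stretched at least `delay_min/16` past the start of the round sets `HYSTART_ACK_TRAIN`; and a delay test, where the first `HYSTART_MIN_SAMPLES` (8) ACKs of a round establish the round's minimum RTT and a later value exceeding `delay_min` by the clamped threshold sets `HYSTART_DELAY`. Either detector copies `snd_cwnd` into `snd_ssthresh`, ending slow start, and `bictcp_hystart_reset()` re-arms a round whenever an ACK passes `end_seq`. Below is a userspace model of the two tests; the time units, the 2-unit ACK spacing, and the synthetic RTT trace are illustrative assumptions, not the kernel's exact arithmetic:

```c
#include <stdint.h>
#include <stdio.h>

#define HYSTART_ACK_TRAIN   0x1
#define HYSTART_DELAY       0x2
#define HYSTART_MIN_SAMPLES 8

struct model {
	uint32_t round_start;   /* time the current round began */
	uint32_t last_ack;      /* time of the previous closely-spaced ACK */
	uint32_t delay_min;     /* smallest RTT ever seen */
	uint32_t curr_rtt;      /* smallest RTT of this round */
	uint32_t sample_cnt;
	uint32_t found;         /* which detectors have fired */
};

static uint32_t clampu(uint32_t x, uint32_t lo, uint32_t hi)
{
	return x < lo ? lo : (x > hi ? hi : x);
}

/* One ACK arriving at time `now` carrying round-trip sample `rtt`. */
static int on_ack(struct model *m, uint32_t now, uint32_t rtt)
{
	/* ACK-train test: ACKs spaced <= 2 time units keep the train alive;
	 * a train at least delay_min/16 long means the pipe is filled. */
	if (now - m->last_ack <= 2) {
		m->last_ack = now;
		if (now - m->round_start >= m->delay_min / 16)
			m->found |= HYSTART_ACK_TRAIN;
	}

	/* Delay test: take the round's minimum over the first 8 samples,
	 * then compare it against delay_min plus a clamped threshold. */
	if (m->sample_cnt < HYSTART_MIN_SAMPLES) {
		if (m->curr_rtt == 0 || m->curr_rtt > rtt)
			m->curr_rtt = rtt;
		m->sample_cnt++;
	} else if (m->curr_rtt > m->delay_min +
			clampu(m->delay_min / 16, 16, 128)) {
		m->found |= HYSTART_DELAY;
	}

	return m->found != 0;   /* caller would set ssthresh = cwnd */
}

int main(void)
{
	struct model m = { .round_start = 0, .last_ack = 0, .delay_min = 100 };
	uint32_t rtt = 100;
	int exited = 0;

	/* RTT grows each ACK, as it would once the bottleneck queue builds. */
	for (uint32_t t = 0; t < 20 && !exited; t++, rtt += 5)
		exited = on_ack(&m, t, rtt);

	printf("exit=%d found=0x%x\n", exited, (unsigned)m.found);
	return 0;
}
```

With these inputs the ACK-train test fires first; a trace with widely spaced ACKs but steadily rising RTTs would instead exit through the delay test.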
@@ -372,4 +470,4 @@ module_exit(cubictcp_unregister); | |||
372 | MODULE_AUTHOR("Sangtae Ha, Stephen Hemminger"); | 470 | MODULE_AUTHOR("Sangtae Ha, Stephen Hemminger"); |
373 | MODULE_LICENSE("GPL"); | 471 | MODULE_LICENSE("GPL"); |
374 | MODULE_DESCRIPTION("CUBIC TCP"); | 472 | MODULE_DESCRIPTION("CUBIC TCP"); |
375 | MODULE_VERSION("2.2"); | 473 | MODULE_VERSION("2.3"); |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index d77c0d29e239..097294b7da3e 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -2336,9 +2336,9 @@ static void DBGUNDO(struct sock *sk, const char *msg) | |||
2336 | struct inet_sock *inet = inet_sk(sk); | 2336 | struct inet_sock *inet = inet_sk(sk); |
2337 | 2337 | ||
2338 | if (sk->sk_family == AF_INET) { | 2338 | if (sk->sk_family == AF_INET) { |
2339 | printk(KERN_DEBUG "Undo %s " NIPQUAD_FMT "/%u c%u l%u ss%u/%u p%u\n", | 2339 | printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", |
2340 | msg, | 2340 | msg, |
2341 | NIPQUAD(inet->daddr), ntohs(inet->dport), | 2341 | &inet->daddr, ntohs(inet->dport), |
2342 | tp->snd_cwnd, tcp_left_out(tp), | 2342 | tp->snd_cwnd, tcp_left_out(tp), |
2343 | tp->snd_ssthresh, tp->prior_ssthresh, | 2343 | tp->snd_ssthresh, tp->prior_ssthresh, |
2344 | tp->packets_out); | 2344 | tp->packets_out); |
@@ -2346,9 +2346,9 @@ static void DBGUNDO(struct sock *sk, const char *msg) | |||
2346 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 2346 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
2347 | else if (sk->sk_family == AF_INET6) { | 2347 | else if (sk->sk_family == AF_INET6) { |
2348 | struct ipv6_pinfo *np = inet6_sk(sk); | 2348 | struct ipv6_pinfo *np = inet6_sk(sk); |
2349 | printk(KERN_DEBUG "Undo %s " NIP6_FMT "/%u c%u l%u ss%u/%u p%u\n", | 2349 | printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", |
2350 | msg, | 2350 | msg, |
2351 | NIP6(np->daddr), ntohs(inet->dport), | 2351 | &np->daddr, ntohs(inet->dport), |
2352 | tp->snd_cwnd, tcp_left_out(tp), | 2352 | tp->snd_cwnd, tcp_left_out(tp), |
2353 | tp->snd_ssthresh, tp->prior_ssthresh, | 2353 | tp->snd_ssthresh, tp->prior_ssthresh, |
2354 | tp->packets_out); | 2354 | tp->packets_out); |
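Throughout this series the NIPQUAD/NIP6 macro pairs, which expanded an address into four or sixteen separate printk arguments, are replaced by the %pI4/%pI6 format extensions, which take a single pointer to the address in network byte order and are expanded inside vsnprintf. A minimal kernel-style sketch of the new usage (not tied to any particular call site in this patch, and only compilable in a kernel tree):

```c
#include <linux/kernel.h>
#include <linux/in.h>

/* %pI4 expects a pointer to a big-endian 32-bit address, %pI6 a pointer
 * to a 16-byte IPv6 address; both are dereferenced by vsnprintf itself. */
static void log_peer(const __be32 *daddr, __be16 dport)
{
	printk(KERN_DEBUG "peer %pI4:%u\n", daddr, ntohs(dport));
}
```

Besides being shorter, the pointer form keeps the format string on one line, which is what most of the hunks above collapse down to.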
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 5c8fa7f1e327..d49233f409b5 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -1139,10 +1139,9 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb) | |||
1139 | 1139 | ||
1140 | if (genhash || memcmp(hash_location, newhash, 16) != 0) { | 1140 | if (genhash || memcmp(hash_location, newhash, 16) != 0) { |
1141 | if (net_ratelimit()) { | 1141 | if (net_ratelimit()) { |
1142 | printk(KERN_INFO "MD5 Hash failed for " | 1142 | printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n", |
1143 | "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n", | 1143 | &iph->saddr, ntohs(th->source), |
1144 | NIPQUAD(iph->saddr), ntohs(th->source), | 1144 | &iph->daddr, ntohs(th->dest), |
1145 | NIPQUAD(iph->daddr), ntohs(th->dest), | ||
1146 | genhash ? " tcp_v4_calc_md5_hash failed" : ""); | 1145 | genhash ? " tcp_v4_calc_md5_hash failed" : ""); |
1147 | } | 1146 | } |
1148 | return 1; | 1147 | return 1; |
@@ -1297,10 +1296,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1297 | * to destinations, already remembered | 1296 | * to destinations, already remembered |
1298 | * to the moment of synflood. | 1297 | * to the moment of synflood. |
1299 | */ | 1298 | */ |
1300 | LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open " | 1299 | LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n", |
1301 | "request from " NIPQUAD_FMT "/%u\n", | 1300 | &saddr, ntohs(tcp_hdr(skb)->source)); |
1302 | NIPQUAD(saddr), | ||
1303 | ntohs(tcp_hdr(skb)->source)); | ||
1304 | goto drop_and_release; | 1301 | goto drop_and_release; |
1305 | } | 1302 | } |
1306 | 1303 | ||
@@ -1877,7 +1874,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur) | |||
1877 | struct inet_connection_sock *icsk; | 1874 | struct inet_connection_sock *icsk; |
1878 | struct hlist_node *node; | 1875 | struct hlist_node *node; |
1879 | struct sock *sk = cur; | 1876 | struct sock *sk = cur; |
1880 | struct tcp_iter_state* st = seq->private; | 1877 | struct tcp_iter_state *st = seq->private; |
1881 | struct net *net = seq_file_net(seq); | 1878 | struct net *net = seq_file_net(seq); |
1882 | 1879 | ||
1883 | if (!sk) { | 1880 | if (!sk) { |
@@ -1963,7 +1960,7 @@ static inline int empty_bucket(struct tcp_iter_state *st) | |||
1963 | 1960 | ||
1964 | static void *established_get_first(struct seq_file *seq) | 1961 | static void *established_get_first(struct seq_file *seq) |
1965 | { | 1962 | { |
1966 | struct tcp_iter_state* st = seq->private; | 1963 | struct tcp_iter_state *st = seq->private; |
1967 | struct net *net = seq_file_net(seq); | 1964 | struct net *net = seq_file_net(seq); |
1968 | void *rc = NULL; | 1965 | void *rc = NULL; |
1969 | 1966 | ||
@@ -2008,7 +2005,7 @@ static void *established_get_next(struct seq_file *seq, void *cur) | |||
2008 | struct sock *sk = cur; | 2005 | struct sock *sk = cur; |
2009 | struct inet_timewait_sock *tw; | 2006 | struct inet_timewait_sock *tw; |
2010 | struct hlist_node *node; | 2007 | struct hlist_node *node; |
2011 | struct tcp_iter_state* st = seq->private; | 2008 | struct tcp_iter_state *st = seq->private; |
2012 | struct net *net = seq_file_net(seq); | 2009 | struct net *net = seq_file_net(seq); |
2013 | 2010 | ||
2014 | ++st->num; | 2011 | ++st->num; |
@@ -2067,7 +2064,7 @@ static void *established_get_idx(struct seq_file *seq, loff_t pos) | |||
2067 | static void *tcp_get_idx(struct seq_file *seq, loff_t pos) | 2064 | static void *tcp_get_idx(struct seq_file *seq, loff_t pos) |
2068 | { | 2065 | { |
2069 | void *rc; | 2066 | void *rc; |
2070 | struct tcp_iter_state* st = seq->private; | 2067 | struct tcp_iter_state *st = seq->private; |
2071 | 2068 | ||
2072 | inet_listen_lock(&tcp_hashinfo); | 2069 | inet_listen_lock(&tcp_hashinfo); |
2073 | st->state = TCP_SEQ_STATE_LISTENING; | 2070 | st->state = TCP_SEQ_STATE_LISTENING; |
@@ -2084,7 +2081,7 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos) | |||
2084 | 2081 | ||
2085 | static void *tcp_seq_start(struct seq_file *seq, loff_t *pos) | 2082 | static void *tcp_seq_start(struct seq_file *seq, loff_t *pos) |
2086 | { | 2083 | { |
2087 | struct tcp_iter_state* st = seq->private; | 2084 | struct tcp_iter_state *st = seq->private; |
2088 | st->state = TCP_SEQ_STATE_LISTENING; | 2085 | st->state = TCP_SEQ_STATE_LISTENING; |
2089 | st->num = 0; | 2086 | st->num = 0; |
2090 | return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; | 2087 | return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; |
@@ -2093,7 +2090,7 @@ static void *tcp_seq_start(struct seq_file *seq, loff_t *pos) | |||
2093 | static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 2090 | static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
2094 | { | 2091 | { |
2095 | void *rc = NULL; | 2092 | void *rc = NULL; |
2096 | struct tcp_iter_state* st; | 2093 | struct tcp_iter_state *st; |
2097 | 2094 | ||
2098 | if (v == SEQ_START_TOKEN) { | 2095 | if (v == SEQ_START_TOKEN) { |
2099 | rc = tcp_get_idx(seq, 0); | 2096 | rc = tcp_get_idx(seq, 0); |
@@ -2123,7 +2120,7 @@ out: | |||
2123 | 2120 | ||
2124 | static void tcp_seq_stop(struct seq_file *seq, void *v) | 2121 | static void tcp_seq_stop(struct seq_file *seq, void *v) |
2125 | { | 2122 | { |
2126 | struct tcp_iter_state* st = seq->private; | 2123 | struct tcp_iter_state *st = seq->private; |
2127 | 2124 | ||
2128 | switch (st->state) { | 2125 | switch (st->state) { |
2129 | case TCP_SEQ_STATE_OPENREQ: | 2126 | case TCP_SEQ_STATE_OPENREQ: |
@@ -2284,7 +2281,7 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw, | |||
2284 | 2281 | ||
2285 | static int tcp4_seq_show(struct seq_file *seq, void *v) | 2282 | static int tcp4_seq_show(struct seq_file *seq, void *v) |
2286 | { | 2283 | { |
2287 | struct tcp_iter_state* st; | 2284 | struct tcp_iter_state *st; |
2288 | int len; | 2285 | int len; |
2289 | 2286 | ||
2290 | if (v == SEQ_START_TOKEN) { | 2287 | if (v == SEQ_START_TOKEN) { |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 779f2e9d0689..f67effbb102b 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -491,7 +491,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, | |||
491 | * as a request_sock. | 491 | * as a request_sock. |
492 | */ | 492 | */ |
493 | 493 | ||
494 | struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, | 494 | struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, |
495 | struct request_sock *req, | 495 | struct request_sock *req, |
496 | struct request_sock **prev) | 496 | struct request_sock **prev) |
497 | { | 497 | { |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index ba85d8831893..a524627923ae 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -42,7 +42,7 @@ | |||
42 | /* People can turn this off for buggy TCP's found in printers etc. */ | 42 | /* People can turn this off for buggy TCP's found in printers etc. */ |
43 | int sysctl_tcp_retrans_collapse __read_mostly = 1; | 43 | int sysctl_tcp_retrans_collapse __read_mostly = 1; |
44 | 44 | ||
45 | /* People can turn this on to work with those rare, broken TCPs that | 45 | /* People can turn this on to work with those rare, broken TCPs that |
46 | * interpret the window field as a signed quantity. | 46 | * interpret the window field as a signed quantity. |
47 | */ | 47 | */ |
48 | int sysctl_tcp_workaround_signed_windows __read_mostly = 0; | 48 | int sysctl_tcp_workaround_signed_windows __read_mostly = 0; |
@@ -484,7 +484,7 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, | |||
484 | } | 484 | } |
485 | if (likely(sysctl_tcp_window_scaling)) { | 485 | if (likely(sysctl_tcp_window_scaling)) { |
486 | opts->ws = tp->rx_opt.rcv_wscale; | 486 | opts->ws = tp->rx_opt.rcv_wscale; |
487 | if(likely(opts->ws)) | 487 | if (likely(opts->ws)) |
488 | size += TCPOLEN_WSCALE_ALIGNED; | 488 | size += TCPOLEN_WSCALE_ALIGNED; |
489 | } | 489 | } |
490 | if (likely(sysctl_tcp_sack)) { | 490 | if (likely(sysctl_tcp_sack)) { |
@@ -526,7 +526,7 @@ static unsigned tcp_synack_options(struct sock *sk, | |||
526 | 526 | ||
527 | if (likely(ireq->wscale_ok)) { | 527 | if (likely(ireq->wscale_ok)) { |
528 | opts->ws = ireq->rcv_wscale; | 528 | opts->ws = ireq->rcv_wscale; |
529 | if(likely(opts->ws)) | 529 | if (likely(opts->ws)) |
530 | size += TCPOLEN_WSCALE_ALIGNED; | 530 | size += TCPOLEN_WSCALE_ALIGNED; |
531 | } | 531 | } |
532 | if (likely(doing_ts)) { | 532 | if (likely(doing_ts)) { |
@@ -1172,7 +1172,7 @@ static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, | |||
1172 | 1172 | ||
1173 | static inline int tcp_minshall_check(const struct tcp_sock *tp) | 1173 | static inline int tcp_minshall_check(const struct tcp_sock *tp) |
1174 | { | 1174 | { |
1175 | return after(tp->snd_sml,tp->snd_una) && | 1175 | return after(tp->snd_sml, tp->snd_una) && |
1176 | !after(tp->snd_sml, tp->snd_nxt); | 1176 | !after(tp->snd_sml, tp->snd_nxt); |
1177 | } | 1177 | } |
1178 | 1178 | ||
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index 7ddc30f0744f..25524d4e372a 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c | |||
@@ -153,12 +153,11 @@ static int tcpprobe_sprint(char *tbuf, int n) | |||
153 | = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start)); | 153 | = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start)); |
154 | 154 | ||
155 | return snprintf(tbuf, n, | 155 | return snprintf(tbuf, n, |
156 | "%lu.%09lu " NIPQUAD_FMT ":%u " NIPQUAD_FMT ":%u" | 156 | "%lu.%09lu %pI4:%u %pI4:%u %d %#x %#x %u %u %u %u\n", |
157 | " %d %#x %#x %u %u %u %u\n", | ||
158 | (unsigned long) tv.tv_sec, | 157 | (unsigned long) tv.tv_sec, |
159 | (unsigned long) tv.tv_nsec, | 158 | (unsigned long) tv.tv_nsec, |
160 | NIPQUAD(p->saddr), ntohs(p->sport), | 159 | &p->saddr, ntohs(p->sport), |
161 | NIPQUAD(p->daddr), ntohs(p->dport), | 160 | &p->daddr, ntohs(p->dport), |
162 | p->length, p->snd_nxt, p->snd_una, | 161 | p->length, p->snd_nxt, p->snd_una, |
163 | p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt); | 162 | p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt); |
164 | } | 163 | } |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 6b6dff1164b9..3df339e3e363 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -171,7 +171,7 @@ static int tcp_write_timeout(struct sock *sk) | |||
171 | 171 | ||
172 | static void tcp_delack_timer(unsigned long data) | 172 | static void tcp_delack_timer(unsigned long data) |
173 | { | 173 | { |
174 | struct sock *sk = (struct sock*)data; | 174 | struct sock *sk = (struct sock *)data; |
175 | struct tcp_sock *tp = tcp_sk(sk); | 175 | struct tcp_sock *tp = tcp_sk(sk); |
176 | struct inet_connection_sock *icsk = inet_csk(sk); | 176 | struct inet_connection_sock *icsk = inet_csk(sk); |
177 | 177 | ||
@@ -299,15 +299,15 @@ static void tcp_retransmit_timer(struct sock *sk) | |||
299 | #ifdef TCP_DEBUG | 299 | #ifdef TCP_DEBUG |
300 | struct inet_sock *inet = inet_sk(sk); | 300 | struct inet_sock *inet = inet_sk(sk); |
301 | if (sk->sk_family == AF_INET) { | 301 | if (sk->sk_family == AF_INET) { |
302 | LIMIT_NETDEBUG(KERN_DEBUG "TCP: Treason uncloaked! Peer " NIPQUAD_FMT ":%u/%u shrinks window %u:%u. Repaired.\n", | 302 | LIMIT_NETDEBUG(KERN_DEBUG "TCP: Treason uncloaked! Peer %pI4:%u/%u shrinks window %u:%u. Repaired.\n", |
303 | NIPQUAD(inet->daddr), ntohs(inet->dport), | 303 | &inet->daddr, ntohs(inet->dport), |
304 | inet->num, tp->snd_una, tp->snd_nxt); | 304 | inet->num, tp->snd_una, tp->snd_nxt); |
305 | } | 305 | } |
306 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 306 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
307 | else if (sk->sk_family == AF_INET6) { | 307 | else if (sk->sk_family == AF_INET6) { |
308 | struct ipv6_pinfo *np = inet6_sk(sk); | 308 | struct ipv6_pinfo *np = inet6_sk(sk); |
309 | LIMIT_NETDEBUG(KERN_DEBUG "TCP: Treason uncloaked! Peer " NIP6_FMT ":%u/%u shrinks window %u:%u. Repaired.\n", | 309 | LIMIT_NETDEBUG(KERN_DEBUG "TCP: Treason uncloaked! Peer %pI6:%u/%u shrinks window %u:%u. Repaired.\n", |
310 | NIP6(np->daddr), ntohs(inet->dport), | 310 | &np->daddr, ntohs(inet->dport), |
311 | inet->num, tp->snd_una, tp->snd_nxt); | 311 | inet->num, tp->snd_una, tp->snd_nxt); |
312 | } | 312 | } |
313 | #endif | 313 | #endif |
@@ -396,7 +396,7 @@ out:; | |||
396 | 396 | ||
397 | static void tcp_write_timer(unsigned long data) | 397 | static void tcp_write_timer(unsigned long data) |
398 | { | 398 | { |
399 | struct sock *sk = (struct sock*)data; | 399 | struct sock *sk = (struct sock *)data; |
400 | struct inet_connection_sock *icsk = inet_csk(sk); | 400 | struct inet_connection_sock *icsk = inet_csk(sk); |
401 | int event; | 401 | int event; |
402 | 402 | ||
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c index e03b10183a8b..9ec843a9bbb2 100644 --- a/net/ipv4/tcp_yeah.c +++ b/net/ipv4/tcp_yeah.c | |||
@@ -83,7 +83,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) | |||
83 | else if (!yeah->doing_reno_now) { | 83 | else if (!yeah->doing_reno_now) { |
84 | /* Scalable */ | 84 | /* Scalable */ |
85 | 85 | ||
86 | tp->snd_cwnd_cnt+=yeah->pkts_acked; | 86 | tp->snd_cwnd_cnt += yeah->pkts_acked; |
87 | if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)){ | 87 | if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)){ |
88 | if (tp->snd_cwnd < tp->snd_cwnd_clamp) | 88 | if (tp->snd_cwnd < tp->snd_cwnd_clamp) |
89 | tp->snd_cwnd++; | 89 | tp->snd_cwnd++; |
@@ -224,7 +224,7 @@ static u32 tcp_yeah_ssthresh(struct sock *sk) { | |||
224 | 224 | ||
225 | reduction = max( reduction, tp->snd_cwnd >> TCP_YEAH_DELTA); | 225 | reduction = max( reduction, tp->snd_cwnd >> TCP_YEAH_DELTA); |
226 | } else | 226 | } else |
227 | reduction = max(tp->snd_cwnd>>1,2U); | 227 | reduction = max(tp->snd_cwnd>>1, 2U); |
228 | 228 | ||
229 | yeah->fast_count = 0; | 229 | yeah->fast_count = 0; |
230 | yeah->reno_count = max(yeah->reno_count>>1, 2U); | 230 | yeah->reno_count = max(yeah->reno_count>>1, 2U); |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index cf02701ced48..54badc9a019d 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -81,6 +81,8 @@ | |||
81 | #include <asm/uaccess.h> | 81 | #include <asm/uaccess.h> |
82 | #include <asm/ioctls.h> | 82 | #include <asm/ioctls.h> |
83 | #include <linux/bootmem.h> | 83 | #include <linux/bootmem.h> |
84 | #include <linux/highmem.h> | ||
85 | #include <linux/swap.h> | ||
84 | #include <linux/types.h> | 86 | #include <linux/types.h> |
85 | #include <linux/fcntl.h> | 87 | #include <linux/fcntl.h> |
86 | #include <linux/module.h> | 88 | #include <linux/module.h> |
@@ -104,12 +106,8 @@ | |||
104 | #include <net/xfrm.h> | 106 | #include <net/xfrm.h> |
105 | #include "udp_impl.h" | 107 | #include "udp_impl.h" |
106 | 108 | ||
107 | /* | 109 | struct udp_table udp_table; |
108 | * Snmp MIB for the UDP layer | 110 | EXPORT_SYMBOL(udp_table); |
109 | */ | ||
110 | |||
111 | struct hlist_head udp_hash[UDP_HTABLE_SIZE]; | ||
112 | DEFINE_RWLOCK(udp_hash_lock); | ||
113 | 111 | ||
114 | int sysctl_udp_mem[3] __read_mostly; | 112 | int sysctl_udp_mem[3] __read_mostly; |
115 | int sysctl_udp_rmem_min __read_mostly; | 113 | int sysctl_udp_rmem_min __read_mostly; |
@@ -123,7 +121,7 @@ atomic_t udp_memory_allocated; | |||
123 | EXPORT_SYMBOL(udp_memory_allocated); | 121 | EXPORT_SYMBOL(udp_memory_allocated); |
124 | 122 | ||
125 | static int udp_lib_lport_inuse(struct net *net, __u16 num, | 123 | static int udp_lib_lport_inuse(struct net *net, __u16 num, |
126 | const struct hlist_head udptable[], | 124 | const struct udp_hslot *hslot, |
127 | struct sock *sk, | 125 | struct sock *sk, |
128 | int (*saddr_comp)(const struct sock *sk1, | 126 | int (*saddr_comp)(const struct sock *sk1, |
129 | const struct sock *sk2)) | 127 | const struct sock *sk2)) |
@@ -131,7 +129,7 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num, | |||
131 | struct sock *sk2; | 129 | struct sock *sk2; |
132 | struct hlist_node *node; | 130 | struct hlist_node *node; |
133 | 131 | ||
134 | sk_for_each(sk2, node, &udptable[udp_hashfn(net, num)]) | 132 | sk_for_each(sk2, node, &hslot->head) |
135 | if (net_eq(sock_net(sk2), net) && | 133 | if (net_eq(sock_net(sk2), net) && |
136 | sk2 != sk && | 134 | sk2 != sk && |
137 | sk2->sk_hash == num && | 135 | sk2->sk_hash == num && |
@@ -154,12 +152,11 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, | |||
154 | int (*saddr_comp)(const struct sock *sk1, | 152 | int (*saddr_comp)(const struct sock *sk1, |
155 | const struct sock *sk2 ) ) | 153 | const struct sock *sk2 ) ) |
156 | { | 154 | { |
157 | struct hlist_head *udptable = sk->sk_prot->h.udp_hash; | 155 | struct udp_hslot *hslot; |
156 | struct udp_table *udptable = sk->sk_prot->h.udp_table; | ||
158 | int error = 1; | 157 | int error = 1; |
159 | struct net *net = sock_net(sk); | 158 | struct net *net = sock_net(sk); |
160 | 159 | ||
161 | write_lock_bh(&udp_hash_lock); | ||
162 | |||
163 | if (!snum) { | 160 | if (!snum) { |
164 | int low, high, remaining; | 161 | int low, high, remaining; |
165 | unsigned rand; | 162 | unsigned rand; |
@@ -171,26 +168,39 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, | |||
171 | rand = net_random(); | 168 | rand = net_random(); |
172 | snum = first = rand % remaining + low; | 169 | snum = first = rand % remaining + low; |
173 | rand |= 1; | 170 | rand |= 1; |
174 | while (udp_lib_lport_inuse(net, snum, udptable, sk, | 171 | for (;;) { |
175 | saddr_comp)) { | 172 | hslot = &udptable->hash[udp_hashfn(net, snum)]; |
173 | spin_lock_bh(&hslot->lock); | ||
174 | if (!udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp)) | ||
175 | break; | ||
176 | spin_unlock_bh(&hslot->lock); | ||
176 | do { | 177 | do { |
177 | snum = snum + rand; | 178 | snum = snum + rand; |
178 | } while (snum < low || snum > high); | 179 | } while (snum < low || snum > high); |
179 | if (snum == first) | 180 | if (snum == first) |
180 | goto fail; | 181 | goto fail; |
181 | } | 182 | } |
182 | } else if (udp_lib_lport_inuse(net, snum, udptable, sk, saddr_comp)) | 183 | } else { |
183 | goto fail; | 184 | hslot = &udptable->hash[udp_hashfn(net, snum)]; |
184 | 185 | spin_lock_bh(&hslot->lock); | |
186 | if (udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp)) | ||
187 | goto fail_unlock; | ||
188 | } | ||
185 | inet_sk(sk)->num = snum; | 189 | inet_sk(sk)->num = snum; |
186 | sk->sk_hash = snum; | 190 | sk->sk_hash = snum; |
187 | if (sk_unhashed(sk)) { | 191 | if (sk_unhashed(sk)) { |
188 | sk_add_node(sk, &udptable[udp_hashfn(net, snum)]); | 192 | /* |
193 | * We need that previous write to sk->sk_hash committed | ||
194 | * before write to sk->next done in following add_node() variant | ||
195 | */ | ||
196 | smp_wmb(); | ||
197 | sk_add_node_rcu(sk, &hslot->head); | ||
189 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); | 198 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); |
190 | } | 199 | } |
191 | error = 0; | 200 | error = 0; |
201 | fail_unlock: | ||
202 | spin_unlock_bh(&hslot->lock); | ||
192 | fail: | 203 | fail: |
193 | write_unlock_bh(&udp_hash_lock); | ||
194 | return error; | 204 | return error; |
195 | } | 205 | } |
196 | 206 | ||
@@ -208,63 +218,89 @@ int udp_v4_get_port(struct sock *sk, unsigned short snum) | |||
208 | return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal); | 218 | return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal); |
209 | } | 219 | } |
210 | 220 | ||
221 | static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr, | ||
222 | unsigned short hnum, | ||
223 | __be16 sport, __be32 daddr, __be16 dport, int dif) | ||
224 | { | ||
225 | int score = -1; | ||
226 | |||
227 | if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum && | ||
228 | !ipv6_only_sock(sk)) { | ||
229 | struct inet_sock *inet = inet_sk(sk); | ||
230 | |||
231 | score = (sk->sk_family == PF_INET ? 1 : 0); | ||
232 | if (inet->rcv_saddr) { | ||
233 | if (inet->rcv_saddr != daddr) | ||
234 | return -1; | ||
235 | score += 2; | ||
236 | } | ||
237 | if (inet->daddr) { | ||
238 | if (inet->daddr != saddr) | ||
239 | return -1; | ||
240 | score += 2; | ||
241 | } | ||
242 | if (inet->dport) { | ||
243 | if (inet->dport != sport) | ||
244 | return -1; | ||
245 | score += 2; | ||
246 | } | ||
247 | if (sk->sk_bound_dev_if) { | ||
248 | if (sk->sk_bound_dev_if != dif) | ||
249 | return -1; | ||
250 | score += 2; | ||
251 | } | ||
252 | } | ||
253 | return score; | ||
254 | } | ||
255 | |||
211 | /* UDP is nearly always wildcards out the wazoo, it makes no sense to try | 256 | /* UDP is nearly always wildcards out the wazoo, it makes no sense to try |
212 | * harder than this. -DaveM | 257 | * harder than this. -DaveM |
213 | */ | 258 | */ |
214 | static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, | 259 | static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, |
215 | __be16 sport, __be32 daddr, __be16 dport, | 260 | __be16 sport, __be32 daddr, __be16 dport, |
216 | int dif, struct hlist_head udptable[]) | 261 | int dif, struct udp_table *udptable) |
217 | { | 262 | { |
218 | struct sock *sk, *result = NULL; | 263 | struct sock *sk, *result; |
219 | struct hlist_node *node; | 264 | struct hlist_node *node, *next; |
220 | unsigned short hnum = ntohs(dport); | 265 | unsigned short hnum = ntohs(dport); |
221 | int badness = -1; | 266 | unsigned int hash = udp_hashfn(net, hnum); |
222 | 267 | struct udp_hslot *hslot = &udptable->hash[hash]; | |
223 | read_lock(&udp_hash_lock); | 268 | int score, badness; |
224 | sk_for_each(sk, node, &udptable[udp_hashfn(net, hnum)]) { | 269 | |
225 | struct inet_sock *inet = inet_sk(sk); | 270 | rcu_read_lock(); |
226 | 271 | begin: | |
227 | if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum && | 272 | result = NULL; |
228 | !ipv6_only_sock(sk)) { | 273 | badness = -1; |
229 | int score = (sk->sk_family == PF_INET ? 1 : 0); | 274 | sk_for_each_rcu_safenext(sk, node, &hslot->head, next) { |
230 | if (inet->rcv_saddr) { | 275 | /* |
231 | if (inet->rcv_saddr != daddr) | 276 | * lockless reader, and SLAB_DESTROY_BY_RCU items: |
232 | continue; | 277 | * We must check this item was not moved to another chain |
233 | score+=2; | 278 | */ |
234 | } | 279 | if (udp_hashfn(net, sk->sk_hash) != hash) |
235 | if (inet->daddr) { | 280 | goto begin; |
236 | if (inet->daddr != saddr) | 281 | score = compute_score(sk, net, saddr, hnum, sport, |
237 | continue; | 282 | daddr, dport, dif); |
238 | score+=2; | 283 | if (score > badness) { |
239 | } | 284 | result = sk; |
240 | if (inet->dport) { | 285 | badness = score; |
241 | if (inet->dport != sport) | ||
242 | continue; | ||
243 | score+=2; | ||
244 | } | ||
245 | if (sk->sk_bound_dev_if) { | ||
246 | if (sk->sk_bound_dev_if != dif) | ||
247 | continue; | ||
248 | score+=2; | ||
249 | } | ||
250 | if (score == 9) { | ||
251 | result = sk; | ||
252 | break; | ||
253 | } else if (score > badness) { | ||
254 | result = sk; | ||
255 | badness = score; | ||
256 | } | ||
257 | } | 286 | } |
258 | } | 287 | } |
259 | if (result) | 288 | if (result) { |
260 | sock_hold(result); | 289 | if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt))) |
261 | read_unlock(&udp_hash_lock); | 290 | result = NULL; |
291 | else if (unlikely(compute_score(result, net, saddr, hnum, sport, | ||
292 | daddr, dport, dif) < badness)) { | ||
293 | sock_put(result); | ||
294 | goto begin; | ||
295 | } | ||
296 | } | ||
297 | rcu_read_unlock(); | ||
262 | return result; | 298 | return result; |
263 | } | 299 | } |
264 | 300 | ||
265 | static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, | 301 | static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, |
266 | __be16 sport, __be16 dport, | 302 | __be16 sport, __be16 dport, |
267 | struct hlist_head udptable[]) | 303 | struct udp_table *udptable) |
268 | { | 304 | { |
269 | struct sock *sk; | 305 | struct sock *sk; |
270 | const struct iphdr *iph = ip_hdr(skb); | 306 | const struct iphdr *iph = ip_hdr(skb); |
@@ -280,7 +316,7 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, | |||
280 | struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, | 316 | struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, |
281 | __be32 daddr, __be16 dport, int dif) | 317 | __be32 daddr, __be16 dport, int dif) |
282 | { | 318 | { |
283 | return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, udp_hash); | 319 | return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table); |
284 | } | 320 | } |
285 | EXPORT_SYMBOL_GPL(udp4_lib_lookup); | 321 | EXPORT_SYMBOL_GPL(udp4_lib_lookup); |
286 | 322 | ||
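`__udp4_lib_lookup()` now walks the slot under `rcu_read_lock()` with no bucket lock at all. `compute_score()` ranks every candidate: one point for an AF_INET socket, plus two for each of `rcv_saddr`, `daddr`, `dport` and the bound device that is set and matches (a mismatch on any set field disqualifies the socket), so a fully connected exact match scores 9 and beats any wildcard listener. Because sockets are freed with `SLAB_DESTROY_BY_RCU`, an entry can be recycled into another chain mid-walk, which is why the loop restarts when an entry's hash no longer belongs to this bucket, pins the winner with `atomic_inc_not_zero()`, and re-scores it after pinning, retrying if it changed underneath. The lockless revalidation is hard to demonstrate outside the kernel, so the sketch below models only the scoring rule; the 4-tuple encoding and sample addresses are illustrative:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative 4-tuple state of a candidate UDP socket. */
struct cand {
	uint32_t rcv_saddr, daddr;   /* 0 means "wildcard" */
	uint16_t dport;              /* 0 means "unconnected" */
	int      bound_dev;          /* 0 means "any device" */
};

/* Mirrors the patch's scoring: every set field must match (else -1),
 * and each set-and-matching field adds 2 on top of the base point. */
static int score(const struct cand *c, uint32_t saddr, uint32_t daddr,
		 uint16_t sport, int dif)
{
	int s = 1;                   /* AF_INET socket */

	if (c->rcv_saddr) { if (c->rcv_saddr != daddr) return -1; s += 2; }
	if (c->daddr)     { if (c->daddr != saddr)     return -1; s += 2; }
	if (c->dport)     { if (c->dport != sport)     return -1; s += 2; }
	if (c->bound_dev) { if (c->bound_dev != dif)   return -1; s += 2; }
	return s;
}

int main(void)
{
	struct cand wildcard  = { 0, 0, 0, 0 };
	struct cand connected = { 0x0a000001, 0x0a000002, 53, 2 };

	/* Packet from 10.0.0.2:53 to 10.0.0.1 arriving on ifindex 2. */
	printf("wildcard=%d connected=%d\n",
	       score(&wildcard,  0x0a000002, 0x0a000001, 53, 2),
	       score(&connected, 0x0a000002, 0x0a000001, 53, 2));
	return 0;
}
```

The wildcard listener scores 1 and the connected socket 9, so the connected socket wins, matching the behaviour of the old open-coded loop without its early `score == 9` exit.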
@@ -324,7 +360,7 @@ found: | |||
324 | * to find the appropriate port. | 360 | * to find the appropriate port. |
325 | */ | 361 | */ |
326 | 362 | ||
327 | void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[]) | 363 | void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) |
328 | { | 364 | { |
329 | struct inet_sock *inet; | 365 | struct inet_sock *inet; |
330 | struct iphdr *iph = (struct iphdr*)skb->data; | 366 | struct iphdr *iph = (struct iphdr*)skb->data; |
@@ -393,7 +429,7 @@ out: | |||
393 | 429 | ||
394 | void udp_err(struct sk_buff *skb, u32 info) | 430 | void udp_err(struct sk_buff *skb, u32 info) |
395 | { | 431 | { |
396 | __udp4_lib_err(skb, info, udp_hash); | 432 | __udp4_lib_err(skb, info, &udp_table); |
397 | } | 433 | } |
398 | 434 | ||
399 | /* | 435 | /* |
@@ -934,6 +970,21 @@ int udp_disconnect(struct sock *sk, int flags) | |||
934 | return 0; | 970 | return 0; |
935 | } | 971 | } |
936 | 972 | ||
973 | void udp_lib_unhash(struct sock *sk) | ||
974 | { | ||
975 | struct udp_table *udptable = sk->sk_prot->h.udp_table; | ||
976 | unsigned int hash = udp_hashfn(sock_net(sk), sk->sk_hash); | ||
977 | struct udp_hslot *hslot = &udptable->hash[hash]; | ||
978 | |||
979 | spin_lock_bh(&hslot->lock); | ||
980 | if (sk_del_node_init_rcu(sk)) { | ||
981 | inet_sk(sk)->num = 0; | ||
982 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); | ||
983 | } | ||
984 | spin_unlock_bh(&hslot->lock); | ||
985 | } | ||
986 | EXPORT_SYMBOL(udp_lib_unhash); | ||
987 | |||
937 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 988 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
938 | { | 989 | { |
939 | int is_udplite = IS_UDPLITE(sk); | 990 | int is_udplite = IS_UDPLITE(sk); |
@@ -1072,13 +1123,14 @@ drop: | |||
1072 | static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, | 1123 | static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, |
1073 | struct udphdr *uh, | 1124 | struct udphdr *uh, |
1074 | __be32 saddr, __be32 daddr, | 1125 | __be32 saddr, __be32 daddr, |
1075 | struct hlist_head udptable[]) | 1126 | struct udp_table *udptable) |
1076 | { | 1127 | { |
1077 | struct sock *sk; | 1128 | struct sock *sk; |
1129 | struct udp_hslot *hslot = &udptable->hash[udp_hashfn(net, ntohs(uh->dest))]; | ||
1078 | int dif; | 1130 | int dif; |
1079 | 1131 | ||
1080 | read_lock(&udp_hash_lock); | 1132 | spin_lock(&hslot->lock); |
1081 | sk = sk_head(&udptable[udp_hashfn(net, ntohs(uh->dest))]); | 1133 | sk = sk_head(&hslot->head); |
1082 | dif = skb->dev->ifindex; | 1134 | dif = skb->dev->ifindex; |
1083 | sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); | 1135 | sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); |
1084 | if (sk) { | 1136 | if (sk) { |
@@ -1104,7 +1156,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, | |||
1104 | } while (sknext); | 1156 | } while (sknext); |
1105 | } else | 1157 | } else |
1106 | kfree_skb(skb); | 1158 | kfree_skb(skb); |
1107 | read_unlock(&udp_hash_lock); | 1159 | spin_unlock(&hslot->lock); |
1108 | return 0; | 1160 | return 0; |
1109 | } | 1161 | } |
1110 | 1162 | ||
@@ -1150,7 +1202,7 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, | |||
1150 | * All we need to do is get the socket, and then do a checksum. | 1202 | * All we need to do is get the socket, and then do a checksum. |
1151 | */ | 1203 | */ |
1152 | 1204 | ||
1153 | int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], | 1205 | int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, |
1154 | int proto) | 1206 | int proto) |
1155 | { | 1207 | { |
1156 | struct sock *sk; | 1208 | struct sock *sk; |
@@ -1218,13 +1270,13 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], | |||
1218 | return 0; | 1270 | return 0; |
1219 | 1271 | ||
1220 | short_packet: | 1272 | short_packet: |
1221 | LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From " NIPQUAD_FMT ":%u %d/%d to " NIPQUAD_FMT ":%u\n", | 1273 | LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n", |
1222 | proto == IPPROTO_UDPLITE ? "-Lite" : "", | 1274 | proto == IPPROTO_UDPLITE ? "-Lite" : "", |
1223 | NIPQUAD(saddr), | 1275 | &saddr, |
1224 | ntohs(uh->source), | 1276 | ntohs(uh->source), |
1225 | ulen, | 1277 | ulen, |
1226 | skb->len, | 1278 | skb->len, |
1227 | NIPQUAD(daddr), | 1279 | &daddr, |
1228 | ntohs(uh->dest)); | 1280 | ntohs(uh->dest)); |
1229 | goto drop; | 1281 | goto drop; |
1230 | 1282 | ||
@@ -1233,11 +1285,11 @@ csum_error: | |||
1233 | * RFC1122: OK. Discards the bad packet silently (as far as | 1285 | * RFC1122: OK. Discards the bad packet silently (as far as |
1234 | * the network is concerned, anyway) as per 4.1.3.4 (MUST). | 1286 | * the network is concerned, anyway) as per 4.1.3.4 (MUST). |
1235 | */ | 1287 | */ |
1236 | LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From " NIPQUAD_FMT ":%u to " NIPQUAD_FMT ":%u ulen %d\n", | 1288 | LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n", |
1237 | proto == IPPROTO_UDPLITE ? "-Lite" : "", | 1289 | proto == IPPROTO_UDPLITE ? "-Lite" : "", |
1238 | NIPQUAD(saddr), | 1290 | &saddr, |
1239 | ntohs(uh->source), | 1291 | ntohs(uh->source), |
1240 | NIPQUAD(daddr), | 1292 | &daddr, |
1241 | ntohs(uh->dest), | 1293 | ntohs(uh->dest), |
1242 | ulen); | 1294 | ulen); |
1243 | drop: | 1295 | drop: |
@@ -1248,7 +1300,7 @@ drop: | |||
1248 | 1300 | ||
1249 | int udp_rcv(struct sk_buff *skb) | 1301 | int udp_rcv(struct sk_buff *skb) |
1250 | { | 1302 | { |
1251 | return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP); | 1303 | return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP); |
1252 | } | 1304 | } |
1253 | 1305 | ||
1254 | void udp_destroy_sock(struct sock *sk) | 1306 | void udp_destroy_sock(struct sock *sk) |
@@ -1490,7 +1542,8 @@ struct proto udp_prot = { | |||
1490 | .sysctl_wmem = &sysctl_udp_wmem_min, | 1542 | .sysctl_wmem = &sysctl_udp_wmem_min, |
1491 | .sysctl_rmem = &sysctl_udp_rmem_min, | 1543 | .sysctl_rmem = &sysctl_udp_rmem_min, |
1492 | .obj_size = sizeof(struct udp_sock), | 1544 | .obj_size = sizeof(struct udp_sock), |
1493 | .h.udp_hash = udp_hash, | 1545 | .slab_flags = SLAB_DESTROY_BY_RCU, |
1546 | .h.udp_table = &udp_table, | ||
1494 | #ifdef CONFIG_COMPAT | 1547 | #ifdef CONFIG_COMPAT |
1495 | .compat_setsockopt = compat_udp_setsockopt, | 1548 | .compat_setsockopt = compat_udp_setsockopt, |
1496 | .compat_getsockopt = compat_udp_getsockopt, | 1549 | .compat_getsockopt = compat_udp_getsockopt, |
@@ -1500,20 +1553,23 @@ struct proto udp_prot = { | |||
1500 | /* ------------------------------------------------------------------------ */ | 1553 | /* ------------------------------------------------------------------------ */ |
1501 | #ifdef CONFIG_PROC_FS | 1554 | #ifdef CONFIG_PROC_FS |
1502 | 1555 | ||
1503 | static struct sock *udp_get_first(struct seq_file *seq) | 1556 | static struct sock *udp_get_first(struct seq_file *seq, int start) |
1504 | { | 1557 | { |
1505 | struct sock *sk; | 1558 | struct sock *sk; |
1506 | struct udp_iter_state *state = seq->private; | 1559 | struct udp_iter_state *state = seq->private; |
1507 | struct net *net = seq_file_net(seq); | 1560 | struct net *net = seq_file_net(seq); |
1508 | 1561 | ||
1509 | for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) { | 1562 | for (state->bucket = start; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) { |
1510 | struct hlist_node *node; | 1563 | struct hlist_node *node; |
1511 | sk_for_each(sk, node, state->hashtable + state->bucket) { | 1564 | struct udp_hslot *hslot = &state->udp_table->hash[state->bucket]; |
1565 | spin_lock_bh(&hslot->lock); | ||
1566 | sk_for_each(sk, node, &hslot->head) { | ||
1512 | if (!net_eq(sock_net(sk), net)) | 1567 | if (!net_eq(sock_net(sk), net)) |
1513 | continue; | 1568 | continue; |
1514 | if (sk->sk_family == state->family) | 1569 | if (sk->sk_family == state->family) |
1515 | goto found; | 1570 | goto found; |
1516 | } | 1571 | } |
1572 | spin_unlock_bh(&hslot->lock); | ||
1517 | } | 1573 | } |
1518 | sk = NULL; | 1574 | sk = NULL; |
1519 | found: | 1575 | found: |
@@ -1527,20 +1583,18 @@ static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) | |||
1527 | 1583 | ||
1528 | do { | 1584 | do { |
1529 | sk = sk_next(sk); | 1585 | sk = sk_next(sk); |
1530 | try_again: | ||
1531 | ; | ||
1532 | } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family)); | 1586 | } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family)); |
1533 | 1587 | ||
1534 | if (!sk && ++state->bucket < UDP_HTABLE_SIZE) { | 1588 | if (!sk) { |
1535 | sk = sk_head(state->hashtable + state->bucket); | 1589 | spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); |
1536 | goto try_again; | 1590 | return udp_get_first(seq, state->bucket + 1); |
1537 | } | 1591 | } |
1538 | return sk; | 1592 | return sk; |
1539 | } | 1593 | } |
1540 | 1594 | ||
1541 | static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos) | 1595 | static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos) |
1542 | { | 1596 | { |
1543 | struct sock *sk = udp_get_first(seq); | 1597 | struct sock *sk = udp_get_first(seq, 0); |
1544 | 1598 | ||
1545 | if (sk) | 1599 | if (sk) |
1546 | while (pos && (sk = udp_get_next(seq, sk)) != NULL) | 1600 | while (pos && (sk = udp_get_next(seq, sk)) != NULL) |
@@ -1549,9 +1603,7 @@ static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos) | |||
1549 | } | 1603 | } |
1550 | 1604 | ||
1551 | static void *udp_seq_start(struct seq_file *seq, loff_t *pos) | 1605 | static void *udp_seq_start(struct seq_file *seq, loff_t *pos) |
1552 | __acquires(udp_hash_lock) | ||
1553 | { | 1606 | { |
1554 | read_lock(&udp_hash_lock); | ||
1555 | return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN; | 1607 | return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN; |
1556 | } | 1608 | } |
1557 | 1609 | ||
@@ -1569,9 +1621,11 @@ static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1569 | } | 1621 | } |
1570 | 1622 | ||
1571 | static void udp_seq_stop(struct seq_file *seq, void *v) | 1623 | static void udp_seq_stop(struct seq_file *seq, void *v) |
1572 | __releases(udp_hash_lock) | ||
1573 | { | 1624 | { |
1574 | read_unlock(&udp_hash_lock); | 1625 | struct udp_iter_state *state = seq->private; |
1626 | |||
1627 | if (state->bucket < UDP_HTABLE_SIZE) | ||
1628 | spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); | ||
1575 | } | 1629 | } |
1576 | 1630 | ||
1577 | static int udp_seq_open(struct inode *inode, struct file *file) | 1631 | static int udp_seq_open(struct inode *inode, struct file *file) |
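With the global rwlock gone, /proc/net/udp iteration now takes the spinlock of the bucket it is currently walking in `udp_get_first()`, keeps it held while `udp_get_next()` yields sockets from that chain, drops it when moving on to the next bucket, and `udp_seq_stop()` releases whatever bucket lock is still held (the `bucket < UDP_HTABLE_SIZE` check covers the case where iteration ran off the end of the table). A generic userspace model of that "hold the bucket lock across yields" iterator; the toy table, counts and mutexes are stand-ins, not the kernel structures:

```c
#include <pthread.h>
#include <stdio.h>

#define BUCKETS 4

/* Toy table: each bucket holds a lock and a count of "sockets". */
struct bucket {
	pthread_mutex_t lock;
	int             nsocks;
};

struct iter {
	struct bucket *tbl;
	int            bucket;   /* == BUCKETS once iteration is finished */
	int            idx;      /* position inside the current bucket */
};

/* Find the next entry, locking a bucket before yielding from it. */
static int iter_next(struct iter *it)
{
	while (it->bucket < BUCKETS) {
		struct bucket *b = &it->tbl[it->bucket];

		if (it->idx == 0)
			pthread_mutex_lock(&b->lock);        /* enter bucket */
		if (it->idx < b->nsocks)
			return it->idx++;                    /* yield an entry */
		pthread_mutex_unlock(&b->lock);              /* leave bucket */
		it->bucket++;
		it->idx = 0;
	}
	return -1;
}

/* Like udp_seq_stop(): release the bucket lock if one is still held. */
static void iter_stop(struct iter *it)
{
	if (it->bucket < BUCKETS && it->idx > 0)
		pthread_mutex_unlock(&it->tbl[it->bucket].lock);
}

int main(void)
{
	struct bucket tbl[BUCKETS] = {
		{ PTHREAD_MUTEX_INITIALIZER, 2 }, { PTHREAD_MUTEX_INITIALIZER, 0 },
		{ PTHREAD_MUTEX_INITIALIZER, 1 }, { PTHREAD_MUTEX_INITIALIZER, 0 },
	};
	struct iter it = { tbl, 0, 0 };
	int count = 0;

	while (iter_next(&it) >= 0)
		count++;
	iter_stop(&it);
	printf("visited %d entries\n", count);
	return 0;
}
```

At any moment only one bucket is locked, so a long /proc dump no longer blocks traffic on unrelated buckets the way the old reader lock on `udp_hash_lock` did.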
@@ -1587,7 +1641,7 @@ static int udp_seq_open(struct inode *inode, struct file *file) | |||
1587 | 1641 | ||
1588 | s = ((struct seq_file *)file->private_data)->private; | 1642 | s = ((struct seq_file *)file->private_data)->private; |
1589 | s->family = afinfo->family; | 1643 | s->family = afinfo->family; |
1590 | s->hashtable = afinfo->hashtable; | 1644 | s->udp_table = afinfo->udp_table; |
1591 | return err; | 1645 | return err; |
1592 | } | 1646 | } |
1593 | 1647 | ||
@@ -1659,7 +1713,7 @@ int udp4_seq_show(struct seq_file *seq, void *v) | |||
1659 | static struct udp_seq_afinfo udp4_seq_afinfo = { | 1713 | static struct udp_seq_afinfo udp4_seq_afinfo = { |
1660 | .name = "udp", | 1714 | .name = "udp", |
1661 | .family = AF_INET, | 1715 | .family = AF_INET, |
1662 | .hashtable = udp_hash, | 1716 | .udp_table = &udp_table, |
1663 | .seq_fops = { | 1717 | .seq_fops = { |
1664 | .owner = THIS_MODULE, | 1718 | .owner = THIS_MODULE, |
1665 | }, | 1719 | }, |
@@ -1694,16 +1748,28 @@ void udp4_proc_exit(void) | |||
1694 | } | 1748 | } |
1695 | #endif /* CONFIG_PROC_FS */ | 1749 | #endif /* CONFIG_PROC_FS */ |
1696 | 1750 | ||
1751 | void __init udp_table_init(struct udp_table *table) | ||
1752 | { | ||
1753 | int i; | ||
1754 | |||
1755 | for (i = 0; i < UDP_HTABLE_SIZE; i++) { | ||
1756 | INIT_HLIST_HEAD(&table->hash[i].head); | ||
1757 | spin_lock_init(&table->hash[i].lock); | ||
1758 | } | ||
1759 | } | ||
1760 | |||
1697 | void __init udp_init(void) | 1761 | void __init udp_init(void) |
1698 | { | 1762 | { |
1699 | unsigned long limit; | 1763 | unsigned long nr_pages, limit; |
1700 | 1764 | ||
1765 | udp_table_init(&udp_table); | ||
1701 | /* Set the pressure threshold up by the same strategy of TCP. It is a | 1766 | /* Set the pressure threshold up by the same strategy of TCP. It is a |
1702 | * fraction of global memory that is up to 1/2 at 256 MB, decreasing | 1767 | * fraction of global memory that is up to 1/2 at 256 MB, decreasing |
1703 | * toward zero with the amount of memory, with a floor of 128 pages. | 1768 | * toward zero with the amount of memory, with a floor of 128 pages. |
1704 | */ | 1769 | */ |
1705 | limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); | 1770 | nr_pages = totalram_pages - totalhigh_pages; |
1706 | limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); | 1771 | limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); |
1772 | limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); | ||
1707 | limit = max(limit, 128UL); | 1773 | limit = max(limit, 128UL); |
1708 | sysctl_udp_mem[0] = limit / 4 * 3; | 1774 | sysctl_udp_mem[0] = limit / 4 * 3; |
1709 | sysctl_udp_mem[1] = limit; | 1775 | sysctl_udp_mem[1] = limit; |
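[Annotation] For scale, the pressure-threshold arithmetic in udp_init() above works out as follows on a machine with 4 KiB pages and 256 MB of low memory; the max(limit, 128UL) line keeps a floor of 128 pages on very small machines. The short userspace program below is an illustration of that arithmetic, not kernel code.

/*
 * Reproduces the sysctl_udp_mem sizing above, assuming PAGE_SHIFT == 12
 * (4 KiB pages) and 256 MB of low memory.  Variable names mirror the
 * patch; nothing here is kernel API.
 */
#include <stdio.h>

#define PAGE_SHIFT 12UL

int main(void)
{
	unsigned long nr_pages = 256UL << (20 - PAGE_SHIFT);	/* 256 MB worth of pages */
	unsigned long cap = 1UL << (28 - PAGE_SHIFT);		/* the 256 MB knee */
	unsigned long limit;

	limit = (nr_pages < cap ? nr_pages : cap) >> (20 - PAGE_SHIFT);
	limit = (limit * (nr_pages >> (20 - PAGE_SHIFT))) >> (PAGE_SHIFT - 11);
	if (limit < 128UL)
		limit = 128UL;

	/* On the 256 MB box: limit == 32768 pages == 128 MB, i.e. the
	 * "up to 1/2 at 256 MB" described in the comment above. */
	printf("udp_mem: min %lu, pressure %lu (pages)\n",
	       limit / 4 * 3, limit);
	return 0;
}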
@@ -1714,8 +1780,6 @@ void __init udp_init(void) | |||
1714 | } | 1780 | } |
1715 | 1781 | ||
1716 | EXPORT_SYMBOL(udp_disconnect); | 1782 | EXPORT_SYMBOL(udp_disconnect); |
1717 | EXPORT_SYMBOL(udp_hash); | ||
1718 | EXPORT_SYMBOL(udp_hash_lock); | ||
1719 | EXPORT_SYMBOL(udp_ioctl); | 1783 | EXPORT_SYMBOL(udp_ioctl); |
1720 | EXPORT_SYMBOL(udp_prot); | 1784 | EXPORT_SYMBOL(udp_prot); |
1721 | EXPORT_SYMBOL(udp_sendmsg); | 1785 | EXPORT_SYMBOL(udp_sendmsg); |
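[Annotation] The udp.c changes above drop the exported udp_hash[] array and the global udp_hash_lock rwlock in favour of a struct udp_table whose buckets each pair a list head with their own spinlock, initialised by udp_table_init(). The sketch below is only the layout that init loop implies; the slot type name and the bucket count are assumptions, since the canonical definitions live in include/net/udp.h, which is not part of this diff.

/* Layout implied by udp_table_init() above (illustrative). */
#include <linux/list.h>
#include <linux/spinlock.h>

#define UDP_HTABLE_SIZE 128		/* assumed bucket count */

struct udp_hslot {			/* one hash bucket; name assumed */
	struct hlist_head head;		/* chain of bound sockets */
	spinlock_t	  lock;		/* per-bucket lock, replaces udp_hash_lock */
};

struct udp_table {
	struct udp_hslot hash[UDP_HTABLE_SIZE];
};

Splitting the lock per bucket lets binds, unhashes and /proc walks in different buckets proceed in parallel, which the single udp_hash_lock could not.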
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index 2e9bad2fa1bc..9f4a6165f722 100644 --- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h | |||
@@ -5,8 +5,8 @@ | |||
5 | #include <net/protocol.h> | 5 | #include <net/protocol.h> |
6 | #include <net/inet_common.h> | 6 | #include <net/inet_common.h> |
7 | 7 | ||
8 | extern int __udp4_lib_rcv(struct sk_buff *, struct hlist_head [], int ); | 8 | extern int __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int ); |
9 | extern void __udp4_lib_err(struct sk_buff *, u32, struct hlist_head []); | 9 | extern void __udp4_lib_err(struct sk_buff *, u32, struct udp_table *); |
10 | 10 | ||
11 | extern int udp_v4_get_port(struct sock *sk, unsigned short snum); | 11 | extern int udp_v4_get_port(struct sock *sk, unsigned short snum); |
12 | 12 | ||
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 3c807964da96..c784891cb7e5 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c | |||
@@ -12,16 +12,17 @@ | |||
12 | */ | 12 | */ |
13 | #include "udp_impl.h" | 13 | #include "udp_impl.h" |
14 | 14 | ||
15 | struct hlist_head udplite_hash[UDP_HTABLE_SIZE]; | 15 | struct udp_table udplite_table; |
16 | EXPORT_SYMBOL(udplite_table); | ||
16 | 17 | ||
17 | static int udplite_rcv(struct sk_buff *skb) | 18 | static int udplite_rcv(struct sk_buff *skb) |
18 | { | 19 | { |
19 | return __udp4_lib_rcv(skb, udplite_hash, IPPROTO_UDPLITE); | 20 | return __udp4_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE); |
20 | } | 21 | } |
21 | 22 | ||
22 | static void udplite_err(struct sk_buff *skb, u32 info) | 23 | static void udplite_err(struct sk_buff *skb, u32 info) |
23 | { | 24 | { |
24 | __udp4_lib_err(skb, info, udplite_hash); | 25 | __udp4_lib_err(skb, info, &udplite_table); |
25 | } | 26 | } |
26 | 27 | ||
27 | static struct net_protocol udplite_protocol = { | 28 | static struct net_protocol udplite_protocol = { |
@@ -50,7 +51,8 @@ struct proto udplite_prot = { | |||
50 | .unhash = udp_lib_unhash, | 51 | .unhash = udp_lib_unhash, |
51 | .get_port = udp_v4_get_port, | 52 | .get_port = udp_v4_get_port, |
52 | .obj_size = sizeof(struct udp_sock), | 53 | .obj_size = sizeof(struct udp_sock), |
53 | .h.udp_hash = udplite_hash, | 54 | .slab_flags = SLAB_DESTROY_BY_RCU, |
55 | .h.udp_table = &udplite_table, | ||
54 | #ifdef CONFIG_COMPAT | 56 | #ifdef CONFIG_COMPAT |
55 | .compat_setsockopt = compat_udp_setsockopt, | 57 | .compat_setsockopt = compat_udp_setsockopt, |
56 | .compat_getsockopt = compat_udp_getsockopt, | 58 | .compat_getsockopt = compat_udp_getsockopt, |
@@ -71,7 +73,7 @@ static struct inet_protosw udplite4_protosw = { | |||
71 | static struct udp_seq_afinfo udplite4_seq_afinfo = { | 73 | static struct udp_seq_afinfo udplite4_seq_afinfo = { |
72 | .name = "udplite", | 74 | .name = "udplite", |
73 | .family = AF_INET, | 75 | .family = AF_INET, |
74 | .hashtable = udplite_hash, | 76 | .udp_table = &udplite_table, |
75 | .seq_fops = { | 77 | .seq_fops = { |
76 | .owner = THIS_MODULE, | 78 | .owner = THIS_MODULE, |
77 | }, | 79 | }, |
@@ -108,6 +110,7 @@ static inline int udplite4_proc_init(void) | |||
108 | 110 | ||
109 | void __init udplite4_register(void) | 111 | void __init udplite4_register(void) |
110 | { | 112 | { |
113 | udp_table_init(&udplite_table); | ||
111 | if (proto_register(&udplite_prot, 1)) | 114 | if (proto_register(&udplite_prot, 1)) |
112 | goto out_register_err; | 115 | goto out_register_err; |
113 | 116 | ||
@@ -126,5 +129,4 @@ out_register_err: | |||
126 | printk(KERN_CRIT "%s: Cannot add UDP-Lite protocol.\n", __func__); | 129 | printk(KERN_CRIT "%s: Cannot add UDP-Lite protocol.\n", __func__); |
127 | } | 130 | } |
128 | 131 | ||
129 | EXPORT_SYMBOL(udplite_hash); | ||
130 | EXPORT_SYMBOL(udplite_prot); | 132 | EXPORT_SYMBOL(udplite_prot); |
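[Annotation] Besides pointing at its own struct udp_table, udplite_prot above gains SLAB_DESTROY_BY_RCU. With that slab flag a freed socket can be recycled for a new flow without waiting for an RCU grace period, so any lockless lookup has to pin a candidate and then re-verify it. The fragment below shows that generic discipline only; it is not the literal __udp4_lib_lookup() from this patch, and match_keys(), struct lookup_key and the wrapper function are hypothetical stand-ins for the real address/port/ifindex comparison.

#include <linux/rculist.h>
#include <net/sock.h>

/* Hypothetical key container and comparison helper. */
struct lookup_key;
static bool match_keys(const struct sock *sk, const struct lookup_key *key);

static struct sock *bucket_lookup_rcu(struct hlist_head *head,
				      const struct lookup_key *key)
{
	struct hlist_node *node;
	struct sock *sk;

	rcu_read_lock();
begin:
	hlist_for_each_entry_rcu(sk, node, head, sk_node) {
		if (!match_keys(sk, key))
			continue;
		/* Pin the candidate; a refcount already at zero means the
		 * socket is being freed, so restart the walk. */
		if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
			goto begin;
		/* With SLAB_DESTROY_BY_RCU the object may have been reused
		 * for a different socket in the meantime: check again. */
		if (unlikely(!match_keys(sk, key))) {
			sock_put(sk);
			goto begin;
		}
		rcu_read_unlock();
		return sk;
	}
	rcu_read_unlock();
	return NULL;
}

The trade-off is that sockets can be freed without call_rcu() latency while readers never dereference freed memory, at the cost of the second key check after taking the reference.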
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index c63de0a72aba..f9a775b7e796 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
@@ -65,7 +65,7 @@ __xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy) | |||
65 | 65 | ||
66 | read_lock_bh(&policy->lock); | 66 | read_lock_bh(&policy->lock); |
67 | for (dst = policy->bundles; dst; dst = dst->next) { | 67 | for (dst = policy->bundles; dst; dst = dst->next) { |
68 | struct xfrm_dst *xdst = (struct xfrm_dst*)dst; | 68 | struct xfrm_dst *xdst = (struct xfrm_dst *)dst; |
69 | if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/ | 69 | if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/ |
70 | xdst->u.rt.fl.fl4_dst == fl->fl4_dst && | 70 | xdst->u.rt.fl.fl4_dst == fl->fl4_dst && |
71 | xdst->u.rt.fl.fl4_src == fl->fl4_src && | 71 | xdst->u.rt.fl.fl4_src == fl->fl4_src && |