author	Jiri Kosina <jkosina@suse.cz>	2012-10-28 14:28:52 -0400
committer	Jiri Kosina <jkosina@suse.cz>	2012-10-28 14:29:19 -0400
commit	3bd7bf1f0fe14f591c089ae61bbfa9bd356f178a (patch)
tree	0058693cc9e70b7461dae551f8a19aff2efd13ca /net/ipv4/devinet.c
parent	f16f84937d769c893492160b1a8c3672e3992beb (diff)
parent	e657e078d3dfa9f96976db7a2b5fd7d7c9f1f1a6 (diff)
Merge branch 'master' into for-next
Sync up with Linus' tree to be able to apply Cesar's patch against newer version of the code.

Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Diffstat (limited to 'net/ipv4/devinet.c')
-rw-r--r--	net/ipv4/devinet.c	67
1 file changed, 32 insertions, 35 deletions
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index e12fad773852..2a6abc163ed2 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -94,25 +94,22 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
 	[IFA_LABEL]		= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
 };
 
-/* inet_addr_hash's shifting is dependent upon this IN4_ADDR_HSIZE
- * value. So if you change this define, make appropriate changes to
- * inet_addr_hash as well.
- */
-#define IN4_ADDR_HSIZE		256
+#define IN4_ADDR_HSIZE_SHIFT	8
+#define IN4_ADDR_HSIZE		(1U << IN4_ADDR_HSIZE_SHIFT)
+
 static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
 static DEFINE_SPINLOCK(inet_addr_hash_lock);
 
-static inline unsigned int inet_addr_hash(struct net *net, __be32 addr)
+static u32 inet_addr_hash(struct net *net, __be32 addr)
 {
-	u32 val = (__force u32) addr ^ hash_ptr(net, 8);
+	u32 val = (__force u32) addr ^ net_hash_mix(net);
 
-	return ((val ^ (val >> 8) ^ (val >> 16) ^ (val >> 24)) &
-		(IN4_ADDR_HSIZE - 1));
+	return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
 }
 
 static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
 {
-	unsigned int hash = inet_addr_hash(net, ifa->ifa_local);
+	u32 hash = inet_addr_hash(net, ifa->ifa_local);
 
 	spin_lock(&inet_addr_hash_lock);
 	hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
@@ -136,18 +133,18 @@ static void inet_hash_remove(struct in_ifaddr *ifa)
  */
 struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
 {
-	unsigned int hash = inet_addr_hash(net, addr);
+	u32 hash = inet_addr_hash(net, addr);
 	struct net_device *result = NULL;
 	struct in_ifaddr *ifa;
 	struct hlist_node *node;
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) {
-		struct net_device *dev = ifa->ifa_dev->dev;
-
-		if (!net_eq(dev_net(dev), net))
-			continue;
 		if (ifa->ifa_local == addr) {
+			struct net_device *dev = ifa->ifa_dev->dev;
+
+			if (!net_eq(dev_net(dev), net))
+				continue;
 			result = dev;
 			break;
 		}
@@ -182,10 +179,10 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 static void devinet_sysctl_register(struct in_device *idev);
 static void devinet_sysctl_unregister(struct in_device *idev);
 #else
-static inline void devinet_sysctl_register(struct in_device *idev)
+static void devinet_sysctl_register(struct in_device *idev)
 {
 }
-static inline void devinet_sysctl_unregister(struct in_device *idev)
+static void devinet_sysctl_unregister(struct in_device *idev)
 {
 }
 #endif
@@ -205,7 +202,7 @@ static void inet_rcu_free_ifa(struct rcu_head *head)
 	kfree(ifa);
 }
 
-static inline void inet_free_ifa(struct in_ifaddr *ifa)
+static void inet_free_ifa(struct in_ifaddr *ifa)
 {
 	call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
 }
@@ -314,7 +311,7 @@ int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
 }
 
 static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
-			 int destroy, struct nlmsghdr *nlh, u32 pid)
+			 int destroy, struct nlmsghdr *nlh, u32 portid)
 {
 	struct in_ifaddr *promote = NULL;
 	struct in_ifaddr *ifa, *ifa1 = *ifap;
@@ -348,7 +345,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 			inet_hash_remove(ifa);
 			*ifap1 = ifa->ifa_next;
 
-			rtmsg_ifa(RTM_DELADDR, ifa, nlh, pid);
+			rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
 			blocking_notifier_call_chain(&inetaddr_chain,
 					NETDEV_DOWN, ifa);
 			inet_free_ifa(ifa);
@@ -385,7 +382,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 	   is valid, it will try to restore deleted routes... Grr.
 	   So that, this order is correct.
 	 */
-	rtmsg_ifa(RTM_DELADDR, ifa1, nlh, pid);
+	rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
 	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
 
 	if (promote) {
@@ -398,7 +395,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 		}
 
 		promote->ifa_flags &= ~IFA_F_SECONDARY;
-		rtmsg_ifa(RTM_NEWADDR, promote, nlh, pid);
+		rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
 		blocking_notifier_call_chain(&inetaddr_chain,
 				NETDEV_UP, promote);
 		for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
@@ -420,7 +417,7 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 }
 
 static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
-			     u32 pid)
+			     u32 portid)
 {
 	struct in_device *in_dev = ifa->ifa_dev;
 	struct in_ifaddr *ifa1, **ifap, **last_primary;
@@ -467,7 +464,7 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
 	/* Send message first, then call notifier.
 	   Notifier will trigger FIB update, so that
 	   listeners of netlink will know about new ifaddr */
-	rtmsg_ifa(RTM_NEWADDR, ifa, nlh, pid);
+	rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
 	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
 
 	return 0;
@@ -566,7 +563,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
 		     !inet_ifa_match(nla_get_be32(tb[IFA_ADDRESS]), ifa)))
 			continue;
 
-		__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).pid);
+		__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
 		return 0;
 	}
 
@@ -652,14 +649,14 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
 	if (IS_ERR(ifa))
 		return PTR_ERR(ifa);
 
-	return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).pid);
+	return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
 }
 
 /*
  *	Determine a default network mask, based on the IP address.
  */
 
-static inline int inet_abc_len(__be32 addr)
+static int inet_abc_len(__be32 addr)
 {
 	int rc = -1;	/* Something else, probably a multicast. */
 
@@ -1124,7 +1121,7 @@ skip:
 	}
 }
 
-static inline bool inetdev_valid_mtu(unsigned int mtu)
+static bool inetdev_valid_mtu(unsigned int mtu)
 {
 	return mtu >= 68;
 }
@@ -1239,7 +1236,7 @@ static struct notifier_block ip_netdev_notifier = {
 	.notifier_call = inetdev_event,
 };
 
-static inline size_t inet_nlmsg_size(void)
+static size_t inet_nlmsg_size(void)
 {
 	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
 	       + nla_total_size(4) /* IFA_ADDRESS */
@@ -1249,12 +1246,12 @@ static inline size_t inet_nlmsg_size(void)
 }
 
 static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
-			    u32 pid, u32 seq, int event, unsigned int flags)
+			    u32 portid, u32 seq, int event, unsigned int flags)
 {
 	struct ifaddrmsg *ifm;
 	struct nlmsghdr *nlh;
 
-	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags);
+	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
 	if (nlh == NULL)
 		return -EMSGSIZE;
 
@@ -1316,7 +1313,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 				if (ip_idx < s_ip_idx)
 					continue;
 				if (inet_fill_ifaddr(skb, ifa,
-					     NETLINK_CB(cb->skb).pid,
+					     NETLINK_CB(cb->skb).portid,
 					     cb->nlh->nlmsg_seq,
 					     RTM_NEWADDR, NLM_F_MULTI) <= 0) {
 					rcu_read_unlock();
@@ -1338,7 +1335,7 @@ done:
 }
 
 static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
-		      u32 pid)
+		      u32 portid)
 {
 	struct sk_buff *skb;
 	u32 seq = nlh ? nlh->nlmsg_seq : 0;
@@ -1350,14 +1347,14 @@ static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
 	if (skb == NULL)
 		goto errout;
 
-	err = inet_fill_ifaddr(skb, ifa, pid, seq, event, 0);
+	err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in inet_nlmsg_size() */
 		WARN_ON(err == -EMSGSIZE);
 		kfree_skb(skb);
 		goto errout;
 	}
-	rtnl_notify(skb, net, pid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
+	rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
 	return;
 errout:
 	if (err < 0)