author     David S. Miller <davem@davemloft.net>  2011-04-05 17:21:11 -0400
committer  David S. Miller <davem@davemloft.net>  2011-04-05 17:21:11 -0400
commit     9d9305949778c41b92d4394a2f2a6bcdb1c41a9c
tree       d49d536ae8156ad9989dc7b26e3897f21e549b02
parent     738faca34335cd1d5d87fa7c58703139c7fa15bd
parent     96120d86fe302c006259baee9061eea9e1b9e486

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6
-rw-r--r--  include/linux/netfilter.h                     |   3
-rw-r--r--  include/linux/netfilter/ipset/ip_set.h        |   2
-rw-r--r--  include/linux/netfilter/ipset/ip_set_ahash.h  |   3
-rw-r--r--  include/net/ip_vs.h                           |   2
-rw-r--r--  net/ipv4/netfilter.c                          |   5
-rw-r--r--  net/ipv6/netfilter.c                          |  13
-rw-r--r--  net/netfilter/Kconfig                         |   1
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_ip.c        |   3
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_ipmac.c     |   3
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_port.c      |   3
-rw-r--r--  net/netfilter/ipset/ip_set_core.c             | 109
-rw-r--r--  net/netfilter/ipset/ip_set_list_set.c         |  53
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c                |   2
-rw-r--r--  net/netfilter/nf_conntrack_h323_asn1.c        |   2
-rw-r--r--  net/netfilter/nf_conntrack_h323_main.c        |  16
-rw-r--r--  net/netfilter/xt_TCPMSS.c                     |   2
-rw-r--r--  net/netfilter/xt_addrtype.c                   |  42
-rw-r--r--  net/netfilter/xt_conntrack.c                  |   2

18 files changed, 151 insertions(+), 115 deletions(-)
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index eeec00abb664..7fa95df60146 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -270,7 +270,8 @@ struct nf_afinfo {
                                    unsigned int dataoff,
                                    unsigned int len,
                                    u_int8_t protocol);
-       int             (*route)(struct dst_entry **dst, struct flowi *fl);
+       int             (*route)(struct net *net, struct dst_entry **dst,
+                                struct flowi *fl, bool strict);
        void            (*saveroute)(const struct sk_buff *skb,
                                     struct nf_queue_entry *entry);
        int             (*reroute)(struct sk_buff *skb,
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index ec333d83f3b4..5a262e3ae715 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -293,7 +293,7 @@ struct ip_set {
        /* Lock protecting the set data */
        rwlock_t lock;
        /* References to the set */
-       atomic_t ref;
+       u32 ref;
        /* The core set type */
        struct ip_set_type *type;
        /* The type variant doing the real job */
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
index ec9d9bea1e37..a0196ac79051 100644
--- a/include/linux/netfilter/ipset/ip_set_ahash.h
+++ b/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -515,8 +515,7 @@ type_pf_head(struct ip_set *set, struct sk_buff *skb)
        if (h->netmask != HOST_MASK)
                NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask);
 #endif
-       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
        NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize));
        if (with_timeout(h->timeout))
                NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout));
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 30b49ed72f0d..4d1b71ae82ba 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -52,7 +52,7 @@ static inline struct net *skb_net(const struct sk_buff *skb)
         */
        if (likely(skb->dev && skb->dev->nd_net))
                return dev_net(skb->dev);
-       if (skb_dst(skb)->dev)
+       if (skb_dst(skb) && skb_dst(skb)->dev)
                return dev_net(skb_dst(skb)->dev);
        WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
             __func__, __LINE__);
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index f3c0b549b8e1..4614babdc45f 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -221,9 +221,10 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
        return csum;
 }
 
-static int nf_ip_route(struct dst_entry **dst, struct flowi *fl)
+static int nf_ip_route(struct net *net, struct dst_entry **dst,
+                      struct flowi *fl, bool strict __always_unused)
 {
-       struct rtable *rt = ip_route_output_key(&init_net, &fl->u.ip4);
+       struct rtable *rt = ip_route_output_key(net, &fl->u.ip4);
        if (IS_ERR(rt))
                return PTR_ERR(rt);
        *dst = &rt->dst;
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 39aaca2b4fd2..28bc1f644b7b 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -90,9 +90,18 @@ static int nf_ip6_reroute(struct sk_buff *skb,
        return 0;
 }
 
-static int nf_ip6_route(struct dst_entry **dst, struct flowi *fl)
+static int nf_ip6_route(struct net *net, struct dst_entry **dst,
+                       struct flowi *fl, bool strict)
 {
-       *dst = ip6_route_output(&init_net, NULL, &fl->u.ip6);
+       static const struct ipv6_pinfo fake_pinfo;
+       static const struct inet_sock fake_sk = {
+               /* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
+               .sk.sk_bound_dev_if = 1,
+               .pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
+       };
+       const void *sk = strict ? &fake_sk : NULL;
+
+       *dst = ip6_route_output(net, sk, &fl->u.ip6);
        return (*dst)->error;
 }
 
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index c3f988aa1152..32bff6d86cb2 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -652,7 +652,6 @@ comment "Xtables matches"
 config NETFILTER_XT_MATCH_ADDRTYPE
        tristate '"addrtype" address type match support'
        depends on NETFILTER_ADVANCED
-       depends on (IPV6 || IPV6=n)
        ---help---
          This option allows you to match what routing thinks of an address,
          eg. UNICAST, LOCAL, BROADCAST, ...
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index bca96990218d..a113ff066928 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -338,8 +338,7 @@ bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
        NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
        if (map->netmask != 32)
                NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
-       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
        NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
                      htonl(sizeof(*map) + map->memsize));
        if (with_timeout(map->timeout))
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 5e790172deff..00a33242e90c 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -434,8 +434,7 @@ bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
                goto nla_put_failure;
        NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
        NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
-       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
        NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
                      htonl(sizeof(*map)
                            + (map->last_ip - map->first_ip + 1) * map->dsize));
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 165f09b1a9cb..6b38eb8f6ed8 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -320,8 +320,7 @@ bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
                goto nla_put_failure;
        NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port));
        NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
-       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
        NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
                      htonl(sizeof(*map) + map->memsize));
        if (with_timeout(map->timeout))
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index d6b48230a540..e88ac3c3ed07 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -26,6 +26,7 @@
 
 static LIST_HEAD(ip_set_type_list);            /* all registered set types */
 static DEFINE_MUTEX(ip_set_type_mutex);        /* protects ip_set_type_list */
+static DEFINE_RWLOCK(ip_set_ref_lock);         /* protects the set refs */
 
 static struct ip_set **ip_set_list;            /* all individual sets */
 static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
@@ -301,13 +302,18 @@ EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
 static inline void
 __ip_set_get(ip_set_id_t index)
 {
-       atomic_inc(&ip_set_list[index]->ref);
+       write_lock_bh(&ip_set_ref_lock);
+       ip_set_list[index]->ref++;
+       write_unlock_bh(&ip_set_ref_lock);
 }
 
 static inline void
 __ip_set_put(ip_set_id_t index)
 {
-       atomic_dec(&ip_set_list[index]->ref);
+       write_lock_bh(&ip_set_ref_lock);
+       BUG_ON(ip_set_list[index]->ref == 0);
+       ip_set_list[index]->ref--;
+       write_unlock_bh(&ip_set_ref_lock);
 }
 
 /*
@@ -324,7 +330,7 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
        struct ip_set *set = ip_set_list[index];
        int ret = 0;
 
-       BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+       BUG_ON(set == NULL);
        pr_debug("set %s, index %u\n", set->name, index);
 
        if (dim < set->type->dimension ||
@@ -356,7 +362,7 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
        struct ip_set *set = ip_set_list[index];
        int ret;
 
-       BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+       BUG_ON(set == NULL);
        pr_debug("set %s, index %u\n", set->name, index);
 
        if (dim < set->type->dimension ||
@@ -378,7 +384,7 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
        struct ip_set *set = ip_set_list[index];
        int ret = 0;
 
-       BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+       BUG_ON(set == NULL);
        pr_debug("set %s, index %u\n", set->name, index);
 
        if (dim < set->type->dimension ||
@@ -397,7 +403,6 @@ EXPORT_SYMBOL_GPL(ip_set_del);
  * Find set by name, reference it once. The reference makes sure the
  * thing pointed to, does not go away under our feet.
  *
- * The nfnl mutex must already be activated.
  */
 ip_set_id_t
 ip_set_get_byname(const char *name, struct ip_set **set)
@@ -423,15 +428,12 @@ EXPORT_SYMBOL_GPL(ip_set_get_byname);
  * reference count by 1. The caller shall not assume the index
  * to be valid, after calling this function.
  *
- * The nfnl mutex must already be activated.
  */
 void
 ip_set_put_byindex(ip_set_id_t index)
 {
-       if (ip_set_list[index] != NULL) {
-               BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+       if (ip_set_list[index] != NULL)
                __ip_set_put(index);
-       }
 }
 EXPORT_SYMBOL_GPL(ip_set_put_byindex);
 
@@ -441,7 +443,6 @@ EXPORT_SYMBOL_GPL(ip_set_put_byindex);
  * can't be destroyed. The set cannot be renamed due to
  * the referencing either.
  *
- * The nfnl mutex must already be activated.
  */
 const char *
 ip_set_name_byindex(ip_set_id_t index)
@@ -449,7 +450,7 @@ ip_set_name_byindex(ip_set_id_t index)
        const struct ip_set *set = ip_set_list[index];
 
        BUG_ON(set == NULL);
-       BUG_ON(atomic_read(&set->ref) == 0);
+       BUG_ON(set->ref == 0);
 
        /* Referenced, so it's safe */
        return set->name;
@@ -515,10 +516,7 @@ void
 ip_set_nfnl_put(ip_set_id_t index)
 {
        nfnl_lock();
-       if (ip_set_list[index] != NULL) {
-               BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
-               __ip_set_put(index);
-       }
+       ip_set_put_byindex(index);
        nfnl_unlock();
 }
 EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
@@ -526,7 +524,7 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
 /*
  * Communication protocol with userspace over netlink.
  *
- * We already locked by nfnl_lock.
+ * The commands are serialized by the nfnl mutex.
  */
 
 static inline bool
@@ -657,7 +655,6 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
                return -ENOMEM;
        rwlock_init(&set->lock);
        strlcpy(set->name, name, IPSET_MAXNAMELEN);
-       atomic_set(&set->ref, 0);
        set->family = family;
 
        /*
@@ -690,8 +687,8 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
 
        /*
         * Here, we have a valid, constructed set and we are protected
-        * by nfnl_lock. Find the first free index in ip_set_list and
-        * check clashing.
+        * by the nfnl mutex. Find the first free index in ip_set_list
+        * and check clashing.
         */
        if ((ret = find_free_id(set->name, &index, &clash)) != 0) {
                /* If this is the same set and requested, ignore error */
@@ -751,31 +748,51 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
                          const struct nlattr * const attr[])
 {
        ip_set_id_t i;
+       int ret = 0;
 
        if (unlikely(protocol_failed(attr)))
                return -IPSET_ERR_PROTOCOL;
 
-       /* References are protected by the nfnl mutex */
+       /* Commands are serialized and references are
+        * protected by the ip_set_ref_lock.
+        * External systems (i.e. xt_set) must call
+        * ip_set_put|get_nfnl_* functions, that way we
+        * can safely check references here.
+        *
+        * list:set timer can only decrement the reference
+        * counter, so if it's already zero, we can proceed
+        * without holding the lock.
+        */
+       read_lock_bh(&ip_set_ref_lock);
        if (!attr[IPSET_ATTR_SETNAME]) {
                for (i = 0; i < ip_set_max; i++) {
-                       if (ip_set_list[i] != NULL &&
-                           (atomic_read(&ip_set_list[i]->ref)))
-                               return -IPSET_ERR_BUSY;
+                       if (ip_set_list[i] != NULL && ip_set_list[i]->ref) {
+                               ret = IPSET_ERR_BUSY;
+                               goto out;
+                       }
                }
+               read_unlock_bh(&ip_set_ref_lock);
                for (i = 0; i < ip_set_max; i++) {
                        if (ip_set_list[i] != NULL)
                                ip_set_destroy_set(i);
                }
        } else {
                i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
-               if (i == IPSET_INVALID_ID)
-                       return -ENOENT;
-               else if (atomic_read(&ip_set_list[i]->ref))
-                       return -IPSET_ERR_BUSY;
+               if (i == IPSET_INVALID_ID) {
+                       ret = -ENOENT;
+                       goto out;
+               } else if (ip_set_list[i]->ref) {
+                       ret = -IPSET_ERR_BUSY;
+                       goto out;
+               }
+               read_unlock_bh(&ip_set_ref_lock);
 
                ip_set_destroy_set(i);
        }
        return 0;
+out:
+       read_unlock_bh(&ip_set_ref_lock);
+       return ret;
 }
 
 /* Flush sets */
@@ -834,6 +851,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
        struct ip_set *set;
        const char *name2;
        ip_set_id_t i;
+       int ret = 0;
 
        if (unlikely(protocol_failed(attr) ||
                     attr[IPSET_ATTR_SETNAME] == NULL ||
@@ -843,25 +861,33 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
        set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
        if (set == NULL)
                return -ENOENT;
-       if (atomic_read(&set->ref) != 0)
-               return -IPSET_ERR_REFERENCED;
+
+       read_lock_bh(&ip_set_ref_lock);
+       if (set->ref != 0) {
+               ret = -IPSET_ERR_REFERENCED;
+               goto out;
+       }
 
        name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
        for (i = 0; i < ip_set_max; i++) {
                if (ip_set_list[i] != NULL &&
-                   STREQ(ip_set_list[i]->name, name2))
-                       return -IPSET_ERR_EXIST_SETNAME2;
+                   STREQ(ip_set_list[i]->name, name2)) {
+                       ret = -IPSET_ERR_EXIST_SETNAME2;
+                       goto out;
+               }
        }
        strncpy(set->name, name2, IPSET_MAXNAMELEN);
 
-       return 0;
+out:
+       read_unlock_bh(&ip_set_ref_lock);
+       return ret;
 }
 
 /* Swap two sets so that name/index points to the other.
  * References and set names are also swapped.
  *
- * We are protected by the nfnl mutex and references are
- * manipulated only by holding the mutex. The kernel interfaces
+ * The commands are serialized by the nfnl mutex and references are
+ * protected by the ip_set_ref_lock. The kernel interfaces
  * do not hold the mutex but the pointer settings are atomic
  * so the ip_set_list always contains valid pointers to the sets.
  */
@@ -874,7 +900,6 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
        struct ip_set *from, *to;
        ip_set_id_t from_id, to_id;
        char from_name[IPSET_MAXNAMELEN];
-       u32 from_ref;
 
        if (unlikely(protocol_failed(attr) ||
                     attr[IPSET_ATTR_SETNAME] == NULL ||
@@ -899,17 +924,15 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
                      from->type->family == to->type->family))
                return -IPSET_ERR_TYPE_MISMATCH;
 
-       /* No magic here: ref munging protected by the nfnl_lock */
        strncpy(from_name, from->name, IPSET_MAXNAMELEN);
-       from_ref = atomic_read(&from->ref);
-
        strncpy(from->name, to->name, IPSET_MAXNAMELEN);
-       atomic_set(&from->ref, atomic_read(&to->ref));
        strncpy(to->name, from_name, IPSET_MAXNAMELEN);
-       atomic_set(&to->ref, from_ref);
 
+       write_lock_bh(&ip_set_ref_lock);
+       swap(from->ref, to->ref);
        ip_set_list[from_id] = to;
        ip_set_list[to_id] = from;
+       write_unlock_bh(&ip_set_ref_lock);
 
        return 0;
 }
@@ -926,7 +949,7 @@ ip_set_dump_done(struct netlink_callback *cb)
 {
        if (cb->args[2]) {
                pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name);
-               __ip_set_put((ip_set_id_t) cb->args[1]);
+               ip_set_put_byindex((ip_set_id_t) cb->args[1]);
        }
        return 0;
 }
@@ -1068,7 +1091,7 @@ release_refcount:
        /* If there was an error or set is done, release set */
        if (ret || !cb->args[2]) {
                pr_debug("release set %s\n", ip_set_list[index]->name);
-               __ip_set_put(index);
+               ip_set_put_byindex(index);
        }
 
        /* If we dump all sets, continue with dumping last ones */
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index a47c32982f06..e9159e99fc4b 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -43,14 +43,19 @@ struct list_set {
 static inline struct set_elem *
 list_set_elem(const struct list_set *map, u32 id)
 {
-       return (struct set_elem *)((char *)map->members + id * map->dsize);
+       return (struct set_elem *)((void *)map->members + id * map->dsize);
+}
+
+static inline struct set_telem *
+list_set_telem(const struct list_set *map, u32 id)
+{
+       return (struct set_telem *)((void *)map->members + id * map->dsize);
 }
 
 static inline bool
 list_set_timeout(const struct list_set *map, u32 id)
 {
-       const struct set_telem *elem =
-               (const struct set_telem *) list_set_elem(map, id);
+       const struct set_telem *elem = list_set_telem(map, id);
 
        return ip_set_timeout_test(elem->timeout);
 }
@@ -58,19 +63,11 @@ list_set_timeout(const struct list_set *map, u32 id)
 static inline bool
 list_set_expired(const struct list_set *map, u32 id)
 {
-       const struct set_telem *elem =
-               (const struct set_telem *) list_set_elem(map, id);
+       const struct set_telem *elem = list_set_telem(map, id);
 
        return ip_set_timeout_expired(elem->timeout);
 }
 
-static inline int
-list_set_exist(const struct set_telem *elem)
-{
-       return elem->id != IPSET_INVALID_ID &&
-              !ip_set_timeout_expired(elem->timeout);
-}
-
 /* Set list without and with timeout */
 
 static int
@@ -146,11 +143,11 @@ list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id,
        struct set_telem *e;
 
        for (; i < map->size; i++) {
-               e = (struct set_telem *)list_set_elem(map, i);
+               e = list_set_telem(map, i);
                swap(e->id, id);
+               swap(e->timeout, timeout);
                if (e->id == IPSET_INVALID_ID)
                        break;
-               swap(e->timeout, timeout);
        }
 }
 
@@ -164,7 +161,7 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
                /* Last element replaced: e.g. add new,before,last */
                ip_set_put_byindex(e->id);
        if (with_timeout(map->timeout))
-               list_elem_tadd(map, i, id, timeout);
+               list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));
        else
                list_elem_add(map, i, id);
 
@@ -172,11 +169,11 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
 }
 
 static int
-list_set_del(struct list_set *map, ip_set_id_t id, u32 i)
+list_set_del(struct list_set *map, u32 i)
 {
        struct set_elem *a = list_set_elem(map, i), *b;
 
-       ip_set_put_byindex(id);
+       ip_set_put_byindex(a->id);
 
        for (; i < map->size - 1; i++) {
                b = list_set_elem(map, i + 1);
@@ -308,11 +305,11 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
                           (before == 0 ||
                            (before > 0 &&
                             next_id_eq(map, i, refid))))
-                               ret = list_set_del(map, id, i);
+                               ret = list_set_del(map, i);
                        else if (before < 0 &&
                                 elem->id == refid &&
                                 next_id_eq(map, i, id))
-                               ret = list_set_del(map, id, i + 1);
+                               ret = list_set_del(map, i + 1);
                }
                break;
        default:
@@ -369,8 +366,7 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
        NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size));
        if (with_timeout(map->timeout))
                NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
-       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
        NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
                      htonl(sizeof(*map) + map->size * map->dsize));
        ipset_nest_end(skb, nested);
@@ -461,16 +457,13 @@ list_set_gc(unsigned long ul_set)
        struct set_telem *e;
        u32 i;
 
-       /* We run parallel with other readers (test element)
-        * but adding/deleting new entries is locked out */
-       read_lock_bh(&set->lock);
-       for (i = map->size - 1; i >= 0; i--) {
-               e = (struct set_telem *) list_set_elem(map, i);
-               if (e->id != IPSET_INVALID_ID &&
-                   list_set_expired(map, i))
-                       list_set_del(map, e->id, i);
+       write_lock_bh(&set->lock);
+       for (i = 0; i < map->size; i++) {
+               e = list_set_telem(map, i);
+               if (e->id != IPSET_INVALID_ID && list_set_expired(map, i))
+                       list_set_del(map, i);
        }
-       read_unlock_bh(&set->lock);
+       write_unlock_bh(&set->lock);
 
        map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
        add_timer(&map->gc);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 33733c8872e7..ae47090bf45f 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3120,7 +3120,7 @@ nla_put_failure:
 static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
                                    struct netlink_callback *cb)
 {
-       struct net *net = skb_net(skb);
+       struct net *net = skb_sknet(skb);
        struct netns_ipvs *ipvs = net_ipvs(net);
 
        mutex_lock(&__ip_vs_mutex);
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
index 867882313e49..bcd5ed6b7130 100644
--- a/net/netfilter/nf_conntrack_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -631,7 +631,7 @@ static int decode_seqof(bitstr_t *bs, const struct field_t *f,
                CHECK_BOUND(bs, 2);
                count = *bs->cur++;
                count <<= 8;
-               count = *bs->cur++;
+               count += *bs->cur++;
                break;
        case SEMI:
                BYTE_ALIGN(bs);
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 533a183e6661..18b2ce5c8ced 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -731,10 +731,10 @@ static int callforward_do_filter(const union nf_inet_addr *src,
 
                memset(&fl2, 0, sizeof(fl2));
                fl2.daddr = dst->ip;
-               if (!afinfo->route((struct dst_entry **)&rt1,
-                                  flowi4_to_flowi(&fl1))) {
-                       if (!afinfo->route((struct dst_entry **)&rt2,
-                                          flowi4_to_flowi(&fl2))) {
+               if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
+                                  flowi4_to_flowi(&fl1), false)) {
+                       if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
+                                          flowi4_to_flowi(&fl2), false)) {
                                if (rt1->rt_gateway == rt2->rt_gateway &&
                                    rt1->dst.dev == rt2->dst.dev)
                                        ret = 1;
@@ -755,10 +755,10 @@ static int callforward_do_filter(const union nf_inet_addr *src,
 
                memset(&fl2, 0, sizeof(fl2));
                ipv6_addr_copy(&fl2.daddr, &dst->in6);
-               if (!afinfo->route((struct dst_entry **)&rt1,
-                                  flowi6_to_flowi(&fl1))) {
-                       if (!afinfo->route((struct dst_entry **)&rt2,
-                                          flowi6_to_flowi(&fl2))) {
+               if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
+                                  flowi6_to_flowi(&fl1), false)) {
+                       if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
+                                          flowi6_to_flowi(&fl2), false)) {
                                if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
                                            sizeof(rt1->rt6i_gateway)) &&
                                    rt1->dst.dev == rt2->dst.dev)
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 6e6b46cb1db9..9e63b43faeed 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -166,7 +166,7 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
        rcu_read_lock();
        ai = nf_get_afinfo(family);
        if (ai != NULL)
-               ai->route((struct dst_entry **)&rt, &fl);
+               ai->route(&init_net, (struct dst_entry **)&rt, &fl, false);
        rcu_read_unlock();
 
        if (rt != NULL) {
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index 2220b85e9519..b77d383cec78 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -32,11 +32,32 @@ MODULE_ALIAS("ipt_addrtype");
 MODULE_ALIAS("ip6t_addrtype");
 
 #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
-static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt)
+static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
+                           const struct in6_addr *addr)
 {
+       const struct nf_afinfo *afinfo;
+       struct flowi6 flow;
+       struct rt6_info *rt;
        u32 ret;
+       int route_err;
 
-       if (!rt)
+       memset(&flow, 0, sizeof(flow));
+       ipv6_addr_copy(&flow.daddr, addr);
+       if (dev)
+               flow.flowi6_oif = dev->ifindex;
+
+       rcu_read_lock();
+
+       afinfo = nf_get_afinfo(NFPROTO_IPV6);
+       if (afinfo != NULL)
+               route_err = afinfo->route(net, (struct dst_entry **)&rt,
+                                         flowi6_to_flowi(&flow), !!dev);
+       else
+               route_err = 1;
+
+       rcu_read_unlock();
+
+       if (route_err)
                return XT_ADDRTYPE_UNREACHABLE;
 
        if (rt->rt6i_flags & RTF_REJECT)
@@ -48,6 +69,9 @@ static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt)
                ret |= XT_ADDRTYPE_LOCAL;
        if (rt->rt6i_flags & RTF_ANYCAST)
                ret |= XT_ADDRTYPE_ANYCAST;
+
+
+       dst_release(&rt->dst);
        return ret;
 }
 
@@ -65,18 +89,8 @@ static bool match_type6(struct net *net, const struct net_device *dev,
                return false;
 
        if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST |
-            XT_ADDRTYPE_UNREACHABLE) & mask) {
-               struct rt6_info *rt;
-               u32 type;
-               int ifindex = dev ? dev->ifindex : 0;
-
-               rt = rt6_lookup(net, addr, NULL, ifindex, !!dev);
-
-               type = xt_addrtype_rt6_to_type(rt);
-
-               dst_release(&rt->dst);
-               return !!(mask & type);
-       }
+            XT_ADDRTYPE_UNREACHABLE) & mask)
+               return !!(mask & match_lookup_rt6(net, dev, addr));
        return true;
 }
 
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 2c0086a4751e..481a86fdc409 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -195,7 +195,7 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
                return info->match_flags & XT_CONNTRACK_STATE;
        if ((info->match_flags & XT_CONNTRACK_DIRECTION) &&
            (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ^
-           !!(info->invert_flags & XT_CONNTRACK_DIRECTION))
+           !(info->invert_flags & XT_CONNTRACK_DIRECTION))
                return false;
 
        if (info->match_flags & XT_CONNTRACK_ORIGSRC)