path: root/net/netfilter/ipset
author	Linus Torvalds <torvalds@linux-foundation.org>	2011-04-11 10:27:24 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-04-11 10:27:24 -0400
commit	c44eaf41a5a423993932c9a9ad279ee132779b48
tree	3554fc0bfdcd97936417d6d001d00710d11e67e5 /net/netfilter/ipset
parent	4263a2f1dad8c8e7ce2352a0cbc882c2b0c044a9
parent	88edaa415966af965bb7eb7056d8b58145462c8e
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (34 commits)
  net: Add support for SMSC LAN9530, LAN9730 and LAN89530
  mlx4_en: Restoring RX buffer pointer in case of failure
  mlx4: Sensing link type at device initialization
  ipv4: Fix "Set rt->rt_iif more sanely on output routes."
  MAINTAINERS: add entry for Xen network backend
  be2net: Fix suspend/resume operation
  be2net: Rename some struct members for clarity
  pppoe: drop PPPOX_ZOMBIEs in pppoe_flush_dev
  dsa/mv88e6131: add support for mv88e6085 switch
  ipv6: Enable RFS sk_rxhash tracking for ipv6 sockets (v2)
  be2net: Fix a potential crash during shutdown.
  bna: Fix for handling firmware heartbeat failure
  can: mcp251x: Allow pass IRQ flags through platform data.
  smsc911x: fix mac_lock acquision before calling smsc911x_mac_read
  iwlwifi: accept EEPROM version 0x423 for iwl6000
  rt2x00: fix cancelling uninitialized work
  rtlwifi: Fix some warnings/bugs
  p54usb: IDs for two new devices
  wl12xx: fix potential buffer overflow in testmode nvs push
  zd1211rw: reset rx idle timer from tasklet
  ...
Diffstat (limited to 'net/netfilter/ipset')
-rw-r--r--	net/netfilter/ipset/ip_set_bitmap_ip.c		3
-rw-r--r--	net/netfilter/ipset/ip_set_bitmap_ipmac.c	3
-rw-r--r--	net/netfilter/ipset/ip_set_bitmap_port.c	3
-rw-r--r--	net/netfilter/ipset/ip_set_core.c		109
-rw-r--r--	net/netfilter/ipset/ip_set_list_set.c		53
5 files changed, 92 insertions(+), 79 deletions(-)
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index bca96990218..a113ff06692 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -338,8 +338,7 @@ bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
 	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
 	if (map->netmask != 32)
 		NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map) + map->memsize));
 	if (with_timeout(map->timeout))
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 5e790172def..00a33242e90 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -434,8 +434,7 @@ bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
 		goto nla_put_failure;
 	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
 	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map)
 			    + (map->last_ip - map->first_ip + 1) * map->dsize));
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 165f09b1a9c..6b38eb8f6ed 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -320,8 +320,7 @@ bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
 		goto nla_put_failure;
 	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port));
 	NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map) + map->memsize));
 	if (with_timeout(map->timeout))
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 253326e8d99..9152e69a162 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -26,6 +26,7 @@
 
 static LIST_HEAD(ip_set_type_list);		/* all registered set types */
 static DEFINE_MUTEX(ip_set_type_mutex);		/* protects ip_set_type_list */
+static DEFINE_RWLOCK(ip_set_ref_lock);		/* protects the set refs */
 
 static struct ip_set **ip_set_list;		/* all individual sets */
 static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
@@ -301,13 +302,18 @@ EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
 static inline void
 __ip_set_get(ip_set_id_t index)
 {
-	atomic_inc(&ip_set_list[index]->ref);
+	write_lock_bh(&ip_set_ref_lock);
+	ip_set_list[index]->ref++;
+	write_unlock_bh(&ip_set_ref_lock);
 }
 
 static inline void
 __ip_set_put(ip_set_id_t index)
 {
-	atomic_dec(&ip_set_list[index]->ref);
+	write_lock_bh(&ip_set_ref_lock);
+	BUG_ON(ip_set_list[index]->ref == 0);
+	ip_set_list[index]->ref--;
+	write_unlock_bh(&ip_set_ref_lock);
 }
 
 /*
@@ -324,7 +330,7 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
 	struct ip_set *set = ip_set_list[index];
 	int ret = 0;
 
-	BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);
 
 	if (dim < set->type->dimension ||
@@ -356,7 +362,7 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
 	struct ip_set *set = ip_set_list[index];
 	int ret;
 
-	BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);
 
 	if (dim < set->type->dimension ||
@@ -378,7 +384,7 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
 	struct ip_set *set = ip_set_list[index];
 	int ret = 0;
 
-	BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);
 
 	if (dim < set->type->dimension ||
@@ -397,7 +403,6 @@ EXPORT_SYMBOL_GPL(ip_set_del);
  * Find set by name, reference it once. The reference makes sure the
  * thing pointed to, does not go away under our feet.
  *
- * The nfnl mutex must already be activated.
  */
 ip_set_id_t
 ip_set_get_byname(const char *name, struct ip_set **set)
@@ -423,15 +428,12 @@ EXPORT_SYMBOL_GPL(ip_set_get_byname);
  * reference count by 1. The caller shall not assume the index
  * to be valid, after calling this function.
  *
- * The nfnl mutex must already be activated.
  */
 void
 ip_set_put_byindex(ip_set_id_t index)
 {
-	if (ip_set_list[index] != NULL) {
-		BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+	if (ip_set_list[index] != NULL)
 		__ip_set_put(index);
-	}
 }
 EXPORT_SYMBOL_GPL(ip_set_put_byindex);
 
@@ -441,7 +443,6 @@ EXPORT_SYMBOL_GPL(ip_set_put_byindex);
  * can't be destroyed. The set cannot be renamed due to
  * the referencing either.
  *
- * The nfnl mutex must already be activated.
  */
 const char *
 ip_set_name_byindex(ip_set_id_t index)
@@ -449,7 +450,7 @@ ip_set_name_byindex(ip_set_id_t index)
 	const struct ip_set *set = ip_set_list[index];
 
 	BUG_ON(set == NULL);
-	BUG_ON(atomic_read(&set->ref) == 0);
+	BUG_ON(set->ref == 0);
 
 	/* Referenced, so it's safe */
 	return set->name;
@@ -515,10 +516,7 @@ void
 ip_set_nfnl_put(ip_set_id_t index)
 {
 	nfnl_lock();
-	if (ip_set_list[index] != NULL) {
-		BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
-		__ip_set_put(index);
-	}
+	ip_set_put_byindex(index);
 	nfnl_unlock();
 }
 EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
@@ -526,7 +524,7 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
 /*
  * Communication protocol with userspace over netlink.
  *
- * We already locked by nfnl_lock.
+ * The commands are serialized by the nfnl mutex.
  */
 
 static inline bool
@@ -657,7 +655,6 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
 		return -ENOMEM;
 	rwlock_init(&set->lock);
 	strlcpy(set->name, name, IPSET_MAXNAMELEN);
-	atomic_set(&set->ref, 0);
 	set->family = family;
 
 	/*
@@ -690,8 +687,8 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
 
 	/*
 	 * Here, we have a valid, constructed set and we are protected
-	 * by nfnl_lock. Find the first free index in ip_set_list and
-	 * check clashing.
+	 * by the nfnl mutex. Find the first free index in ip_set_list
+	 * and check clashing.
 	 */
 	if ((ret = find_free_id(set->name, &index, &clash)) != 0) {
 		/* If this is the same set and requested, ignore error */
@@ -751,31 +748,51 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
 	       const struct nlattr * const attr[])
 {
 	ip_set_id_t i;
+	int ret = 0;
 
 	if (unlikely(protocol_failed(attr)))
 		return -IPSET_ERR_PROTOCOL;
 
-	/* References are protected by the nfnl mutex */
+	/* Commands are serialized and references are
+	 * protected by the ip_set_ref_lock.
+	 * External systems (i.e. xt_set) must call
+	 * ip_set_put|get_nfnl_* functions, that way we
+	 * can safely check references here.
+	 *
+	 * list:set timer can only decrement the reference
+	 * counter, so if it's already zero, we can proceed
+	 * without holding the lock.
+	 */
+	read_lock_bh(&ip_set_ref_lock);
 	if (!attr[IPSET_ATTR_SETNAME]) {
 		for (i = 0; i < ip_set_max; i++) {
-			if (ip_set_list[i] != NULL &&
-			    (atomic_read(&ip_set_list[i]->ref)))
-				return -IPSET_ERR_BUSY;
+			if (ip_set_list[i] != NULL && ip_set_list[i]->ref) {
+				ret = IPSET_ERR_BUSY;
+				goto out;
+			}
 		}
+		read_unlock_bh(&ip_set_ref_lock);
 		for (i = 0; i < ip_set_max; i++) {
 			if (ip_set_list[i] != NULL)
 				ip_set_destroy_set(i);
 		}
 	} else {
 		i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
-		if (i == IPSET_INVALID_ID)
-			return -ENOENT;
-		else if (atomic_read(&ip_set_list[i]->ref))
-			return -IPSET_ERR_BUSY;
+		if (i == IPSET_INVALID_ID) {
+			ret = -ENOENT;
+			goto out;
+		} else if (ip_set_list[i]->ref) {
+			ret = -IPSET_ERR_BUSY;
+			goto out;
+		}
+		read_unlock_bh(&ip_set_ref_lock);
 
 		ip_set_destroy_set(i);
 	}
 	return 0;
+out:
+	read_unlock_bh(&ip_set_ref_lock);
+	return ret;
 }
 
 /* Flush sets */
@@ -834,6 +851,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
 	struct ip_set *set;
 	const char *name2;
 	ip_set_id_t i;
+	int ret = 0;
 
 	if (unlikely(protocol_failed(attr) ||
 		     attr[IPSET_ATTR_SETNAME] == NULL ||
@@ -843,25 +861,33 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
 	set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
 	if (set == NULL)
 		return -ENOENT;
-	if (atomic_read(&set->ref) != 0)
-		return -IPSET_ERR_REFERENCED;
+
+	read_lock_bh(&ip_set_ref_lock);
+	if (set->ref != 0) {
+		ret = -IPSET_ERR_REFERENCED;
+		goto out;
+	}
 
 	name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
 	for (i = 0; i < ip_set_max; i++) {
 		if (ip_set_list[i] != NULL &&
-		    STREQ(ip_set_list[i]->name, name2))
-			return -IPSET_ERR_EXIST_SETNAME2;
+		    STREQ(ip_set_list[i]->name, name2)) {
+			ret = -IPSET_ERR_EXIST_SETNAME2;
+			goto out;
+		}
 	}
 	strncpy(set->name, name2, IPSET_MAXNAMELEN);
 
-	return 0;
+out:
+	read_unlock_bh(&ip_set_ref_lock);
+	return ret;
 }
 
 /* Swap two sets so that name/index points to the other.
  * References and set names are also swapped.
  *
- * We are protected by the nfnl mutex and references are
- * manipulated only by holding the mutex. The kernel interfaces
+ * The commands are serialized by the nfnl mutex and references are
+ * protected by the ip_set_ref_lock. The kernel interfaces
  * do not hold the mutex but the pointer settings are atomic
  * so the ip_set_list always contains valid pointers to the sets.
  */
@@ -874,7 +900,6 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
 	struct ip_set *from, *to;
 	ip_set_id_t from_id, to_id;
 	char from_name[IPSET_MAXNAMELEN];
-	u32 from_ref;
 
 	if (unlikely(protocol_failed(attr) ||
 		     attr[IPSET_ATTR_SETNAME] == NULL ||
@@ -899,17 +924,15 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
 	      from->type->family == to->type->family))
 		return -IPSET_ERR_TYPE_MISMATCH;
 
-	/* No magic here: ref munging protected by the nfnl_lock */
 	strncpy(from_name, from->name, IPSET_MAXNAMELEN);
-	from_ref = atomic_read(&from->ref);
-
 	strncpy(from->name, to->name, IPSET_MAXNAMELEN);
-	atomic_set(&from->ref, atomic_read(&to->ref));
 	strncpy(to->name, from_name, IPSET_MAXNAMELEN);
-	atomic_set(&to->ref, from_ref);
 
+	write_lock_bh(&ip_set_ref_lock);
+	swap(from->ref, to->ref);
 	ip_set_list[from_id] = to;
 	ip_set_list[to_id] = from;
+	write_unlock_bh(&ip_set_ref_lock);
 
 	return 0;
 }
@@ -926,7 +949,7 @@ ip_set_dump_done(struct netlink_callback *cb)
 {
 	if (cb->args[2]) {
 		pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name);
-		__ip_set_put((ip_set_id_t) cb->args[1]);
+		ip_set_put_byindex((ip_set_id_t) cb->args[1]);
 	}
 	return 0;
 }
@@ -1068,7 +1091,7 @@ release_refcount:
 	/* If there was an error or set is done, release set */
 	if (ret || !cb->args[2]) {
 		pr_debug("release set %s\n", ip_set_list[index]->name);
-		__ip_set_put(index);
+		ip_set_put_byindex(index);
 	}
 
 	/* If we dump all sets, continue with dumping last ones */
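
The core of the ip_set_core.c changes above is that the per-set reference counter becomes a plain integer guarded by the new ip_set_ref_lock rwlock instead of an atomic_t, so commands such as DESTROY and RENAME can test the counter and act on the result under the same lock. As a rough user-space analogue of that pattern (illustrative only; the pthread rwlock, struct set, and the set_get/set_put/set_destroy names below are not part of the kernel API):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

/* Illustrative stand-in for struct ip_set: a plain int ref guarded
 * by one shared rwlock, mirroring ip_set_ref_lock in the patch. */
struct set {
	int ref;
	bool alive;
};

static pthread_rwlock_t ref_lock = PTHREAD_RWLOCK_INITIALIZER;

static void set_get(struct set *s)		/* roughly __ip_set_get() */
{
	pthread_rwlock_wrlock(&ref_lock);	/* writers mutate the counter */
	s->ref++;
	pthread_rwlock_unlock(&ref_lock);
}

static void set_put(struct set *s)		/* roughly __ip_set_put() */
{
	pthread_rwlock_wrlock(&ref_lock);
	s->ref--;
	pthread_rwlock_unlock(&ref_lock);
}

static int set_destroy(struct set *s)		/* roughly ip_set_destroy() */
{
	pthread_rwlock_rdlock(&ref_lock);	/* readers only inspect it */
	if (s->ref != 0) {
		pthread_rwlock_unlock(&ref_lock);
		return -EBUSY;			/* like the IPSET_ERR_BUSY path */
	}
	pthread_rwlock_unlock(&ref_lock);
	s->alive = false;			/* safe: no reference is held */
	return 0;
}

Note that dropping the lock before tearing the set down is only safe under the assumption the patch comment spells out: new references are created solely through the serialized netlink commands, and the list:set timer can only decrement the counter, so a count observed as zero under the lock cannot rise again before destroy completes.
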
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index a47c32982f0..e9159e99fc4 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -43,14 +43,19 @@ struct list_set {
 static inline struct set_elem *
 list_set_elem(const struct list_set *map, u32 id)
 {
-	return (struct set_elem *)((char *)map->members + id * map->dsize);
+	return (struct set_elem *)((void *)map->members + id * map->dsize);
+}
+
+static inline struct set_telem *
+list_set_telem(const struct list_set *map, u32 id)
+{
+	return (struct set_telem *)((void *)map->members + id * map->dsize);
 }
 
 static inline bool
 list_set_timeout(const struct list_set *map, u32 id)
 {
-	const struct set_telem *elem =
-		(const struct set_telem *) list_set_elem(map, id);
+	const struct set_telem *elem = list_set_telem(map, id);
 
 	return ip_set_timeout_test(elem->timeout);
 }
@@ -58,19 +63,11 @@ list_set_timeout(const struct list_set *map, u32 id)
 static inline bool
 list_set_expired(const struct list_set *map, u32 id)
 {
-	const struct set_telem *elem =
-		(const struct set_telem *) list_set_elem(map, id);
+	const struct set_telem *elem = list_set_telem(map, id);
 
 	return ip_set_timeout_expired(elem->timeout);
 }
 
-static inline int
-list_set_exist(const struct set_telem *elem)
-{
-	return elem->id != IPSET_INVALID_ID &&
-	       !ip_set_timeout_expired(elem->timeout);
-}
-
 /* Set list without and with timeout */
 
 static int
@@ -146,11 +143,11 @@ list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id,
 	struct set_telem *e;
 
 	for (; i < map->size; i++) {
-		e = (struct set_telem *)list_set_elem(map, i);
+		e = list_set_telem(map, i);
 		swap(e->id, id);
+		swap(e->timeout, timeout);
 		if (e->id == IPSET_INVALID_ID)
 			break;
-		swap(e->timeout, timeout);
 	}
 }
 
@@ -164,7 +161,7 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
 		/* Last element replaced: e.g. add new,before,last */
 		ip_set_put_byindex(e->id);
 	if (with_timeout(map->timeout))
-		list_elem_tadd(map, i, id, timeout);
+		list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));
 	else
 		list_elem_add(map, i, id);
 
@@ -172,11 +169,11 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
 }
 
 static int
-list_set_del(struct list_set *map, ip_set_id_t id, u32 i)
+list_set_del(struct list_set *map, u32 i)
 {
 	struct set_elem *a = list_set_elem(map, i), *b;
 
-	ip_set_put_byindex(id);
+	ip_set_put_byindex(a->id);
 
 	for (; i < map->size - 1; i++) {
 		b = list_set_elem(map, i + 1);
@@ -308,11 +305,11 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
 			     (before == 0 ||
 			      (before > 0 &&
 			       next_id_eq(map, i, refid))))
-				ret = list_set_del(map, id, i);
+				ret = list_set_del(map, i);
 			else if (before < 0 &&
 				 elem->id == refid &&
 				 next_id_eq(map, i, id))
-				ret = list_set_del(map, id, i + 1);
+				ret = list_set_del(map, i + 1);
 		}
 		break;
 	default:
@@ -369,8 +366,7 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
 	NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size));
 	if (with_timeout(map->timeout))
 		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map) + map->size * map->dsize));
 	ipset_nest_end(skb, nested);
@@ -461,16 +457,13 @@ list_set_gc(unsigned long ul_set)
 	struct set_telem *e;
 	u32 i;
 
-	/* We run parallel with other readers (test element)
-	 * but adding/deleting new entries is locked out */
-	read_lock_bh(&set->lock);
-	for (i = map->size - 1; i >= 0; i--) {
-		e = (struct set_telem *) list_set_elem(map, i);
-		if (e->id != IPSET_INVALID_ID &&
-		    list_set_expired(map, i))
-			list_set_del(map, e->id, i);
+	write_lock_bh(&set->lock);
+	for (i = 0; i < map->size; i++) {
+		e = list_set_telem(map, i);
+		if (e->id != IPSET_INVALID_ID && list_set_expired(map, i))
+			list_set_del(map, i);
 	}
-	read_unlock_bh(&set->lock);
+	write_unlock_bh(&set->lock);
 
 	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
 	add_timer(&map->gc);
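
For reference, the list_elem_tadd() loop earlier in this file's diff inserts a new (id, timeout) pair at slot i by rippling the existing entries one position toward the end of the array: each iteration swaps the incoming values with the slot's current contents, carrying the displaced values forward, and it stops once an empty marker has been written back into a slot. A minimal user-space sketch of that idiom (the struct elem type, EMPTY_ID, and SIZE here are illustrative stand-ins, not the kernel's struct set_telem or IPSET_INVALID_ID):

#include <stdio.h>

#define EMPTY_ID  0xFFFFu	/* stand-in for the empty-slot marker */
#define SIZE      8

struct elem {
	unsigned id;
	unsigned timeout;
};

/* Insert (id, timeout) at slot i, shifting the tail up by one slot.
 * Mirrors the swap-based loop in list_elem_tadd(): after each swap the
 * locals hold the displaced values and carry them to the next slot;
 * depositing the empty marker means every live entry has been moved. */
static void elem_tadd(struct elem *list, unsigned i,
		      unsigned id, unsigned timeout)
{
	for (; i < SIZE; i++) {
		unsigned tmp;

		tmp = list[i].id;      list[i].id = id;           id = tmp;
		tmp = list[i].timeout; list[i].timeout = timeout; timeout = tmp;
		if (list[i].id == EMPTY_ID)
			break;
	}
}

int main(void)
{
	struct elem list[SIZE];
	unsigned i;

	for (i = 0; i < SIZE; i++)
		list[i] = (struct elem){ EMPTY_ID, 0 };
	list[0] = (struct elem){ 10, 100 };
	list[1] = (struct elem){ 20, 200 };

	elem_tadd(list, 1, 15, 150);	/* insert before the entry with id 20 */
	for (i = 0; i < 4; i++)
		printf("%u: id=%u timeout=%u\n", i, list[i].id, list[i].timeout);
	return 0;
}

The patch also moves the timeout swap ahead of the termination check and hands list_elem_tadd() a value prepared by ip_set_timeout_set(), so the shifted slots keep their timeouts paired with the right ids; the sketch above keeps the two swaps together in the same way.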