commit     941297f443f871b8c3372feccf27a8733f6ce9e9
author     Eric Dumazet <eric.dumazet@gmail.com>    2009-07-16 08:03:40 -0400
committer  Patrick McHardy <kaber@trash.net>        2009-07-16 08:03:40 -0400
tree       c98b7e30bf14255cfd87a3d8d929f26f35764d21
parent     aa6a03eb0ae859c1371555ef381de4c96ca1e4e6
netfilter: nf_conntrack: nf_conntrack_alloc() fixes
When a slab cache uses SLAB_DESTROY_BY_RCU, we must be careful when allocating objects: the slab allocator can hand back a freed object that lockless readers are still traversing.
In particular, nf_conntrack RCU lookups rely on ct->tuplehash[xxx].hnnode.next always being valid, i.e. containing either a valid 'nulls' value or a valid pointer to the next object in the hash chain.
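For context, those lookups follow the pattern documented in Documentation/RCU/rculist_nulls.txt, condensed below; try_get_ref() and put_ref() are that document's placeholder reference helpers, not real kernel APIs. The loop chases hnnode.next with no lock held, which is why that field must never hold a plain NULL:

rcu_read_lock();
begin:
hlist_nulls_for_each_entry_rcu(obj, node, head, obj_node) {
	if (obj->key == key) {
		if (!try_get_ref(obj))	/* might fail for freed objects */
			goto begin;
		if (obj->key != key) {	/* object recycled under us */
			put_ref(obj);
			goto begin;
		}
		goto out;
	}
}
/*
 * If the nulls value is not the slot we hashed to, the object moved
 * to another chain while we walked it: restart the lookup.
 */
if (get_nulls_value(node) != slot)
	goto begin;
obj = NULL;
out:
rcu_read_unlock();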
kmem_cache_zalloc() sets up the object with all-zero (NULL) fields, but NULL is not a valid value for ct->tuplehash[xxx].hnnode.next: a 'nulls' end-of-chain marker has its low bit set (see is_a_nulls() in include/linux/list_nulls.h), so a reader that finds a plain NULL there will dereference it. The fix is to call kmem_cache_alloc() and do the zeroing ourselves, leaving the hnnode.next fields untouched.
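As a minimal sketch of the resulting idiom (hypothetical struct obj, not the conntrack code): zero the object starting from the first member that readers never chase, so the RCU-visible node keeps whatever valid value its previous incarnation left behind.

struct obj {
	struct hlist_nulls_node node;	/* node.next must stay a valid
					 * pointer or nulls marker */
	u32 key;
	atomic_t refcnt;
};

static struct obj *obj_alloc(struct kmem_cache *cachep, gfp_t gfp)
{
	struct obj *p = kmem_cache_alloc(cachep, gfp);

	if (p != NULL)	/* zero from 'key' on; p->node is left as-is */
		memset(&p->key, 0, sizeof(*p) - offsetof(struct obj, key));
	return p;
}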
As spotted by Patrick, we also need to make sure the lookup keys are committed to memory before the refcount is set to 1, or a lockless reader could get a reference on the old version of the object and its key re-check would then succeed against stale data.
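Sketched on both sides (helper names as above, hypothetical): if the store to refcnt could become visible before the store to the key, a reader could take a reference on a recycled object and then re-verify it against the very key it matched on.

/* writer: initializing a (possibly recycled) object under the chain lock */
obj->key = key;
smp_wmb();			/* commit the key ... */
atomic_set(&obj->refcnt, 1);	/* ... before readers can take a reference */

/* reader: under rcu_read_lock(), after finding a key match */
if (!atomic_inc_not_zero(&obj->refcnt))	/* refcnt == 0: object is free */
	goto begin;
if (obj->key != key) {			/* recycled since the match: retry */
	put_ref(obj);
	goto begin;
}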
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
-rw-r--r--  Documentation/RCU/rculist_nulls.txt |  7 ++++++-
-rw-r--r--  net/netfilter/nf_conntrack_core.c   | 21 ++++++++++++++++++++---
2 files changed, 24 insertions(+), 4 deletions(-)
diff --git a/Documentation/RCU/rculist_nulls.txt b/Documentation/RCU/rculist_nulls.txt
index 93cb28d05dcd..18f9651ff23d 100644
--- a/Documentation/RCU/rculist_nulls.txt
+++ b/Documentation/RCU/rculist_nulls.txt
@@ -83,11 +83,12 @@ not detect it missed following items in original chain.
 obj = kmem_cache_alloc(...);
 lock_chain(); // typically a spin_lock()
 obj->key = key;
-atomic_inc(&obj->refcnt);
 /*
  * we need to make sure obj->key is updated before obj->next
+ * or obj->refcnt
  */
 smp_wmb();
+atomic_set(&obj->refcnt, 1);
 hlist_add_head_rcu(&obj->obj_node, list);
 unlock_chain(); // typically a spin_unlock()
 
@@ -159,6 +160,10 @@ out:
 obj = kmem_cache_alloc(cachep);
 lock_chain(); // typically a spin_lock()
 obj->key = key;
+/*
+ * changes to obj->key must be visible before refcnt one
+ */
+smp_wmb();
 atomic_set(&obj->refcnt, 1);
 /*
  * insert obj in RCU way (readers might be traversing chain)
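For readability, here is how the two insertion sequences read once the patch is applied; the trailing hlist_nulls_add_head_rcu() and unlock_chain() lines of the second sequence come from the surrounding file, not from this hunk.

With a lookup that re-checks the key (first hunk):

obj = kmem_cache_alloc(...);
lock_chain(); // typically a spin_lock()
obj->key = key;
/*
 * we need to make sure obj->key is updated before obj->next
 * or obj->refcnt
 */
smp_wmb();
atomic_set(&obj->refcnt, 1);
hlist_add_head_rcu(&obj->obj_node, list);
unlock_chain(); // typically a spin_unlock()

In the hlist_nulls variant (second hunk):

obj = kmem_cache_alloc(cachep);
lock_chain(); // typically a spin_lock()
obj->key = key;
/*
 * changes to obj->key must be visible before refcnt one
 */
smp_wmb();
atomic_set(&obj->refcnt, 1);
/*
 * insert obj in RCU way (readers might be traversing chain)
 */
hlist_nulls_add_head_rcu(&obj->obj_node, list);
unlock_chain(); // typically a spin_unlock()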
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 7508f11c5b39..b5869b9574b0 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -561,23 +561,38 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
 		}
 	}
 
-	ct = kmem_cache_zalloc(nf_conntrack_cachep, gfp);
+	/*
+	 * Do not use kmem_cache_zalloc(), as this cache uses
+	 * SLAB_DESTROY_BY_RCU.
+	 */
+	ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
 	if (ct == NULL) {
 		pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
 		atomic_dec(&net->ct.count);
 		return ERR_PTR(-ENOMEM);
 	}
-
+	/*
+	 * Let ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
+	 * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
+	 */
+	memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
+	       sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
 	spin_lock_init(&ct->lock);
-	atomic_set(&ct->ct_general.use, 1);
 	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
+	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
 	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
+	ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL;
 	/* Don't set timer yet: wait for confirmation */
 	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
 #ifdef CONFIG_NET_NS
 	ct->ct_net = net;
 #endif
 
+	/*
+	 * changes to lookup keys must be done before setting refcnt to 1
+	 */
+	smp_wmb();
+	atomic_set(&ct->ct_general.use, 1);
 	return ct;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
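One detail of the memset() bounds is worth spelling out: IP_CT_DIR_MAX indexes one past the last tuplehash[] entry, so &ct->tuplehash[IP_CT_DIR_MAX] is the first byte after the two hash nodes, and sizeof(*ct) minus that offset covers exactly the tail of the object. A self-contained userspace demonstration of the same arithmetic, with a hypothetical layout rather than the real struct nf_conn:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the conntrack layout. */
struct tuple_hash { void *next; void *pprev; unsigned long tuple; };

struct conn {
	int use;			/* refcount, set last in the patch */
	struct tuple_hash tuplehash[2];	/* [0] ORIGINAL, [1] REPLY */
	long status;			/* cleared from here onward */
	long timeout;
};

int main(void)
{
	struct conn c;

	memset(&c, 0xff, sizeof(c));	/* simulate a recycled object */
	/*
	 * Same idea as the patch: tuplehash[2] is one past the end of the
	 * array, so this clears only the tail of the object and leaves
	 * both hash nodes (including .next) untouched.
	 */
	memset(&c.tuplehash[2], 0,
	       sizeof(c) - offsetof(struct conn, tuplehash[2]));
	printf("status=%ld timeout=%ld next still set: %d\n",
	       c.status, c.timeout, c.tuplehash[0].next != NULL);
	return 0;
}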