aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorPablo Neira Ayuso <pablo@netfilter.org>2014-05-28 09:27:18 -0400
committerPablo Neira Ayuso <pablo@netfilter.org>2014-06-02 04:54:31 -0400
commit7632667d26a99d3b33ec8dd522c4086653ff9388 (patch)
tree66149d2c11f33559e123ee0fd2efb9d233d21861 /net
parenta1cee076f4d4774504c62e0f1846a11a6fcb6be3 (diff)
netfilter: nft_rbtree: introduce locking
There's no rbtree rcu version yet, so let's fall back on the spinlock to protect the concurrent access of this structure both from user (to update the set content) and kernel-space (in the packet path).

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Diffstat (limited to 'net')
-rw-r--r--net/netfilter/nft_rbtree.c22
1 file changed, 21 insertions(+), 1 deletion(-)
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index 072e611e9f71..e1836ff88199 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -18,6 +18,8 @@
18#include <linux/netfilter/nf_tables.h> 18#include <linux/netfilter/nf_tables.h>
19#include <net/netfilter/nf_tables.h> 19#include <net/netfilter/nf_tables.h>
20 20
21static DEFINE_SPINLOCK(nft_rbtree_lock);
22
21struct nft_rbtree { 23struct nft_rbtree {
22 struct rb_root root; 24 struct rb_root root;
23}; 25};
@@ -38,6 +40,7 @@ static bool nft_rbtree_lookup(const struct nft_set *set,
38 const struct rb_node *parent = priv->root.rb_node; 40 const struct rb_node *parent = priv->root.rb_node;
39 int d; 41 int d;
40 42
43 spin_lock_bh(&nft_rbtree_lock);
41 while (parent != NULL) { 44 while (parent != NULL) {
42 rbe = rb_entry(parent, struct nft_rbtree_elem, node); 45 rbe = rb_entry(parent, struct nft_rbtree_elem, node);
43 46
@@ -53,6 +56,8 @@ found:
53 goto out; 56 goto out;
54 if (set->flags & NFT_SET_MAP) 57 if (set->flags & NFT_SET_MAP)
55 nft_data_copy(data, rbe->data); 58 nft_data_copy(data, rbe->data);
59
60 spin_unlock_bh(&nft_rbtree_lock);
56 return true; 61 return true;
57 } 62 }
58 } 63 }
@@ -62,6 +67,7 @@ found:
62 goto found; 67 goto found;
63 } 68 }
64out: 69out:
70 spin_unlock_bh(&nft_rbtree_lock);
65 return false; 71 return false;
66} 72}
67 73
@@ -124,9 +130,12 @@ static int nft_rbtree_insert(const struct nft_set *set,
124 !(rbe->flags & NFT_SET_ELEM_INTERVAL_END)) 130 !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
125 nft_data_copy(rbe->data, &elem->data); 131 nft_data_copy(rbe->data, &elem->data);
126 132
133 spin_lock_bh(&nft_rbtree_lock);
127 err = __nft_rbtree_insert(set, rbe); 134 err = __nft_rbtree_insert(set, rbe);
128 if (err < 0) 135 if (err < 0)
129 kfree(rbe); 136 kfree(rbe);
137
138 spin_unlock_bh(&nft_rbtree_lock);
130 return err; 139 return err;
131} 140}
132 141
@@ -136,7 +145,9 @@ static void nft_rbtree_remove(const struct nft_set *set,
136 struct nft_rbtree *priv = nft_set_priv(set); 145 struct nft_rbtree *priv = nft_set_priv(set);
137 struct nft_rbtree_elem *rbe = elem->cookie; 146 struct nft_rbtree_elem *rbe = elem->cookie;
138 147
148 spin_lock_bh(&nft_rbtree_lock);
139 rb_erase(&rbe->node, &priv->root); 149 rb_erase(&rbe->node, &priv->root);
150 spin_unlock_bh(&nft_rbtree_lock);
140 kfree(rbe); 151 kfree(rbe);
141} 152}
142 153
@@ -147,6 +158,7 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
147 struct nft_rbtree_elem *rbe; 158 struct nft_rbtree_elem *rbe;
148 int d; 159 int d;
149 160
161 spin_lock_bh(&nft_rbtree_lock);
150 while (parent != NULL) { 162 while (parent != NULL) {
151 rbe = rb_entry(parent, struct nft_rbtree_elem, node); 163 rbe = rb_entry(parent, struct nft_rbtree_elem, node);
152 164
@@ -161,9 +173,11 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
161 !(rbe->flags & NFT_SET_ELEM_INTERVAL_END)) 173 !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
162 nft_data_copy(&elem->data, rbe->data); 174 nft_data_copy(&elem->data, rbe->data);
163 elem->flags = rbe->flags; 175 elem->flags = rbe->flags;
176 spin_unlock_bh(&nft_rbtree_lock);
164 return 0; 177 return 0;
165 } 178 }
166 } 179 }
180 spin_unlock_bh(&nft_rbtree_lock);
167 return -ENOENT; 181 return -ENOENT;
168} 182}
169 183
@@ -176,6 +190,7 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
176 struct nft_set_elem elem; 190 struct nft_set_elem elem;
177 struct rb_node *node; 191 struct rb_node *node;
178 192
193 spin_lock_bh(&nft_rbtree_lock);
179 for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) { 194 for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
180 if (iter->count < iter->skip) 195 if (iter->count < iter->skip)
181 goto cont; 196 goto cont;
@@ -188,11 +203,14 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
188 elem.flags = rbe->flags; 203 elem.flags = rbe->flags;
189 204
190 iter->err = iter->fn(ctx, set, iter, &elem); 205 iter->err = iter->fn(ctx, set, iter, &elem);
191 if (iter->err < 0) 206 if (iter->err < 0) {
207 spin_unlock_bh(&nft_rbtree_lock);
192 return; 208 return;
209 }
193cont: 210cont:
194 iter->count++; 211 iter->count++;
195 } 212 }
213 spin_unlock_bh(&nft_rbtree_lock);
196} 214}
197 215
198static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[]) 216static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[])
@@ -216,11 +234,13 @@ static void nft_rbtree_destroy(const struct nft_set *set)
216 struct nft_rbtree_elem *rbe; 234 struct nft_rbtree_elem *rbe;
217 struct rb_node *node; 235 struct rb_node *node;
218 236
237 spin_lock_bh(&nft_rbtree_lock);
219 while ((node = priv->root.rb_node) != NULL) { 238 while ((node = priv->root.rb_node) != NULL) {
220 rb_erase(node, &priv->root); 239 rb_erase(node, &priv->root);
221 rbe = rb_entry(node, struct nft_rbtree_elem, node); 240 rbe = rb_entry(node, struct nft_rbtree_elem, node);
222 nft_rbtree_elem_destroy(set, rbe); 241 nft_rbtree_elem_destroy(set, rbe);
223 } 242 }
243 spin_unlock_bh(&nft_rbtree_lock);
224} 244}
225 245
226static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features, 246static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,