author    Pablo Neira Ayuso <pablo@netfilter.org>  2014-02-06 10:15:39 -0500
committer Pablo Neira Ayuso <pablo@netfilter.org>  2014-02-07 08:22:06 -0500
commit    2fb91ddbf8e1dcb207e1e2085473aeaeff975102 (patch)
tree      d546b5942af2d99c7e0f6482f8994f737ed6cb37 /net
parent    bd7fc645dabab423ef362186db2917f3919321d3 (diff)
netfilter: nft_rbtree: fix data handling of end interval elements
This patch fixes several issues related to the handling of end interval elements:

* Chain use underflow with intervals and maps: if you add a rule using intervals+map that introduces a loop, the error path of the rbtree set decrements the chain refcount once for each side of the interval, leading to a chain use counter underflow.

* Don't copy the data part of the end interval element, since this area is uninitialized and confuses the loop detection code.

* Don't allocate room for the data part of end interval elements, since it is unused.

So, after this patch, the idea is that end interval elements don't have a data part.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Acked-by: Patrick McHardy <kaber@trash.net>
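For context, here is a sketch of the rbtree element layout the patch relies on. It is not part of the patch: it is reconstructed from net/netfilter/nft_rbtree.c of roughly this kernel version, and the comments are illustrative. It shows why the data part can simply be omitted for end interval elements: data[] is a flexible array member at the tail of the struct, so an end element is just an allocation that stops after the key.

/* Illustrative sketch, not part of the patch: element layout as in
 * net/netfilter/nft_rbtree.c around this kernel version. */
struct nft_rbtree_elem {
	struct rb_node		node;	/* rbtree linkage, ordered by key */
	u16			flags;	/* NFT_SET_ELEM_INTERVAL_END marks end elements */
	struct nft_data		key;	/* interval boundary */
	struct nft_data		data[];	/* mapping data; after this patch, present only
					 * when the set is a map and the element is not
					 * an interval end */
};

This is why nft_rbtree_insert below adds sizeof(rbe->data[0]) to the allocation only for elements that actually carry data.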
Diffstat (limited to 'net')
-rw-r--r--  net/netfilter/nft_rbtree.c  16
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index ca0c1b231bfe..e21d69d13506 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -69,8 +69,10 @@ static void nft_rbtree_elem_destroy(const struct nft_set *set,
 				    struct nft_rbtree_elem *rbe)
 {
 	nft_data_uninit(&rbe->key, NFT_DATA_VALUE);
-	if (set->flags & NFT_SET_MAP)
+	if (set->flags & NFT_SET_MAP &&
+	    !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
 		nft_data_uninit(rbe->data, set->dtype);
+
 	kfree(rbe);
 }
 
@@ -108,7 +110,8 @@ static int nft_rbtree_insert(const struct nft_set *set,
 	int err;
 
 	size = sizeof(*rbe);
-	if (set->flags & NFT_SET_MAP)
+	if (set->flags & NFT_SET_MAP &&
+	    !(elem->flags & NFT_SET_ELEM_INTERVAL_END))
 		size += sizeof(rbe->data[0]);
 
 	rbe = kzalloc(size, GFP_KERNEL);
@@ -117,7 +120,8 @@ static int nft_rbtree_insert(const struct nft_set *set,
 
 	rbe->flags = elem->flags;
 	nft_data_copy(&rbe->key, &elem->key);
-	if (set->flags & NFT_SET_MAP)
+	if (set->flags & NFT_SET_MAP &&
+	    !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
 		nft_data_copy(rbe->data, &elem->data);
 
 	err = __nft_rbtree_insert(set, rbe);
@@ -153,7 +157,8 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
 			parent = parent->rb_right;
 		else {
 			elem->cookie = rbe;
-			if (set->flags & NFT_SET_MAP)
+			if (set->flags & NFT_SET_MAP &&
+			    !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
 				nft_data_copy(&elem->data, rbe->data);
 			elem->flags = rbe->flags;
 			return 0;
@@ -177,7 +182,8 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
 
 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
 		nft_data_copy(&elem.key, &rbe->key);
-		if (set->flags & NFT_SET_MAP)
+		if (set->flags & NFT_SET_MAP &&
+		    !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
 			nft_data_copy(&elem.data, rbe->data);
 		elem.flags = rbe->flags;
 
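To make the first item in the commit message concrete: for verdict maps, releasing an element's data part is what drops the target chain's use counter. A simplified, illustrative sketch of that release path, roughly as it appears in nf_tables_api.c of this era (not verbatim):

/* Simplified sketch of the verdict release path (nf_tables_api.c),
 * shown for illustration only. */
static void nft_verdict_uninit(const struct nft_data *data)
{
	switch (data->verdict) {
	case NFT_JUMP:
	case NFT_GOTO:
		/* releasing a jump/goto verdict drops the chain reference */
		data->chain->use--;
		break;
	}
}

Before the patch, the error path released a data part for both the start and the end element of an interval, so a single jump reference could be dropped twice, underflowing chain->use; with end elements carrying no data part, the reference is dropped exactly once per interval.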