author    Yotam Gigi <yotamg@mellanox.com>    2017-01-31 08:14:29 -0500
committer David S. Miller <davem@davemloft.net>    2017-02-01 11:57:33 -0500
commit    fd62d9f5c575f0792f150109f1fd24a0d4b3f854 (patch)
tree      d040d634401cf38fcb447c13aff26d0f492a82a1
parent    4993b39ab04b083ff6ee1147e7e7f120feb6bf7f (diff)
net/sched: matchall: Fix configuration race
In the current version, the matchall internal state is split into two
structs: cls_mall_head and cls_mall_filter. This makes little sense, as a
matchall instance supports only one filter, and there is no situation
where one exists and the other does not. In addition, the split led to
races when a filter was deleted while a packet was being processed.

Unify the two structs into one, thus simplifying the process of matchall
creation and deletion. As a result, the new, delete and get callbacks have
a dummy implementation where all the work is done in the destroy and
change callbacks, as was done in cls_cgroup.

Fixes: bf3994d2ed31 ("net/sched: introduce Match-all classifier")
Reported-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Yotam Gigi <yotamg@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--    net/sched/cls_matchall.c    127
1 file changed, 45 insertions(+), 82 deletions(-)
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index f935429bd5ef..b12bc2abea93 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -16,16 +16,11 @@
 #include <net/sch_generic.h>
 #include <net/pkt_cls.h>
 
-struct cls_mall_filter {
+struct cls_mall_head {
        struct tcf_exts exts;
        struct tcf_result res;
        u32 handle;
-       struct rcu_head rcu;
        u32 flags;
-};
-
-struct cls_mall_head {
-       struct cls_mall_filter *filter;
        struct rcu_head rcu;
 };
 
@@ -33,38 +28,29 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                         struct tcf_result *res)
 {
        struct cls_mall_head *head = rcu_dereference_bh(tp->root);
-       struct cls_mall_filter *f = head->filter;
 
-       if (tc_skip_sw(f->flags))
+       if (tc_skip_sw(head->flags))
                return -1;
 
-       return tcf_exts_exec(skb, &f->exts, res);
+       return tcf_exts_exec(skb, &head->exts, res);
 }
 
 static int mall_init(struct tcf_proto *tp)
 {
-       struct cls_mall_head *head;
-
-       head = kzalloc(sizeof(*head), GFP_KERNEL);
-       if (!head)
-               return -ENOBUFS;
-
-       rcu_assign_pointer(tp->root, head);
-
        return 0;
 }
 
-static void mall_destroy_filter(struct rcu_head *head)
+static void mall_destroy_rcu(struct rcu_head *rcu)
 {
-       struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu);
+       struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
+                                                 rcu);
 
-       tcf_exts_destroy(&f->exts);
-
-       kfree(f);
+       tcf_exts_destroy(&head->exts);
+       kfree(head);
 }
 
 static int mall_replace_hw_filter(struct tcf_proto *tp,
-                                 struct cls_mall_filter *f,
+                                 struct cls_mall_head *head,
                                  unsigned long cookie)
 {
        struct net_device *dev = tp->q->dev_queue->dev;
@@ -74,7 +60,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
        offload.type = TC_SETUP_MATCHALL;
        offload.cls_mall = &mall_offload;
        offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
-       offload.cls_mall->exts = &f->exts;
+       offload.cls_mall->exts = &head->exts;
        offload.cls_mall->cookie = cookie;
 
        return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
@@ -82,7 +68,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
 }
 
 static void mall_destroy_hw_filter(struct tcf_proto *tp,
-                                  struct cls_mall_filter *f,
+                                  struct cls_mall_head *head,
                                   unsigned long cookie)
 {
        struct net_device *dev = tp->q->dev_queue->dev;
@@ -103,29 +89,20 @@ static bool mall_destroy(struct tcf_proto *tp, bool force)
 {
        struct cls_mall_head *head = rtnl_dereference(tp->root);
        struct net_device *dev = tp->q->dev_queue->dev;
-       struct cls_mall_filter *f = head->filter;
 
-       if (!force && f)
-               return false;
+       if (!head)
+               return true;
 
-       if (f) {
-               if (tc_should_offload(dev, tp, f->flags))
-                       mall_destroy_hw_filter(tp, f, (unsigned long) f);
+       if (tc_should_offload(dev, tp, head->flags))
+               mall_destroy_hw_filter(tp, head, (unsigned long) head);
 
-               call_rcu(&f->rcu, mall_destroy_filter);
-       }
-       kfree_rcu(head, rcu);
+       call_rcu(&head->rcu, mall_destroy_rcu);
        return true;
 }
 
 static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
 {
-       struct cls_mall_head *head = rtnl_dereference(tp->root);
-       struct cls_mall_filter *f = head->filter;
-
-       if (f && f->handle == handle)
-               return (unsigned long) f;
-       return 0;
+       return 0UL;
 }
 
 static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
@@ -134,7 +111,7 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
 };
 
 static int mall_set_parms(struct net *net, struct tcf_proto *tp,
-                         struct cls_mall_filter *f,
+                         struct cls_mall_head *head,
                          unsigned long base, struct nlattr **tb,
                          struct nlattr *est, bool ovr)
 {
@@ -147,11 +124,11 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
                return err;
 
        if (tb[TCA_MATCHALL_CLASSID]) {
-               f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
-               tcf_bind_filter(tp, &f->res, base);
+               head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
+               tcf_bind_filter(tp, &head->res, base);
        }
 
-       tcf_exts_change(tp, &f->exts, &e);
+       tcf_exts_change(tp, &head->exts, &e);
 
        return 0;
 }
@@ -162,21 +139,17 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
                       unsigned long *arg, bool ovr)
 {
        struct cls_mall_head *head = rtnl_dereference(tp->root);
-       struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
        struct net_device *dev = tp->q->dev_queue->dev;
-       struct cls_mall_filter *f;
        struct nlattr *tb[TCA_MATCHALL_MAX + 1];
+       struct cls_mall_head *new;
        u32 flags = 0;
        int err;
 
        if (!tca[TCA_OPTIONS])
                return -EINVAL;
 
-       if (head->filter)
-               return -EBUSY;
-
-       if (fold)
-               return -EINVAL;
+       if (head)
+               return -EEXIST;
 
        err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
                               tca[TCA_OPTIONS], mall_policy);
@@ -189,23 +162,23 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
                        return -EINVAL;
        }
 
-       f = kzalloc(sizeof(*f), GFP_KERNEL);
-       if (!f)
+       new = kzalloc(sizeof(*new), GFP_KERNEL);
+       if (!new)
                return -ENOBUFS;
 
-       tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0);
+       tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
 
        if (!handle)
                handle = 1;
-       f->handle = handle;
-       f->flags = flags;
+       new->handle = handle;
+       new->flags = flags;
 
-       err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
+       err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
        if (err)
                goto errout;
 
        if (tc_should_offload(dev, tp, flags)) {
-               err = mall_replace_hw_filter(tp, f, (unsigned long) f);
+               err = mall_replace_hw_filter(tp, new, (unsigned long) new);
                if (err) {
                        if (tc_skip_sw(flags))
                                goto errout;
@@ -214,39 +187,29 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
                }
        }
 
-       *arg = (unsigned long) f;
-       rcu_assign_pointer(head->filter, f);
-
+       *arg = (unsigned long) head;
+       rcu_assign_pointer(tp->root, new);
+       if (head)
+               call_rcu(&head->rcu, mall_destroy_rcu);
        return 0;
 
 errout:
-       kfree(f);
+       kfree(new);
        return err;
 }
 
 static int mall_delete(struct tcf_proto *tp, unsigned long arg)
 {
-       struct cls_mall_head *head = rtnl_dereference(tp->root);
-       struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
-       struct net_device *dev = tp->q->dev_queue->dev;
-
-       if (tc_should_offload(dev, tp, f->flags))
-               mall_destroy_hw_filter(tp, f, (unsigned long) f);
-
-       RCU_INIT_POINTER(head->filter, NULL);
-       tcf_unbind_filter(tp, &f->res);
-       call_rcu(&f->rcu, mall_destroy_filter);
-       return 0;
+       return -EOPNOTSUPP;
 }
 
 static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
        struct cls_mall_head *head = rtnl_dereference(tp->root);
-       struct cls_mall_filter *f = head->filter;
 
        if (arg->count < arg->skip)
                goto skip;
-       if (arg->fn(tp, (unsigned long) f, arg) < 0)
+       if (arg->fn(tp, (unsigned long) head, arg) < 0)
                arg->stop = 1;
 skip:
        arg->count++;
@@ -255,28 +218,28 @@ skip:
 static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
                     struct sk_buff *skb, struct tcmsg *t)
 {
-       struct cls_mall_filter *f = (struct cls_mall_filter *) fh;
+       struct cls_mall_head *head = (struct cls_mall_head *) fh;
        struct nlattr *nest;
 
-       if (!f)
+       if (!head)
                return skb->len;
 
-       t->tcm_handle = f->handle;
+       t->tcm_handle = head->handle;
 
        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (!nest)
                goto nla_put_failure;
 
-       if (f->res.classid &&
-           nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid))
+       if (head->res.classid &&
+           nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
                goto nla_put_failure;
 
-       if (tcf_exts_dump(skb, &f->exts))
+       if (tcf_exts_dump(skb, &head->exts))
                goto nla_put_failure;
 
        nla_nest_end(skb, nest);
 
-       if (tcf_exts_dump_stats(skb, &f->exts) < 0)
+       if (tcf_exts_dump_stats(skb, &head->exts) < 0)
                goto nla_put_failure;
 
        return skb->len;