author		Mario Kicherer <dev@kicherer.org>	2017-02-21 06:19:47 -0500
committer	Marc Kleine-Budde <mkl@pengutronix.de>	2017-04-04 11:35:58 -0400
commit		8e8cda6d737d356054c9eeef642aec0e8ae7e6bc
tree		bdc78138b5beca98f398c86ad65c526bbea2ebf8 /net/can/raw.c
parent		dabf54dd1c6369160f8d4c793a8613dfb4e7848a
can: initial support for network namespaces
This patch adds initial support for network namespaces. The changes
enable namespace support only in the CAN raw, proc and af_can code; GW
and BCM keep their existing checks that restrict them to the initial
network namespace.
The patch boils down to moving the global structures, i.e. the global
filter lists and their /proc stats, into a per-namespace structure and
passing the corresponding "struct net" around in many places.
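For readers unfamiliar with the pernet plumbing this relies on: per-namespace
state is normally wired up through the kernel's pernet_operations interface,
so every namespace gets its own copy on creation and has it torn down on exit.
The sketch below is illustrative only; the names (can_ns_state, can_ns_init,
...) are hypothetical and are not the code this patch adds (the real
per-namespace state lives in the af_can/proc changes, outside this
net/can/raw.c excerpt).

/*
 * Illustrative sketch only -- not the code added by this patch.
 * The struct and function names here are hypothetical.
 */
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <net/net_namespace.h>

struct can_ns_state {				/* hypothetical per-netns container */
	struct hlist_head rx_filter_list;	/* formerly a single global list */
	struct proc_dir_entry *proc_dir;	/* formerly a single global /proc dir */
};

static int __net_init can_ns_init(struct net *net)
{
	/* allocate and set up this namespace's private state */
	return 0;
}

static void __net_exit can_ns_exit(struct net *net)
{
	/* release the state when the namespace goes away */
}

static struct pernet_operations can_ns_ops = {
	.init = can_ns_init,
	.exit = can_ns_exit,
};

/* registered once at subsystem init, e.g.:
 *	err = register_pernet_subsys(&can_ns_ops);
 */

Once each namespace owns its own lists, every helper that walks them needs a
"struct net" handle, which is exactly what the hunks below thread through
raw_enable_filters() and friends.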
Changes since v1:
- rebased on current HEAD (2bfe01e)
- fixed overlong line
Signed-off-by: Mario Kicherer <dev@kicherer.org>
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
Diffstat (limited to 'net/can/raw.c')
-rw-r--r--	net/can/raw.c	92
1 file changed, 52 insertions, 40 deletions
diff --git a/net/can/raw.c b/net/can/raw.c
index 6dc546a06673..864c80dbdb72 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -181,20 +181,21 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
 	kfree_skb(skb);
 }
 
-static int raw_enable_filters(struct net_device *dev, struct sock *sk,
-			      struct can_filter *filter, int count)
+static int raw_enable_filters(struct net *net, struct net_device *dev,
+			      struct sock *sk, struct can_filter *filter,
+			      int count)
 {
 	int err = 0;
 	int i;
 
 	for (i = 0; i < count; i++) {
-		err = can_rx_register(dev, filter[i].can_id,
+		err = can_rx_register(net, dev, filter[i].can_id,
 				      filter[i].can_mask,
 				      raw_rcv, sk, "raw", sk);
 		if (err) {
 			/* clean up successfully registered filters */
 			while (--i >= 0)
-				can_rx_unregister(dev, filter[i].can_id,
+				can_rx_unregister(net, dev, filter[i].can_id,
 						  filter[i].can_mask,
 						  raw_rcv, sk);
 			break;
@@ -204,57 +205,62 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk,
 	return err;
 }
 
-static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
-				can_err_mask_t err_mask)
+static int raw_enable_errfilter(struct net *net, struct net_device *dev,
+				struct sock *sk, can_err_mask_t err_mask)
 {
 	int err = 0;
 
 	if (err_mask)
-		err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
+		err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG,
 				      raw_rcv, sk, "raw", sk);
 
 	return err;
 }
 
-static void raw_disable_filters(struct net_device *dev, struct sock *sk,
-				struct can_filter *filter, int count)
+static void raw_disable_filters(struct net *net, struct net_device *dev,
+				struct sock *sk, struct can_filter *filter,
+				int count)
 {
 	int i;
 
 	for (i = 0; i < count; i++)
-		can_rx_unregister(dev, filter[i].can_id, filter[i].can_mask,
-				  raw_rcv, sk);
+		can_rx_unregister(net, dev, filter[i].can_id,
+				  filter[i].can_mask, raw_rcv, sk);
 }
 
-static inline void raw_disable_errfilter(struct net_device *dev,
+static inline void raw_disable_errfilter(struct net *net,
+					 struct net_device *dev,
 					 struct sock *sk,
 					 can_err_mask_t err_mask)
 
 {
 	if (err_mask)
-		can_rx_unregister(dev, 0, err_mask | CAN_ERR_FLAG,
+		can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG,
 				  raw_rcv, sk);
 }
 
-static inline void raw_disable_allfilters(struct net_device *dev,
+static inline void raw_disable_allfilters(struct net *net,
+					  struct net_device *dev,
 					  struct sock *sk)
 {
 	struct raw_sock *ro = raw_sk(sk);
 
-	raw_disable_filters(dev, sk, ro->filter, ro->count);
-	raw_disable_errfilter(dev, sk, ro->err_mask);
+	raw_disable_filters(net, dev, sk, ro->filter, ro->count);
+	raw_disable_errfilter(net, dev, sk, ro->err_mask);
 }
 
-static int raw_enable_allfilters(struct net_device *dev, struct sock *sk)
+static int raw_enable_allfilters(struct net *net, struct net_device *dev,
+				 struct sock *sk)
 {
 	struct raw_sock *ro = raw_sk(sk);
 	int err;
 
-	err = raw_enable_filters(dev, sk, ro->filter, ro->count);
+	err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
 	if (!err) {
-		err = raw_enable_errfilter(dev, sk, ro->err_mask);
+		err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
 		if (err)
-			raw_disable_filters(dev, sk, ro->filter, ro->count);
+			raw_disable_filters(net, dev, sk, ro->filter,
+					    ro->count);
 	}
 
 	return err;
@@ -267,7 +273,7 @@ static int raw_notifier(struct notifier_block *nb,
 	struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
 	struct sock *sk = &ro->sk;
 
-	if (!net_eq(dev_net(dev), &init_net))
+	if (!net_eq(dev_net(dev), sock_net(sk)))
 		return NOTIFY_DONE;
 
 	if (dev->type != ARPHRD_CAN)
@@ -282,7 +288,7 @@ static int raw_notifier(struct notifier_block *nb,
 		lock_sock(sk);
 		/* remove current filters & unregister */
 		if (ro->bound)
-			raw_disable_allfilters(dev, sk);
+			raw_disable_allfilters(dev_net(dev), dev, sk);
 
 		if (ro->count > 1)
 			kfree(ro->filter);
@@ -358,13 +364,13 @@ static int raw_release(struct socket *sock)
 		if (ro->ifindex) {
 			struct net_device *dev;
 
-			dev = dev_get_by_index(&init_net, ro->ifindex);
+			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
 			if (dev) {
-				raw_disable_allfilters(dev, sk);
+				raw_disable_allfilters(dev_net(dev), dev, sk);
 				dev_put(dev);
 			}
 		} else
-			raw_disable_allfilters(NULL, sk);
+			raw_disable_allfilters(sock_net(sk), NULL, sk);
 	}
 
 	if (ro->count > 1)
@@ -404,7 +410,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
 	if (addr->can_ifindex) {
 		struct net_device *dev;
 
-		dev = dev_get_by_index(&init_net, addr->can_ifindex);
+		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
 		if (!dev) {
 			err = -ENODEV;
 			goto out;
@@ -420,13 +426,13 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
 		ifindex = dev->ifindex;
 
 		/* filters set by default/setsockopt */
-		err = raw_enable_allfilters(dev, sk);
+		err = raw_enable_allfilters(sock_net(sk), dev, sk);
 		dev_put(dev);
 	} else {
 		ifindex = 0;
 
 		/* filters set by default/setsockopt */
-		err = raw_enable_allfilters(NULL, sk);
+		err = raw_enable_allfilters(sock_net(sk), NULL, sk);
 	}
 
 	if (!err) {
@@ -435,13 +441,15 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
 			if (ro->ifindex) {
 				struct net_device *dev;
 
-				dev = dev_get_by_index(&init_net, ro->ifindex);
+				dev = dev_get_by_index(sock_net(sk),
+						       ro->ifindex);
 				if (dev) {
-					raw_disable_allfilters(dev, sk);
+					raw_disable_allfilters(dev_net(dev),
+							       dev, sk);
 					dev_put(dev);
 				}
 			} else
-				raw_disable_allfilters(NULL, sk);
+				raw_disable_allfilters(sock_net(sk), NULL, sk);
 		}
 		ro->ifindex = ifindex;
 		ro->bound = 1;
@@ -517,15 +525,16 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 		lock_sock(sk);
 
 		if (ro->bound && ro->ifindex)
-			dev = dev_get_by_index(&init_net, ro->ifindex);
+			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
 
 		if (ro->bound) {
 			/* (try to) register the new filters */
 			if (count == 1)
-				err = raw_enable_filters(dev, sk, &sfilter, 1);
+				err = raw_enable_filters(sock_net(sk), dev, sk,
+							 &sfilter, 1);
 			else
-				err = raw_enable_filters(dev, sk, filter,
-							 count);
+				err = raw_enable_filters(sock_net(sk), dev, sk,
+							 filter, count);
 			if (err) {
 				if (count > 1)
 					kfree(filter);
@@ -533,7 +542,8 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 			}
 
 			/* remove old filter registrations */
-			raw_disable_filters(dev, sk, ro->filter, ro->count);
+			raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
+					    ro->count);
 		}
 
 		/* remove old filter space */
@@ -569,18 +579,20 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 		lock_sock(sk);
 
 		if (ro->bound && ro->ifindex)
-			dev = dev_get_by_index(&init_net, ro->ifindex);
+			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
 
 		/* remove current error mask */
 		if (ro->bound) {
 			/* (try to) register the new err_mask */
-			err = raw_enable_errfilter(dev, sk, err_mask);
+			err = raw_enable_errfilter(sock_net(sk), dev, sk,
+						   err_mask);
 
 			if (err)
 				goto out_err;
 
 			/* remove old err_mask registration */
-			raw_disable_errfilter(dev, sk, ro->err_mask);
+			raw_disable_errfilter(sock_net(sk), dev, sk,
+					      ro->err_mask);
 		}
 
 		/* link new err_mask to the socket */
@@ -741,7 +753,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 		return -EINVAL;
 	}
 
-	dev = dev_get_by_index(&init_net, ifindex);
+	dev = dev_get_by_index(sock_net(sk), ifindex);
 	if (!dev)
 		return -ENXIO;
 
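The net effect of the hunks above is that device lookups and filter
(un)registrations now use sock_net(sk) or dev_net(dev) instead of the
hard-coded &init_net, so a CAN_RAW socket operates on the interfaces of
its own namespace. A minimal userspace sketch follows; the interface name
"vcan0" is only an example and is assumed to already exist in the calling
process's namespace.

/*
 * Minimal userspace sketch; assumes a CAN interface (here "vcan0",
 * name chosen only for illustration) exists in the current netns.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct ifreq ifr;
	int s;

	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (s < 0)
		return 1;

	/* the ifindex lookup happens in this process's own namespace */
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "vcan0", IFNAMSIZ - 1);
	if (ioctl(s, SIOCGIFINDEX, &ifr) < 0)
		return 1;
	addr.can_ifindex = ifr.ifr_ifindex;

	/* raw_bind() now uses dev_get_by_index(sock_net(sk), ...) */
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	puts("bound in the current network namespace");
	close(s);
	return 0;
}

Run inside a non-initial namespace (for example via "ip netns exec"), the
SIOCGIFINDEX lookup and bind() above resolve against that namespace's
device table rather than init_net.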