diff options
Diffstat (limited to 'net/can/af_can.c')
-rw-r--r-- | net/can/af_can.c | 124 |
1 files changed, 40 insertions, 84 deletions
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 51adc4c2b860..702be5a2c956 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -77,8 +77,8 @@ static int stats_timer __read_mostly = 1; | |||
77 | module_param(stats_timer, int, S_IRUGO); | 77 | module_param(stats_timer, int, S_IRUGO); |
78 | MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)"); | 78 | MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)"); |
79 | 79 | ||
80 | HLIST_HEAD(can_rx_dev_list); | 80 | /* receive filters subscribed for 'all' CAN devices */ |
81 | static struct dev_rcv_lists can_rx_alldev_list; | 81 | struct dev_rcv_lists can_rx_alldev_list; |
82 | static DEFINE_SPINLOCK(can_rcvlists_lock); | 82 | static DEFINE_SPINLOCK(can_rcvlists_lock); |
83 | 83 | ||
84 | static struct kmem_cache *rcv_cache __read_mostly; | 84 | static struct kmem_cache *rcv_cache __read_mostly; |
@@ -292,28 +292,10 @@ EXPORT_SYMBOL(can_send); | |||
292 | 292 | ||
293 | static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev) | 293 | static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev) |
294 | { | 294 | { |
295 | struct dev_rcv_lists *d = NULL; | 295 | if (!dev) |
296 | struct hlist_node *n; | 296 | return &can_rx_alldev_list; |
297 | 297 | else | |
298 | /* | 298 | return (struct dev_rcv_lists *)dev->ml_priv; |
299 | * find receive list for this device | ||
300 | * | ||
301 | * The hlist_for_each_entry*() macros curse through the list | ||
302 | * using the pointer variable n and set d to the containing | ||
303 | * struct in each list iteration. Therefore, after list | ||
304 | * iteration, d is unmodified when the list is empty, and it | ||
305 | * points to last list element, when the list is non-empty | ||
306 | * but no match in the loop body is found. I.e. d is *not* | ||
307 | * NULL when no match is found. We can, however, use the | ||
308 | * cursor variable n to decide if a match was found. | ||
309 | */ | ||
310 | |||
311 | hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) { | ||
312 | if (d->dev == dev) | ||
313 | break; | ||
314 | } | ||
315 | |||
316 | return n ? d : NULL; | ||
317 | } | 299 | } |
318 | 300 | ||
319 | /** | 301 | /** |
@@ -433,6 +415,9 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, | |||
433 | 415 | ||
434 | /* insert new receiver (dev,canid,mask) -> (func,data) */ | 416 | /* insert new receiver (dev,canid,mask) -> (func,data) */ |
435 | 417 | ||
418 | if (dev && dev->type != ARPHRD_CAN) | ||
419 | return -ENODEV; | ||
420 | |||
436 | r = kmem_cache_alloc(rcv_cache, GFP_KERNEL); | 421 | r = kmem_cache_alloc(rcv_cache, GFP_KERNEL); |
437 | if (!r) | 422 | if (!r) |
438 | return -ENOMEM; | 423 | return -ENOMEM; |
@@ -468,16 +453,6 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, | |||
468 | EXPORT_SYMBOL(can_rx_register); | 453 | EXPORT_SYMBOL(can_rx_register); |
469 | 454 | ||
470 | /* | 455 | /* |
471 | * can_rx_delete_device - rcu callback for dev_rcv_lists structure removal | ||
472 | */ | ||
473 | static void can_rx_delete_device(struct rcu_head *rp) | ||
474 | { | ||
475 | struct dev_rcv_lists *d = container_of(rp, struct dev_rcv_lists, rcu); | ||
476 | |||
477 | kfree(d); | ||
478 | } | ||
479 | |||
480 | /* | ||
481 | * can_rx_delete_receiver - rcu callback for single receiver entry removal | 456 | * can_rx_delete_receiver - rcu callback for single receiver entry removal |
482 | */ | 457 | */ |
483 | static void can_rx_delete_receiver(struct rcu_head *rp) | 458 | static void can_rx_delete_receiver(struct rcu_head *rp) |
@@ -506,6 +481,9 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, | |||
506 | struct hlist_node *next; | 481 | struct hlist_node *next; |
507 | struct dev_rcv_lists *d; | 482 | struct dev_rcv_lists *d; |
508 | 483 | ||
484 | if (dev && dev->type != ARPHRD_CAN) | ||
485 | return; | ||
486 | |||
509 | spin_lock(&can_rcvlists_lock); | 487 | spin_lock(&can_rcvlists_lock); |
510 | 488 | ||
511 | d = find_dev_rcv_lists(dev); | 489 | d = find_dev_rcv_lists(dev); |
@@ -541,7 +519,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, | |||
541 | "dev %s, id %03X, mask %03X\n", | 519 | "dev %s, id %03X, mask %03X\n", |
542 | DNAME(dev), can_id, mask); | 520 | DNAME(dev), can_id, mask); |
543 | r = NULL; | 521 | r = NULL; |
544 | d = NULL; | ||
545 | goto out; | 522 | goto out; |
546 | } | 523 | } |
547 | 524 | ||
@@ -552,10 +529,10 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, | |||
552 | can_pstats.rcv_entries--; | 529 | can_pstats.rcv_entries--; |
553 | 530 | ||
554 | /* remove device structure requested by NETDEV_UNREGISTER */ | 531 | /* remove device structure requested by NETDEV_UNREGISTER */ |
555 | if (d->remove_on_zero_entries && !d->entries) | 532 | if (d->remove_on_zero_entries && !d->entries) { |
556 | hlist_del_rcu(&d->list); | 533 | kfree(d); |
557 | else | 534 | dev->ml_priv = NULL; |
558 | d = NULL; | 535 | } |
559 | 536 | ||
560 | out: | 537 | out: |
561 | spin_unlock(&can_rcvlists_lock); | 538 | spin_unlock(&can_rcvlists_lock); |
@@ -563,10 +540,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, | |||
563 | /* schedule the receiver item for deletion */ | 540 | /* schedule the receiver item for deletion */ |
564 | if (r) | 541 | if (r) |
565 | call_rcu(&r->rcu, can_rx_delete_receiver); | 542 | call_rcu(&r->rcu, can_rx_delete_receiver); |
566 | |||
567 | /* schedule the device structure for deletion */ | ||
568 | if (d) | ||
569 | call_rcu(&d->rcu, can_rx_delete_device); | ||
570 | } | 543 | } |
571 | EXPORT_SYMBOL(can_rx_unregister); | 544 | EXPORT_SYMBOL(can_rx_unregister); |
572 | 545 | ||
@@ -780,48 +753,35 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg, | |||
780 | 753 | ||
781 | case NETDEV_REGISTER: | 754 | case NETDEV_REGISTER: |
782 | 755 | ||
783 | /* | 756 | /* create new dev_rcv_lists for this device */ |
784 | * create new dev_rcv_lists for this device | ||
785 | * | ||
786 | * N.B. zeroing the struct is the correct initialization | ||
787 | * for the embedded hlist_head structs. | ||
788 | * Another list type, e.g. list_head, would require | ||
789 | * explicit initialization. | ||
790 | */ | ||
791 | |||
792 | d = kzalloc(sizeof(*d), GFP_KERNEL); | 757 | d = kzalloc(sizeof(*d), GFP_KERNEL); |
793 | if (!d) { | 758 | if (!d) { |
794 | printk(KERN_ERR | 759 | printk(KERN_ERR |
795 | "can: allocation of receive list failed\n"); | 760 | "can: allocation of receive list failed\n"); |
796 | return NOTIFY_DONE; | 761 | return NOTIFY_DONE; |
797 | } | 762 | } |
798 | d->dev = dev; | 763 | BUG_ON(dev->ml_priv); |
799 | 764 | dev->ml_priv = d; | |
800 | spin_lock(&can_rcvlists_lock); | ||
801 | hlist_add_head_rcu(&d->list, &can_rx_dev_list); | ||
802 | spin_unlock(&can_rcvlists_lock); | ||
803 | 765 | ||
804 | break; | 766 | break; |
805 | 767 | ||
806 | case NETDEV_UNREGISTER: | 768 | case NETDEV_UNREGISTER: |
807 | spin_lock(&can_rcvlists_lock); | 769 | spin_lock(&can_rcvlists_lock); |
808 | 770 | ||
809 | d = find_dev_rcv_lists(dev); | 771 | d = dev->ml_priv; |
810 | if (d) { | 772 | if (d) { |
811 | if (d->entries) { | 773 | if (d->entries) |
812 | d->remove_on_zero_entries = 1; | 774 | d->remove_on_zero_entries = 1; |
813 | d = NULL; | 775 | else { |
814 | } else | 776 | kfree(d); |
815 | hlist_del_rcu(&d->list); | 777 | dev->ml_priv = NULL; |
778 | } | ||
816 | } else | 779 | } else |
817 | printk(KERN_ERR "can: notifier: receive list not " | 780 | printk(KERN_ERR "can: notifier: receive list not " |
818 | "found for dev %s\n", dev->name); | 781 | "found for dev %s\n", dev->name); |
819 | 782 | ||
820 | spin_unlock(&can_rcvlists_lock); | 783 | spin_unlock(&can_rcvlists_lock); |
821 | 784 | ||
822 | if (d) | ||
823 | call_rcu(&d->rcu, can_rx_delete_device); | ||
824 | |||
825 | break; | 785 | break; |
826 | } | 786 | } |
827 | 787 | ||
@@ -853,21 +813,13 @@ static __init int can_init(void) | |||
853 | { | 813 | { |
854 | printk(banner); | 814 | printk(banner); |
855 | 815 | ||
816 | memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list)); | ||
817 | |||
856 | rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver), | 818 | rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver), |
857 | 0, 0, NULL); | 819 | 0, 0, NULL); |
858 | if (!rcv_cache) | 820 | if (!rcv_cache) |
859 | return -ENOMEM; | 821 | return -ENOMEM; |
860 | 822 | ||
861 | /* | ||
862 | * Insert can_rx_alldev_list for reception on all devices. | ||
863 | * This struct is zero initialized which is correct for the | ||
864 | * embedded hlist heads, the dev pointer, and the entries counter. | ||
865 | */ | ||
866 | |||
867 | spin_lock(&can_rcvlists_lock); | ||
868 | hlist_add_head_rcu(&can_rx_alldev_list.list, &can_rx_dev_list); | ||
869 | spin_unlock(&can_rcvlists_lock); | ||
870 | |||
871 | if (stats_timer) { | 823 | if (stats_timer) { |
872 | /* the statistics are updated every second (timer triggered) */ | 824 | /* the statistics are updated every second (timer triggered) */ |
873 | setup_timer(&can_stattimer, can_stat_update, 0); | 825 | setup_timer(&can_stattimer, can_stat_update, 0); |
@@ -887,8 +839,7 @@ static __init int can_init(void) | |||
887 | 839 | ||
888 | static __exit void can_exit(void) | 840 | static __exit void can_exit(void) |
889 | { | 841 | { |
890 | struct dev_rcv_lists *d; | 842 | struct net_device *dev; |
891 | struct hlist_node *n, *next; | ||
892 | 843 | ||
893 | if (stats_timer) | 844 | if (stats_timer) |
894 | del_timer(&can_stattimer); | 845 | del_timer(&can_stattimer); |
@@ -900,14 +851,19 @@ static __exit void can_exit(void) | |||
900 | unregister_netdevice_notifier(&can_netdev_notifier); | 851 | unregister_netdevice_notifier(&can_netdev_notifier); |
901 | sock_unregister(PF_CAN); | 852 | sock_unregister(PF_CAN); |
902 | 853 | ||
903 | /* remove can_rx_dev_list */ | 854 | /* remove created dev_rcv_lists from still registered CAN devices */ |
904 | spin_lock(&can_rcvlists_lock); | 855 | rcu_read_lock(); |
905 | hlist_del(&can_rx_alldev_list.list); | 856 | for_each_netdev_rcu(&init_net, dev) { |
906 | hlist_for_each_entry_safe(d, n, next, &can_rx_dev_list, list) { | 857 | if (dev->type == ARPHRD_CAN && dev->ml_priv){ |
907 | hlist_del(&d->list); | 858 | |
908 | kfree(d); | 859 | struct dev_rcv_lists *d = dev->ml_priv; |
860 | |||
861 | BUG_ON(d->entries); | ||
862 | kfree(d); | ||
863 | dev->ml_priv = NULL; | ||
864 | } | ||
909 | } | 865 | } |
910 | spin_unlock(&can_rcvlists_lock); | 866 | rcu_read_unlock(); |
911 | 867 | ||
912 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ | 868 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ |
913 | 869 | ||