author     Oliver Hartkopp <socketcan@hartkopp.net>   2009-12-25 01:47:47 -0500
committer  David S. Miller <davem@davemloft.net>      2010-01-04 00:31:03 -0500
commit     20dd3850bcf860561496827b711fa10fecf6e787
tree       95ecd549717f2d654b870ffb44d342c04ab048b6   /net/can/af_can.c
parent     75ed0a897208c3273fd8dc0f71e1417dba5a049b
can: Speed up CAN frame reception by using ml_priv
This patch removes the hlist that contained the CAN receiver filter lists.
It uses the 'midlayer private' pointer ml_priv to link the filter lists directly
to the CAN netdevice, which makes it possible to omit the walk through the
complete hlist of CAN devices for each received CAN frame.
This patch is tested and does not remove any locking.
Signed-off-by: Oliver Hartkopp <oliver@hartkopp.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
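
The gain comes from replacing a per-frame walk over a global device list with a
single pointer dereference. Below is a minimal, self-contained userspace sketch
of that difference; the struct layouts and function names (find_old, find_new,
all_devs, alldev_list) are simplified illustrations for this page, not the real
af_can.c definitions.

/*
 * Minimal userspace sketch (not the kernel code) contrasting the old
 * and new lookup of a device's receive-filter lists.
 */
#include <stdio.h>

struct net_device {
        const char *name;
        void *ml_priv;                  /* 'midlayer private' pointer */
};

struct dev_rcv_lists {
        const struct net_device *dev;   /* only needed by the old scheme */
        int entries;
        struct dev_rcv_lists *next;     /* stand-in for the old hlist linkage */
};

/* old scheme: walk a global list of all CAN devices for every received frame */
static struct dev_rcv_lists *find_old(struct dev_rcv_lists *all_devs,
                                      const struct net_device *dev)
{
        struct dev_rcv_lists *d;

        for (d = all_devs; d; d = d->next)
                if (d->dev == dev)
                        return d;
        return NULL;
}

/* new scheme: a single pointer dereference via dev->ml_priv */
static struct dev_rcv_lists *find_new(struct dev_rcv_lists *alldev_list,
                                      struct net_device *dev)
{
        if (!dev)
                return alldev_list;
        return (struct dev_rcv_lists *)dev->ml_priv;
}

int main(void)
{
        struct net_device can0 = { .name = "can0", .ml_priv = NULL };
        struct dev_rcv_lists d0 = { .dev = &can0, .entries = 1, .next = NULL };
        struct dev_rcv_lists alldev = { .dev = NULL, .entries = 0, .next = &d0 };

        can0.ml_priv = &d0;     /* what the NETDEV_REGISTER notifier does in the patch */

        printf("old walk finds %p, ml_priv lookup finds %p\n",
               (void *)find_old(&alldev, &can0), (void *)find_new(&alldev, &can0));
        return 0;
}

In the patched af_can.c the per-device struct dev_rcv_lists is allocated in the
NETDEV_REGISTER notifier and hung off dev->ml_priv, so find_dev_rcv_lists() no
longer has to scan a can_rx_dev_list hlist on the receive path.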
Diffstat (limited to 'net/can/af_can.c')
-rw-r--r--  net/can/af_can.c  118
1 file changed, 34 insertions(+), 84 deletions(-)
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 51adc4c2b860..bc18b084ffdb 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -77,8 +77,8 @@ static int stats_timer __read_mostly = 1;
 module_param(stats_timer, int, S_IRUGO);
 MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
 
-HLIST_HEAD(can_rx_dev_list);
-static struct dev_rcv_lists can_rx_alldev_list;
+/* receive filters subscribed for 'all' CAN devices */
+struct dev_rcv_lists can_rx_alldev_list;
 static DEFINE_SPINLOCK(can_rcvlists_lock);
 
 static struct kmem_cache *rcv_cache __read_mostly;
@@ -292,28 +292,10 @@ EXPORT_SYMBOL(can_send);
 
 static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
 {
-        struct dev_rcv_lists *d = NULL;
-        struct hlist_node *n;
-
-        /*
-         * find receive list for this device
-         *
-         * The hlist_for_each_entry*() macros curse through the list
-         * using the pointer variable n and set d to the containing
-         * struct in each list iteration. Therefore, after list
-         * iteration, d is unmodified when the list is empty, and it
-         * points to last list element, when the list is non-empty
-         * but no match in the loop body is found. I.e. d is *not*
-         * NULL when no match is found. We can, however, use the
-         * cursor variable n to decide if a match was found.
-         */
-
-        hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
-                if (d->dev == dev)
-                        break;
-        }
-
-        return n ? d : NULL;
+        if (!dev)
+                return &can_rx_alldev_list;
+        else
+                return (struct dev_rcv_lists *)dev->ml_priv;
 }
 
 /**
@@ -468,16 +450,6 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
 EXPORT_SYMBOL(can_rx_register);
 
 /*
- * can_rx_delete_device - rcu callback for dev_rcv_lists structure removal
- */
-static void can_rx_delete_device(struct rcu_head *rp)
-{
-        struct dev_rcv_lists *d = container_of(rp, struct dev_rcv_lists, rcu);
-
-        kfree(d);
-}
-
-/*
  * can_rx_delete_receiver - rcu callback for single receiver entry removal
  */
 static void can_rx_delete_receiver(struct rcu_head *rp)
@@ -541,7 +513,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
                        "dev %s, id %03X, mask %03X\n",
                        DNAME(dev), can_id, mask);
                 r = NULL;
-                d = NULL;
                 goto out;
         }
 
@@ -552,10 +523,10 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
         can_pstats.rcv_entries--;
 
         /* remove device structure requested by NETDEV_UNREGISTER */
-        if (d->remove_on_zero_entries && !d->entries)
-                hlist_del_rcu(&d->list);
-        else
-                d = NULL;
+        if (d->remove_on_zero_entries && !d->entries) {
+                kfree(d);
+                dev->ml_priv = NULL;
+        }
 
  out:
         spin_unlock(&can_rcvlists_lock);
@@ -563,10 +534,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
         /* schedule the receiver item for deletion */
         if (r)
                 call_rcu(&r->rcu, can_rx_delete_receiver);
-
-        /* schedule the device structure for deletion */
-        if (d)
-                call_rcu(&d->rcu, can_rx_delete_device);
 }
 EXPORT_SYMBOL(can_rx_unregister);
 
@@ -780,48 +747,35 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
 
         case NETDEV_REGISTER:
 
-                /*
-                 * create new dev_rcv_lists for this device
-                 *
-                 * N.B. zeroing the struct is the correct initialization
-                 * for the embedded hlist_head structs.
-                 * Another list type, e.g. list_head, would require
-                 * explicit initialization.
-                 */
-
+                /* create new dev_rcv_lists for this device */
                 d = kzalloc(sizeof(*d), GFP_KERNEL);
                 if (!d) {
                         printk(KERN_ERR
                                "can: allocation of receive list failed\n");
                         return NOTIFY_DONE;
                 }
-                d->dev = dev;
-
-                spin_lock(&can_rcvlists_lock);
-                hlist_add_head_rcu(&d->list, &can_rx_dev_list);
-                spin_unlock(&can_rcvlists_lock);
+                BUG_ON(dev->ml_priv);
+                dev->ml_priv = d;
 
                 break;
 
         case NETDEV_UNREGISTER:
                 spin_lock(&can_rcvlists_lock);
 
-                d = find_dev_rcv_lists(dev);
+                d = dev->ml_priv;
                 if (d) {
-                        if (d->entries) {
+                        if (d->entries)
                                 d->remove_on_zero_entries = 1;
-                                d = NULL;
-                        } else
-                                hlist_del_rcu(&d->list);
+                        else {
+                                kfree(d);
+                                dev->ml_priv = NULL;
+                        }
                 } else
                         printk(KERN_ERR "can: notifier: receive list not "
                                "found for dev %s\n", dev->name);
 
                 spin_unlock(&can_rcvlists_lock);
 
-                if (d)
-                        call_rcu(&d->rcu, can_rx_delete_device);
-
                 break;
         }
 
@@ -853,21 +807,13 @@ static __init int can_init(void)
 {
         printk(banner);
 
+        memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list));
+
         rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
                                       0, 0, NULL);
         if (!rcv_cache)
                 return -ENOMEM;
 
-        /*
-         * Insert can_rx_alldev_list for reception on all devices.
-         * This struct is zero initialized which is correct for the
-         * embedded hlist heads, the dev pointer, and the entries counter.
-         */
-
-        spin_lock(&can_rcvlists_lock);
-        hlist_add_head_rcu(&can_rx_alldev_list.list, &can_rx_dev_list);
-        spin_unlock(&can_rcvlists_lock);
-
         if (stats_timer) {
                 /* the statistics are updated every second (timer triggered) */
                 setup_timer(&can_stattimer, can_stat_update, 0);
@@ -887,8 +833,7 @@ static __init int can_init(void)
 
 static __exit void can_exit(void)
 {
-        struct dev_rcv_lists *d;
-        struct hlist_node *n, *next;
+        struct net_device *dev;
 
         if (stats_timer)
                 del_timer(&can_stattimer);
@@ -900,14 +845,19 @@ static __exit void can_exit(void)
         unregister_netdevice_notifier(&can_netdev_notifier);
         sock_unregister(PF_CAN);
 
-        /* remove can_rx_dev_list */
-        spin_lock(&can_rcvlists_lock);
-        hlist_del(&can_rx_alldev_list.list);
-        hlist_for_each_entry_safe(d, n, next, &can_rx_dev_list, list) {
-                hlist_del(&d->list);
-                kfree(d);
+        /* remove created dev_rcv_lists from still registered CAN devices */
+        rcu_read_lock();
+        for_each_netdev_rcu(&init_net, dev) {
+                if (dev->type == ARPHRD_CAN && dev->ml_priv){
+
+                        struct dev_rcv_lists *d = dev->ml_priv;
+
+                        BUG_ON(d->entries);
+                        kfree(d);
+                        dev->ml_priv = NULL;
+                }
         }
-        spin_unlock(&can_rcvlists_lock);
+        rcu_read_unlock();
 
         rcu_barrier(); /* Wait for completion of call_rcu()'s */
 