author	Eric Dumazet <eric.dumazet@gmail.com>	2009-07-29 23:15:07 -0400
committer	David S. Miller <davem@davemloft.net>	2009-08-02 21:35:16 -0400
commit	e4c4e448cf557921ffbbbd6d6ddac81fdceacb4f (patch)
tree	6813f4b0228e1ec26c26c5c56ef980a2a7c0963c /net/core/neighbour.c
parent	1e3e238e9c4bf9987b19185235cd0cdc21ea038c (diff)
neigh: Convert garbage collection from softirq to workqueue
Currently, neigh_periodic_timer() is fired by a timer IRQ and scans a
single hash bucket per round (very little work, in fact). As we are
supposed to scan the whole hash table every 15 seconds, this means
neigh_periodic_timer() can fire very often, depending on the number of
hash entries stored in the table.

Converting this to a workqueue lets us scan the whole table in one
pass, minimizing icache pollution, and fire the work every 15 seconds
independently of the hash table size.

This 15 second delay is not a hard number, as the work item is
deferrable.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
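
[Editor's note: for readers less familiar with the API, here is a
minimal, hypothetical sketch of the deferrable delayed-work idiom this
patch adopts. The names my_table, my_periodic_work() and the 15 * HZ
period are illustrative stand-ins, not code from the patch itself.]

#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Hypothetical container, standing in for struct neigh_table. */
struct my_table {
	struct delayed_work gc_work;
	/* ... hash buckets, lock, parms ... */
};

/* The handler recovers its container via container_of() on the
 * embedded work_struct, scans everything in one pass, then re-arms
 * itself -- the same shape as neigh_periodic_work() below.
 */
static void my_periodic_work(struct work_struct *work)
{
	struct my_table *tbl = container_of(work, struct my_table,
					    gc_work.work);

	/* ... scan the whole hash table here ... */

	/* One full pass roughly every 15 seconds, whatever the size. */
	schedule_delayed_work(&tbl->gc_work, 15 * HZ);
}

static void my_table_init(struct my_table *tbl)
{
	/* Deferrable: an idle CPU may let the timeout slip rather than
	 * wake up just for this work -- hence "not a hard number".
	 */
	INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, my_periodic_work);
	schedule_delayed_work(&tbl->gc_work, 15 * HZ);
}

[On current kernels the init macro is spelled INIT_DEFERRABLE_WORK(),
but the shape is unchanged.]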
Diffstat (limited to 'net/core/neighbour.c')
-rw-r--r--	net/core/neighbour.c	89
1 file changed, 43 insertions(+), 46 deletions(-)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index c6f9ad8e4c7a..e587e6819698 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -692,75 +692,74 @@ static void neigh_connect(struct neighbour *neigh)
 		hh->hh_output = neigh->ops->hh_output;
 }
 
-static void neigh_periodic_timer(unsigned long arg)
+static void neigh_periodic_work(struct work_struct *work)
 {
-	struct neigh_table *tbl = (struct neigh_table *)arg;
+	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
 	struct neighbour *n, **np;
-	unsigned long expire, now = jiffies;
+	unsigned int i;
 
 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
 
-	write_lock(&tbl->lock);
+	write_lock_bh(&tbl->lock);
 
 	/*
 	 * periodically recompute ReachableTime from random function
 	 */
 
-	if (time_after(now, tbl->last_rand + 300 * HZ)) {
+	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
 		struct neigh_parms *p;
-		tbl->last_rand = now;
+		tbl->last_rand = jiffies;
 		for (p = &tbl->parms; p; p = p->next)
 			p->reachable_time =
 				neigh_rand_reach_time(p->base_reachable_time);
 	}
 
-	np = &tbl->hash_buckets[tbl->hash_chain_gc];
-	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
+	for (i = 0 ; i <= tbl->hash_mask; i++) {
+		np = &tbl->hash_buckets[i];
 
-	while ((n = *np) != NULL) {
-		unsigned int state;
+		while ((n = *np) != NULL) {
+			unsigned int state;
 
-		write_lock(&n->lock);
+			write_lock(&n->lock);
 
-		state = n->nud_state;
-		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
-			write_unlock(&n->lock);
-			goto next_elt;
-		}
+			state = n->nud_state;
+			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
+				write_unlock(&n->lock);
+				goto next_elt;
+			}
 
-		if (time_before(n->used, n->confirmed))
-			n->used = n->confirmed;
+			if (time_before(n->used, n->confirmed))
+				n->used = n->confirmed;
 
-		if (atomic_read(&n->refcnt) == 1 &&
-		    (state == NUD_FAILED ||
-		     time_after(now, n->used + n->parms->gc_staletime))) {
-			*np = n->next;
-			n->dead = 1;
-			write_unlock(&n->lock);
-			neigh_cleanup_and_release(n);
-			continue;
-		}
-		write_unlock(&n->lock);
+			if (atomic_read(&n->refcnt) == 1 &&
+			    (state == NUD_FAILED ||
+			     time_after(jiffies, n->used + n->parms->gc_staletime))) {
+				*np = n->next;
+				n->dead = 1;
+				write_unlock(&n->lock);
+				neigh_cleanup_and_release(n);
+				continue;
+			}
+			write_unlock(&n->lock);
 
 next_elt:
-		np = &n->next;
+			np = &n->next;
+		}
+		/*
+		 * It's fine to release lock here, even if hash table
+		 * grows while we are preempted.
+		 */
+		write_unlock_bh(&tbl->lock);
+		cond_resched();
+		write_lock_bh(&tbl->lock);
 	}
-
 	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
 	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
 	 * base_reachable_time.
 	 */
-	expire = tbl->parms.base_reachable_time >> 1;
-	expire /= (tbl->hash_mask + 1);
-	if (!expire)
-		expire = 1;
-
-	if (expire>HZ)
-		mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
-	else
-		mod_timer(&tbl->gc_timer, now + expire);
-
-	write_unlock(&tbl->lock);
+	schedule_delayed_work(&tbl->gc_work,
+			      tbl->parms.base_reachable_time >> 1);
+	write_unlock_bh(&tbl->lock);
 }
 
 static __inline__ int neigh_max_probes(struct neighbour *n)
@@ -1442,10 +1441,8 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
 
 	rwlock_init(&tbl->lock);
-	setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl);
-	tbl->gc_timer.expires = now + 1;
-	add_timer(&tbl->gc_timer);
-
+	INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
+	schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
 	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
 	skb_queue_head_init_class(&tbl->proxy_queue,
 				  &neigh_table_proxy_queue_class);
@@ -1482,7 +1479,8 @@ int neigh_table_clear(struct neigh_table *tbl)
 	struct neigh_table **tp;
 
 	/* It is not clean... Fix it to unload IPv6 module safely */
-	del_timer_sync(&tbl->gc_timer);
+	cancel_delayed_work(&tbl->gc_work);
+	flush_scheduled_work();
 	del_timer_sync(&tbl->proxy_timer);
 	pneigh_queue_purge(&tbl->proxy_queue);
 	neigh_ifdown(tbl, NULL);
@@ -1752,7 +1750,6 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
 		.ndtc_last_rand = jiffies_to_msecs(rand_delta),
 		.ndtc_hash_rnd = tbl->hash_rnd,
 		.ndtc_hash_mask = tbl->hash_mask,
-		.ndtc_hash_chain_gc = tbl->hash_chain_gc,
 		.ndtc_proxy_qlen = tbl->proxy_queue.qlen,
 	};
 
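
[Editor's note: for completeness, a hedged sketch of the matching
teardown, mirroring the neigh_table_clear() hunk above; my_table is
the illustrative struct from the earlier sketch, not the patch's own
code. cancel_delayed_work() removes a pending work item but does not
wait for one that is already executing, which is why the patch follows
it with flush_scheduled_work().]

#include <linux/workqueue.h>

/* Hypothetical container, as in the earlier sketch. */
struct my_table {
	struct delayed_work gc_work;
};

static void my_table_teardown(struct my_table *tbl)
{
	/* Remove the work item if it is still pending... */
	cancel_delayed_work(&tbl->gc_work);
	/* ...then drain the shared workqueue so a handler that already
	 * started completes before the table is freed.
	 */
	flush_scheduled_work();
}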