author		Eric Dumazet <dada1@cosmosbay.com>	2007-11-20 01:43:37 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-28 17:54:33 -0500
commit		beb659bd8c9f2ccc8195779383f71088f936bf6e (patch)
tree		0d07cc0a6e4525720537d08e7f905245f016136a /net/ipv4/route.c
parent		42a73808ed4f30b739eb52bcbb33a02fe62ceef5 (diff)
[PATCH] IPV4: Move ip route cache flush (secret_rebuild) from softirq to workqueue
Every 600 seconds (ip_rt_secret_interval), a softirq flush of the
whole IP route cache is triggered. On loaded machines, this can starve
softirqs for many seconds and can eventually crash the machine.
This patch moves this flush to a workqueue context, using the worker
we introduced in commit 39c90ece7565f5c47110c2fa77409d7a9478bd5b (IPV4:
Convert rt_check_expire() from softirq processing to workqueue.)
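
For context, the mechanism relied on here is a self-rearming delayed work
item, whose handler runs in process context rather than in softirq. A
minimal sketch of that pattern, not code from this patch; the names
scan_func/scan_work and the 60 * HZ period are invented for illustration:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void scan_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(scan_work, scan_func);

/* Runs in process context, so it may sleep or be preempted. */
static void scan_func(struct work_struct *work)
{
	/* ... do a bounded chunk of work here ... */

	/* Re-arm ourselves, as rt_worker_func() does with ip_rt_gc_interval. */
	schedule_delayed_work(&scan_work, 60 * HZ);
}

Kicking it off once with schedule_delayed_work(&scan_work, 0) is enough;
each run then queues the next one.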
Also, immediate flushes (echo 0 >/proc/sys/net/ipv4/route/flush) now
use the rt_do_flush() helper function, which takes care to reschedule
when necessary.
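
The rescheduling in question is the usual cond_resched() guard for long
loops: yield the CPU between buckets, but only when running in process
context, since sleeping is forbidden in softirq. A hypothetical table walk
using the same guard (walk_table and table_mask are invented names; the
real code is rt_do_flush() in the diff below):

#include <linux/sched.h>

static unsigned int table_mask;	/* hypothetical: bucket count - 1 */

static void walk_table(int process_context)
{
	unsigned int i;

	for (i = 0; i <= table_mask; i++) {
		/* Yield if the scheduler wants the CPU; process context only. */
		if (process_context && need_resched())
			cond_resched();
		/* ... detach and free bucket i under its lock ... */
	}
}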
The next step will be to handle delayed flushes
("echo -1 >/proc/sys/net/ipv4/route/flush" or "ip route flush cache").
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/route.c')
-rw-r--r--	net/ipv4/route.c	83
1 file changed, 59 insertions, 24 deletions
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e4b6fb4b1f4f..fcae074b7ae4 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -133,13 +133,14 @@ static int ip_rt_mtu_expires = 10 * 60 * HZ;
 static int ip_rt_min_pmtu = 512 + 20 + 20;
 static int ip_rt_min_advmss = 256;
 static int ip_rt_secret_interval = 10 * 60 * HZ;
+static int ip_rt_flush_expected;
 static unsigned long rt_deadline;
 
 #define RTprint(a...) printk(KERN_DEBUG a)
 
 static struct timer_list rt_flush_timer;
-static void rt_check_expire(struct work_struct *work);
-static DECLARE_DELAYED_WORK(expires_work, rt_check_expire);
+static void rt_worker_func(struct work_struct *work);
+static DECLARE_DELAYED_WORK(expires_work, rt_worker_func);
 static struct timer_list rt_secret_timer;
 
 /*
@@ -561,7 +562,36 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
 		(fl1->iif ^ fl2->iif)) == 0;
 }
 
-static void rt_check_expire(struct work_struct *work)
+/*
+ * Perform a full scan of hash table and free all entries.
+ * Can be called by a softirq or a process.
+ * In the later case, we want to be reschedule if necessary
+ */
+static void rt_do_flush(int process_context)
+{
+	unsigned int i;
+	struct rtable *rth, *next;
+
+	for (i = 0; i <= rt_hash_mask; i++) {
+		if (process_context && need_resched())
+			cond_resched();
+		rth = rt_hash_table[i].chain;
+		if (!rth)
+			continue;
+
+		spin_lock_bh(rt_hash_lock_addr(i));
+		rth = rt_hash_table[i].chain;
+		rt_hash_table[i].chain = NULL;
+		spin_unlock_bh(rt_hash_lock_addr(i));
+
+		for (; rth; rth = next) {
+			next = rth->u.dst.rt_next;
+			rt_free(rth);
+		}
+	}
+}
+
+static void rt_check_expire(void)
 {
 	static unsigned int rover;
 	unsigned int i = rover, goal;
@@ -607,33 +637,33 @@ static void rt_check_expire(struct work_struct *work)
 		spin_unlock_bh(rt_hash_lock_addr(i));
 	}
 	rover = i;
+}
+
+/*
+ * rt_worker_func() is run in process context.
+ * If a whole flush was scheduled, it is done.
+ * Else, we call rt_check_expire() to scan part of the hash table
+ */
+static void rt_worker_func(struct work_struct *work)
+{
+	if (ip_rt_flush_expected) {
+		ip_rt_flush_expected = 0;
+		rt_do_flush(1);
+	} else
+		rt_check_expire();
 	schedule_delayed_work(&expires_work, ip_rt_gc_interval);
 }
 
 /* This can run from both BH and non-BH contexts, the latter
  * in the case of a forced flush event.
  */
-static void rt_run_flush(unsigned long dummy)
+static void rt_run_flush(unsigned long process_context)
 {
-	int i;
-	struct rtable *rth, *next;
-
 	rt_deadline = 0;
 
 	get_random_bytes(&rt_hash_rnd, 4);
 
-	for (i = rt_hash_mask; i >= 0; i--) {
-		spin_lock_bh(rt_hash_lock_addr(i));
-		rth = rt_hash_table[i].chain;
-		if (rth)
-			rt_hash_table[i].chain = NULL;
-		spin_unlock_bh(rt_hash_lock_addr(i));
-
-		for (; rth; rth = next) {
-			next = rth->u.dst.rt_next;
-			rt_free(rth);
-		}
-	}
+	rt_do_flush(process_context);
 }
 
 static DEFINE_SPINLOCK(rt_flush_lock);
@@ -667,7 +697,7 @@ void rt_cache_flush(int delay)
 
 	if (delay <= 0) {
 		spin_unlock_bh(&rt_flush_lock);
-		rt_run_flush(0);
+		rt_run_flush(user_mode);
 		return;
 	}
 
@@ -678,12 +708,17 @@ void rt_cache_flush(int delay)
 	spin_unlock_bh(&rt_flush_lock);
 }
 
+/*
+ * We change rt_hash_rnd and ask next rt_worker_func() invocation
+ * to perform a flush in process context
+ */
 static void rt_secret_rebuild(unsigned long dummy)
 {
-	unsigned long now = jiffies;
-
-	rt_cache_flush(0);
-	mod_timer(&rt_secret_timer, now + ip_rt_secret_interval);
+	get_random_bytes(&rt_hash_rnd, 4);
+	ip_rt_flush_expected = 1;
+	cancel_delayed_work(&expires_work);
+	schedule_delayed_work(&expires_work, HZ/10);
+	mod_timer(&rt_secret_timer, jiffies + ip_rt_secret_interval);
 }
 
 /*