author	Patrick McHardy <kaber@trash.net>	2009-06-13 06:21:49 -0400
committer	Patrick McHardy <kaber@trash.net>	2009-06-13 06:21:49 -0400
commit	65cb9fda32be613216f601a330b311c3bd7a8436 (patch)
tree	3ba0ae9cbd474723f47e5ea8f1d21e212381c135
parent	266d07cb1c9a0c345d7d3aea889f92062894059e (diff)
netfilter: nf_conntrack: use mod_timer_pending() for conntrack refresh
Use mod_timer_pending() instead of the atomic sequence of del_timer()/add_timer(). mod_timer_pending() does not rearm an inactive timer, so we no longer need the conntrack lock to make sure we don't accidentally rearm the timer of a conntrack that is in the process of being destroyed.

With this change we don't need to take the global lock at all anymore; counter updates can be performed under the per-conntrack lock.

Signed-off-by: Patrick McHardy <kaber@trash.net>
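For context, the essence of the change is the timer-refresh pattern: the old code had to hold the global nf_conntrack_lock so that the del_timer()/add_timer() pair could not race with the teardown path destroying the conntrack, whereas mod_timer_pending() only rearms a timer that is still pending and therefore never resurrects the timer of a dying conntrack. A minimal sketch of the two patterns follows (illustrative only, not a buildable module; the helper names refresh_old/refresh_new are hypothetical):

#include <linux/timer.h>
#include <linux/spinlock.h>

/* Old pattern: a global lock keeps del_timer()/add_timer() from racing
 * with the teardown path that deletes the timer for good. */
static void refresh_old(struct timer_list *timeout, unsigned long newtime,
			spinlock_t *global_lock)
{
	spin_lock_bh(global_lock);
	if (del_timer(timeout)) {	/* true only if the timer was pending */
		timeout->expires = newtime;
		add_timer(timeout);	/* rearm under the lock */
	}
	spin_unlock_bh(global_lock);
}

/* New pattern: mod_timer_pending() rearms only an already-pending timer,
 * so no global lock is needed and a dying conntrack's timer stays dead. */
static void refresh_new(struct timer_list *timeout, unsigned long newtime)
{
	mod_timer_pending(timeout, newtime);
}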
 net/netfilter/nf_conntrack_core.c | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index edf95695e0aa..d8dffe7ab509 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -807,8 +807,6 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
 	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
 	NF_CT_ASSERT(skb);
 
-	spin_lock_bh(&nf_conntrack_lock);
-
 	/* Only update if this is not a fixed timeout */
 	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
 		goto acct;
@@ -822,11 +820,8 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
 		/* Only update the timeout if the new timeout is at least
 		   HZ jiffies from the old timeout. Need del_timer for race
 		   avoidance (may already be dying). */
-		if (newtime - ct->timeout.expires >= HZ
-		    && del_timer(&ct->timeout)) {
-			ct->timeout.expires = newtime;
-			add_timer(&ct->timeout);
-		}
+		if (newtime - ct->timeout.expires >= HZ)
+			mod_timer_pending(&ct->timeout, newtime);
 	}
 
 acct:
@@ -835,13 +830,13 @@ acct:
 
 		acct = nf_conn_acct_find(ct);
 		if (acct) {
+			spin_lock_bh(&ct->lock);
 			acct[CTINFO2DIR(ctinfo)].packets++;
 			acct[CTINFO2DIR(ctinfo)].bytes +=
 				skb->len - skb_network_offset(skb);
+			spin_unlock_bh(&ct->lock);
 		}
 	}
-
-	spin_unlock_bh(&nf_conntrack_lock);
 }
 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
 
@@ -853,14 +848,14 @@ bool __nf_ct_kill_acct(struct nf_conn *ct,
 	if (do_acct) {
 		struct nf_conn_counter *acct;
 
-		spin_lock_bh(&nf_conntrack_lock);
 		acct = nf_conn_acct_find(ct);
 		if (acct) {
+			spin_lock_bh(&ct->lock);
 			acct[CTINFO2DIR(ctinfo)].packets++;
 			acct[CTINFO2DIR(ctinfo)].bytes +=
 				skb->len - skb_network_offset(skb);
+			spin_unlock_bh(&ct->lock);
 		}
-		spin_unlock_bh(&nf_conntrack_lock);
 	}
 
 	if (del_timer(&ct->timeout)) {