author      Eric Dumazet <eric.dumazet@gmail.com>      2011-01-10 14:11:38 -0500
committer   Pablo Neira Ayuso <pablo@netfilter.org>    2011-01-10 14:11:38 -0500
commit      83723d60717f8da0f53f91cf42a845ed56c09662
tree        9d32edc2c6dc4849e63d422f8dad42606b2f984f /include
parent      45b9f509b7f5d2d792b3c03b78ddc8ec543e921b
netfilter: x_tables: dont block BH while reading counters
Using "iptables -L" with a lot of rules have a too big BH latency.
Jesper mentioned ~6 ms and worried of frame drops.
Switch to a per_cpu seqlock scheme, so that taking a snapshot of
counters doesnt need to block BH (for this cpu, but also other cpus).
This adds two increments on seqlock sequence per ipt_do_table() call,
its a reasonable cost for allowing "iptables -L" not block BH
processing.
Reported-by: Jesper Dangaard Brouer <hawk@comx.dk>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Patrick McHardy <kaber@trash.net>
Acked-by: Stephen Hemminger <shemminger@vyatta.com>
Acked-by: Jesper Dangaard Brouer <hawk@comx.dk>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
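Since the diffstat below is limited to 'include', the reader side that this seqlock enables (the counter snapshot taken for "iptables -L") is not shown here. The following is a minimal sketch of that retry pattern, assuming the per-entry counters live in a struct xt_counters; the function name snapshot_one_counter() and its arguments are illustrative, not the kernel's actual get_counters() code:

	#include <linux/netfilter/x_tables.h>
	#include <linux/percpu.h>
	#include <linux/seqlock.h>

	/*
	 * Illustrative sketch: copy one CPU's packet/byte counters without
	 * disabling BH.  If the packet path updates the counters on that CPU
	 * while we read them, its write_seqlock()/write_sequnlock() pair bumps
	 * the sequence and the loop simply retries.
	 */
	static void snapshot_one_counter(const struct xt_counters *src,
					 struct xt_counters *dst,
					 unsigned int cpu)
	{
		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
		unsigned int start;
		u64 bcnt, pcnt;

		do {
			start = read_seqbegin(lock);
			bcnt = src->bcnt;
			pcnt = src->pcnt;
		} while (read_seqretry(lock, start));

		dst->bcnt = bcnt;
		dst->pcnt = pcnt;
	}

The retry loop is the only synchronization cost on the dump side, which is what removes the long BH-disabled section the changelog complains about.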
Diffstat (limited to 'include')
-rw-r--r--  include/linux/netfilter/x_tables.h | 10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 742bec051440..6712e713b299 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -472,7 +472,7 @@ extern void xt_free_table_info(struct xt_table_info *info);
  * necessary for reading the counters.
  */
 struct xt_info_lock {
-	spinlock_t lock;
+	seqlock_t lock;
 	unsigned char readers;
 };
 DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
@@ -497,7 +497,7 @@ static inline void xt_info_rdlock_bh(void)
 	local_bh_disable();
 	lock = &__get_cpu_var(xt_info_locks);
 	if (likely(!lock->readers++))
-		spin_lock(&lock->lock);
+		write_seqlock(&lock->lock);
 }
 
 static inline void xt_info_rdunlock_bh(void)
@@ -505,7 +505,7 @@ static inline void xt_info_rdunlock_bh(void)
 	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
 
 	if (likely(!--lock->readers))
-		spin_unlock(&lock->lock);
+		write_sequnlock(&lock->lock);
 	local_bh_enable();
 }
 
@@ -516,12 +516,12 @@ static inline void xt_info_rdunlock_bh(void)
  */
 static inline void xt_info_wrlock(unsigned int cpu)
 {
-	spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+	write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 static inline void xt_info_wrunlock(unsigned int cpu)
 {
-	spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+	write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 /*
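For context on how these helpers are used, here is a hedged sketch of the per-packet side; example_do_table() and its trivial body are placeholders standing in for the real ipt_do_table() rule walk, which keeps calling the same helpers and only pays a different cost inside them:

	#include <linux/netfilter.h>
	#include <linux/netfilter/x_tables.h>
	#include <linux/skbuff.h>

	/* Placeholder for the real table walk in ipt_do_table(). */
	static unsigned int example_do_table(struct sk_buff *skb)
	{
		unsigned int verdict = NF_ACCEPT;

		/*
		 * For the outermost call on this CPU, xt_info_rdlock_bh() now
		 * takes the per-cpu write_seqlock(), so the sequence is bumped
		 * once here and once in xt_info_rdunlock_bh() -- the "two
		 * increments ... per ipt_do_table() call" from the changelog.
		 */
		xt_info_rdlock_bh();

		/* ... match rules and bump per-entry counters here ... */

		xt_info_rdunlock_bh();

		return verdict;
	}

Because the helper names and signatures are unchanged, the packet path itself does not need to be rewritten; the change is confined to what the lock/unlock pair does, which is what lets the counter dump read concurrently instead of blocking BH.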