author	Eric Dumazet <eric.dumazet@gmail.com>	2010-06-09 12:14:58 -0400
committer	Patrick McHardy <kaber@trash.net>	2010-06-09 12:14:58 -0400
commit	bed1be20867d17a3eb2fb5e1613ebdc50c83b8aa (patch)
tree	c8350ba3c47861b11857ff943d8647d7bd08eb36
parent	c463ac972315a0c86bb20b8d35225baa75caf899 (diff)
netfilter: nfnetlink_log: RCU conversion
- instances_lock becomes a spinlock
- lockless lookups

While nfnetlink_log is probably not performance critical, using fewer
rwlocks in our code is always welcome...

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
-rw-r--r--	net/netfilter/nfnetlink_log.c	49
1 file changed, 27 insertions(+), 22 deletions(-)
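The conversion follows the standard read-mostly RCU recipe: packet-path readers walk the hash chains under rcu_read_lock_bh() without taking any lock, the rare writers (instance create/destroy, netlink socket teardown) serialize on a plain spinlock, and the final kfree() is deferred past a BH grace period. Below is a minimal, self-contained sketch of that recipe in the same kernel style. The `item` table and its helpers are hypothetical illustrations, not code from this patch; the four-argument hlist_for_each_entry_rcu() and call_rcu_bh() match the kernel APIs of this era (later kernels dropped the pos argument and folded the BH flavour into plain call_rcu()).

#include <linux/list.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical hash table guarded the same way as instance_table:
 * RCU for readers, a spinlock for writers. */
struct item {
	struct hlist_node hlist;
	u_int16_t id;
	struct rcu_head rcu;
};

#define ITEM_BUCKETS 16
static struct hlist_head item_table[ITEM_BUCKETS];
static DEFINE_SPINLOCK(item_lock);	/* serializes writers only */

/* Read side: lockless, safe against concurrent add/remove. */
static bool item_exists(u_int16_t id)
{
	struct hlist_node *pos;
	struct item *it;
	bool found = false;

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu(it, pos, &item_table[id % ITEM_BUCKETS], hlist) {
		if (it->id == id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock_bh();
	return found;
}

static void item_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct item, rcu));
}

/* Write side: unlink under the spinlock, then wait (asynchronously)
 * for a BH grace period before the memory is really freed. */
static void item_remove(struct item *it)
{
	spin_lock_bh(&item_lock);
	hlist_del_rcu(&it->hlist);
	spin_unlock_bh(&item_lock);
	call_rcu_bh(&it->rcu, item_free_rcu);
}

A spinlock (rather than keeping the rwlock) is enough on the write side because updates are rare; the point of the patch is that the hot lookup path no longer bounces a lock cacheline at all.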
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index fc9a211e629e..8ec23ec568e7 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -66,9 +66,10 @@ struct nfulnl_instance {
 	u_int16_t group_num;	/* number of this queue */
 	u_int16_t flags;
 	u_int8_t copy_mode;
+	struct rcu_head rcu;
 };
 
-static DEFINE_RWLOCK(instances_lock);
+static DEFINE_SPINLOCK(instances_lock);
 static atomic_t global_seq;
 
 #define INSTANCE_BUCKETS	16
@@ -88,7 +89,7 @@ __instance_lookup(u_int16_t group_num)
 	struct nfulnl_instance *inst;
 
 	head = &instance_table[instance_hashfn(group_num)];
-	hlist_for_each_entry(inst, pos, head, hlist) {
+	hlist_for_each_entry_rcu(inst, pos, head, hlist) {
 		if (inst->group_num == group_num)
 			return inst;
 	}
@@ -106,22 +107,26 @@ instance_lookup_get(u_int16_t group_num)
 {
 	struct nfulnl_instance *inst;
 
-	read_lock_bh(&instances_lock);
+	rcu_read_lock_bh();
 	inst = __instance_lookup(group_num);
 	if (inst)
 		instance_get(inst);
-	read_unlock_bh(&instances_lock);
+	rcu_read_unlock_bh();
 
 	return inst;
 }
 
+static void nfulnl_instance_free_rcu(struct rcu_head *head)
+{
+	kfree(container_of(head, struct nfulnl_instance, rcu));
+	module_put(THIS_MODULE);
+}
+
 static void
 instance_put(struct nfulnl_instance *inst)
 {
-	if (inst && atomic_dec_and_test(&inst->use)) {
-		kfree(inst);
-		module_put(THIS_MODULE);
-	}
+	if (inst && atomic_dec_and_test(&inst->use))
+		call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
 }
 
 static void nfulnl_timer(unsigned long data);
@@ -132,7 +137,7 @@ instance_create(u_int16_t group_num, int pid)
 	struct nfulnl_instance *inst;
 	int err;
 
-	write_lock_bh(&instances_lock);
+	spin_lock_bh(&instances_lock);
 	if (__instance_lookup(group_num)) {
 		err = -EEXIST;
 		goto out_unlock;
@@ -169,12 +174,12 @@ instance_create(u_int16_t group_num, int pid)
 	hlist_add_head(&inst->hlist,
 		       &instance_table[instance_hashfn(group_num)]);
 
-	write_unlock_bh(&instances_lock);
+	spin_unlock_bh(&instances_lock);
 
 	return inst;
 
 out_unlock:
-	write_unlock_bh(&instances_lock);
+	spin_unlock_bh(&instances_lock);
 	return ERR_PTR(err);
 }
 
@@ -200,9 +205,9 @@ __instance_destroy(struct nfulnl_instance *inst)
 static inline void
 instance_destroy(struct nfulnl_instance *inst)
 {
-	write_lock_bh(&instances_lock);
+	spin_lock_bh(&instances_lock);
 	__instance_destroy(inst);
-	write_unlock_bh(&instances_lock);
+	spin_unlock_bh(&instances_lock);
 }
 
 static int
@@ -672,7 +677,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
 		int i;
 
 		/* destroy all instances for this pid */
-		write_lock_bh(&instances_lock);
+		spin_lock_bh(&instances_lock);
 		for (i = 0; i < INSTANCE_BUCKETS; i++) {
 			struct hlist_node *tmp, *t2;
 			struct nfulnl_instance *inst;
@@ -684,7 +689,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
 					__instance_destroy(inst);
 			}
 		}
-		write_unlock_bh(&instances_lock);
+		spin_unlock_bh(&instances_lock);
 	}
 	return NOTIFY_DONE;
 }
@@ -861,19 +866,19 @@ static struct hlist_node *get_first(struct iter_state *st)
 
 	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
 		if (!hlist_empty(&instance_table[st->bucket]))
-			return instance_table[st->bucket].first;
+			return rcu_dereference_bh(instance_table[st->bucket].first);
 	}
 	return NULL;
 }
 
 static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h)
 {
-	h = h->next;
+	h = rcu_dereference_bh(h->next);
 	while (!h) {
 		if (++st->bucket >= INSTANCE_BUCKETS)
 			return NULL;
 
-		h = instance_table[st->bucket].first;
+		h = rcu_dereference_bh(instance_table[st->bucket].first);
 	}
 	return h;
 }
@@ -890,9 +895,9 @@ static struct hlist_node *get_idx(struct iter_state *st, loff_t pos)
 }
 
 static void *seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(instances_lock)
+	__acquires(rcu_bh)
 {
-	read_lock_bh(&instances_lock);
+	rcu_read_lock_bh();
 	return get_idx(seq->private, *pos);
 }
 
@@ -903,9 +908,9 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void seq_stop(struct seq_file *s, void *v)
-	__releases(instances_lock)
+	__releases(rcu_bh)
 {
-	read_unlock_bh(&instances_lock);
+	rcu_read_unlock_bh();
 }
 
 static int seq_show(struct seq_file *s, void *v)
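The subtle part of the patch is the instance_put() rewrite. Instances are reference counted, and the final put may come from a reader that found the object through the lockless lookup, so kfree() has to wait until every rcu_read_lock_bh() section that could still see the pointer has ended; module_put() moves into the callback so the module stays pinned until the callback has actually run. The flavours must also match: BH readers need a BH grace period, hence call_rcu_bh() rather than call_rcu(). Below is a hedged sketch of that release pattern, reusing the hypothetical naming (and includes) of the sketch above.

/* Refcount-plus-RCU release, mirroring the new instance_put().
 * (Hypothetical continuation of the earlier sketch.) */
struct item_ref {
	struct hlist_node hlist;
	atomic_t use;		/* reference count */
	struct rcu_head rcu;
};

static void item_ref_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct item_ref, rcu));
	module_put(THIS_MODULE);	/* dropped only after the grace period */
}

static void item_ref_put(struct item_ref *it)
{
	/* atomic_dec_and_test() returns true for exactly one caller --
	 * whoever drops the last reference schedules the deferred free,
	 * whether that caller is a writer or a softirq-context reader. */
	if (it && atomic_dec_and_test(&it->use))
		call_rcu_bh(&it->rcu, item_ref_free_rcu);
}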