aboutsummaryrefslogtreecommitdiffstats
path: root/net/ax25/af_ax25.c
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>2006-07-12 16:25:23 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2006-07-12 16:58:57 -0400
commitc19c4b9c9acb4ab6f5477ae9ca2c0a8619f19c7a (patch)
tree3aaf3c6e4bc9dd797af434b6767c3da5732a6ba1 /net/ax25/af_ax25.c
parentda952315c9c625bd513c6162613fd3fd01d91aae (diff)
[AX.25]: Optimize AX.25 socket list lock
Right now all uses of the ax25_list_lock lock are _bh locks, but knowing some code is only ever invoked from _bh context, we can do better. Signed-off-by: Ralf Baechle <ralf@linux-mips.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ax25/af_ax25.c')
-rw-r--r--net/ax25/af_ax25.c14
1 file changed, 7 insertions, 7 deletions
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index f12be2acf9bc..000695c48583 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -145,7 +145,7 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi,
145 ax25_cb *s; 145 ax25_cb *s;
146 struct hlist_node *node; 146 struct hlist_node *node;
147 147
148 spin_lock_bh(&ax25_list_lock); 148 spin_lock(&ax25_list_lock);
149 ax25_for_each(s, node, &ax25_list) { 149 ax25_for_each(s, node, &ax25_list) {
150 if ((s->iamdigi && !digi) || (!s->iamdigi && digi)) 150 if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
151 continue; 151 continue;
@@ -154,12 +154,12 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi,
154 /* If device is null we match any device */ 154 /* If device is null we match any device */
155 if (s->ax25_dev == NULL || s->ax25_dev->dev == dev) { 155 if (s->ax25_dev == NULL || s->ax25_dev->dev == dev) {
156 sock_hold(s->sk); 156 sock_hold(s->sk);
157 spin_unlock_bh(&ax25_list_lock); 157 spin_unlock(&ax25_list_lock);
158 return s->sk; 158 return s->sk;
159 } 159 }
160 } 160 }
161 } 161 }
162 spin_unlock_bh(&ax25_list_lock); 162 spin_unlock(&ax25_list_lock);
163 163
164 return NULL; 164 return NULL;
165} 165}
@@ -174,7 +174,7 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
174 ax25_cb *s; 174 ax25_cb *s;
175 struct hlist_node *node; 175 struct hlist_node *node;
176 176
177 spin_lock_bh(&ax25_list_lock); 177 spin_lock(&ax25_list_lock);
178 ax25_for_each(s, node, &ax25_list) { 178 ax25_for_each(s, node, &ax25_list) {
179 if (s->sk && !ax25cmp(&s->source_addr, my_addr) && 179 if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
180 !ax25cmp(&s->dest_addr, dest_addr) && 180 !ax25cmp(&s->dest_addr, dest_addr) &&
@@ -185,7 +185,7 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
185 } 185 }
186 } 186 }
187 187
188 spin_unlock_bh(&ax25_list_lock); 188 spin_unlock(&ax25_list_lock);
189 189
190 return sk; 190 return sk;
191} 191}
@@ -235,7 +235,7 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
235 struct sk_buff *copy; 235 struct sk_buff *copy;
236 struct hlist_node *node; 236 struct hlist_node *node;
237 237
238 spin_lock_bh(&ax25_list_lock); 238 spin_lock(&ax25_list_lock);
239 ax25_for_each(s, node, &ax25_list) { 239 ax25_for_each(s, node, &ax25_list) {
240 if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 && 240 if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
241 s->sk->sk_type == SOCK_RAW && 241 s->sk->sk_type == SOCK_RAW &&
@@ -248,7 +248,7 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
248 kfree_skb(copy); 248 kfree_skb(copy);
249 } 249 }
250 } 250 }
251 spin_unlock_bh(&ax25_list_lock); 251 spin_unlock(&ax25_list_lock);
252} 252}
253 253
254/* 254/*