aboutsummaryrefslogtreecommitdiffstats
path: root/fs/cifs/cifs_debug.c
diff options
context:
space:
mode:
authorSuresh Jayaraman <sjayaraman@suse.de>2010-10-18 13:59:37 -0400
committerSteve French <sfrench@us.ibm.com>2010-10-21 09:14:27 -0400
commit3f9bcca7820a6711307b6499952b13cfcfc31dd6 (patch)
tree6c380f5877562778335d6794e1e4a297f8970d77 /fs/cifs/cifs_debug.c
parent3e24e132878c83910b61eb7704511a6d96a0389f (diff)
cifs: convert cifs_tcp_ses_lock from a rwlock to a spinlock
cifs_tcp_ses_lock is an rwlock which protects the cifs_tcp_ses_list, server->smb_ses_list and the ses->tcon_list. It also protects a few ref counters in server, ses and tcon. In most cases the critical section doesn't seem to be large; in a few cases where it is slightly large, there seems to be no real benefit from concurrent access. I briefly considered the RCU mechanism, but it appears to me that there is no real need. Replace it with a spinlock and get rid of the last rwlock in the cifs code. Signed-off-by: Suresh Jayaraman <sjayaraman@suse.de> Signed-off-by: Steve French <sfrench@us.ibm.com>
Diffstat (limited to 'fs/cifs/cifs_debug.c')
-rw-r--r--fs/cifs/cifs_debug.c12
1 files changed, 6 insertions, 6 deletions
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index eb1ba493489f..103ab8b605b0 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -148,7 +148,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
148 seq_printf(m, "Servers:"); 148 seq_printf(m, "Servers:");
149 149
150 i = 0; 150 i = 0;
151 read_lock(&cifs_tcp_ses_lock); 151 spin_lock(&cifs_tcp_ses_lock);
152 list_for_each(tmp1, &cifs_tcp_ses_list) { 152 list_for_each(tmp1, &cifs_tcp_ses_list) {
153 server = list_entry(tmp1, struct TCP_Server_Info, 153 server = list_entry(tmp1, struct TCP_Server_Info,
154 tcp_ses_list); 154 tcp_ses_list);
@@ -230,7 +230,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
230 spin_unlock(&GlobalMid_Lock); 230 spin_unlock(&GlobalMid_Lock);
231 } 231 }
232 } 232 }
233 read_unlock(&cifs_tcp_ses_lock); 233 spin_unlock(&cifs_tcp_ses_lock);
234 seq_putc(m, '\n'); 234 seq_putc(m, '\n');
235 235
236 /* BB add code to dump additional info such as TCP session info now */ 236 /* BB add code to dump additional info such as TCP session info now */
@@ -270,7 +270,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
270 atomic_set(&totBufAllocCount, 0); 270 atomic_set(&totBufAllocCount, 0);
271 atomic_set(&totSmBufAllocCount, 0); 271 atomic_set(&totSmBufAllocCount, 0);
272#endif /* CONFIG_CIFS_STATS2 */ 272#endif /* CONFIG_CIFS_STATS2 */
273 read_lock(&cifs_tcp_ses_lock); 273 spin_lock(&cifs_tcp_ses_lock);
274 list_for_each(tmp1, &cifs_tcp_ses_list) { 274 list_for_each(tmp1, &cifs_tcp_ses_list) {
275 server = list_entry(tmp1, struct TCP_Server_Info, 275 server = list_entry(tmp1, struct TCP_Server_Info,
276 tcp_ses_list); 276 tcp_ses_list);
@@ -303,7 +303,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
303 } 303 }
304 } 304 }
305 } 305 }
306 read_unlock(&cifs_tcp_ses_lock); 306 spin_unlock(&cifs_tcp_ses_lock);
307 } 307 }
308 308
309 return count; 309 return count;
@@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
343 GlobalCurrentXid, GlobalMaxActiveXid); 343 GlobalCurrentXid, GlobalMaxActiveXid);
344 344
345 i = 0; 345 i = 0;
346 read_lock(&cifs_tcp_ses_lock); 346 spin_lock(&cifs_tcp_ses_lock);
347 list_for_each(tmp1, &cifs_tcp_ses_list) { 347 list_for_each(tmp1, &cifs_tcp_ses_list) {
348 server = list_entry(tmp1, struct TCP_Server_Info, 348 server = list_entry(tmp1, struct TCP_Server_Info,
349 tcp_ses_list); 349 tcp_ses_list);
@@ -397,7 +397,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
397 } 397 }
398 } 398 }
399 } 399 }
400 read_unlock(&cifs_tcp_ses_lock); 400 spin_unlock(&cifs_tcp_ses_lock);
401 401
402 seq_putc(m, '\n'); 402 seq_putc(m, '\n');
403 return 0; 403 return 0;