author	Suresh Jayaraman <sjayaraman@suse.de>	2010-10-18 13:59:37 -0400
committer	Steve French <sfrench@us.ibm.com>	2010-10-21 09:14:27 -0400
commit	3f9bcca7820a6711307b6499952b13cfcfc31dd6 (patch)
tree	6c380f5877562778335d6794e1e4a297f8970d77
parent	3e24e132878c83910b61eb7704511a6d96a0389f (diff)
cifs: convert cifs_tcp_ses_lock from a rwlock to a spinlock
cifs_tcp_ses_lock is a rwlock that protects the cifs_tcp_ses_list, the server->smb_ses_list and the ses->tcon_list. It also protects a few ref counters in server, ses and tcon. In most cases the critical section doesn't seem to be large, and in the few cases where it is slightly larger there seems to be no real benefit from concurrent access. I briefly considered the RCU mechanism, but it appears to me that there is no real need.

Replace it with a spinlock and get rid of the last rwlock in the cifs code.

Signed-off-by: Suresh Jayaraman <sjayaraman@suse.de>
Signed-off-by: Steve French <sfrench@us.ibm.com>
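The conversion applied throughout the diff below is mechanical: rwlock_t becomes spinlock_t, rwlock_init() becomes spin_lock_init(), and both read_lock()/read_unlock() and write_lock()/write_unlock() collapse to spin_lock()/spin_unlock(). A minimal sketch of that pattern follows; it is illustrative only, not part of the patch, and the demo_* names are made up.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical example globals, not taken from fs/cifs. */
static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);	/* was: static DEFINE_RWLOCK(demo_lock); */
static int demo_refcount;

static void demo_get(void)
{
	/* was: write_lock(&demo_lock); */
	spin_lock(&demo_lock);
	demo_refcount++;
	spin_unlock(&demo_lock);
}

static void demo_walk(void)
{
	struct list_head *pos;

	/* was: read_lock(&demo_lock); readers now serialize with each other,
	   which is acceptable because the critical section stays short */
	spin_lock(&demo_lock);
	list_for_each(pos, &demo_list)
		;	/* inspect each entry here */
	spin_unlock(&demo_lock);
}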
-rw-r--r--	fs/cifs/cifs_debug.c	12
-rw-r--r--	fs/cifs/cifsfs.c	8
-rw-r--r--	fs/cifs/cifsglob.h	2
-rw-r--r--	fs/cifs/cifssmb.c	6
-rw-r--r--	fs/cifs/connect.c	70
-rw-r--r--	fs/cifs/misc.c	14
-rw-r--r--	fs/cifs/sess.c	4
7 files changed, 58 insertions, 58 deletions
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index eb1ba493489f..103ab8b605b0 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -148,7 +148,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
 	seq_printf(m, "Servers:");
 
 	i = 0;
-	read_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each(tmp1, &cifs_tcp_ses_list) {
 		server = list_entry(tmp1, struct TCP_Server_Info,
 				    tcp_ses_list);
@@ -230,7 +230,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
 			spin_unlock(&GlobalMid_Lock);
 		}
 	}
-	read_unlock(&cifs_tcp_ses_lock);
+	spin_unlock(&cifs_tcp_ses_lock);
 	seq_putc(m, '\n');
 
 	/* BB add code to dump additional info such as TCP session info now */
@@ -270,7 +270,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
 		atomic_set(&totBufAllocCount, 0);
 		atomic_set(&totSmBufAllocCount, 0);
 #endif /* CONFIG_CIFS_STATS2 */
-		read_lock(&cifs_tcp_ses_lock);
+		spin_lock(&cifs_tcp_ses_lock);
 		list_for_each(tmp1, &cifs_tcp_ses_list) {
 			server = list_entry(tmp1, struct TCP_Server_Info,
 					    tcp_ses_list);
@@ -303,7 +303,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
 				}
 			}
 		}
-		read_unlock(&cifs_tcp_ses_lock);
+		spin_unlock(&cifs_tcp_ses_lock);
 	}
 
 	return count;
@@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
 			GlobalCurrentXid, GlobalMaxActiveXid);
 
 	i = 0;
-	read_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each(tmp1, &cifs_tcp_ses_list) {
 		server = list_entry(tmp1, struct TCP_Server_Info,
 				    tcp_ses_list);
@@ -397,7 +397,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
 			}
 		}
 	}
-	read_unlock(&cifs_tcp_ses_lock);
+	spin_unlock(&cifs_tcp_ses_lock);
 
 	seq_putc(m, '\n');
 	return 0;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index f1d9c71e807f..cb77915a445b 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -482,16 +482,16 @@ static void cifs_umount_begin(struct super_block *sb)
 
 	tcon = cifs_sb_master_tcon(cifs_sb);
 
-	read_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
 		/* we have other mounts to same share or we have
 		   already tried to force umount this and woken up
 		   all waiting network requests, nothing to do */
-		read_unlock(&cifs_tcp_ses_lock);
+		spin_unlock(&cifs_tcp_ses_lock);
 		return;
 	} else if (tcon->tc_count == 1)
 		tcon->tidStatus = CifsExiting;
-	read_unlock(&cifs_tcp_ses_lock);
+	spin_unlock(&cifs_tcp_ses_lock);
 
 	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
 	/* cancel_notify_requests(tcon); */
@@ -940,7 +940,7 @@ init_cifs(void)
 	GlobalTotalActiveXid = 0;
 	GlobalMaxActiveXid = 0;
 	memset(Local_System_Name, 0, 15);
-	rwlock_init(&cifs_tcp_ses_lock);
+	spin_lock_init(&cifs_tcp_ses_lock);
 	spin_lock_init(&cifs_file_list_lock);
 	spin_lock_init(&GlobalMid_Lock);
 
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 18ee0adda306..28337cba0295 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -703,7 +703,7 @@ GLOBAL_EXTERN struct list_head cifs_tcp_ses_list;
  * the reference counters for the server, smb session, and tcon. Finally,
  * changes to the tcon->tidStatus should be done while holding this lock.
  */
-GLOBAL_EXTERN rwlock_t cifs_tcp_ses_lock;
+GLOBAL_EXTERN spinlock_t cifs_tcp_ses_lock;
 
 /*
  * This lock protects the cifs_file->llist and cifs_file->flist
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index bfb59a68e4fd..e98f1f317b15 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -593,9 +593,9 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
 		rc = -EIO;
 		goto neg_err_exit;
 	}
-	read_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	if (server->srv_count > 1) {
-		read_unlock(&cifs_tcp_ses_lock);
+		spin_unlock(&cifs_tcp_ses_lock);
 		if (memcmp(server->server_GUID,
 			   pSMBr->u.extended_response.
 			   GUID, 16) != 0) {
@@ -605,7 +605,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
 				16);
 		}
 	} else {
-		read_unlock(&cifs_tcp_ses_lock);
+		spin_unlock(&cifs_tcp_ses_lock);
 		memcpy(server->server_GUID,
 			pSMBr->u.extended_response.GUID, 16);
 	}
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 019f00380d12..7e73176acb58 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -150,7 +150,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
 
 	/* before reconnecting the tcp session, mark the smb session (uid)
 	   and the tid bad so they are not used until reconnected */
-	read_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each(tmp, &server->smb_ses_list) {
 		ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
 		ses->need_reconnect = true;
@@ -160,7 +160,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
 			tcon->need_reconnect = true;
 		}
 	}
-	read_unlock(&cifs_tcp_ses_lock);
+	spin_unlock(&cifs_tcp_ses_lock);
 	/* do not want to be sending data on a socket we are freeing */
 	mutex_lock(&server->srv_mutex);
 	if (server->ssocket) {
@@ -637,9 +637,9 @@ multi_t2_fnd:
 	} /* end while !EXITING */
 
 	/* take it off the list, if it's not already */
-	write_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	list_del_init(&server->tcp_ses_list);
-	write_unlock(&cifs_tcp_ses_lock);
+	spin_unlock(&cifs_tcp_ses_lock);
 
 	spin_lock(&GlobalMid_Lock);
 	server->tcpStatus = CifsExiting;
@@ -677,7 +677,7 @@ multi_t2_fnd:
 	 * BB: we shouldn't have to do any of this. It shouldn't be
 	 * possible to exit from the thread with active SMB sessions
 	 */
-	read_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	if (list_empty(&server->pending_mid_q)) {
 		/* loop through server session structures attached to this and
 		    mark them dead */
@@ -687,7 +687,7 @@ multi_t2_fnd:
 			ses->status = CifsExiting;
 			ses->server = NULL;
 		}
-		read_unlock(&cifs_tcp_ses_lock);
+		spin_unlock(&cifs_tcp_ses_lock);
 	} else {
 		/* although we can not zero the server struct pointer yet,
 		   since there are active requests which may depnd on them,
@@ -710,7 +710,7 @@ multi_t2_fnd:
 			}
 		}
 		spin_unlock(&GlobalMid_Lock);
-		read_unlock(&cifs_tcp_ses_lock);
+		spin_unlock(&cifs_tcp_ses_lock);
 		/* 1/8th of sec is more than enough time for them to exit */
 		msleep(125);
 	}
@@ -733,12 +733,12 @@ multi_t2_fnd:
 	   if a crazy root user tried to kill cifsd
 	   kernel thread explicitly this might happen) */
 	/* BB: This shouldn't be necessary, see above */
-	read_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each(tmp, &server->smb_ses_list) {
 		ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
 		ses->server = NULL;
 	}
-	read_unlock(&cifs_tcp_ses_lock);
+	spin_unlock(&cifs_tcp_ses_lock);
 
 	kfree(server->hostname);
 	task_to_wake = xchg(&server->tsk, NULL);
@@ -1524,7 +1524,7 @@ cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol)
 {
 	struct TCP_Server_Info *server;
 
-	write_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
 		if (!match_address(server, addr,
 				   (struct sockaddr *)&vol->srcaddr))
@@ -1534,11 +1534,11 @@ cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol)
 			continue;
 
 		++server->srv_count;
-		write_unlock(&cifs_tcp_ses_lock);
+		spin_unlock(&cifs_tcp_ses_lock);
 		cFYI(1, "Existing tcp session with server found");
 		return server;
 	}
-	write_unlock(&cifs_tcp_ses_lock);
+	spin_unlock(&cifs_tcp_ses_lock);
 	return NULL;
 }
 
@@ -1547,14 +1547,14 @@ cifs_put_tcp_session(struct TCP_Server_Info *server)
 {
 	struct task_struct *task;
 
-	write_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	if (--server->srv_count > 0) {
-		write_unlock(&cifs_tcp_ses_lock);
+		spin_unlock(&cifs_tcp_ses_lock);
 		return;
 	}
 
 	list_del_init(&server->tcp_ses_list);
-	write_unlock(&cifs_tcp_ses_lock);
+	spin_unlock(&cifs_tcp_ses_lock);
 
 	spin_lock(&GlobalMid_Lock);
 	server->tcpStatus = CifsExiting;
@@ -1679,9 +1679,9 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
 	}
 
 	/* thread spawned, put it on the list */
-	write_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
-	write_unlock(&cifs_tcp_ses_lock);
+	spin_unlock(&cifs_tcp_ses_lock);
 
 	cifs_fscache_get_client_cookie(tcp_ses);
 
@@ -1703,7 +1703,7 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
 {
 	struct cifsSesInfo *ses;
 
-	write_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
 		switch (server->secType) {
 		case Kerberos:
@@ -1723,10 +1723,10 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
 			continue;
 		}
 		++ses->ses_count;
-		write_unlock(&cifs_tcp_ses_lock);
+		spin_unlock(&cifs_tcp_ses_lock);
 		return ses;
 	}
-	write_unlock(&cifs_tcp_ses_lock);
+	spin_unlock(&cifs_tcp_ses_lock);
 	return NULL;
 }
 
@@ -1737,14 +1737,14 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
 	struct TCP_Server_Info *server = ses->server;
 
 	cFYI(1, "%s: ses_count=%d\n", __func__, ses->ses_count);
-	write_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	if (--ses->ses_count > 0) {
-		write_unlock(&cifs_tcp_ses_lock);
+		spin_unlock(&cifs_tcp_ses_lock);
 		return;
 	}
 
 	list_del_init(&ses->smb_ses_list);
-	write_unlock(&cifs_tcp_ses_lock);
+	spin_unlock(&cifs_tcp_ses_lock);
 
 	if (ses->status == CifsGood) {
 		xid = GetXid();
@@ -1841,9 +1841,9 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
 		goto get_ses_fail;
 
 	/* success, put it on the list */
-	write_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	list_add(&ses->smb_ses_list, &server->smb_ses_list);
-	write_unlock(&cifs_tcp_ses_lock);
+	spin_unlock(&cifs_tcp_ses_lock);
 
 	FreeXid(xid);
 	return ses;
@@ -1860,7 +1860,7 @@ cifs_find_tcon(struct cifsSesInfo *ses, const char *unc)
 	struct list_head *tmp;
 	struct cifsTconInfo *tcon;
 
-	write_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each(tmp, &ses->tcon_list) {
 		tcon = list_entry(tmp, struct cifsTconInfo, tcon_list);
 		if (tcon->tidStatus == CifsExiting)
@@ -1869,10 +1869,10 @@ cifs_find_tcon(struct cifsSesInfo *ses, const char *unc)
 			continue;
 
 		++tcon->tc_count;
-		write_unlock(&cifs_tcp_ses_lock);
+		spin_unlock(&cifs_tcp_ses_lock);
 		return tcon;
 	}
-	write_unlock(&cifs_tcp_ses_lock);
+	spin_unlock(&cifs_tcp_ses_lock);
 	return NULL;
 }
 
@@ -1883,14 +1883,14 @@ cifs_put_tcon(struct cifsTconInfo *tcon)
 	struct cifsSesInfo *ses = tcon->ses;
 
 	cFYI(1, "%s: tc_count=%d\n", __func__, tcon->tc_count);
-	write_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	if (--tcon->tc_count > 0) {
-		write_unlock(&cifs_tcp_ses_lock);
+		spin_unlock(&cifs_tcp_ses_lock);
 		return;
 	}
 
 	list_del_init(&tcon->tcon_list);
-	write_unlock(&cifs_tcp_ses_lock);
+	spin_unlock(&cifs_tcp_ses_lock);
 
 	xid = GetXid();
 	CIFSSMBTDis(xid, tcon);
@@ -1963,9 +1963,9 @@ cifs_get_tcon(struct cifsSesInfo *ses, struct smb_vol *volume_info)
 	tcon->nocase = volume_info->nocase;
 	tcon->local_lease = volume_info->local_lease;
 
-	write_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	list_add(&tcon->tcon_list, &ses->tcon_list);
-	write_unlock(&cifs_tcp_ses_lock);
+	spin_unlock(&cifs_tcp_ses_lock);
 
 	cifs_fscache_get_super_cookie(tcon);
 
@@ -3225,9 +3225,9 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
 	vol_info->secFlg = CIFSSEC_MUST_KRB5;
 
 	/* get a reference for the same TCP session */
-	write_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	++master_tcon->ses->server->srv_count;
-	write_unlock(&cifs_tcp_ses_lock);
+	spin_unlock(&cifs_tcp_ses_lock);
 
 	ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info);
 	if (IS_ERR(ses)) {
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index de6073cccd9c..a7b492c213cd 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -347,7 +347,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
 			if (current_fsuid() != treeCon->ses->linux_uid) {
 				cFYI(1, "Multiuser mode and UID "
 					 "did not match tcon uid");
-				read_lock(&cifs_tcp_ses_lock);
+				spin_lock(&cifs_tcp_ses_lock);
 				list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
 					ses = list_entry(temp_item, struct cifsSesInfo, smb_ses_list);
 					if (ses->linux_uid == current_fsuid()) {
@@ -361,7 +361,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
 						}
 					}
 				}
-				read_unlock(&cifs_tcp_ses_lock);
+				spin_unlock(&cifs_tcp_ses_lock);
 			}
 		}
 	}
@@ -551,7 +551,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
 		return false;
 
 	/* look up tcon based on tid & uid */
-	read_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each(tmp, &srv->smb_ses_list) {
 		ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
 		list_for_each(tmp1, &ses->tcon_list) {
@@ -573,7 +573,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
 				 */
 				if (netfile->closePend) {
 					spin_unlock(&cifs_file_list_lock);
-					read_unlock(&cifs_tcp_ses_lock);
+					spin_unlock(&cifs_tcp_ses_lock);
 					return true;
 				}
 
@@ -595,16 +595,16 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
 				netfile->oplock_break_cancelled = false;
 
 				spin_unlock(&cifs_file_list_lock);
-				read_unlock(&cifs_tcp_ses_lock);
+				spin_unlock(&cifs_tcp_ses_lock);
 				return true;
 			}
 			spin_unlock(&cifs_file_list_lock);
-			read_unlock(&cifs_tcp_ses_lock);
+			spin_unlock(&cifs_tcp_ses_lock);
 			cFYI(1, "No matching file for oplock break");
 			return true;
 		}
 	}
-	read_unlock(&cifs_tcp_ses_lock);
+	spin_unlock(&cifs_tcp_ses_lock);
 	cFYI(1, "Can not process oplock break for non-existent connection");
 	return true;
 }
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index e35dc60d3255..2a11efd96592 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -80,7 +80,7 @@ static __le16 get_next_vcnum(struct cifsSesInfo *ses)
 	if (max_vcs < 2)
 		max_vcs = 0xFFFF;
 
-	write_lock(&cifs_tcp_ses_lock);
+	spin_lock(&cifs_tcp_ses_lock);
 	if ((ses->need_reconnect) && is_first_ses_reconnect(ses))
 		goto get_vc_num_exit;  /* vcnum will be zero */
 	for (i = ses->server->srv_count - 1; i < max_vcs; i++) {
@@ -112,7 +112,7 @@ static __le16 get_next_vcnum(struct cifsSesInfo *ses)
 	vcnum = i;
 	ses->vcnum = vcnum;
 get_vc_num_exit:
-	write_unlock(&cifs_tcp_ses_lock);
+	spin_unlock(&cifs_tcp_ses_lock);
 
 	return cpu_to_le16(vcnum);
 }