author     Steve French <sfrench@us.ibm.com>   2007-07-06 19:13:06 -0400
committer  Steve French <sfrench@us.ibm.com>   2007-07-06 19:13:06 -0400
commit     6dc0f87e351142e224b396f29b59527c4b2d834c (patch)
tree       b504c9201a9399c9f4007ce9c8c2c78e2b4e233e /fs/cifs/cifsfs.c
parent     79a58d1f6075bc0029c38836be9790917a69a342 (diff)
[CIFS] whitespace cleanup
Signed-off-by: Steve French <sfrench@us.ibm.com>
Diffstat (limited to 'fs/cifs/cifsfs.c')
-rw-r--r--   fs/cifs/cifsfs.c   63
1 file changed, 32 insertions, 31 deletions
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 7c04752b76cb..9122ef5c3174 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -64,10 +64,10 @@ unsigned int multiuser_mount = 0;
 unsigned int extended_security = CIFSSEC_DEF;
 /* unsigned int ntlmv2_support = 0; */
 unsigned int sign_CIFS_PDUs = 1;
-extern struct task_struct * oplockThread; /* remove sparse warning */
-struct task_struct * oplockThread = NULL;
+extern struct task_struct *oplockThread; /* remove sparse warning */
+struct task_struct *oplockThread = NULL;
 /* extern struct task_struct * dnotifyThread; remove sparse warning */
-static struct task_struct * dnotifyThread = NULL;
+static struct task_struct *dnotifyThread = NULL;
 static const struct super_operations cifs_super_ops;
 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
 module_param(CIFSMaxBufSize, int, 0);
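The change above is typical of the whole cleanup: per the kernel's Documentation/CodingStyle, the '*' in a pointer declaration binds to the identifier rather than to the type name. A two-line illustration with invented variable names, not taken from the file:

struct task_struct * legacy_thread_style;    /* discouraged: blank between '*' and the name */
struct task_struct *preferred_thread_style;  /* preferred: '*' attached to the identifier */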
@@ -765,22 +765,22 @@ cifs_init_request_bufs(void)
 	been more efficient (compacting multiple slab items on one 4k page)
 	for the case in which debug was on, but this larger size allows
 	more SMBs to use small buffer alloc and is still much more
 	efficient to alloc 1 per page off the slab compared to 17K (5page)
 	alloc of large cifs buffers even when page debugging is on */
 	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
 			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
 			NULL, NULL);
 	if (cifs_sm_req_cachep == NULL) {
 		mempool_destroy(cifs_req_poolp);
 		kmem_cache_destroy(cifs_req_cachep);
 		return -ENOMEM;
 	}
 
 	if (cifs_min_small < 2)
 		cifs_min_small = 2;
 	else if (cifs_min_small > 256) {
 		cifs_min_small = 256;
-		cFYI(1,("cifs_min_small set to maximum (256)"));
+		cFYI(1, ("cifs_min_small set to maximum (256)"));
 	}
 
 	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
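The comment in this hunk explains the sizing trade-off for the small SMB request buffers: a dedicated slab cache whose objects back a mempool, with the pool's minimum clamped between 2 and 256. Below is a minimal sketch of that slab-cache-plus-mempool pattern, not code from the commit; the demo_* names are invented, and kmem_cache_create() is shown in the six-argument (ctor/dtor) form visible in the hunk, which matches kernels of this era rather than current mainline.

#include <linux/slab.h>
#include <linux/mempool.h>

static struct kmem_cache *demo_req_cachep;	/* hypothetical slab cache */
static mempool_t *demo_req_poolp;		/* hypothetical mempool on top of it */

static int demo_init_request_bufs(unsigned int min_nr, size_t bufsize)
{
	/* clamp the tunable the same way the hunk clamps cifs_min_small */
	if (min_nr < 2)
		min_nr = 2;
	else if (min_nr > 256)
		min_nr = 256;

	demo_req_cachep = kmem_cache_create("demo_small_rq", bufsize, 0,
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (demo_req_cachep == NULL)
		return -ENOMEM;

	/* the mempool guarantees at least min_nr buffers even under memory pressure */
	demo_req_poolp = mempool_create_slab_pool(min_nr, demo_req_cachep);
	if (demo_req_poolp == NULL) {
		kmem_cache_destroy(demo_req_cachep);
		return -ENOMEM;
	}
	return 0;
}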
@@ -843,14 +843,14 @@ cifs_destroy_mids(void)
 
 static int cifs_oplock_thread(void * dummyarg)
 {
-	struct oplock_q_entry * oplock_item;
+	struct oplock_q_entry *oplock_item;
 	struct cifsTconInfo *pTcon;
-	struct inode * inode;
+	struct inode *inode;
 	__u16 netfid;
 	int rc;
 
 	do {
 		if (try_to_freeze())
 			continue;
 
 		spin_lock(&GlobalMid_Lock);
@@ -859,17 +859,17 @@ static int cifs_oplock_thread(void * dummyarg)
 			set_current_state(TASK_INTERRUPTIBLE);
 			schedule_timeout(39*HZ);
 		} else {
 			oplock_item = list_entry(GlobalOplock_Q.next,
 				struct oplock_q_entry, qhead);
 			if (oplock_item) {
 				cFYI(1,("found oplock item to write out"));
 				pTcon = oplock_item->tcon;
 				inode = oplock_item->pinode;
 				netfid = oplock_item->netfid;
 				spin_unlock(&GlobalMid_Lock);
 				DeleteOplockQEntry(oplock_item);
 				/* can not grab inode sem here since it would
 				deadlock when oplock received on delete
 				since vfs_unlink holds the i_mutex across
 				the call */
 				/* mutex_lock(&inode->i_mutex);*/
@@ -884,20 +884,21 @@ static int cifs_oplock_thread(void * dummyarg)
 				/* mutex_unlock(&inode->i_mutex);*/
 				if (rc)
 					CIFS_I(inode)->write_behind_rc = rc;
-				cFYI(1,("Oplock flush inode %p rc %d",inode,rc));
-
-			/* releasing a stale oplock after recent reconnection
-			of smb session using a now incorrect file
-			handle is not a data integrity issue but do
-			not bother sending an oplock release if session
-			to server still is disconnected since oplock
+				cFYI(1, ("Oplock flush inode %p rc %d",
+					inode, rc));
+
+			/* releasing stale oplock after recent reconnect
+			of smb session using a now incorrect file
+			handle is not a data integrity issue but do
+			not bother sending an oplock release if session
+			to server still is disconnected since oplock
 			already released by the server in that case */
 				if (pTcon->tidStatus != CifsNeedReconnect) {
 					rc = CIFSSMBLock(0, pTcon, netfid,
 					0 /* len */ , 0 /* offset */, 0,
 					0, LOCKING_ANDX_OPLOCK_RELEASE,
 					0 /* wait flag */);
-					cFYI(1,("Oplock release rc = %d ",rc));
+					cFYI(1,("Oplock release rc = %d ", rc));
 				}
 			} else
 				spin_unlock(&GlobalMid_Lock);
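The three hunks above all touch cifs_oplock_thread(), which is a spinlock-protected consumer loop: test the queue under GlobalMid_Lock, sleep on a timeout when it is empty, otherwise dequeue, drop the lock before any operation that can sleep (the flush and the LOCKING_ANDX_OPLOCK_RELEASE), and skip the release when the tcon needs reconnect. A condensed sketch of that loop shape follows, with invented demo_* names; it is not the commit's code.

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct demo_q_entry {				/* hypothetical queue entry */
	struct list_head qhead;
};

static LIST_HEAD(demo_q);
static DEFINE_SPINLOCK(demo_q_lock);

static void demo_flush_and_release(struct demo_q_entry *item);	/* hypothetical worker */

static int demo_oplock_loop(void *unused)
{
	struct demo_q_entry *item;

	do {
		if (try_to_freeze())
			continue;

		spin_lock(&demo_q_lock);
		if (list_empty(&demo_q)) {
			spin_unlock(&demo_q_lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39 * HZ);	/* same poll interval as the hunk */
		} else {
			item = list_entry(demo_q.next, struct demo_q_entry, qhead);
			list_del(&item->qhead);
			spin_unlock(&demo_q_lock);	/* never sleep while holding the spinlock */
			demo_flush_and_release(item);
		}
	} while (!kthread_should_stop());

	return 0;
}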
@@ -924,9 +925,9 @@ static int cifs_dnotify_thread(void * dummyarg)
 		to be woken up and wakeq so the
 		thread can wake up and error out */
 		list_for_each(tmp, &GlobalSMBSessionList) {
 			ses = list_entry(tmp, struct cifsSesInfo,
 				cifsSessionList);
 			if (ses && ses->server &&
 			    atomic_read(&ses->server->inFlight))
 				wake_up_all(&ses->server->response_q);
 		}
@@ -950,13 +951,13 @@ init_cifs(void)
 #ifdef CONFIG_CIFS_EXPERIMENTAL
 	INIT_LIST_HEAD(&GlobalDnotifyReqList);
 	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
 #endif
 /*
  * Initialize Global counters
  */
 	atomic_set(&sesInfoAllocCount, 0);
 	atomic_set(&tconInfoAllocCount, 0);
-	atomic_set(&tcpSesAllocCount,0);
+	atomic_set(&tcpSesAllocCount, 0);
 	atomic_set(&tcpSesReconnectCount, 0);
 	atomic_set(&tconInfoReconnectCount, 0);
 
@@ -977,10 +978,10 @@ init_cifs(void)
 
 	if (cifs_max_pending < 2) {
 		cifs_max_pending = 2;
-		cFYI(1,("cifs_max_pending set to min of 2"));
+		cFYI(1, ("cifs_max_pending set to min of 2"));
 	} else if (cifs_max_pending > 256) {
 		cifs_max_pending = 256;
-		cFYI(1,("cifs_max_pending set to max of 256"));
+		cFYI(1, ("cifs_max_pending set to max of 256"));
 	}
 
 	rc = cifs_init_inodecache();
@@ -1002,14 +1003,14 @@ init_cifs(void)
 	oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
 	if (IS_ERR(oplockThread)) {
 		rc = PTR_ERR(oplockThread);
-		cERROR(1,("error %d create oplock thread", rc));
+		cERROR(1, ("error %d create oplock thread", rc));
 		goto out_unregister_filesystem;
 	}
 
 	dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
 	if (IS_ERR(dnotifyThread)) {
 		rc = PTR_ERR(dnotifyThread);
-		cERROR(1,("error %d create dnotify thread", rc));
+		cERROR(1, ("error %d create dnotify thread", rc));
 		goto out_stop_oplock_thread;
 	}
 
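The init_cifs() hunk above shows the usual kthread_run() error-handling idiom: on failure kthread_run() returns an ERR_PTR-encoded errno (never NULL), so the result is checked with IS_ERR(), decoded with PTR_ERR(), and earlier setup is unwound through goto labels. A minimal, hypothetical sketch of that idiom (demo_* names are invented, and the unwind is collapsed to a direct return):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct task_struct *demo_worker;

static int demo_thread_fn(void *unused)
{
	/* hypothetical worker: wake up periodically until asked to stop */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}

static int __init demo_init(void)
{
	int rc;

	demo_worker = kthread_run(demo_thread_fn, NULL, "demoworkerd");
	if (IS_ERR(demo_worker)) {
		rc = PTR_ERR(demo_worker);	/* decode -errno from the error pointer */
		demo_worker = NULL;
		return rc;			/* a real init would unwind via goto labels here */
	}
	return 0;
}

static void __exit demo_exit(void)
{
	if (demo_worker)
		kthread_stop(demo_worker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");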
@@ -1048,7 +1049,7 @@ exit_cifs(void)
 }
 
 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
 MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
 MODULE_DESCRIPTION
     ("VFS to access servers complying with the SNIA CIFS Specification e.g. Samba and Windows");
 MODULE_VERSION(CIFS_VERSION);