author     Steve French <sfrench@us.ibm.com>   2007-07-12 20:33:32 -0400
committer  Steve French <sfrench@us.ibm.com>   2007-07-12 20:33:32 -0400
commit     50c2f75388727018c3c357454a247072915a9e3f
tree       e7c7cd30f1adee51be7af8bda5e937df5c899bc6 /fs/cifs/cifsfs.c
parent     7521a3c566dda7bb09576975324fc0a08a79ad14
[CIFS] whitespace/formatting fixes
This should be the last big batch of whitespace/formatting fixes. checkpatch warnings for the cifs directory are down about 90%, and many of the remaining ones are harder to remove or would make the code harder to read.

Signed-off-by: Steve French <sfrench@us.ibm.com>
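For readers skimming the hunks below, the changes follow the kernel CodingStyle rules that checkpatch flags: the '*' in a pointer declaration binds to the name rather than the type, a space follows each comma in an argument list, trailing whitespace is dropped, and an opening brace stays on the same line as its condition. A minimal before/after sketch of the pattern (the function name example_perm is made up purely for illustration; cFYI is the cifs debug macro used throughout this file):

-static int example_perm(struct inode * inode,int mask)
+static int example_perm(struct inode *inode, int mask)
 {
-        if (mask)
-        {
-                cFYI(1,("mask: 0x%x",mask));
+        if (mask) {
+                cFYI(1, ("mask: 0x%x", mask));
         }
         return 0;
 }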
Diffstat (limited to 'fs/cifs/cifsfs.c')
-rw-r--r--  fs/cifs/cifsfs.c  91
1 file changed, 46 insertions, 45 deletions
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 40f35f0263ac..8ebd887205bb 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -95,7 +95,7 @@ cifs_read_super(struct super_block *sb, void *data,
         struct inode *inode;
         struct cifs_sb_info *cifs_sb;
         int rc = 0;
 
         /* BB should we make this contingent on mount parm? */
         sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
         sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
@@ -135,7 +135,7 @@ cifs_read_super(struct super_block *sb, void *data,
                 rc = -ENOMEM;
                 goto out_no_root;
         }
 
 #ifdef CONFIG_CIFS_EXPERIMENTAL
         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
                 cFYI(1, ("export ops supported"));
@@ -153,7 +153,7 @@ out_no_root:
 out_mount_failed:
         if (cifs_sb) {
                 if (cifs_sb->local_nls)
                         unload_nls(cifs_sb->local_nls);
                 kfree(cifs_sb);
         }
         return rc;
@@ -230,7 +230,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
                                 longer available? */
 }
 
-static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
+static int cifs_permission(struct inode *inode, int mask, struct nameidata *nd)
 {
         struct cifs_sb_info *cifs_sb;
 
@@ -238,10 +238,10 @@ static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
 
         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
                 return 0;
         } else /* file mode might have been restricted at mount time
                 on the client (above and beyond ACL on servers) for
                 servers which do not support setting and viewing mode bits,
                 so allowing client to check permissions is useful */
                 return generic_permission(inode, mask, NULL);
 }
 
@@ -270,7 +270,7 @@ cifs_alloc_inode(struct super_block *sb)
         cifs_inode->clientCanCacheRead = FALSE;
         cifs_inode->clientCanCacheAll = FALSE;
         cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
 
         /* Can not set i_flags here - they get immediately overwritten
            to zero by the VFS */
 /*      cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;*/
@@ -317,21 +317,21 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
                 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
                     !(cifs_sb->tcon->ses->capabilities & CAP_UNIX))
                         seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
-                seq_printf(s, ",rsize=%d",cifs_sb->rsize);
-                seq_printf(s, ",wsize=%d",cifs_sb->wsize);
+                seq_printf(s, ",rsize=%d", cifs_sb->rsize);
+                seq_printf(s, ",wsize=%d", cifs_sb->wsize);
         }
         return 0;
 }
 
 #ifdef CONFIG_CIFS_QUOTA
-int cifs_xquota_set(struct super_block * sb, int quota_type, qid_t qid,
-                struct fs_disk_quota * pdquota)
+int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
+                struct fs_disk_quota *pdquota)
 {
         int xid;
         int rc = 0;
         struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
         struct cifsTconInfo *pTcon;
 
         if (cifs_sb)
                 pTcon = cifs_sb->tcon;
         else
@@ -340,7 +340,7 @@ int cifs_xquota_set(struct super_block * sb, int quota_type, qid_t qid,
 
         xid = GetXid();
         if (pTcon) {
-                cFYI(1,("set type: 0x%x id: %d",quota_type,qid));
+                cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
         } else {
                 return -EIO;
         }
@@ -349,8 +349,8 @@ int cifs_xquota_set(struct super_block * sb, int quota_type, qid_t qid,
         return rc;
 }
 
-int cifs_xquota_get(struct super_block * sb, int quota_type, qid_t qid,
-                struct fs_disk_quota * pdquota)
+int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
+                struct fs_disk_quota *pdquota)
 {
         int xid;
         int rc = 0;
@@ -364,7 +364,7 @@ int cifs_xquota_get(struct super_block * sb, int quota_type, qid_t qid,
 
         xid = GetXid();
         if (pTcon) {
-                cFYI(1,("set type: 0x%x id: %d",quota_type,qid));
+                cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
         } else {
                 rc = -EIO;
         }
@@ -373,9 +373,9 @@ int cifs_xquota_get(struct super_block * sb, int quota_type, qid_t qid,
         return rc;
 }
 
-int cifs_xstate_set(struct super_block * sb, unsigned int flags, int operation)
+int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
 {
         int xid;
         int rc = 0;
         struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
         struct cifsTconInfo *pTcon;
@@ -387,7 +387,7 @@ int cifs_xstate_set(struct super_block * sb, unsigned int flags, int operation)
 
         xid = GetXid();
         if (pTcon) {
-                cFYI(1,("flags: 0x%x operation: 0x%x",flags,operation));
+                cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
         } else {
                 rc = -EIO;
         }
@@ -396,7 +396,7 @@ int cifs_xstate_set(struct super_block * sb, unsigned int flags, int operation)
         return rc;
 }
 
-int cifs_xstate_get(struct super_block * sb, struct fs_quota_stat *qstats)
+int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
 {
         int xid;
         int rc = 0;
@@ -410,7 +410,7 @@ int cifs_xstate_get(struct super_block * sb, struct fs_quota_stat *qstats)
         }
         xid = GetXid();
         if (pTcon) {
-                cFYI(1,("pqstats %p",qstats));
+                cFYI(1, ("pqstats %p", qstats));
         } else {
                 rc = -EIO;
         }
@@ -427,10 +427,10 @@ static struct quotactl_ops cifs_quotactl_ops = {
 };
 #endif
 
-static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags)
+static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags)
 {
         struct cifs_sb_info *cifs_sb;
-        struct cifsTconInfo * tcon;
+        struct cifsTconInfo *tcon;
 
         if (!(flags & MNT_FORCE))
                 return;
@@ -448,9 +448,8 @@ static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags)
 
         /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
         /* cancel_notify_requests(tcon); */
-        if (tcon->ses && tcon->ses->server)
-        {
-                cFYI(1,("wake up tasks now - umount begin not complete"));
+        if (tcon->ses && tcon->ses->server) {
+                cFYI(1, ("wake up tasks now - umount begin not complete"));
                 wake_up_all(&tcon->ses->server->request_q);
                 wake_up_all(&tcon->ses->server->response_q);
                 msleep(1); /* yield */
@@ -483,10 +482,11 @@ static const struct super_operations cifs_super_ops = {
         .statfs = cifs_statfs,
         .alloc_inode = cifs_alloc_inode,
         .destroy_inode = cifs_destroy_inode,
 /*      .drop_inode     = generic_delete_inode,
-        .delete_inode   = cifs_delete_inode, *//* Do not need the above two functions
-unless later we add lazy close of inodes or unless the kernel forgets to call
-us with the same number of releases (closes) as opens */
+        .delete_inode   = cifs_delete_inode, */ /* Do not need above two
+        functions unless later we add lazy close of inodes or unless the
+        kernel forgets to call us with the same number of releases (closes)
+        as opens */
         .show_options = cifs_show_options,
         .umount_begin = cifs_umount_begin,
         .remount_fs = cifs_remount,
@@ -589,11 +589,11 @@ const struct inode_operations cifs_file_inode_ops = {
         .getxattr = cifs_getxattr,
         .listxattr = cifs_listxattr,
         .removexattr = cifs_removexattr,
 #endif
 };
 
 const struct inode_operations cifs_symlink_inode_ops = {
         .readlink = generic_readlink,
         .follow_link = cifs_follow_link,
         .put_link = cifs_put_link,
         .permission = cifs_permission,
@@ -605,7 +605,7 @@ const struct inode_operations cifs_symlink_inode_ops = {
         .getxattr = cifs_getxattr,
         .listxattr = cifs_listxattr,
         .removexattr = cifs_removexattr,
 #endif
 };
 
 const struct file_operations cifs_file_ops = {
@@ -631,7 +631,7 @@ const struct file_operations cifs_file_ops = {
 };
 
 const struct file_operations cifs_file_direct_ops = {
         /* no mmap, no aio, no readv -
            BB reevaluate whether they can be done with directio, no cache */
         .read = cifs_user_read,
         .write = cifs_user_write,
@@ -671,7 +671,7 @@ const struct file_operations cifs_file_nobrl_ops = {
 };
 
 const struct file_operations cifs_file_direct_nobrl_ops = {
         /* no mmap, no aio, no readv -
            BB reevaluate whether they can be done with directio, no cache */
         .read = cifs_user_read,
         .write = cifs_user_write,
@@ -696,11 +696,11 @@ const struct file_operations cifs_dir_ops = {
 #ifdef CONFIG_CIFS_EXPERIMENTAL
         .dir_notify = cifs_dir_notify,
 #endif /* CONFIG_CIFS_EXPERIMENTAL */
         .ioctl = cifs_ioctl,
 };
 
 static void
-cifs_init_once(void *inode, struct kmem_cache * cachep, unsigned long flags)
+cifs_init_once(void *inode, struct kmem_cache *cachep, unsigned long flags)
 {
         struct cifsInodeInfo *cifsi = inode;
 
@@ -752,7 +752,7 @@ cifs_init_request_bufs(void)
                 cifs_min_rcv = 1;
         else if (cifs_min_rcv > 64) {
                 cifs_min_rcv = 64;
-                cERROR(1,("cifs_min_rcv set to maximum (64)"));
+                cERROR(1, ("cifs_min_rcv set to maximum (64)"));
         }
 
         cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
@@ -765,7 +765,7 @@ cifs_init_request_bufs(void)
         /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
         almost all handle based requests (but not write response, nor is it
         sufficient for path based requests). A smaller size would have
         been more efficient (compacting multiple slab items on one 4k page)
         for the case in which debug was on, but this larger size allows
         more SMBs to use small buffer alloc and is still much more
         efficient to alloc 1 per page off the slab compared to 17K (5page)
@@ -844,7 +844,7 @@ cifs_destroy_mids(void)
         kmem_cache_destroy(cifs_oplock_cachep);
 }
 
-static int cifs_oplock_thread(void * dummyarg)
+static int cifs_oplock_thread(void *dummyarg)
 {
         struct oplock_q_entry *oplock_item;
         struct cifsTconInfo *pTcon;
@@ -855,7 +855,7 @@ static int cifs_oplock_thread(void * dummyarg)
         do {
                 if (try_to_freeze())
                         continue;
 
                 spin_lock(&GlobalMid_Lock);
                 if (list_empty(&GlobalOplock_Q)) {
                         spin_unlock(&GlobalMid_Lock);
@@ -865,7 +865,7 @@ static int cifs_oplock_thread(void * dummyarg)
                         oplock_item = list_entry(GlobalOplock_Q.next,
                                 struct oplock_q_entry, qhead);
                         if (oplock_item) {
-                                cFYI(1,("found oplock item to write out"));
+                                cFYI(1, ("found oplock item to write out"));
                                 pTcon = oplock_item->tcon;
                                 inode = oplock_item->pinode;
                                 netfid = oplock_item->netfid;
@@ -878,7 +878,8 @@ static int cifs_oplock_thread(void * dummyarg)
 /* mutex_lock(&inode->i_mutex);*/
                         if (S_ISREG(inode->i_mode)) {
                                 rc = filemap_fdatawrite(inode->i_mapping);
-                                if (CIFS_I(inode)->clientCanCacheRead == 0) {
+                                if (CIFS_I(inode)->clientCanCacheRead
+                                                == 0) {
                                         filemap_fdatawait(inode->i_mapping);
                                         invalidate_remote_inode(inode);
                                 }
@@ -913,7 +914,7 @@ static int cifs_oplock_thread(void * dummyarg)
         return 0;
 }
 
-static int cifs_dnotify_thread(void * dummyarg)
+static int cifs_dnotify_thread(void *dummyarg)
 {
         struct list_head *tmp;
         struct cifsSesInfo *ses;