Diffstat (limited to 'fs')
88 files changed, 1593 insertions, 720 deletions
diff --git a/fs/9p/conv.c b/fs/9p/conv.c
index 1e898144eb7c..56d88c1a09c5 100644
--- a/fs/9p/conv.c
+++ b/fs/9p/conv.c
@@ -673,8 +673,10 @@ struct v9fs_fcall *v9fs_create_tcreate(u32 fid, char *name, u32 perm, u8 mode, | |||
673 | struct cbuf *bufp = &buffer; | 673 | struct cbuf *bufp = &buffer; |
674 | 674 | ||
675 | size = 4 + 2 + strlen(name) + 4 + 1; /* fid[4] name[s] perm[4] mode[1] */ | 675 | size = 4 + 2 + strlen(name) + 4 + 1; /* fid[4] name[s] perm[4] mode[1] */ |
676 | if (extended && extension!=NULL) | 676 | if (extended) { |
677 | size += 2 + strlen(extension); /* extension[s] */ | 677 | size += 2 + /* extension[s] */ |
678 | (extension == NULL ? 0 : strlen(extension)); | ||
679 | } | ||
678 | 680 | ||
679 | fc = v9fs_create_common(bufp, size, TCREATE); | 681 | fc = v9fs_create_common(bufp, size, TCREATE); |
680 | if (IS_ERR(fc)) | 682 | if (IS_ERR(fc)) |
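
The fs/9p/conv.c hunk above fixes the Tcreate request sizing so a NULL extension string still contributes its 2-byte length prefix instead of being skipped entirely (and never reaches strlen()). A minimal userspace sketch of that sizing pattern, with hypothetical names rather than the real 9p marshalling API:

#include <stdio.h>
#include <string.h>

/* Size an optional string field: 2 length-prefix bytes plus the payload,
 * treating a NULL pointer as an empty string (hypothetical helper). */
static size_t optional_str_size(const char *s)
{
        return 2 + (s == NULL ? 0 : strlen(s));
}

int main(void)
{
        int extended = 1;
        const char *extension = NULL;   /* e.g. no special-file extension */
        size_t size = 4 + 2 + strlen("file") + 4 + 1; /* fid[4] name[s] perm[4] mode[1] */

        if (extended)
                size += optional_str_size(extension);
        printf("%zu\n", size);
        return 0;
}
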
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 2f580a197b8d..eae50c9d6dc4 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -434,11 +434,11 @@ static int v9fs_remove(struct inode *dir, struct dentry *file, int rmdir) | |||
434 | result = v9fs_t_remove(v9ses, fid, &fcall); | 434 | result = v9fs_t_remove(v9ses, fid, &fcall); |
435 | if (result < 0) { | 435 | if (result < 0) { |
436 | PRINT_FCALL_ERROR("remove fails", fcall); | 436 | PRINT_FCALL_ERROR("remove fails", fcall); |
437 | } else { | ||
438 | v9fs_put_idpool(fid, &v9ses->fidpool); | ||
439 | v9fs_fid_destroy(v9fid); | ||
440 | } | 437 | } |
441 | 438 | ||
439 | v9fs_put_idpool(fid, &v9ses->fidpool); | ||
440 | v9fs_fid_destroy(v9fid); | ||
441 | |||
442 | kfree(fcall); | 442 | kfree(fcall); |
443 | return result; | 443 | return result; |
444 | } | 444 | } |
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index ba1c88af49fe..82011019494c 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -308,7 +308,7 @@ static struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_di | |||
308 | if (adfs_checkmap(sb, dm)) | 308 | if (adfs_checkmap(sb, dm)) |
309 | return dm; | 309 | return dm; |
310 | 310 | ||
311 | adfs_error(sb, NULL, "map corrupted"); | 311 | adfs_error(sb, "map corrupted"); |
312 | 312 | ||
313 | error_free: | 313 | error_free: |
314 | while (--zone >= 0) | 314 | while (--zone >= 0) |
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index fcaeead9696b..50cfca5c7efd 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -512,7 +512,11 @@ befs_utf2nls(struct super_block *sb, const char *in, | |||
512 | wchar_t uni; | 512 | wchar_t uni; |
513 | int unilen, utflen; | 513 | int unilen, utflen; |
514 | char *result; | 514 | char *result; |
515 | int maxlen = in_len; /* The utf8->nls conversion can't make more chars */ | 515 | /* The utf8->nls conversion won't make the final nls string bigger |
516 | * than the utf one, but if the string is pure ascii they'll have the | ||
517 | * same width and an extra char is needed to save the additional \0 | ||
518 | */ | ||
519 | int maxlen = in_len + 1; | ||
516 | 520 | ||
517 | befs_debug(sb, "---> utf2nls()"); | 521 | befs_debug(sb, "---> utf2nls()"); |
518 | 522 | ||
@@ -588,7 +592,10 @@ befs_nls2utf(struct super_block *sb, const char *in, | |||
588 | wchar_t uni; | 592 | wchar_t uni; |
589 | int unilen, utflen; | 593 | int unilen, utflen; |
590 | char *result; | 594 | char *result; |
591 | int maxlen = 3 * in_len; | 595 | /* There're nls characters that will translate to 3-chars-wide UTF-8 |
596 | * characters, a additional byte is needed to save the final \0 | ||
597 | * in special cases */ | ||
598 | int maxlen = (3 * in_len) + 1; | ||
592 | 599 | ||
593 | befs_debug(sb, "---> nls2utf()\n"); | 600 | befs_debug(sb, "---> nls2utf()\n"); |
594 | 601 | ||
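
The befs hunks above enlarge the conversion buffers by one byte: a pure-ASCII UTF-8 string converts to an NLS string of the same width, so the trailing '\0' needs its own byte, and in the other direction an NLS character can expand to three UTF-8 bytes. A rough sketch of the sizing rule, assuming a plain malloc()-based allocator rather than the kernel's:

#include <stdlib.h>

/* Worst-case output buffer sizes, including the terminating '\0'. */
static char *alloc_utf2nls(size_t in_len)
{
        return malloc(in_len + 1);       /* NLS output is never longer than the UTF-8 input */
}

static char *alloc_nls2utf(size_t in_len)
{
        return malloc(3 * in_len + 1);   /* each NLS char may become up to 3 UTF-8 bytes */
}

int main(void)
{
        free(alloc_utf2nls(16));
        free(alloc_nls2utf(16));
        return 0;
}
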
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 37534573960b..045f98854f14 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -884,6 +884,61 @@ void bd_set_size(struct block_device *bdev, loff_t size) | |||
884 | } | 884 | } |
885 | EXPORT_SYMBOL(bd_set_size); | 885 | EXPORT_SYMBOL(bd_set_size); |
886 | 886 | ||
887 | static int __blkdev_put(struct block_device *bdev, unsigned int subclass) | ||
888 | { | ||
889 | int ret = 0; | ||
890 | struct inode *bd_inode = bdev->bd_inode; | ||
891 | struct gendisk *disk = bdev->bd_disk; | ||
892 | |||
893 | mutex_lock_nested(&bdev->bd_mutex, subclass); | ||
894 | lock_kernel(); | ||
895 | if (!--bdev->bd_openers) { | ||
896 | sync_blockdev(bdev); | ||
897 | kill_bdev(bdev); | ||
898 | } | ||
899 | if (bdev->bd_contains == bdev) { | ||
900 | if (disk->fops->release) | ||
901 | ret = disk->fops->release(bd_inode, NULL); | ||
902 | } else { | ||
903 | mutex_lock_nested(&bdev->bd_contains->bd_mutex, | ||
904 | subclass + 1); | ||
905 | bdev->bd_contains->bd_part_count--; | ||
906 | mutex_unlock(&bdev->bd_contains->bd_mutex); | ||
907 | } | ||
908 | if (!bdev->bd_openers) { | ||
909 | struct module *owner = disk->fops->owner; | ||
910 | |||
911 | put_disk(disk); | ||
912 | module_put(owner); | ||
913 | |||
914 | if (bdev->bd_contains != bdev) { | ||
915 | kobject_put(&bdev->bd_part->kobj); | ||
916 | bdev->bd_part = NULL; | ||
917 | } | ||
918 | bdev->bd_disk = NULL; | ||
919 | bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; | ||
920 | if (bdev != bdev->bd_contains) | ||
921 | __blkdev_put(bdev->bd_contains, subclass + 1); | ||
922 | bdev->bd_contains = NULL; | ||
923 | } | ||
924 | unlock_kernel(); | ||
925 | mutex_unlock(&bdev->bd_mutex); | ||
926 | bdput(bdev); | ||
927 | return ret; | ||
928 | } | ||
929 | |||
930 | int blkdev_put(struct block_device *bdev) | ||
931 | { | ||
932 | return __blkdev_put(bdev, BD_MUTEX_NORMAL); | ||
933 | } | ||
934 | EXPORT_SYMBOL(blkdev_put); | ||
935 | |||
936 | int blkdev_put_partition(struct block_device *bdev) | ||
937 | { | ||
938 | return __blkdev_put(bdev, BD_MUTEX_PARTITION); | ||
939 | } | ||
940 | EXPORT_SYMBOL(blkdev_put_partition); | ||
941 | |||
887 | static int | 942 | static int |
888 | blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags); | 943 | blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags); |
889 | 944 | ||
@@ -980,7 +1035,7 @@ out_first: | |||
980 | bdev->bd_disk = NULL; | 1035 | bdev->bd_disk = NULL; |
981 | bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; | 1036 | bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; |
982 | if (bdev != bdev->bd_contains) | 1037 | if (bdev != bdev->bd_contains) |
983 | blkdev_put(bdev->bd_contains); | 1038 | __blkdev_put(bdev->bd_contains, BD_MUTEX_WHOLE); |
984 | bdev->bd_contains = NULL; | 1039 | bdev->bd_contains = NULL; |
985 | put_disk(disk); | 1040 | put_disk(disk); |
986 | module_put(owner); | 1041 | module_put(owner); |
@@ -1079,63 +1134,6 @@ static int blkdev_open(struct inode * inode, struct file * filp) | |||
1079 | return res; | 1134 | return res; |
1080 | } | 1135 | } |
1081 | 1136 | ||
1082 | static int __blkdev_put(struct block_device *bdev, unsigned int subclass) | ||
1083 | { | ||
1084 | int ret = 0; | ||
1085 | struct inode *bd_inode = bdev->bd_inode; | ||
1086 | struct gendisk *disk = bdev->bd_disk; | ||
1087 | |||
1088 | mutex_lock_nested(&bdev->bd_mutex, subclass); | ||
1089 | lock_kernel(); | ||
1090 | if (!--bdev->bd_openers) { | ||
1091 | sync_blockdev(bdev); | ||
1092 | kill_bdev(bdev); | ||
1093 | } | ||
1094 | if (bdev->bd_contains == bdev) { | ||
1095 | if (disk->fops->release) | ||
1096 | ret = disk->fops->release(bd_inode, NULL); | ||
1097 | } else { | ||
1098 | mutex_lock_nested(&bdev->bd_contains->bd_mutex, | ||
1099 | subclass + 1); | ||
1100 | bdev->bd_contains->bd_part_count--; | ||
1101 | mutex_unlock(&bdev->bd_contains->bd_mutex); | ||
1102 | } | ||
1103 | if (!bdev->bd_openers) { | ||
1104 | struct module *owner = disk->fops->owner; | ||
1105 | |||
1106 | put_disk(disk); | ||
1107 | module_put(owner); | ||
1108 | |||
1109 | if (bdev->bd_contains != bdev) { | ||
1110 | kobject_put(&bdev->bd_part->kobj); | ||
1111 | bdev->bd_part = NULL; | ||
1112 | } | ||
1113 | bdev->bd_disk = NULL; | ||
1114 | bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; | ||
1115 | if (bdev != bdev->bd_contains) | ||
1116 | __blkdev_put(bdev->bd_contains, subclass + 1); | ||
1117 | bdev->bd_contains = NULL; | ||
1118 | } | ||
1119 | unlock_kernel(); | ||
1120 | mutex_unlock(&bdev->bd_mutex); | ||
1121 | bdput(bdev); | ||
1122 | return ret; | ||
1123 | } | ||
1124 | |||
1125 | int blkdev_put(struct block_device *bdev) | ||
1126 | { | ||
1127 | return __blkdev_put(bdev, BD_MUTEX_NORMAL); | ||
1128 | } | ||
1129 | |||
1130 | EXPORT_SYMBOL(blkdev_put); | ||
1131 | |||
1132 | int blkdev_put_partition(struct block_device *bdev) | ||
1133 | { | ||
1134 | return __blkdev_put(bdev, BD_MUTEX_PARTITION); | ||
1135 | } | ||
1136 | |||
1137 | EXPORT_SYMBOL(blkdev_put_partition); | ||
1138 | |||
1139 | static int blkdev_close(struct inode * inode, struct file * filp) | 1137 | static int blkdev_close(struct inode * inode, struct file * filp) |
1140 | { | 1138 | { |
1141 | struct block_device *bdev = I_BDEV(filp->f_mapping->host); | 1139 | struct block_device *bdev = I_BDEV(filp->f_mapping->host); |
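
fs/block_dev.c above moves __blkdev_put() ahead of blkdev_get_whole() so the open error path can release the containing whole-disk device with the proper mutex subclass instead of calling the public blkdev_put(). The recursion on bd_contains is the interesting shape; a userspace sketch of it (hypothetical types, no lockdep annotations):

#include <stdio.h>

/* Userspace sketch (not kernel code): a partition holds a reference on the
 * whole-disk object that contains it, and releasing the last opener of the
 * partition also drops that containing reference, mirroring how
 * __blkdev_put() recurses on bdev->bd_contains. */
struct bdev {
        const char *name;
        int openers;
        struct bdev *contains;   /* whole disk, or self for a whole disk */
};

static void put_bdev(struct bdev *b)
{
        if (--b->openers)
                return;
        printf("last close of %s\n", b->name);
        if (b->contains != b)
                put_bdev(b->contains);   /* release the whole disk too */
}

int main(void)
{
        struct bdev disk = { "sda", 1, &disk };
        struct bdev part = { "sda1", 1, &disk };
        put_bdev(&part);
        return 0;
}
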
diff --git a/fs/buffer.c b/fs/buffer.c
index 3660dcb97591..71649ef9b658 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -470,13 +470,18 @@ out: | |||
470 | pass does the actual I/O. */ | 470 | pass does the actual I/O. */ |
471 | void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers) | 471 | void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers) |
472 | { | 472 | { |
473 | struct address_space *mapping = bdev->bd_inode->i_mapping; | ||
474 | |||
475 | if (mapping->nrpages == 0) | ||
476 | return; | ||
477 | |||
473 | invalidate_bh_lrus(); | 478 | invalidate_bh_lrus(); |
474 | /* | 479 | /* |
475 | * FIXME: what about destroy_dirty_buffers? | 480 | * FIXME: what about destroy_dirty_buffers? |
476 | * We really want to use invalidate_inode_pages2() for | 481 | * We really want to use invalidate_inode_pages2() for |
477 | * that, but not until that's cleaned up. | 482 | * that, but not until that's cleaned up. |
478 | */ | 483 | */ |
479 | invalidate_inode_pages(bdev->bd_inode->i_mapping); | 484 | invalidate_inode_pages(mapping); |
480 | } | 485 | } |
481 | 486 | ||
482 | /* | 487 | /* |
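
The fs/buffer.c hunk above short-circuits invalidate_bdev() when the block device has nothing in its page cache, avoiding the buffer-head LRU sweep entirely. A tiny sketch of the same early-return shape, with stand-in helpers:

#include <stdio.h>

struct mapping { unsigned long nrpages; };

/* Stand-ins for the real helpers; the real invalidate_bh_lrus() touches
 * every CPU's buffer-head LRU, which is what the new early return avoids. */
static void invalidate_bh_lrus(void) { puts("sweeping bh LRUs"); }
static void invalidate_pages(struct mapping *m) { m->nrpages = 0; }

static void invalidate_bdev_sketch(struct mapping *mapping)
{
        if (mapping->nrpages == 0)
                return;                  /* nothing cached: skip the sweep */
        invalidate_bh_lrus();
        invalidate_pages(mapping);
}

int main(void)
{
        struct mapping empty = { 0 }, busy = { 42 };
        invalidate_bdev_sketch(&empty);  /* prints nothing */
        invalidate_bdev_sketch(&busy);   /* does the work */
        return 0;
}
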
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index a61d17ed1827..0feb3bd49cb8 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,13 @@ | |||
1 | Version 1.45 | ||
2 | ------------ | ||
3 | Do not time out lockw calls when using posix extensions. Do not | ||
4 | time out requests if server still responding reasonably fast | ||
5 | on requests on other threads. Improve POSIX locking emulation, | ||
6 | (lock cancel now works, and unlock of merged range works even | ||
7 | to Windows servers now). Fix oops on mount to lanman servers | ||
8 | (win9x, os/2 etc.) when null password. Do not send listxattr | ||
9 | (SMB to query all EAs) if nouser_xattr specified. | ||
10 | |||
1 | Version 1.44 | 11 | Version 1.44 |
2 | ------------ | 12 | ------------ |
3 | Rewritten sessionsetup support, including support for legacy SMB | 13 | Rewritten sessionsetup support, including support for legacy SMB |
diff --git a/fs/cifs/README b/fs/cifs/README
index 7986d0d97ace..5f0e1bd64fee 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -408,7 +408,7 @@ A partial list of the supported mount options follows: | |||
408 | user_xattr Allow getting and setting user xattrs as OS/2 EAs (extended | 408 | user_xattr Allow getting and setting user xattrs as OS/2 EAs (extended |
409 | attributes) to the server (default) e.g. via setfattr | 409 | attributes) to the server (default) e.g. via setfattr |
410 | and getfattr utilities. | 410 | and getfattr utilities. |
411 | nouser_xattr Do not allow getfattr/setfattr to get/set xattrs | 411 | nouser_xattr Do not allow getfattr/setfattr to get/set/list xattrs |
412 | mapchars Translate six of the seven reserved characters (not backslash) | 412 | mapchars Translate six of the seven reserved characters (not backslash) |
413 | *?<>|: | 413 | *?<>|: |
414 | to the remap range (above 0xF000), which also | 414 | to the remap range (above 0xF000), which also |
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index a89efaf78a26..4bc250b2d9fc 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -277,7 +277,8 @@ void calc_lanman_hash(struct cifsSesInfo * ses, char * lnm_session_key) | |||
277 | return; | 277 | return; |
278 | 278 | ||
279 | memset(password_with_pad, 0, CIFS_ENCPWD_SIZE); | 279 | memset(password_with_pad, 0, CIFS_ENCPWD_SIZE); |
280 | strncpy(password_with_pad, ses->password, CIFS_ENCPWD_SIZE); | 280 | if(ses->password) |
281 | strncpy(password_with_pad, ses->password, CIFS_ENCPWD_SIZE); | ||
281 | 282 | ||
282 | if((ses->server->secMode & SECMODE_PW_ENCRYPT) == 0) | 283 | if((ses->server->secMode & SECMODE_PW_ENCRYPT) == 0) |
283 | if(extended_security & CIFSSEC_MAY_PLNTXT) { | 284 | if(extended_security & CIFSSEC_MAY_PLNTXT) { |
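
The cifsencrypt.c hunk above fixes the oops on lanman mounts with a null password: the padded buffer is already zeroed, so a NULL password can simply be treated as an empty one by skipping the strncpy(). A small userspace sketch of that guard (ENCPWD_SIZE and the helper name are illustrative):

#include <stdio.h>
#include <string.h>

#define ENCPWD_SIZE 16

/* Build the fixed-size padded password block; a NULL password is treated as
 * an empty one because the buffer is zeroed first. */
static void pad_password(char out[ENCPWD_SIZE], const char *password)
{
        memset(out, 0, ENCPWD_SIZE);
        if (password)
                strncpy(out, password, ENCPWD_SIZE);
}

int main(void)
{
        char buf[ENCPWD_SIZE];
        pad_password(buf, NULL);        /* no crash on a null password */
        pad_password(buf, "secret");
        printf("%.*s\n", ENCPWD_SIZE, buf);
        return 0;
}
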
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index c28ede599946..3cd750029be2 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -402,7 +402,6 @@ static struct quotactl_ops cifs_quotactl_ops = { | |||
402 | }; | 402 | }; |
403 | #endif | 403 | #endif |
404 | 404 | ||
405 | #ifdef CONFIG_CIFS_EXPERIMENTAL | ||
406 | static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags) | 405 | static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags) |
407 | { | 406 | { |
408 | struct cifs_sb_info *cifs_sb; | 407 | struct cifs_sb_info *cifs_sb; |
@@ -422,7 +421,7 @@ static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags) | |||
422 | tcon->tidStatus = CifsExiting; | 421 | tcon->tidStatus = CifsExiting; |
423 | up(&tcon->tconSem); | 422 | up(&tcon->tconSem); |
424 | 423 | ||
425 | /* cancel_brl_requests(tcon); */ | 424 | /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */ |
426 | /* cancel_notify_requests(tcon); */ | 425 | /* cancel_notify_requests(tcon); */ |
427 | if(tcon->ses && tcon->ses->server) | 426 | if(tcon->ses && tcon->ses->server) |
428 | { | 427 | { |
@@ -438,7 +437,6 @@ static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags) | |||
438 | 437 | ||
439 | return; | 438 | return; |
440 | } | 439 | } |
441 | #endif | ||
442 | 440 | ||
443 | static int cifs_remount(struct super_block *sb, int *flags, char *data) | 441 | static int cifs_remount(struct super_block *sb, int *flags, char *data) |
444 | { | 442 | { |
@@ -457,9 +455,7 @@ struct super_operations cifs_super_ops = { | |||
457 | unless later we add lazy close of inodes or unless the kernel forgets to call | 455 | unless later we add lazy close of inodes or unless the kernel forgets to call |
458 | us with the same number of releases (closes) as opens */ | 456 | us with the same number of releases (closes) as opens */ |
459 | .show_options = cifs_show_options, | 457 | .show_options = cifs_show_options, |
460 | #ifdef CONFIG_CIFS_EXPERIMENTAL | ||
461 | .umount_begin = cifs_umount_begin, | 458 | .umount_begin = cifs_umount_begin, |
462 | #endif | ||
463 | .remount_fs = cifs_remount, | 459 | .remount_fs = cifs_remount, |
464 | }; | 460 | }; |
465 | 461 | ||
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 8f75c6f24701..39ee8ef3bdeb 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -100,5 +100,5 @@ extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t); | |||
100 | extern ssize_t cifs_listxattr(struct dentry *, char *, size_t); | 100 | extern ssize_t cifs_listxattr(struct dentry *, char *, size_t); |
101 | extern int cifs_ioctl (struct inode * inode, struct file * filep, | 101 | extern int cifs_ioctl (struct inode * inode, struct file * filep, |
102 | unsigned int command, unsigned long arg); | 102 | unsigned int command, unsigned long arg); |
103 | #define CIFS_VERSION "1.44" | 103 | #define CIFS_VERSION "1.45" |
104 | #endif /* _CIFSFS_H */ | 104 | #endif /* _CIFSFS_H */ |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 6d7cf5f3bc0b..b24006c47df1 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -3,6 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) International Business Machines Corp., 2002,2006 | 4 | * Copyright (C) International Business Machines Corp., 2002,2006 |
5 | * Author(s): Steve French (sfrench@us.ibm.com) | 5 | * Author(s): Steve French (sfrench@us.ibm.com) |
6 | * Jeremy Allison (jra@samba.org) | ||
6 | * | 7 | * |
7 | * This library is free software; you can redistribute it and/or modify | 8 | * This library is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU Lesser General Public License as published | 9 | * it under the terms of the GNU Lesser General Public License as published |
@@ -158,7 +159,8 @@ struct TCP_Server_Info { | |||
158 | /* 16th byte of RFC1001 workstation name is always null */ | 159 | /* 16th byte of RFC1001 workstation name is always null */ |
159 | char workstation_RFC1001_name[SERVER_NAME_LEN_WITH_NULL]; | 160 | char workstation_RFC1001_name[SERVER_NAME_LEN_WITH_NULL]; |
160 | __u32 sequence_number; /* needed for CIFS PDU signature */ | 161 | __u32 sequence_number; /* needed for CIFS PDU signature */ |
161 | char mac_signing_key[CIFS_SESS_KEY_SIZE + 16]; | 162 | char mac_signing_key[CIFS_SESS_KEY_SIZE + 16]; |
163 | unsigned long lstrp; /* when we got last response from this server */ | ||
162 | }; | 164 | }; |
163 | 165 | ||
164 | /* | 166 | /* |
@@ -266,14 +268,14 @@ struct cifsTconInfo { | |||
266 | }; | 268 | }; |
267 | 269 | ||
268 | /* | 270 | /* |
269 | * This info hangs off the cifsFileInfo structure. This is used to track | 271 | * This info hangs off the cifsFileInfo structure, pointed to by llist. |
270 | * byte stream locks on the file | 272 | * This is used to track byte stream locks on the file |
271 | */ | 273 | */ |
272 | struct cifsLockInfo { | 274 | struct cifsLockInfo { |
273 | struct cifsLockInfo *next; | 275 | struct list_head llist; /* pointer to next cifsLockInfo */ |
274 | int start; | 276 | __u64 offset; |
275 | int length; | 277 | __u64 length; |
276 | int type; | 278 | __u8 type; |
277 | }; | 279 | }; |
278 | 280 | ||
279 | /* | 281 | /* |
@@ -304,6 +306,8 @@ struct cifsFileInfo { | |||
304 | /* lock scope id (0 if none) */ | 306 | /* lock scope id (0 if none) */ |
305 | struct file * pfile; /* needed for writepage */ | 307 | struct file * pfile; /* needed for writepage */ |
306 | struct inode * pInode; /* needed for oplock break */ | 308 | struct inode * pInode; /* needed for oplock break */ |
309 | struct semaphore lock_sem; | ||
310 | struct list_head llist; /* list of byte range locks we have. */ | ||
307 | unsigned closePend:1; /* file is marked to close */ | 311 | unsigned closePend:1; /* file is marked to close */ |
308 | unsigned invalidHandle:1; /* file closed via session abend */ | 312 | unsigned invalidHandle:1; /* file closed via session abend */ |
309 | atomic_t wrtPending; /* handle in use - defer close */ | 313 | atomic_t wrtPending; /* handle in use - defer close */ |
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index a5ddc62d6fe6..b35c55c3c8bb 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -50,6 +50,10 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *, | |||
50 | extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *, | 50 | extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *, |
51 | struct kvec *, int /* nvec to send */, | 51 | struct kvec *, int /* nvec to send */, |
52 | int * /* type of buf returned */ , const int long_op); | 52 | int * /* type of buf returned */ , const int long_op); |
53 | extern int SendReceiveBlockingLock(const unsigned int /* xid */ , struct cifsTconInfo *, | ||
54 | struct smb_hdr * /* input */ , | ||
55 | struct smb_hdr * /* out */ , | ||
56 | int * /* bytes returned */); | ||
53 | extern int checkSMBhdr(struct smb_hdr *smb, __u16 mid); | 57 | extern int checkSMBhdr(struct smb_hdr *smb, __u16 mid); |
54 | extern int checkSMB(struct smb_hdr *smb, __u16 mid, int length); | 58 | extern int checkSMB(struct smb_hdr *smb, __u16 mid, int length); |
55 | extern int is_valid_oplock_break(struct smb_hdr *smb, struct TCP_Server_Info *); | 59 | extern int is_valid_oplock_break(struct smb_hdr *smb, struct TCP_Server_Info *); |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 19678c575dfc..075d8fb3d376 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -477,7 +477,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
477 | /* BB get server time for time conversions and add | 477 | /* BB get server time for time conversions and add |
478 | code to use it and timezone since this is not UTC */ | 478 | code to use it and timezone since this is not UTC */ |
479 | 479 | ||
480 | if (rsp->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) { | 480 | if (rsp->EncryptionKeyLength == cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) { |
481 | memcpy(server->cryptKey, rsp->EncryptionKey, | 481 | memcpy(server->cryptKey, rsp->EncryptionKey, |
482 | CIFS_CRYPTO_KEY_SIZE); | 482 | CIFS_CRYPTO_KEY_SIZE); |
483 | } else if (server->secMode & SECMODE_PW_ENCRYPT) { | 483 | } else if (server->secMode & SECMODE_PW_ENCRYPT) { |
@@ -1460,8 +1460,13 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, | |||
1460 | pSMB->hdr.smb_buf_length += count; | 1460 | pSMB->hdr.smb_buf_length += count; |
1461 | pSMB->ByteCount = cpu_to_le16(count); | 1461 | pSMB->ByteCount = cpu_to_le16(count); |
1462 | 1462 | ||
1463 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 1463 | if (waitFlag) { |
1464 | rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB, | ||
1465 | (struct smb_hdr *) pSMBr, &bytes_returned); | ||
1466 | } else { | ||
1467 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | ||
1464 | (struct smb_hdr *) pSMBr, &bytes_returned, timeout); | 1468 | (struct smb_hdr *) pSMBr, &bytes_returned, timeout); |
1469 | } | ||
1465 | cifs_stats_inc(&tcon->num_locks); | 1470 | cifs_stats_inc(&tcon->num_locks); |
1466 | if (rc) { | 1471 | if (rc) { |
1467 | cFYI(1, ("Send error in Lock = %d", rc)); | 1472 | cFYI(1, ("Send error in Lock = %d", rc)); |
@@ -1484,6 +1489,7 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, | |||
1484 | char *data_offset; | 1489 | char *data_offset; |
1485 | struct cifs_posix_lock *parm_data; | 1490 | struct cifs_posix_lock *parm_data; |
1486 | int rc = 0; | 1491 | int rc = 0; |
1492 | int timeout = 0; | ||
1487 | int bytes_returned = 0; | 1493 | int bytes_returned = 0; |
1488 | __u16 params, param_offset, offset, byte_count, count; | 1494 | __u16 params, param_offset, offset, byte_count, count; |
1489 | 1495 | ||
@@ -1503,7 +1509,6 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, | |||
1503 | pSMB->MaxSetupCount = 0; | 1509 | pSMB->MaxSetupCount = 0; |
1504 | pSMB->Reserved = 0; | 1510 | pSMB->Reserved = 0; |
1505 | pSMB->Flags = 0; | 1511 | pSMB->Flags = 0; |
1506 | pSMB->Timeout = 0; | ||
1507 | pSMB->Reserved2 = 0; | 1512 | pSMB->Reserved2 = 0; |
1508 | param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; | 1513 | param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; |
1509 | offset = param_offset + params; | 1514 | offset = param_offset + params; |
@@ -1529,8 +1534,13 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, | |||
1529 | (((char *) &pSMB->hdr.Protocol) + offset); | 1534 | (((char *) &pSMB->hdr.Protocol) + offset); |
1530 | 1535 | ||
1531 | parm_data->lock_type = cpu_to_le16(lock_type); | 1536 | parm_data->lock_type = cpu_to_le16(lock_type); |
1532 | if(waitFlag) | 1537 | if(waitFlag) { |
1538 | timeout = 3; /* blocking operation, no timeout */ | ||
1533 | parm_data->lock_flags = cpu_to_le16(1); | 1539 | parm_data->lock_flags = cpu_to_le16(1); |
1540 | pSMB->Timeout = cpu_to_le32(-1); | ||
1541 | } else | ||
1542 | pSMB->Timeout = 0; | ||
1543 | |||
1534 | parm_data->pid = cpu_to_le32(current->tgid); | 1544 | parm_data->pid = cpu_to_le32(current->tgid); |
1535 | parm_data->start = cpu_to_le64(pLockData->fl_start); | 1545 | parm_data->start = cpu_to_le64(pLockData->fl_start); |
1536 | parm_data->length = cpu_to_le64(len); /* normalize negative numbers */ | 1546 | parm_data->length = cpu_to_le64(len); /* normalize negative numbers */ |
@@ -1541,8 +1551,14 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, | |||
1541 | pSMB->Reserved4 = 0; | 1551 | pSMB->Reserved4 = 0; |
1542 | pSMB->hdr.smb_buf_length += byte_count; | 1552 | pSMB->hdr.smb_buf_length += byte_count; |
1543 | pSMB->ByteCount = cpu_to_le16(byte_count); | 1553 | pSMB->ByteCount = cpu_to_le16(byte_count); |
1544 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 1554 | if (waitFlag) { |
1545 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 1555 | rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB, |
1556 | (struct smb_hdr *) pSMBr, &bytes_returned); | ||
1557 | } else { | ||
1558 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | ||
1559 | (struct smb_hdr *) pSMBr, &bytes_returned, timeout); | ||
1560 | } | ||
1561 | |||
1546 | if (rc) { | 1562 | if (rc) { |
1547 | cFYI(1, ("Send error in Posix Lock = %d", rc)); | 1563 | cFYI(1, ("Send error in Posix Lock = %d", rc)); |
1548 | } else if (get_flag) { | 1564 | } else if (get_flag) { |
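
The cifssmb.c hunks above route blocking lock requests through the new SendReceiveBlockingLock() path and give them an infinite on-the-wire timeout (Timeout = -1), while non-blocking locks keep the ordinary timed path. A compact sketch of that dispatch decision, with hypothetical names rather than the real SMB structures:

#include <stdint.h>
#include <stdio.h>

/* Blocking lock requests must not be timed out by the client, and the server
 * is told to wait indefinitely; non-blocking ones fail fast. */
enum send_path { SEND_TIMED, SEND_BLOCKING };

static enum send_path pick_lock_path(int wait_flag, int32_t *pdu_timeout)
{
        if (wait_flag) {
                *pdu_timeout = -1;      /* server: wait indefinitely */
                return SEND_BLOCKING;   /* client: no request timeout */
        }
        *pdu_timeout = 0;               /* server: fail immediately if contended */
        return SEND_TIMED;
}

int main(void)
{
        int32_t t;
        printf("%d %d\n", pick_lock_path(1, &t), (int)t);
        printf("%d %d\n", pick_lock_path(0, &t), (int)t);
        return 0;
}
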
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 876eb9ef85fe..5d394c726860 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -182,6 +182,7 @@ cifs_reconnect(struct TCP_Server_Info *server) | |||
182 | 182 | ||
183 | while ((server->tcpStatus != CifsExiting) && (server->tcpStatus != CifsGood)) | 183 | while ((server->tcpStatus != CifsExiting) && (server->tcpStatus != CifsGood)) |
184 | { | 184 | { |
185 | try_to_freeze(); | ||
185 | if(server->protocolType == IPV6) { | 186 | if(server->protocolType == IPV6) { |
186 | rc = ipv6_connect(&server->addr.sockAddr6,&server->ssocket); | 187 | rc = ipv6_connect(&server->addr.sockAddr6,&server->ssocket); |
187 | } else { | 188 | } else { |
@@ -612,6 +613,10 @@ multi_t2_fnd: | |||
612 | #ifdef CONFIG_CIFS_STATS2 | 613 | #ifdef CONFIG_CIFS_STATS2 |
613 | mid_entry->when_received = jiffies; | 614 | mid_entry->when_received = jiffies; |
614 | #endif | 615 | #endif |
616 | /* so we do not time out requests to server | ||
617 | which is still responding (since server could | ||
618 | be busy but not dead) */ | ||
619 | server->lstrp = jiffies; | ||
615 | break; | 620 | break; |
616 | } | 621 | } |
617 | } | 622 | } |
@@ -1266,33 +1271,35 @@ find_unc(__be32 new_target_ip_addr, char *uncName, char *userName) | |||
1266 | 1271 | ||
1267 | read_lock(&GlobalSMBSeslock); | 1272 | read_lock(&GlobalSMBSeslock); |
1268 | list_for_each(tmp, &GlobalTreeConnectionList) { | 1273 | list_for_each(tmp, &GlobalTreeConnectionList) { |
1269 | cFYI(1, ("Next tcon - ")); | 1274 | cFYI(1, ("Next tcon")); |
1270 | tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList); | 1275 | tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList); |
1271 | if (tcon->ses) { | 1276 | if (tcon->ses) { |
1272 | if (tcon->ses->server) { | 1277 | if (tcon->ses->server) { |
1273 | cFYI(1, | 1278 | cFYI(1, |
1274 | (" old ip addr: %x == new ip %x ?", | 1279 | ("old ip addr: %x == new ip %x ?", |
1275 | tcon->ses->server->addr.sockAddr.sin_addr. | 1280 | tcon->ses->server->addr.sockAddr.sin_addr. |
1276 | s_addr, new_target_ip_addr)); | 1281 | s_addr, new_target_ip_addr)); |
1277 | if (tcon->ses->server->addr.sockAddr.sin_addr. | 1282 | if (tcon->ses->server->addr.sockAddr.sin_addr. |
1278 | s_addr == new_target_ip_addr) { | 1283 | s_addr == new_target_ip_addr) { |
1279 | /* BB lock tcon and server and tcp session and increment use count here? */ | 1284 | /* BB lock tcon, server and tcp session and increment use count here? */ |
1280 | /* found a match on the TCP session */ | 1285 | /* found a match on the TCP session */ |
1281 | /* BB check if reconnection needed */ | 1286 | /* BB check if reconnection needed */ |
1282 | cFYI(1,("Matched ip, old UNC: %s == new: %s ?", | 1287 | cFYI(1,("IP match, old UNC: %s new: %s", |
1283 | tcon->treeName, uncName)); | 1288 | tcon->treeName, uncName)); |
1284 | if (strncmp | 1289 | if (strncmp |
1285 | (tcon->treeName, uncName, | 1290 | (tcon->treeName, uncName, |
1286 | MAX_TREE_SIZE) == 0) { | 1291 | MAX_TREE_SIZE) == 0) { |
1287 | cFYI(1, | 1292 | cFYI(1, |
1288 | ("Matched UNC, old user: %s == new: %s ?", | 1293 | ("and old usr: %s new: %s", |
1289 | tcon->treeName, uncName)); | 1294 | tcon->treeName, uncName)); |
1290 | if (strncmp | 1295 | if (strncmp |
1291 | (tcon->ses->userName, | 1296 | (tcon->ses->userName, |
1292 | userName, | 1297 | userName, |
1293 | MAX_USERNAME_SIZE) == 0) { | 1298 | MAX_USERNAME_SIZE) == 0) { |
1294 | read_unlock(&GlobalSMBSeslock); | 1299 | read_unlock(&GlobalSMBSeslock); |
1295 | return tcon;/* also matched user (smb session)*/ | 1300 | /* matched smb session |
1301 | (user name */ | ||
1302 | return tcon; | ||
1296 | } | 1303 | } |
1297 | } | 1304 | } |
1298 | } | 1305 | } |
@@ -1969,7 +1976,18 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
1969 | } | 1976 | } |
1970 | 1977 | ||
1971 | cFYI(1,("Negotiate caps 0x%x",(int)cap)); | 1978 | cFYI(1,("Negotiate caps 0x%x",(int)cap)); |
1972 | 1979 | #ifdef CONFIG_CIFS_DEBUG2 | |
1980 | if(cap & CIFS_UNIX_FCNTL_CAP) | ||
1981 | cFYI(1,("FCNTL cap")); | ||
1982 | if(cap & CIFS_UNIX_EXTATTR_CAP) | ||
1983 | cFYI(1,("EXTATTR cap")); | ||
1984 | if(cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) | ||
1985 | cFYI(1,("POSIX path cap")); | ||
1986 | if(cap & CIFS_UNIX_XATTR_CAP) | ||
1987 | cFYI(1,("XATTR cap")); | ||
1988 | if(cap & CIFS_UNIX_POSIX_ACL_CAP) | ||
1989 | cFYI(1,("POSIX ACL cap")); | ||
1990 | #endif /* CIFS_DEBUG2 */ | ||
1973 | if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) { | 1991 | if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) { |
1974 | cFYI(1,("setting capabilities failed")); | 1992 | cFYI(1,("setting capabilities failed")); |
1975 | } | 1993 | } |
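
The connect.c hunk above adds a CONFIG_CIFS_DEBUG2-only dump of the negotiated Unix extension capability bits. The same idea reads more easily table-driven; a sketch with placeholder bit values (the real CIFS_UNIX_*_CAP constants are not reproduced here):

#include <stdio.h>
#include <stdint.h>

#define UNIX_FCNTL_CAP       0x01
#define UNIX_EXTATTR_CAP     0x02
#define UNIX_POSIX_PATH_CAP  0x04
#define UNIX_XATTR_CAP       0x08
#define UNIX_POSIX_ACL_CAP   0x10

/* Print the name of every advertised capability bit. */
static void dump_unix_caps(uint64_t cap)
{
        static const struct { uint64_t bit; const char *name; } tbl[] = {
                { UNIX_FCNTL_CAP,      "FCNTL cap" },
                { UNIX_EXTATTR_CAP,    "EXTATTR cap" },
                { UNIX_POSIX_PATH_CAP, "POSIX path cap" },
                { UNIX_XATTR_CAP,      "XATTR cap" },
                { UNIX_POSIX_ACL_CAP,  "POSIX ACL cap" },
        };
        for (unsigned i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
                if (cap & tbl[i].bit)
                        printf("%s\n", tbl[i].name);
}

int main(void)
{
        dump_unix_caps(UNIX_FCNTL_CAP | UNIX_POSIX_PATH_CAP);
        return 0;
}
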
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index ba4cbe9b0684..914239d53634 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -267,6 +267,10 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, | |||
267 | pCifsFile->invalidHandle = FALSE; | 267 | pCifsFile->invalidHandle = FALSE; |
268 | pCifsFile->closePend = FALSE; | 268 | pCifsFile->closePend = FALSE; |
269 | init_MUTEX(&pCifsFile->fh_sem); | 269 | init_MUTEX(&pCifsFile->fh_sem); |
270 | init_MUTEX(&pCifsFile->lock_sem); | ||
271 | INIT_LIST_HEAD(&pCifsFile->llist); | ||
272 | atomic_set(&pCifsFile->wrtPending,0); | ||
273 | |||
270 | /* set the following in open now | 274 | /* set the following in open now |
271 | pCifsFile->pfile = file; */ | 275 | pCifsFile->pfile = file; */ |
272 | write_lock(&GlobalSMBSeslock); | 276 | write_lock(&GlobalSMBSeslock); |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 944d2b9e092d..e9c5ba9084fc 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -5,6 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Copyright (C) International Business Machines Corp., 2002,2003 | 6 | * Copyright (C) International Business Machines Corp., 2002,2003 |
7 | * Author(s): Steve French (sfrench@us.ibm.com) | 7 | * Author(s): Steve French (sfrench@us.ibm.com) |
8 | * Jeremy Allison (jra@samba.org) | ||
8 | * | 9 | * |
9 | * This library is free software; you can redistribute it and/or modify | 10 | * This library is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU Lesser General Public License as published | 11 | * it under the terms of the GNU Lesser General Public License as published |
@@ -47,6 +48,8 @@ static inline struct cifsFileInfo *cifs_init_private( | |||
47 | private_data->netfid = netfid; | 48 | private_data->netfid = netfid; |
48 | private_data->pid = current->tgid; | 49 | private_data->pid = current->tgid; |
49 | init_MUTEX(&private_data->fh_sem); | 50 | init_MUTEX(&private_data->fh_sem); |
51 | init_MUTEX(&private_data->lock_sem); | ||
52 | INIT_LIST_HEAD(&private_data->llist); | ||
50 | private_data->pfile = file; /* needed for writepage */ | 53 | private_data->pfile = file; /* needed for writepage */ |
51 | private_data->pInode = inode; | 54 | private_data->pInode = inode; |
52 | private_data->invalidHandle = FALSE; | 55 | private_data->invalidHandle = FALSE; |
@@ -473,6 +476,8 @@ int cifs_close(struct inode *inode, struct file *file) | |||
473 | cifs_sb = CIFS_SB(inode->i_sb); | 476 | cifs_sb = CIFS_SB(inode->i_sb); |
474 | pTcon = cifs_sb->tcon; | 477 | pTcon = cifs_sb->tcon; |
475 | if (pSMBFile) { | 478 | if (pSMBFile) { |
479 | struct cifsLockInfo *li, *tmp; | ||
480 | |||
476 | pSMBFile->closePend = TRUE; | 481 | pSMBFile->closePend = TRUE; |
477 | if (pTcon) { | 482 | if (pTcon) { |
478 | /* no sense reconnecting to close a file that is | 483 | /* no sense reconnecting to close a file that is |
@@ -496,6 +501,16 @@ int cifs_close(struct inode *inode, struct file *file) | |||
496 | pSMBFile->netfid); | 501 | pSMBFile->netfid); |
497 | } | 502 | } |
498 | } | 503 | } |
504 | |||
505 | /* Delete any outstanding lock records. | ||
506 | We'll lose them when the file is closed anyway. */ | ||
507 | down(&pSMBFile->lock_sem); | ||
508 | list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) { | ||
509 | list_del(&li->llist); | ||
510 | kfree(li); | ||
511 | } | ||
512 | up(&pSMBFile->lock_sem); | ||
513 | |||
499 | write_lock(&GlobalSMBSeslock); | 514 | write_lock(&GlobalSMBSeslock); |
500 | list_del(&pSMBFile->flist); | 515 | list_del(&pSMBFile->flist); |
501 | list_del(&pSMBFile->tlist); | 516 | list_del(&pSMBFile->tlist); |
@@ -570,6 +585,21 @@ int cifs_closedir(struct inode *inode, struct file *file) | |||
570 | return rc; | 585 | return rc; |
571 | } | 586 | } |
572 | 587 | ||
588 | static int store_file_lock(struct cifsFileInfo *fid, __u64 len, | ||
589 | __u64 offset, __u8 lockType) | ||
590 | { | ||
591 | struct cifsLockInfo *li = kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL); | ||
592 | if (li == NULL) | ||
593 | return -ENOMEM; | ||
594 | li->offset = offset; | ||
595 | li->length = len; | ||
596 | li->type = lockType; | ||
597 | down(&fid->lock_sem); | ||
598 | list_add(&li->llist, &fid->llist); | ||
599 | up(&fid->lock_sem); | ||
600 | return 0; | ||
601 | } | ||
602 | |||
573 | int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) | 603 | int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) |
574 | { | 604 | { |
575 | int rc, xid; | 605 | int rc, xid; |
@@ -581,6 +611,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) | |||
581 | struct cifsTconInfo *pTcon; | 611 | struct cifsTconInfo *pTcon; |
582 | __u16 netfid; | 612 | __u16 netfid; |
583 | __u8 lockType = LOCKING_ANDX_LARGE_FILES; | 613 | __u8 lockType = LOCKING_ANDX_LARGE_FILES; |
614 | int posix_locking; | ||
584 | 615 | ||
585 | length = 1 + pfLock->fl_end - pfLock->fl_start; | 616 | length = 1 + pfLock->fl_end - pfLock->fl_start; |
586 | rc = -EACCES; | 617 | rc = -EACCES; |
@@ -639,15 +670,14 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) | |||
639 | } | 670 | } |
640 | netfid = ((struct cifsFileInfo *)file->private_data)->netfid; | 671 | netfid = ((struct cifsFileInfo *)file->private_data)->netfid; |
641 | 672 | ||
673 | posix_locking = (cifs_sb->tcon->ses->capabilities & CAP_UNIX) && | ||
674 | (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability)); | ||
642 | 675 | ||
643 | /* BB add code here to normalize offset and length to | 676 | /* BB add code here to normalize offset and length to |
644 | account for negative length which we can not accept over the | 677 | account for negative length which we can not accept over the |
645 | wire */ | 678 | wire */ |
646 | if (IS_GETLK(cmd)) { | 679 | if (IS_GETLK(cmd)) { |
647 | if(experimEnabled && | 680 | if(posix_locking) { |
648 | (cifs_sb->tcon->ses->capabilities & CAP_UNIX) && | ||
649 | (CIFS_UNIX_FCNTL_CAP & | ||
650 | le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) { | ||
651 | int posix_lock_type; | 681 | int posix_lock_type; |
652 | if(lockType & LOCKING_ANDX_SHARED_LOCK) | 682 | if(lockType & LOCKING_ANDX_SHARED_LOCK) |
653 | posix_lock_type = CIFS_RDLCK; | 683 | posix_lock_type = CIFS_RDLCK; |
@@ -683,10 +713,15 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) | |||
683 | FreeXid(xid); | 713 | FreeXid(xid); |
684 | return rc; | 714 | return rc; |
685 | } | 715 | } |
686 | if (experimEnabled && | 716 | |
687 | (cifs_sb->tcon->ses->capabilities & CAP_UNIX) && | 717 | if (!numLock && !numUnlock) { |
688 | (CIFS_UNIX_FCNTL_CAP & | 718 | /* if no lock or unlock then nothing |
689 | le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) { | 719 | to do since we do not know what it is */ |
720 | FreeXid(xid); | ||
721 | return -EOPNOTSUPP; | ||
722 | } | ||
723 | |||
724 | if (posix_locking) { | ||
690 | int posix_lock_type; | 725 | int posix_lock_type; |
691 | if(lockType & LOCKING_ANDX_SHARED_LOCK) | 726 | if(lockType & LOCKING_ANDX_SHARED_LOCK) |
692 | posix_lock_type = CIFS_RDLCK; | 727 | posix_lock_type = CIFS_RDLCK; |
@@ -695,18 +730,46 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) | |||
695 | 730 | ||
696 | if(numUnlock == 1) | 731 | if(numUnlock == 1) |
697 | posix_lock_type = CIFS_UNLCK; | 732 | posix_lock_type = CIFS_UNLCK; |
698 | else if(numLock == 0) { | 733 | |
699 | /* if no lock or unlock then nothing | ||
700 | to do since we do not know what it is */ | ||
701 | FreeXid(xid); | ||
702 | return -EOPNOTSUPP; | ||
703 | } | ||
704 | rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */, | 734 | rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */, |
705 | length, pfLock, | 735 | length, pfLock, |
706 | posix_lock_type, wait_flag); | 736 | posix_lock_type, wait_flag); |
707 | } else | 737 | } else { |
708 | rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start, | 738 | struct cifsFileInfo *fid = (struct cifsFileInfo *)file->private_data; |
709 | numUnlock, numLock, lockType, wait_flag); | 739 | |
740 | if (numLock) { | ||
741 | rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start, | ||
742 | 0, numLock, lockType, wait_flag); | ||
743 | |||
744 | if (rc == 0) { | ||
745 | /* For Windows locks we must store them. */ | ||
746 | rc = store_file_lock(fid, length, | ||
747 | pfLock->fl_start, lockType); | ||
748 | } | ||
749 | } else if (numUnlock) { | ||
750 | /* For each stored lock that this unlock overlaps | ||
751 | completely, unlock it. */ | ||
752 | int stored_rc = 0; | ||
753 | struct cifsLockInfo *li, *tmp; | ||
754 | |||
755 | down(&fid->lock_sem); | ||
756 | list_for_each_entry_safe(li, tmp, &fid->llist, llist) { | ||
757 | if (pfLock->fl_start <= li->offset && | ||
758 | length >= li->length) { | ||
759 | stored_rc = CIFSSMBLock(xid, pTcon, netfid, | ||
760 | li->length, li->offset, | ||
761 | 1, 0, li->type, FALSE); | ||
762 | if (stored_rc) | ||
763 | rc = stored_rc; | ||
764 | |||
765 | list_del(&li->llist); | ||
766 | kfree(li); | ||
767 | } | ||
768 | } | ||
769 | up(&fid->lock_sem); | ||
770 | } | ||
771 | } | ||
772 | |||
710 | if (pfLock->fl_flags & FL_POSIX) | 773 | if (pfLock->fl_flags & FL_POSIX) |
711 | posix_lock_file_wait(file, pfLock); | 774 | posix_lock_file_wait(file, pfLock); |
712 | FreeXid(xid); | 775 | FreeXid(xid); |
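
The fs/cifs/file.c changes above keep a per-open-file list of granted byte-range locks (llist, guarded by lock_sem) so a later unlock can re-issue unlocks for the stored ranges it covers and cifs_close() can free whatever remains. A userspace sketch of that bookkeeping, using a pthread mutex in place of the kernel semaphore and hypothetical type names rather than the CIFS structures:

#include <pthread.h>
#include <stdlib.h>

struct brlock {
        unsigned long long offset, length;
        struct brlock *next;
};

struct openfile {
        pthread_mutex_t lock_mutex;
        struct brlock *locks;
};

/* Remember a lock that the server granted so it can be undone later. */
static int remember_lock(struct openfile *f, unsigned long long off,
                         unsigned long long len)
{
        struct brlock *li = malloc(sizeof(*li));
        if (!li)
                return -1;
        li->offset = off;
        li->length = len;
        pthread_mutex_lock(&f->lock_mutex);
        li->next = f->locks;
        f->locks = li;
        pthread_mutex_unlock(&f->lock_mutex);
        return 0;
}

/* Drop every stored lock completely covered by [off, off + len). */
static void forget_covered(struct openfile *f, unsigned long long off,
                           unsigned long long len)
{
        pthread_mutex_lock(&f->lock_mutex);
        for (struct brlock **pp = &f->locks; *pp; ) {
                struct brlock *li = *pp;
                if (off <= li->offset && li->offset + li->length <= off + len) {
                        *pp = li->next;
                        free(li);
                } else {
                        pp = &li->next;
                }
        }
        pthread_mutex_unlock(&f->lock_mutex);
}

int main(void)
{
        struct openfile f = { PTHREAD_MUTEX_INITIALIZER, NULL };
        remember_lock(&f, 0, 100);
        forget_covered(&f, 0, 200);
        return 0;
}
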
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index b66eff5dc624..ce87550e918f 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -72,6 +72,7 @@ static const struct smb_to_posix_error mapping_table_ERRDOS[] = { | |||
72 | {ERRinvlevel,-EOPNOTSUPP}, | 72 | {ERRinvlevel,-EOPNOTSUPP}, |
73 | {ERRdirnotempty, -ENOTEMPTY}, | 73 | {ERRdirnotempty, -ENOTEMPTY}, |
74 | {ERRnotlocked, -ENOLCK}, | 74 | {ERRnotlocked, -ENOLCK}, |
75 | {ERRcancelviolation, -ENOLCK}, | ||
75 | {ERRalreadyexists, -EEXIST}, | 76 | {ERRalreadyexists, -EEXIST}, |
76 | {ERRmoredata, -EOVERFLOW}, | 77 | {ERRmoredata, -EOVERFLOW}, |
77 | {ERReasnotsupported,-EOPNOTSUPP}, | 78 | {ERReasnotsupported,-EOPNOTSUPP}, |
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 03bbcb377913..105761e3ba0e 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -556,7 +556,7 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile) | |||
556 | FIND_FILE_STANDARD_INFO * pFindData = | 556 | FIND_FILE_STANDARD_INFO * pFindData = |
557 | (FIND_FILE_STANDARD_INFO *)current_entry; | 557 | (FIND_FILE_STANDARD_INFO *)current_entry; |
558 | filename = &pFindData->FileName[0]; | 558 | filename = &pFindData->FileName[0]; |
559 | len = le32_to_cpu(pFindData->FileNameLength); | 559 | len = pFindData->FileNameLength; |
560 | } else { | 560 | } else { |
561 | cFYI(1,("Unknown findfirst level %d",cfile->srch_inf.info_level)); | 561 | cFYI(1,("Unknown findfirst level %d",cfile->srch_inf.info_level)); |
562 | } | 562 | } |
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7202d534ef0b..d1705ab8136e 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -372,7 +372,7 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time, | |||
372 | 372 | ||
373 | /* no capabilities flags in old lanman negotiation */ | 373 | /* no capabilities flags in old lanman negotiation */ |
374 | 374 | ||
375 | pSMB->old_req.PasswordLength = CIFS_SESS_KEY_SIZE; | 375 | pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE); |
376 | /* BB calculate hash with password */ | 376 | /* BB calculate hash with password */ |
377 | /* and copy into bcc */ | 377 | /* and copy into bcc */ |
378 | 378 | ||
diff --git a/fs/cifs/smberr.h b/fs/cifs/smberr.h
index cd41c67ff8d3..212c3c296409 100644
--- a/fs/cifs/smberr.h
+++ b/fs/cifs/smberr.h
@@ -95,6 +95,7 @@ | |||
95 | #define ERRinvlevel 124 | 95 | #define ERRinvlevel 124 |
96 | #define ERRdirnotempty 145 | 96 | #define ERRdirnotempty 145 |
97 | #define ERRnotlocked 158 | 97 | #define ERRnotlocked 158 |
98 | #define ERRcancelviolation 173 | ||
98 | #define ERRalreadyexists 183 | 99 | #define ERRalreadyexists 183 |
99 | #define ERRbadpipe 230 | 100 | #define ERRbadpipe 230 |
100 | #define ERRpipebusy 231 | 101 | #define ERRpipebusy 231 |
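
The netmisc.c and smberr.h hunks above teach the SMB-to-errno mapping about ERRcancelviolation (173), returned when a blocking lock request is cancelled, and map it to ENOLCK just like ERRnotlocked. A compact sketch of that table-driven mapping, with only a few rows reproduced:

#include <errno.h>
#include <stdio.h>

#define ERRnotlocked         158
#define ERRcancelviolation   173
#define ERRalreadyexists     183

static const struct { int smb_err; int posix_err; } err_map[] = {
        { ERRnotlocked,       ENOLCK },
        { ERRcancelviolation, ENOLCK },
        { ERRalreadyexists,   EEXIST },
};

/* Translate a DOS/SMB error code to a negative errno. */
static int map_smb_error(int smb_err)
{
        for (unsigned i = 0; i < sizeof(err_map) / sizeof(err_map[0]); i++)
                if (err_map[i].smb_err == smb_err)
                        return -err_map[i].posix_err;
        return -EIO;    /* default for unknown codes */
}

int main(void)
{
        printf("%d\n", map_smb_error(ERRcancelviolation));   /* -ENOLCK */
        return 0;
}
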
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 17ba329e2b3d..48d47b46b1fb 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -3,7 +3,8 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) International Business Machines Corp., 2002,2005 | 4 | * Copyright (C) International Business Machines Corp., 2002,2005 |
5 | * Author(s): Steve French (sfrench@us.ibm.com) | 5 | * Author(s): Steve French (sfrench@us.ibm.com) |
6 | * | 6 | * Jeremy Allison (jra@samba.org) 2006. |
7 | * | ||
7 | * This library is free software; you can redistribute it and/or modify | 8 | * This library is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU Lesser General Public License as published | 9 | * it under the terms of the GNU Lesser General Public License as published |
9 | * by the Free Software Foundation; either version 2.1 of the License, or | 10 | * by the Free Software Foundation; either version 2.1 of the License, or |
@@ -36,7 +37,7 @@ extern mempool_t *cifs_mid_poolp; | |||
36 | extern kmem_cache_t *cifs_oplock_cachep; | 37 | extern kmem_cache_t *cifs_oplock_cachep; |
37 | 38 | ||
38 | static struct mid_q_entry * | 39 | static struct mid_q_entry * |
39 | AllocMidQEntry(struct smb_hdr *smb_buffer, struct cifsSesInfo *ses) | 40 | AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses) |
40 | { | 41 | { |
41 | struct mid_q_entry *temp; | 42 | struct mid_q_entry *temp; |
42 | 43 | ||
@@ -203,6 +204,10 @@ smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer, | |||
203 | rc = 0; | 204 | rc = 0; |
204 | } | 205 | } |
205 | 206 | ||
207 | /* Don't want to modify the buffer as a | ||
208 | side effect of this call. */ | ||
209 | smb_buffer->smb_buf_length = smb_buf_length; | ||
210 | |||
206 | return rc; | 211 | return rc; |
207 | } | 212 | } |
208 | 213 | ||
@@ -217,6 +222,7 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec, | |||
217 | unsigned int len = iov[0].iov_len; | 222 | unsigned int len = iov[0].iov_len; |
218 | unsigned int total_len; | 223 | unsigned int total_len; |
219 | int first_vec = 0; | 224 | int first_vec = 0; |
225 | unsigned int smb_buf_length = smb_buffer->smb_buf_length; | ||
220 | 226 | ||
221 | if(ssocket == NULL) | 227 | if(ssocket == NULL) |
222 | return -ENOTSOCK; /* BB eventually add reconnect code here */ | 228 | return -ENOTSOCK; /* BB eventually add reconnect code here */ |
@@ -293,36 +299,15 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec, | |||
293 | } else | 299 | } else |
294 | rc = 0; | 300 | rc = 0; |
295 | 301 | ||
302 | /* Don't want to modify the buffer as a | ||
303 | side effect of this call. */ | ||
304 | smb_buffer->smb_buf_length = smb_buf_length; | ||
305 | |||
296 | return rc; | 306 | return rc; |
297 | } | 307 | } |
298 | 308 | ||
299 | int | 309 | static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op) |
300 | SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | ||
301 | struct kvec *iov, int n_vec, int * pRespBufType /* ret */, | ||
302 | const int long_op) | ||
303 | { | 310 | { |
304 | int rc = 0; | ||
305 | unsigned int receive_len; | ||
306 | unsigned long timeout; | ||
307 | struct mid_q_entry *midQ; | ||
308 | struct smb_hdr *in_buf = iov[0].iov_base; | ||
309 | |||
310 | *pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */ | ||
311 | |||
312 | if ((ses == NULL) || (ses->server == NULL)) { | ||
313 | cifs_small_buf_release(in_buf); | ||
314 | cERROR(1,("Null session")); | ||
315 | return -EIO; | ||
316 | } | ||
317 | |||
318 | if(ses->server->tcpStatus == CifsExiting) { | ||
319 | cifs_small_buf_release(in_buf); | ||
320 | return -ENOENT; | ||
321 | } | ||
322 | |||
323 | /* Ensure that we do not send more than 50 overlapping requests | ||
324 | to the same server. We may make this configurable later or | ||
325 | use ses->maxReq */ | ||
326 | if(long_op == -1) { | 311 | if(long_op == -1) { |
327 | /* oplock breaks must not be held up */ | 312 | /* oplock breaks must not be held up */ |
328 | atomic_inc(&ses->server->inFlight); | 313 | atomic_inc(&ses->server->inFlight); |
@@ -345,53 +330,140 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | |||
345 | } else { | 330 | } else { |
346 | if(ses->server->tcpStatus == CifsExiting) { | 331 | if(ses->server->tcpStatus == CifsExiting) { |
347 | spin_unlock(&GlobalMid_Lock); | 332 | spin_unlock(&GlobalMid_Lock); |
348 | cifs_small_buf_release(in_buf); | ||
349 | return -ENOENT; | 333 | return -ENOENT; |
350 | } | 334 | } |
351 | 335 | ||
352 | /* can not count locking commands against total since | 336 | /* can not count locking commands against total since |
353 | they are allowed to block on server */ | 337 | they are allowed to block on server */ |
354 | 338 | ||
355 | if(long_op < 3) { | ||
356 | /* update # of requests on the wire to server */ | 339 | /* update # of requests on the wire to server */ |
340 | if (long_op < 3) | ||
357 | atomic_inc(&ses->server->inFlight); | 341 | atomic_inc(&ses->server->inFlight); |
358 | } | ||
359 | spin_unlock(&GlobalMid_Lock); | 342 | spin_unlock(&GlobalMid_Lock); |
360 | break; | 343 | break; |
361 | } | 344 | } |
362 | } | 345 | } |
363 | } | 346 | } |
364 | /* make sure that we sign in the same order that we send on this socket | 347 | return 0; |
365 | and avoid races inside tcp sendmsg code that could cause corruption | 348 | } |
366 | of smb data */ | ||
367 | |||
368 | down(&ses->server->tcpSem); | ||
369 | 349 | ||
350 | static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf, | ||
351 | struct mid_q_entry **ppmidQ) | ||
352 | { | ||
370 | if (ses->server->tcpStatus == CifsExiting) { | 353 | if (ses->server->tcpStatus == CifsExiting) { |
371 | rc = -ENOENT; | 354 | return -ENOENT; |
372 | goto out_unlock2; | ||
373 | } else if (ses->server->tcpStatus == CifsNeedReconnect) { | 355 | } else if (ses->server->tcpStatus == CifsNeedReconnect) { |
374 | cFYI(1,("tcp session dead - return to caller to retry")); | 356 | cFYI(1,("tcp session dead - return to caller to retry")); |
375 | rc = -EAGAIN; | 357 | return -EAGAIN; |
376 | goto out_unlock2; | ||
377 | } else if (ses->status != CifsGood) { | 358 | } else if (ses->status != CifsGood) { |
378 | /* check if SMB session is bad because we are setting it up */ | 359 | /* check if SMB session is bad because we are setting it up */ |
379 | if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && | 360 | if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && |
380 | (in_buf->Command != SMB_COM_NEGOTIATE)) { | 361 | (in_buf->Command != SMB_COM_NEGOTIATE)) { |
381 | rc = -EAGAIN; | 362 | return -EAGAIN; |
382 | goto out_unlock2; | ||
383 | } /* else ok - we are setting up session */ | 363 | } /* else ok - we are setting up session */ |
384 | } | 364 | } |
385 | midQ = AllocMidQEntry(in_buf, ses); | 365 | *ppmidQ = AllocMidQEntry(in_buf, ses); |
386 | if (midQ == NULL) { | 366 | if (*ppmidQ == NULL) { |
367 | return -ENOMEM; | ||
368 | } | ||
369 | return 0; | ||
370 | } | ||
371 | |||
372 | static int wait_for_response(struct cifsSesInfo *ses, | ||
373 | struct mid_q_entry *midQ, | ||
374 | unsigned long timeout, | ||
375 | unsigned long time_to_wait) | ||
376 | { | ||
377 | unsigned long curr_timeout; | ||
378 | |||
379 | for (;;) { | ||
380 | curr_timeout = timeout + jiffies; | ||
381 | wait_event(ses->server->response_q, | ||
382 | (!(midQ->midState == MID_REQUEST_SUBMITTED)) || | ||
383 | time_after(jiffies, curr_timeout) || | ||
384 | ((ses->server->tcpStatus != CifsGood) && | ||
385 | (ses->server->tcpStatus != CifsNew))); | ||
386 | |||
387 | if (time_after(jiffies, curr_timeout) && | ||
388 | (midQ->midState == MID_REQUEST_SUBMITTED) && | ||
389 | ((ses->server->tcpStatus == CifsGood) || | ||
390 | (ses->server->tcpStatus == CifsNew))) { | ||
391 | |||
392 | unsigned long lrt; | ||
393 | |||
394 | /* We timed out. Is the server still | ||
395 | sending replies ? */ | ||
396 | spin_lock(&GlobalMid_Lock); | ||
397 | lrt = ses->server->lstrp; | ||
398 | spin_unlock(&GlobalMid_Lock); | ||
399 | |||
400 | /* Calculate time_to_wait past last receive time. | ||
401 | Although we prefer not to time out if the | ||
402 | server is still responding - we will time | ||
403 | out if the server takes more than 15 (or 45 | ||
404 | or 180) seconds to respond to this request | ||
405 | and has not responded to any request from | ||
406 | other threads on the client within 10 seconds */ | ||
407 | lrt += time_to_wait; | ||
408 | if (time_after(jiffies, lrt)) { | ||
409 | /* No replies for time_to_wait. */ | ||
410 | cERROR(1,("server not responding")); | ||
411 | return -1; | ||
412 | } | ||
413 | } else { | ||
414 | return 0; | ||
415 | } | ||
416 | } | ||
417 | } | ||
418 | |||
419 | int | ||
420 | SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | ||
421 | struct kvec *iov, int n_vec, int * pRespBufType /* ret */, | ||
422 | const int long_op) | ||
423 | { | ||
424 | int rc = 0; | ||
425 | unsigned int receive_len; | ||
426 | unsigned long timeout; | ||
427 | struct mid_q_entry *midQ; | ||
428 | struct smb_hdr *in_buf = iov[0].iov_base; | ||
429 | |||
430 | *pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */ | ||
431 | |||
432 | if ((ses == NULL) || (ses->server == NULL)) { | ||
433 | cifs_small_buf_release(in_buf); | ||
434 | cERROR(1,("Null session")); | ||
435 | return -EIO; | ||
436 | } | ||
437 | |||
438 | if(ses->server->tcpStatus == CifsExiting) { | ||
439 | cifs_small_buf_release(in_buf); | ||
440 | return -ENOENT; | ||
441 | } | ||
442 | |||
443 | /* Ensure that we do not send more than 50 overlapping requests | ||
444 | to the same server. We may make this configurable later or | ||
445 | use ses->maxReq */ | ||
446 | |||
447 | rc = wait_for_free_request(ses, long_op); | ||
448 | if (rc) { | ||
449 | cifs_small_buf_release(in_buf); | ||
450 | return rc; | ||
451 | } | ||
452 | |||
453 | /* make sure that we sign in the same order that we send on this socket | ||
454 | and avoid races inside tcp sendmsg code that could cause corruption | ||
455 | of smb data */ | ||
456 | |||
457 | down(&ses->server->tcpSem); | ||
458 | |||
459 | rc = allocate_mid(ses, in_buf, &midQ); | ||
460 | if (rc) { | ||
387 | up(&ses->server->tcpSem); | 461 | up(&ses->server->tcpSem); |
388 | cifs_small_buf_release(in_buf); | 462 | cifs_small_buf_release(in_buf); |
389 | /* If not lock req, update # of requests on wire to server */ | 463 | /* Update # of requests on wire to server */ |
390 | if(long_op < 3) { | 464 | atomic_dec(&ses->server->inFlight); |
391 | atomic_dec(&ses->server->inFlight); | 465 | wake_up(&ses->server->request_q); |
392 | wake_up(&ses->server->request_q); | 466 | return rc; |
393 | } | ||
394 | return -ENOMEM; | ||
395 | } | 467 | } |
396 | 468 | ||
397 | rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number); | 469 | rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number); |
@@ -406,32 +478,23 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | |||
406 | atomic_dec(&ses->server->inSend); | 478 | atomic_dec(&ses->server->inSend); |
407 | midQ->when_sent = jiffies; | 479 | midQ->when_sent = jiffies; |
408 | #endif | 480 | #endif |
409 | if(rc < 0) { | 481 | |
410 | DeleteMidQEntry(midQ); | 482 | up(&ses->server->tcpSem); |
411 | up(&ses->server->tcpSem); | 483 | cifs_small_buf_release(in_buf); |
412 | cifs_small_buf_release(in_buf); | 484 | |
413 | /* If not lock req, update # of requests on wire to server */ | 485 | if(rc < 0) |
414 | if(long_op < 3) { | 486 | goto out; |
415 | atomic_dec(&ses->server->inFlight); | ||
416 | wake_up(&ses->server->request_q); | ||
417 | } | ||
418 | return rc; | ||
419 | } else { | ||
420 | up(&ses->server->tcpSem); | ||
421 | cifs_small_buf_release(in_buf); | ||
422 | } | ||
423 | 487 | ||
424 | if (long_op == -1) | 488 | if (long_op == -1) |
425 | goto cifs_no_response_exit2; | 489 | goto out; |
426 | else if (long_op == 2) /* writes past end of file can take loong time */ | 490 | else if (long_op == 2) /* writes past end of file can take loong time */ |
427 | timeout = 180 * HZ; | 491 | timeout = 180 * HZ; |
428 | else if (long_op == 1) | 492 | else if (long_op == 1) |
429 | timeout = 45 * HZ; /* should be greater than | 493 | timeout = 45 * HZ; /* should be greater than |
430 | servers oplock break timeout (about 43 seconds) */ | 494 | servers oplock break timeout (about 43 seconds) */ |
431 | else if (long_op > 2) { | 495 | else |
432 | timeout = MAX_SCHEDULE_TIMEOUT; | ||
433 | } else | ||
434 | timeout = 15 * HZ; | 496 | timeout = 15 * HZ; |
497 | |||
435 | /* wait for 15 seconds or until woken up due to response arriving or | 498 | /* wait for 15 seconds or until woken up due to response arriving or |
436 | due to last connection to this server being unmounted */ | 499 | due to last connection to this server being unmounted */ |
437 | if (signal_pending(current)) { | 500 | if (signal_pending(current)) { |
@@ -441,19 +504,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | |||
441 | } | 504 | } |
442 | 505 | ||
443 | /* No user interrupts in wait - wreaks havoc with performance */ | 506 | /* No user interrupts in wait - wreaks havoc with performance */ |
444 | if(timeout != MAX_SCHEDULE_TIMEOUT) { | 507 | wait_for_response(ses, midQ, timeout, 10 * HZ); |
445 | timeout += jiffies; | ||
446 | wait_event(ses->server->response_q, | ||
447 | (!(midQ->midState & MID_REQUEST_SUBMITTED)) || | ||
448 | time_after(jiffies, timeout) || | ||
449 | ((ses->server->tcpStatus != CifsGood) && | ||
450 | (ses->server->tcpStatus != CifsNew))); | ||
451 | } else { | ||
452 | wait_event(ses->server->response_q, | ||
453 | (!(midQ->midState & MID_REQUEST_SUBMITTED)) || | ||
454 | ((ses->server->tcpStatus != CifsGood) && | ||
455 | (ses->server->tcpStatus != CifsNew))); | ||
456 | } | ||
457 | 508 | ||
458 | spin_lock(&GlobalMid_Lock); | 509 | spin_lock(&GlobalMid_Lock); |
459 | if (midQ->resp_buf) { | 510 | if (midQ->resp_buf) { |
@@ -481,11 +532,9 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | |||
481 | } | 532 | } |
482 | spin_unlock(&GlobalMid_Lock); | 533 | spin_unlock(&GlobalMid_Lock); |
483 | DeleteMidQEntry(midQ); | 534 | DeleteMidQEntry(midQ); |
484 | /* If not lock req, update # of requests on wire to server */ | 535 | /* Update # of requests on wire to server */ |
485 | if(long_op < 3) { | 536 | atomic_dec(&ses->server->inFlight); |
486 | atomic_dec(&ses->server->inFlight); | 537 | wake_up(&ses->server->request_q); |
487 | wake_up(&ses->server->request_q); | ||
488 | } | ||
489 | return rc; | 538 | return rc; |
490 | } | 539 | } |
491 | 540 | ||
@@ -536,24 +585,12 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | |||
536 | cFYI(1,("Bad MID state?")); | 585 | cFYI(1,("Bad MID state?")); |
537 | } | 586 | } |
538 | } | 587 | } |
539 | cifs_no_response_exit2: | ||
540 | DeleteMidQEntry(midQ); | ||
541 | |||
542 | if(long_op < 3) { | ||
543 | atomic_dec(&ses->server->inFlight); | ||
544 | wake_up(&ses->server->request_q); | ||
545 | } | ||
546 | 588 | ||
547 | return rc; | 589 | out: |
548 | 590 | ||
549 | out_unlock2: | 591 | DeleteMidQEntry(midQ); |
550 | up(&ses->server->tcpSem); | 592 | atomic_dec(&ses->server->inFlight); |
551 | cifs_small_buf_release(in_buf); | 593 | wake_up(&ses->server->request_q); |
552 | /* If not lock req, update # of requests on wire to server */ | ||
553 | if(long_op < 3) { | ||
554 | atomic_dec(&ses->server->inFlight); | ||
555 | wake_up(&ses->server->request_q); | ||
556 | } | ||
557 | 594 | ||
558 | return rc; | 595 | return rc; |
559 | } | 596 | } |
@@ -583,85 +620,34 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
583 | /* Ensure that we do not send more than 50 overlapping requests | 620 | /* Ensure that we do not send more than 50 overlapping requests |
584 | to the same server. We may make this configurable later or | 621 | to the same server. We may make this configurable later or |
585 | use ses->maxReq */ | 622 | use ses->maxReq */ |
586 | if(long_op == -1) { | ||
587 | /* oplock breaks must not be held up */ | ||
588 | atomic_inc(&ses->server->inFlight); | ||
589 | } else { | ||
590 | spin_lock(&GlobalMid_Lock); | ||
591 | while(1) { | ||
592 | if(atomic_read(&ses->server->inFlight) >= | ||
593 | cifs_max_pending){ | ||
594 | spin_unlock(&GlobalMid_Lock); | ||
595 | #ifdef CONFIG_CIFS_STATS2 | ||
596 | atomic_inc(&ses->server->num_waiters); | ||
597 | #endif | ||
598 | wait_event(ses->server->request_q, | ||
599 | atomic_read(&ses->server->inFlight) | ||
600 | < cifs_max_pending); | ||
601 | #ifdef CONFIG_CIFS_STATS2 | ||
602 | atomic_dec(&ses->server->num_waiters); | ||
603 | #endif | ||
604 | spin_lock(&GlobalMid_Lock); | ||
605 | } else { | ||
606 | if(ses->server->tcpStatus == CifsExiting) { | ||
607 | spin_unlock(&GlobalMid_Lock); | ||
608 | return -ENOENT; | ||
609 | } | ||
610 | 623 | ||
611 | /* can not count locking commands against total since | 624 | rc = wait_for_free_request(ses, long_op); |
612 | they are allowed to block on server */ | 625 | if (rc) |
613 | 626 | return rc; | |
614 | if(long_op < 3) { | 627 | |
615 | /* update # of requests on the wire to server */ | ||
616 | atomic_inc(&ses->server->inFlight); | ||
617 | } | ||
618 | spin_unlock(&GlobalMid_Lock); | ||
619 | break; | ||
620 | } | ||
621 | } | ||
622 | } | ||
623 | /* make sure that we sign in the same order that we send on this socket | 628 | /* make sure that we sign in the same order that we send on this socket |
624 | and avoid races inside tcp sendmsg code that could cause corruption | 629 | and avoid races inside tcp sendmsg code that could cause corruption |
625 | of smb data */ | 630 | of smb data */ |
626 | 631 | ||
627 | down(&ses->server->tcpSem); | 632 | down(&ses->server->tcpSem); |
628 | 633 | ||
629 | if (ses->server->tcpStatus == CifsExiting) { | 634 | rc = allocate_mid(ses, in_buf, &midQ); |
630 | rc = -ENOENT; | 635 | if (rc) { |
631 | goto out_unlock; | ||
632 | } else if (ses->server->tcpStatus == CifsNeedReconnect) { | ||
633 | cFYI(1,("tcp session dead - return to caller to retry")); | ||
634 | rc = -EAGAIN; | ||
635 | goto out_unlock; | ||
636 | } else if (ses->status != CifsGood) { | ||
637 | /* check if SMB session is bad because we are setting it up */ | ||
638 | if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && | ||
639 | (in_buf->Command != SMB_COM_NEGOTIATE)) { | ||
640 | rc = -EAGAIN; | ||
641 | goto out_unlock; | ||
642 | } /* else ok - we are setting up session */ | ||
643 | } | ||
644 | midQ = AllocMidQEntry(in_buf, ses); | ||
645 | if (midQ == NULL) { | ||
646 | up(&ses->server->tcpSem); | 636 | up(&ses->server->tcpSem); |
647 | /* If not lock req, update # of requests on wire to server */ | 637 | /* Update # of requests on wire to server */ |
648 | if(long_op < 3) { | 638 | atomic_dec(&ses->server->inFlight); |
649 | atomic_dec(&ses->server->inFlight); | 639 | wake_up(&ses->server->request_q); |
650 | wake_up(&ses->server->request_q); | 640 | return rc; |
651 | } | ||
652 | return -ENOMEM; | ||
653 | } | 641 | } |
654 | 642 | ||
655 | if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { | 643 | if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { |
656 | up(&ses->server->tcpSem); | ||
657 | cERROR(1, ("Illegal length, greater than maximum frame, %d", | 644 | cERROR(1, ("Illegal length, greater than maximum frame, %d", |
658 | in_buf->smb_buf_length)); | 645 | in_buf->smb_buf_length)); |
659 | DeleteMidQEntry(midQ); | 646 | DeleteMidQEntry(midQ); |
660 | /* If not lock req, update # of requests on wire to server */ | 647 | up(&ses->server->tcpSem); |
661 | if(long_op < 3) { | 648 | /* Update # of requests on wire to server */ |
662 | atomic_dec(&ses->server->inFlight); | 649 | atomic_dec(&ses->server->inFlight); |
663 | wake_up(&ses->server->request_q); | 650 | wake_up(&ses->server->request_q); |
664 | } | ||
665 | return -EIO; | 651 | return -EIO; |
666 | } | 652 | } |
667 | 653 | ||
@@ -677,27 +663,19 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
677 | atomic_dec(&ses->server->inSend); | 663 | atomic_dec(&ses->server->inSend); |
678 | midQ->when_sent = jiffies; | 664 | midQ->when_sent = jiffies; |
679 | #endif | 665 | #endif |
680 | if(rc < 0) { | 666 | up(&ses->server->tcpSem); |
681 | DeleteMidQEntry(midQ); | 667 | |
682 | up(&ses->server->tcpSem); | 668 | if(rc < 0) |
683 | /* If not lock req, update # of requests on wire to server */ | 669 | goto out; |
684 | if(long_op < 3) { | 670 | |
685 | atomic_dec(&ses->server->inFlight); | ||
686 | wake_up(&ses->server->request_q); | ||
687 | } | ||
688 | return rc; | ||
689 | } else | ||
690 | up(&ses->server->tcpSem); | ||
691 | if (long_op == -1) | 671 | if (long_op == -1) |
692 | goto cifs_no_response_exit; | 672 | goto out; |
693 | else if (long_op == 2) /* writes past end of file can take loong time */ | 673 | else if (long_op == 2) /* writes past end of file can take loong time */ |
694 | timeout = 180 * HZ; | 674 | timeout = 180 * HZ; |
695 | else if (long_op == 1) | 675 | else if (long_op == 1) |
696 | timeout = 45 * HZ; /* should be greater than | 676 | timeout = 45 * HZ; /* should be greater than |
697 | servers oplock break timeout (about 43 seconds) */ | 677 | servers oplock break timeout (about 43 seconds) */ |
698 | else if (long_op > 2) { | 678 | else |
699 | timeout = MAX_SCHEDULE_TIMEOUT; | ||
700 | } else | ||
701 | timeout = 15 * HZ; | 679 | timeout = 15 * HZ; |
702 | /* wait for 15 seconds or until woken up due to response arriving or | 680 | /* wait for 15 seconds or until woken up due to response arriving or |
703 | due to last connection to this server being unmounted */ | 681 | due to last connection to this server being unmounted */ |
@@ -708,19 +686,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
708 | } | 686 | } |
709 | 687 | ||
710 | /* No user interrupts in wait - wreaks havoc with performance */ | 688 | /* No user interrupts in wait - wreaks havoc with performance */ |
711 | if(timeout != MAX_SCHEDULE_TIMEOUT) { | 689 | wait_for_response(ses, midQ, timeout, 10 * HZ); |
712 | timeout += jiffies; | ||
713 | wait_event(ses->server->response_q, | ||
714 | (!(midQ->midState & MID_REQUEST_SUBMITTED)) || | ||
715 | time_after(jiffies, timeout) || | ||
716 | ((ses->server->tcpStatus != CifsGood) && | ||
717 | (ses->server->tcpStatus != CifsNew))); | ||
718 | } else { | ||
719 | wait_event(ses->server->response_q, | ||
720 | (!(midQ->midState & MID_REQUEST_SUBMITTED)) || | ||
721 | ((ses->server->tcpStatus != CifsGood) && | ||
722 | (ses->server->tcpStatus != CifsNew))); | ||
723 | } | ||
724 | 690 | ||
725 | spin_lock(&GlobalMid_Lock); | 691 | spin_lock(&GlobalMid_Lock); |
726 | if (midQ->resp_buf) { | 692 | if (midQ->resp_buf) { |
@@ -748,11 +714,9 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
748 | } | 714 | } |
749 | spin_unlock(&GlobalMid_Lock); | 715 | spin_unlock(&GlobalMid_Lock); |
750 | DeleteMidQEntry(midQ); | 716 | DeleteMidQEntry(midQ); |
751 | /* If not lock req, update # of requests on wire to server */ | 717 | /* Update # of requests on wire to server */ |
752 | if(long_op < 3) { | 718 | atomic_dec(&ses->server->inFlight); |
753 | atomic_dec(&ses->server->inFlight); | 719 | wake_up(&ses->server->request_q); |
754 | wake_up(&ses->server->request_q); | ||
755 | } | ||
756 | return rc; | 720 | return rc; |
757 | } | 721 | } |
758 | 722 | ||
@@ -799,23 +763,253 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
799 | cERROR(1,("Bad MID state?")); | 763 | cERROR(1,("Bad MID state?")); |
800 | } | 764 | } |
801 | } | 765 | } |
802 | cifs_no_response_exit: | 766 | |
767 | out: | ||
768 | |||
803 | DeleteMidQEntry(midQ); | 769 | DeleteMidQEntry(midQ); |
770 | atomic_dec(&ses->server->inFlight); | ||
771 | wake_up(&ses->server->request_q); | ||
804 | 772 | ||
805 | if(long_op < 3) { | 773 | return rc; |
806 | atomic_dec(&ses->server->inFlight); | 774 | } |
807 | wake_up(&ses->server->request_q); | 775 | |
808 | } | 776 | /* Send an NT_CANCEL SMB to cause the POSIX blocking lock to return. */ |
777 | |||
778 | static int | ||
779 | send_nt_cancel(struct cifsTconInfo *tcon, struct smb_hdr *in_buf, | ||
780 | struct mid_q_entry *midQ) | ||
781 | { | ||
782 | int rc = 0; | ||
783 | struct cifsSesInfo *ses = tcon->ses; | ||
784 | __u16 mid = in_buf->Mid; | ||
809 | 785 | ||
786 | header_assemble(in_buf, SMB_COM_NT_CANCEL, tcon, 0); | ||
787 | in_buf->Mid = mid; | ||
788 | down(&ses->server->tcpSem); | ||
789 | rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number); | ||
790 | if (rc) { | ||
791 | up(&ses->server->tcpSem); | ||
792 | return rc; | ||
793 | } | ||
794 | rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length, | ||
795 | (struct sockaddr *) &(ses->server->addr.sockAddr)); | ||
796 | up(&ses->server->tcpSem); | ||
810 | return rc; | 797 | return rc; |
798 | } | ||
799 | |||
800 | /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows | ||
801 | blocking lock to return. */ | ||
802 | |||
803 | static int | ||
804 | send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon, | ||
805 | struct smb_hdr *in_buf, | ||
806 | struct smb_hdr *out_buf) | ||
807 | { | ||
808 | int bytes_returned; | ||
809 | struct cifsSesInfo *ses = tcon->ses; | ||
810 | LOCK_REQ *pSMB = (LOCK_REQ *)in_buf; | ||
811 | |||
812 | /* We just modify the current in_buf to change | ||
813 | the type of lock from LOCKING_ANDX_SHARED_LOCK | ||
814 | or LOCKING_ANDX_EXCLUSIVE_LOCK to | ||
815 | LOCKING_ANDX_CANCEL_LOCK. */ | ||
816 | |||
817 | pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES; | ||
818 | pSMB->Timeout = 0; | ||
819 | pSMB->hdr.Mid = GetNextMid(ses->server); | ||
820 | |||
821 | return SendReceive(xid, ses, in_buf, out_buf, | ||
822 | &bytes_returned, 0); | ||
823 | } | ||
811 | 824 | ||
812 | out_unlock: | 825 | int |
826 | SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, | ||
827 | struct smb_hdr *in_buf, struct smb_hdr *out_buf, | ||
828 | int *pbytes_returned) | ||
829 | { | ||
830 | int rc = 0; | ||
831 | int rstart = 0; | ||
832 | unsigned int receive_len; | ||
833 | struct mid_q_entry *midQ; | ||
834 | struct cifsSesInfo *ses; | ||
835 | |||
836 | if (tcon == NULL || tcon->ses == NULL) { | ||
837 | cERROR(1,("Null smb session")); | ||
838 | return -EIO; | ||
839 | } | ||
840 | ses = tcon->ses; | ||
841 | |||
842 | if(ses->server == NULL) { | ||
843 | cERROR(1,("Null tcp session")); | ||
844 | return -EIO; | ||
845 | } | ||
846 | |||
847 | if(ses->server->tcpStatus == CifsExiting) | ||
848 | return -ENOENT; | ||
849 | |||
850 | /* Ensure that we do not send more than 50 overlapping requests | ||
851 | to the same server. We may make this configurable later or | ||
852 | use ses->maxReq */ | ||
853 | |||
854 | rc = wait_for_free_request(ses, 3); | ||
855 | if (rc) | ||
856 | return rc; | ||
857 | |||
858 | /* make sure that we sign in the same order that we send on this socket | ||
859 | and avoid races inside tcp sendmsg code that could cause corruption | ||
860 | of smb data */ | ||
861 | |||
862 | down(&ses->server->tcpSem); | ||
863 | |||
864 | rc = allocate_mid(ses, in_buf, &midQ); | ||
865 | if (rc) { | ||
866 | up(&ses->server->tcpSem); | ||
867 | return rc; | ||
868 | } | ||
869 | |||
870 | if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { | ||
871 | up(&ses->server->tcpSem); | ||
872 | cERROR(1, ("Illegal length, greater than maximum frame, %d", | ||
873 | in_buf->smb_buf_length)); | ||
874 | DeleteMidQEntry(midQ); | ||
875 | return -EIO; | ||
876 | } | ||
877 | |||
878 | rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number); | ||
879 | |||
880 | midQ->midState = MID_REQUEST_SUBMITTED; | ||
881 | #ifdef CONFIG_CIFS_STATS2 | ||
882 | atomic_inc(&ses->server->inSend); | ||
883 | #endif | ||
884 | rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length, | ||
885 | (struct sockaddr *) &(ses->server->addr.sockAddr)); | ||
886 | #ifdef CONFIG_CIFS_STATS2 | ||
887 | atomic_dec(&ses->server->inSend); | ||
888 | midQ->when_sent = jiffies; | ||
889 | #endif | ||
813 | up(&ses->server->tcpSem); | 890 | up(&ses->server->tcpSem); |
814 | /* If not lock req, update # of requests on wire to server */ | 891 | |
815 | if(long_op < 3) { | 892 | if(rc < 0) { |
816 | atomic_dec(&ses->server->inFlight); | 893 | DeleteMidQEntry(midQ); |
817 | wake_up(&ses->server->request_q); | 894 | return rc; |
895 | } | ||
896 | |||
897 | /* Wait for a reply - allow signals to interrupt. */ | ||
898 | rc = wait_event_interruptible(ses->server->response_q, | ||
899 | (!(midQ->midState == MID_REQUEST_SUBMITTED)) || | ||
900 | ((ses->server->tcpStatus != CifsGood) && | ||
901 | (ses->server->tcpStatus != CifsNew))); | ||
902 | |||
903 | /* Were we interrupted by a signal ? */ | ||
904 | if ((rc == -ERESTARTSYS) && | ||
905 | (midQ->midState == MID_REQUEST_SUBMITTED) && | ||
906 | ((ses->server->tcpStatus == CifsGood) || | ||
907 | (ses->server->tcpStatus == CifsNew))) { | ||
908 | |||
909 | if (in_buf->Command == SMB_COM_TRANSACTION2) { | ||
910 | /* POSIX lock. We send a NT_CANCEL SMB to cause the | ||
911 | blocking lock to return. */ | ||
912 | |||
913 | rc = send_nt_cancel(tcon, in_buf, midQ); | ||
914 | if (rc) { | ||
915 | DeleteMidQEntry(midQ); | ||
916 | return rc; | ||
917 | } | ||
918 | } else { | ||
919 | /* Windows lock. We send a LOCKINGX_CANCEL_LOCK | ||
920 | to cause the blocking lock to return. */ | ||
921 | |||
922 | rc = send_lock_cancel(xid, tcon, in_buf, out_buf); | ||
923 | |||
924 | /* If we get -ENOLCK back the lock may have | ||
925 | already been removed. Don't exit in this case. */ | ||
926 | if (rc && rc != -ENOLCK) { | ||
927 | DeleteMidQEntry(midQ); | ||
928 | return rc; | ||
929 | } | ||
930 | } | ||
931 | |||
932 | /* Wait 5 seconds for the response. */ | ||
933 | if (wait_for_response(ses, midQ, 5 * HZ, 5 * HZ)==0) { | ||
934 | /* We got the response - restart system call. */ | ||
935 | rstart = 1; | ||
936 | } | ||
937 | } | ||
938 | |||
939 | spin_lock(&GlobalMid_Lock); | ||
940 | if (midQ->resp_buf) { | ||
941 | spin_unlock(&GlobalMid_Lock); | ||
942 | receive_len = midQ->resp_buf->smb_buf_length; | ||
943 | } else { | ||
944 | cERROR(1,("No response for cmd %d mid %d", | ||
945 | midQ->command, midQ->mid)); | ||
946 | if(midQ->midState == MID_REQUEST_SUBMITTED) { | ||
947 | if(ses->server->tcpStatus == CifsExiting) | ||
948 | rc = -EHOSTDOWN; | ||
949 | else { | ||
950 | ses->server->tcpStatus = CifsNeedReconnect; | ||
951 | midQ->midState = MID_RETRY_NEEDED; | ||
952 | } | ||
953 | } | ||
954 | |||
955 | if (rc != -EHOSTDOWN) { | ||
956 | if(midQ->midState == MID_RETRY_NEEDED) { | ||
957 | rc = -EAGAIN; | ||
958 | cFYI(1,("marking request for retry")); | ||
959 | } else { | ||
960 | rc = -EIO; | ||
961 | } | ||
962 | } | ||
963 | spin_unlock(&GlobalMid_Lock); | ||
964 | DeleteMidQEntry(midQ); | ||
965 | return rc; | ||
818 | } | 966 | } |
967 | |||
968 | if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) { | ||
969 | cERROR(1, ("Frame too large received. Length: %d Xid: %d", | ||
970 | receive_len, xid)); | ||
971 | rc = -EIO; | ||
972 | } else { /* rcvd frame is ok */ | ||
973 | |||
974 | if (midQ->resp_buf && out_buf | ||
975 | && (midQ->midState == MID_RESPONSE_RECEIVED)) { | ||
976 | out_buf->smb_buf_length = receive_len; | ||
977 | memcpy((char *)out_buf + 4, | ||
978 | (char *)midQ->resp_buf + 4, | ||
979 | receive_len); | ||
980 | |||
981 | dump_smb(out_buf, 92); | ||
982 | /* convert the length into a more usable form */ | ||
983 | if((receive_len > 24) && | ||
984 | (ses->server->secMode & (SECMODE_SIGN_REQUIRED | | ||
985 | SECMODE_SIGN_ENABLED))) { | ||
986 | rc = cifs_verify_signature(out_buf, | ||
987 | ses->server->mac_signing_key, | ||
988 | midQ->sequence_number+1); | ||
989 | if(rc) { | ||
990 | cERROR(1,("Unexpected SMB signature")); | ||
991 | /* BB FIXME add code to kill session */ | ||
992 | } | ||
993 | } | ||
994 | |||
995 | *pbytes_returned = out_buf->smb_buf_length; | ||
996 | |||
997 | /* BB special case reconnect tid and uid here? */ | ||
998 | rc = map_smb_to_linux_error(out_buf); | ||
819 | 999 | ||
1000 | /* convert ByteCount if necessary */ | ||
1001 | if (receive_len >= | ||
1002 | sizeof (struct smb_hdr) - | ||
1003 | 4 /* do not count RFC1001 header */ + | ||
1004 | (2 * out_buf->WordCount) + 2 /* bcc */ ) | ||
1005 | BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf)); | ||
1006 | } else { | ||
1007 | rc = -EIO; | ||
1008 | cERROR(1,("Bad MID state?")); | ||
1009 | } | ||
1010 | } | ||
1011 | DeleteMidQEntry(midQ); | ||
1012 | if (rstart && rc == -EACCES) | ||
1013 | return -ERESTARTSYS; | ||
820 | return rc; | 1014 | return rc; |
821 | } | 1015 | } |
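
Note: the SendReceive()/SendReceive2() rewrite above leans on three helpers -- wait_for_free_request(), allocate_mid() and wait_for_response() -- that this patch defines earlier in transport.c, outside the hunks shown here, and it adds SendReceiveBlockingLock() so blocking byte-range locks can be cancelled when a signal arrives. Judging only from the open-coded wait loops being deleted and from the call sites (a return of 0 is treated as "the response arrived"), wait_for_response() is roughly the sketch below; the argument names and the single wait_event() are my assumptions, and the real helper presumably uses its fourth argument to re-check the server periodically rather than sleeping in one stretch.

/* Sketch only -- reconstructed from the wait loops removed in this hunk. */
static int
wait_for_response(struct cifsSesInfo *ses, struct mid_q_entry *midQ,
		  unsigned long timeout, unsigned long time_to_wait)
{
	unsigned long curr_timeout = jiffies + timeout;

	/* Same wake-up condition as the deleted open-coded waits: a response
	   arrived, the timeout expired, or the TCP session went bad. */
	wait_event(ses->server->response_q,
		(!(midQ->midState & MID_REQUEST_SUBMITTED)) ||
		time_after(jiffies, curr_timeout) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* SendReceiveBlockingLock() above treats 0 as "got a response". */
	return (midQ->midState & MID_REQUEST_SUBMITTED) ? -EAGAIN : 0;
}

wait_for_free_request() and allocate_mid() likewise fold the inFlight throttling and the tcpStatus/MID-allocation checks that every caller previously open-coded, which is what lets the error paths above collapse into a single out: label.
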
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index 7754d641775e..067648b7179b 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c | |||
@@ -330,11 +330,15 @@ ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size) | |||
330 | sb = direntry->d_inode->i_sb; | 330 | sb = direntry->d_inode->i_sb; |
331 | if(sb == NULL) | 331 | if(sb == NULL) |
332 | return -EIO; | 332 | return -EIO; |
333 | xid = GetXid(); | ||
334 | 333 | ||
335 | cifs_sb = CIFS_SB(sb); | 334 | cifs_sb = CIFS_SB(sb); |
336 | pTcon = cifs_sb->tcon; | 335 | pTcon = cifs_sb->tcon; |
337 | 336 | ||
337 | if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) | ||
338 | return -EOPNOTSUPP; | ||
339 | |||
340 | xid = GetXid(); | ||
341 | |||
338 | full_path = build_path_from_dentry(direntry); | 342 | full_path = build_path_from_dentry(direntry); |
339 | if(full_path == NULL) { | 343 | if(full_path == NULL) { |
340 | FreeXid(xid); | 344 | FreeXid(xid); |
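
Note: the reordering in cifs_listxattr() above is not cosmetic. GetXid() hands out an xid that only FreeXid() releases, so testing CIFS_MOUNT_NO_XATTR after GetXid() would leak one xid on every listxattr() call against a mount with xattrs disabled. The pattern, in outline (comments are mine):

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		return -EOPNOTSUPP;	/* nothing acquired yet, safe to bail */

	xid = GetXid();			/* from here on, every exit path must
					   reach FreeXid(xid) */
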
diff --git a/fs/coda/file.c b/fs/coda/file.c index cc66c681bd11..dbfbcfa5b3c0 100644 --- a/fs/coda/file.c +++ b/fs/coda/file.c | |||
@@ -136,10 +136,8 @@ int coda_open(struct inode *coda_inode, struct file *coda_file) | |||
136 | coda_vfs_stat.open++; | 136 | coda_vfs_stat.open++; |
137 | 137 | ||
138 | cfi = kmalloc(sizeof(struct coda_file_info), GFP_KERNEL); | 138 | cfi = kmalloc(sizeof(struct coda_file_info), GFP_KERNEL); |
139 | if (!cfi) { | 139 | if (!cfi) |
140 | unlock_kernel(); | ||
141 | return -ENOMEM; | 140 | return -ENOMEM; |
142 | } | ||
143 | 141 | ||
144 | lock_kernel(); | 142 | lock_kernel(); |
145 | 143 | ||
diff --git a/fs/efs/symlink.c b/fs/efs/symlink.c index e249cf733a6b..1d30d2ff440f 100644 --- a/fs/efs/symlink.c +++ b/fs/efs/symlink.c | |||
@@ -22,7 +22,7 @@ static int efs_symlink_readpage(struct file *file, struct page *page) | |||
22 | 22 | ||
23 | err = -ENAMETOOLONG; | 23 | err = -ENAMETOOLONG; |
24 | if (size > 2 * EFS_BLOCKSIZE) | 24 | if (size > 2 * EFS_BLOCKSIZE) |
25 | goto fail; | 25 | goto fail_notlocked; |
26 | 26 | ||
27 | lock_kernel(); | 27 | lock_kernel(); |
28 | /* read first 512 bytes of link target */ | 28 | /* read first 512 bytes of link target */ |
@@ -47,6 +47,7 @@ static int efs_symlink_readpage(struct file *file, struct page *page) | |||
47 | return 0; | 47 | return 0; |
48 | fail: | 48 | fail: |
49 | unlock_kernel(); | 49 | unlock_kernel(); |
50 | fail_notlocked: | ||
50 | SetPageError(page); | 51 | SetPageError(page); |
51 | kunmap(page); | 52 | kunmap(page); |
52 | unlock_page(page); | 53 | unlock_page(page); |
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 19ffb043abbc..3a3567433b92 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
@@ -1168,7 +1168,7 @@ static int ep_unlink(struct eventpoll *ep, struct epitem *epi) | |||
1168 | eexit_1: | 1168 | eexit_1: |
1169 | 1169 | ||
1170 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n", | 1170 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n", |
1171 | current, ep, epi->file, error)); | 1171 | current, ep, epi->ffd.file, error)); |
1172 | 1172 | ||
1173 | return error; | 1173 | return error; |
1174 | } | 1174 | } |
@@ -1236,7 +1236,7 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k | |||
1236 | struct eventpoll *ep = epi->ep; | 1236 | struct eventpoll *ep = epi->ep; |
1237 | 1237 | ||
1238 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n", | 1238 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n", |
1239 | current, epi->file, epi, ep)); | 1239 | current, epi->ffd.file, epi, ep)); |
1240 | 1240 | ||
1241 | write_lock_irqsave(&ep->lock, flags); | 1241 | write_lock_irqsave(&ep->lock, flags); |
1242 | 1242 | ||
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
@@ -486,8 +486,6 @@ struct file *open_exec(const char *name) | |||
486 | if (!(nd.mnt->mnt_flags & MNT_NOEXEC) && | 486 | if (!(nd.mnt->mnt_flags & MNT_NOEXEC) && |
487 | S_ISREG(inode->i_mode)) { | 487 | S_ISREG(inode->i_mode)) { |
488 | int err = vfs_permission(&nd, MAY_EXEC); | 488 | int err = vfs_permission(&nd, MAY_EXEC); |
489 | if (!err && !(inode->i_mode & 0111)) | ||
490 | err = -EACCES; | ||
491 | file = ERR_PTR(err); | 489 | file = ERR_PTR(err); |
492 | if (!err) { | 490 | if (!err) { |
493 | file = nameidata_to_filp(&nd, O_RDONLY); | 491 | file = nameidata_to_filp(&nd, O_RDONLY); |
@@ -753,7 +751,7 @@ no_thread_group: | |||
753 | 751 | ||
754 | write_lock_irq(&tasklist_lock); | 752 | write_lock_irq(&tasklist_lock); |
755 | spin_lock(&oldsighand->siglock); | 753 | spin_lock(&oldsighand->siglock); |
756 | spin_lock(&newsighand->siglock); | 754 | spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING); |
757 | 755 | ||
758 | rcu_assign_pointer(current->sighand, newsighand); | 756 | rcu_assign_pointer(current->sighand, newsighand); |
759 | recalc_sigpending(); | 757 | recalc_sigpending(); |
@@ -922,12 +920,6 @@ int prepare_binprm(struct linux_binprm *bprm) | |||
922 | int retval; | 920 | int retval; |
923 | 921 | ||
924 | mode = inode->i_mode; | 922 | mode = inode->i_mode; |
925 | /* | ||
926 | * Check execute perms again - if the caller has CAP_DAC_OVERRIDE, | ||
927 | * generic_permission lets a non-executable through | ||
928 | */ | ||
929 | if (!(mode & 0111)) /* with at least _one_ execute bit set */ | ||
930 | return -EACCES; | ||
931 | if (bprm->file->f_op == NULL) | 923 | if (bprm->file->f_op == NULL) |
932 | return -EACCES; | 924 | return -EACCES; |
933 | 925 | ||
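
Note: two independent things happen in the fs/exec.c hunks above. The open-coded "mode & 0111" tests disappear from open_exec() and prepare_binprm() (execute permission is presumably enforced inside vfs_permission(..., MAY_EXEC) after this series), and the second siglock acquisition in the de_thread path gains a lockdep annotation. The annotation is needed because oldsighand->siglock and newsighand->siglock belong to the same lock class; without it lockdep would flag the nesting as possible recursive locking. In outline:

	spin_lock(&oldsighand->siglock);
	/* same lock class as the lock already held, so tell lockdep this is
	   deliberate, bounded nesting rather than recursion */
	spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);
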
diff --git a/fs/ext2/super.c b/fs/ext2/super.c index f2702cda9779..681dea8f9532 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c | |||
@@ -775,7 +775,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) | |||
775 | if (EXT2_INODE_SIZE(sb) == 0) | 775 | if (EXT2_INODE_SIZE(sb) == 0) |
776 | goto cantfind_ext2; | 776 | goto cantfind_ext2; |
777 | sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb); | 777 | sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb); |
778 | if (sbi->s_inodes_per_block == 0) | 778 | if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0) |
779 | goto cantfind_ext2; | 779 | goto cantfind_ext2; |
780 | sbi->s_itb_per_group = sbi->s_inodes_per_group / | 780 | sbi->s_itb_per_group = sbi->s_inodes_per_group / |
781 | sbi->s_inodes_per_block; | 781 | sbi->s_inodes_per_block; |
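
Note: the extra s_inodes_per_group test above defends against a corrupted superblock; later ext2 computations divide by EXT2_INODES_PER_GROUP(sb), and with a zero value the mount would oops on a divide-by-zero instead of failing cleanly through cantfind_ext2. The exact crash site is an assumption on my part, but the protected expressions look like:

	/* e.g. locating the group an inode lives in */
	block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);	/* /0 if zero */
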
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c index a504a40d6d29..063d994bda0b 100644 --- a/fs/ext3/balloc.c +++ b/fs/ext3/balloc.c | |||
@@ -1269,12 +1269,12 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, | |||
1269 | goal = le32_to_cpu(es->s_first_data_block); | 1269 | goal = le32_to_cpu(es->s_first_data_block); |
1270 | group_no = (goal - le32_to_cpu(es->s_first_data_block)) / | 1270 | group_no = (goal - le32_to_cpu(es->s_first_data_block)) / |
1271 | EXT3_BLOCKS_PER_GROUP(sb); | 1271 | EXT3_BLOCKS_PER_GROUP(sb); |
1272 | goal_group = group_no; | ||
1273 | retry_alloc: | ||
1272 | gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); | 1274 | gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); |
1273 | if (!gdp) | 1275 | if (!gdp) |
1274 | goto io_error; | 1276 | goto io_error; |
1275 | 1277 | ||
1276 | goal_group = group_no; | ||
1277 | retry: | ||
1278 | free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); | 1278 | free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); |
1279 | /* | 1279 | /* |
1280 | * if there is not enough free blocks to make a new resevation | 1280 | * if there is not enough free blocks to make a new resevation |
@@ -1349,7 +1349,7 @@ retry: | |||
1349 | if (my_rsv) { | 1349 | if (my_rsv) { |
1350 | my_rsv = NULL; | 1350 | my_rsv = NULL; |
1351 | group_no = goal_group; | 1351 | group_no = goal_group; |
1352 | goto retry; | 1352 | goto retry_alloc; |
1353 | } | 1353 | } |
1354 | /* No space left on the device */ | 1354 | /* No space left on the device */ |
1355 | *errp = -ENOSPC; | 1355 | *errp = -ENOSPC; |
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index f804d5e9d60c..c5ee9f0691e3 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c | |||
@@ -1158,7 +1158,7 @@ retry: | |||
1158 | ret = PTR_ERR(handle); | 1158 | ret = PTR_ERR(handle); |
1159 | goto out; | 1159 | goto out; |
1160 | } | 1160 | } |
1161 | if (test_opt(inode->i_sb, NOBH)) | 1161 | if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode)) |
1162 | ret = nobh_prepare_write(page, from, to, ext3_get_block); | 1162 | ret = nobh_prepare_write(page, from, to, ext3_get_block); |
1163 | else | 1163 | else |
1164 | ret = block_prepare_write(page, from, to, ext3_get_block); | 1164 | ret = block_prepare_write(page, from, to, ext3_get_block); |
@@ -1244,7 +1244,7 @@ static int ext3_writeback_commit_write(struct file *file, struct page *page, | |||
1244 | if (new_i_size > EXT3_I(inode)->i_disksize) | 1244 | if (new_i_size > EXT3_I(inode)->i_disksize) |
1245 | EXT3_I(inode)->i_disksize = new_i_size; | 1245 | EXT3_I(inode)->i_disksize = new_i_size; |
1246 | 1246 | ||
1247 | if (test_opt(inode->i_sb, NOBH)) | 1247 | if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode)) |
1248 | ret = nobh_commit_write(file, page, from, to); | 1248 | ret = nobh_commit_write(file, page, from, to); |
1249 | else | 1249 | else |
1250 | ret = generic_commit_write(file, page, from, to); | 1250 | ret = generic_commit_write(file, page, from, to); |
@@ -1494,7 +1494,7 @@ static int ext3_writeback_writepage(struct page *page, | |||
1494 | goto out_fail; | 1494 | goto out_fail; |
1495 | } | 1495 | } |
1496 | 1496 | ||
1497 | if (test_opt(inode->i_sb, NOBH)) | 1497 | if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode)) |
1498 | ret = nobh_writepage(page, ext3_get_block, wbc); | 1498 | ret = nobh_writepage(page, ext3_get_block, wbc); |
1499 | else | 1499 | else |
1500 | ret = block_write_full_page(page, ext3_get_block, wbc); | 1500 | ret = block_write_full_page(page, ext3_get_block, wbc); |
@@ -2402,14 +2402,15 @@ static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb, | |||
2402 | struct buffer_head *bh; | 2402 | struct buffer_head *bh; |
2403 | struct ext3_group_desc * gdp; | 2403 | struct ext3_group_desc * gdp; |
2404 | 2404 | ||
2405 | 2405 | if (!ext3_valid_inum(sb, ino)) { | |
2406 | if ((ino != EXT3_ROOT_INO && ino != EXT3_JOURNAL_INO && | 2406 | /* |
2407 | ino != EXT3_RESIZE_INO && ino < EXT3_FIRST_INO(sb)) || | 2407 | * This error is already checked for in namei.c unless we are |
2408 | ino > le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count)) { | 2408 | * looking at an NFS filehandle, in which case no error |
2409 | ext3_error(sb, "ext3_get_inode_block", | 2409 | * report is needed |
2410 | "bad inode number: %lu", ino); | 2410 | */ |
2411 | return 0; | 2411 | return 0; |
2412 | } | 2412 | } |
2413 | |||
2413 | block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb); | 2414 | block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb); |
2414 | if (block_group >= EXT3_SB(sb)->s_groups_count) { | 2415 | if (block_group >= EXT3_SB(sb)->s_groups_count) { |
2415 | ext3_error(sb,"ext3_get_inode_block","group >= groups count"); | 2416 | ext3_error(sb,"ext3_get_inode_block","group >= groups count"); |
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index d9176dba3698..2aa7101b27cd 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c | |||
@@ -1000,7 +1000,12 @@ static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, str | |||
1000 | if (bh) { | 1000 | if (bh) { |
1001 | unsigned long ino = le32_to_cpu(de->inode); | 1001 | unsigned long ino = le32_to_cpu(de->inode); |
1002 | brelse (bh); | 1002 | brelse (bh); |
1003 | inode = iget(dir->i_sb, ino); | 1003 | if (!ext3_valid_inum(dir->i_sb, ino)) { |
1004 | ext3_error(dir->i_sb, "ext3_lookup", | ||
1005 | "bad inode number: %lu", ino); | ||
1006 | inode = NULL; | ||
1007 | } else | ||
1008 | inode = iget(dir->i_sb, ino); | ||
1004 | 1009 | ||
1005 | if (!inode) | 1010 | if (!inode) |
1006 | return ERR_PTR(-EACCES); | 1011 | return ERR_PTR(-EACCES); |
@@ -1028,7 +1033,13 @@ struct dentry *ext3_get_parent(struct dentry *child) | |||
1028 | return ERR_PTR(-ENOENT); | 1033 | return ERR_PTR(-ENOENT); |
1029 | ino = le32_to_cpu(de->inode); | 1034 | ino = le32_to_cpu(de->inode); |
1030 | brelse(bh); | 1035 | brelse(bh); |
1031 | inode = iget(child->d_inode->i_sb, ino); | 1036 | |
1037 | if (!ext3_valid_inum(child->d_inode->i_sb, ino)) { | ||
1038 | ext3_error(child->d_inode->i_sb, "ext3_get_parent", | ||
1039 | "bad inode number: %lu", ino); | ||
1040 | inode = NULL; | ||
1041 | } else | ||
1042 | inode = iget(child->d_inode->i_sb, ino); | ||
1032 | 1043 | ||
1033 | if (!inode) | 1044 | if (!inode) |
1034 | return ERR_PTR(-EACCES); | 1045 | return ERR_PTR(-EACCES); |
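
Note: both ext3_lookup()/ext3_get_parent() above and the ext3_get_inode_block() hunk earlier rely on an ext3_valid_inum() helper that this patch adds elsewhere (presumably in ext3_fs.h, not shown here). Taking the negation of the open-coded test it replaces in ext3_get_inode_block(), the helper is essentially the sketch below; treat it as a reconstruction rather than the exact header change:

static inline int ext3_valid_inum(struct super_block *sb, unsigned long ino)
{
	return ino == EXT3_ROOT_INO ||
	       ino == EXT3_JOURNAL_INO ||
	       ino == EXT3_RESIZE_INO ||
	       (ino >= EXT3_FIRST_INO(sb) &&
		ino <= le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count));
}

The point of the split is that a bad inode number coming from a stale NFS filehandle is expected and should fail quietly, while one coming from an on-disk directory entry is corruption and gets ext3_error() in the lookup paths above.
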
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c index 29cce456c7ce..43886fa00a2a 100644 --- a/fs/freevxfs/vxfs_lookup.c +++ b/fs/freevxfs/vxfs_lookup.c | |||
@@ -246,6 +246,8 @@ vxfs_readdir(struct file *fp, void *retp, filldir_t filler) | |||
246 | u_long page, npages, block, pblocks, nblocks, offset; | 246 | u_long page, npages, block, pblocks, nblocks, offset; |
247 | loff_t pos; | 247 | loff_t pos; |
248 | 248 | ||
249 | lock_kernel(); | ||
250 | |||
249 | switch ((long)fp->f_pos) { | 251 | switch ((long)fp->f_pos) { |
250 | case 0: | 252 | case 0: |
251 | if (filler(retp, ".", 1, fp->f_pos, ip->i_ino, DT_DIR) < 0) | 253 | if (filler(retp, ".", 1, fp->f_pos, ip->i_ino, DT_DIR) < 0) |
diff --git a/fs/fuse/control.c b/fs/fuse/control.c index a3bce3a77253..46fe60b2da23 100644 --- a/fs/fuse/control.c +++ b/fs/fuse/control.c | |||
@@ -105,7 +105,7 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent, | |||
105 | 105 | ||
106 | /* | 106 | /* |
107 | * Add a connection to the control filesystem (if it exists). Caller | 107 | * Add a connection to the control filesystem (if it exists). Caller |
108 | * must host fuse_mutex | 108 | * must hold fuse_mutex |
109 | */ | 109 | */ |
110 | int fuse_ctl_add_conn(struct fuse_conn *fc) | 110 | int fuse_ctl_add_conn(struct fuse_conn *fc) |
111 | { | 111 | { |
@@ -139,7 +139,7 @@ int fuse_ctl_add_conn(struct fuse_conn *fc) | |||
139 | 139 | ||
140 | /* | 140 | /* |
141 | * Remove a connection from the control filesystem (if it exists). | 141 | * Remove a connection from the control filesystem (if it exists). |
142 | * Caller must host fuse_mutex | 142 | * Caller must hold fuse_mutex |
143 | */ | 143 | */ |
144 | void fuse_ctl_remove_conn(struct fuse_conn *fc) | 144 | void fuse_ctl_remove_conn(struct fuse_conn *fc) |
145 | { | 145 | { |
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 72a74cde6de8..409ce6a7cca4 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
@@ -14,6 +14,33 @@ | |||
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/namei.h> | 15 | #include <linux/namei.h> |
16 | 16 | ||
17 | #if BITS_PER_LONG >= 64 | ||
18 | static inline void fuse_dentry_settime(struct dentry *entry, u64 time) | ||
19 | { | ||
20 | entry->d_time = time; | ||
21 | } | ||
22 | |||
23 | static inline u64 fuse_dentry_time(struct dentry *entry) | ||
24 | { | ||
25 | return entry->d_time; | ||
26 | } | ||
27 | #else | ||
28 | /* | ||
29 | * On 32 bit archs store the high 32 bits of time in d_fsdata | ||
30 | */ | ||
31 | static void fuse_dentry_settime(struct dentry *entry, u64 time) | ||
32 | { | ||
33 | entry->d_time = time; | ||
34 | entry->d_fsdata = (void *) (unsigned long) (time >> 32); | ||
35 | } | ||
36 | |||
37 | static u64 fuse_dentry_time(struct dentry *entry) | ||
38 | { | ||
39 | return (u64) entry->d_time + | ||
40 | ((u64) (unsigned long) entry->d_fsdata << 32); | ||
41 | } | ||
42 | #endif | ||
43 | |||
17 | /* | 44 | /* |
18 | * FUSE caches dentries and attributes with separate timeout. The | 45 | * FUSE caches dentries and attributes with separate timeout. The |
19 | * time in jiffies until the dentry/attributes are valid is stored in | 46 | * time in jiffies until the dentry/attributes are valid is stored in |
@@ -23,10 +50,13 @@ | |||
23 | /* | 50 | /* |
24 | * Calculate the time in jiffies until a dentry/attributes are valid | 51 | * Calculate the time in jiffies until a dentry/attributes are valid |
25 | */ | 52 | */ |
26 | static unsigned long time_to_jiffies(unsigned long sec, unsigned long nsec) | 53 | static u64 time_to_jiffies(unsigned long sec, unsigned long nsec) |
27 | { | 54 | { |
28 | struct timespec ts = {sec, nsec}; | 55 | if (sec || nsec) { |
29 | return jiffies + timespec_to_jiffies(&ts); | 56 | struct timespec ts = {sec, nsec}; |
57 | return get_jiffies_64() + timespec_to_jiffies(&ts); | ||
58 | } else | ||
59 | return 0; | ||
30 | } | 60 | } |
31 | 61 | ||
32 | /* | 62 | /* |
@@ -35,7 +65,8 @@ static unsigned long time_to_jiffies(unsigned long sec, unsigned long nsec) | |||
35 | */ | 65 | */ |
36 | static void fuse_change_timeout(struct dentry *entry, struct fuse_entry_out *o) | 66 | static void fuse_change_timeout(struct dentry *entry, struct fuse_entry_out *o) |
37 | { | 67 | { |
38 | entry->d_time = time_to_jiffies(o->entry_valid, o->entry_valid_nsec); | 68 | fuse_dentry_settime(entry, |
69 | time_to_jiffies(o->entry_valid, o->entry_valid_nsec)); | ||
39 | if (entry->d_inode) | 70 | if (entry->d_inode) |
40 | get_fuse_inode(entry->d_inode)->i_time = | 71 | get_fuse_inode(entry->d_inode)->i_time = |
41 | time_to_jiffies(o->attr_valid, o->attr_valid_nsec); | 72 | time_to_jiffies(o->attr_valid, o->attr_valid_nsec); |
@@ -47,7 +78,7 @@ static void fuse_change_timeout(struct dentry *entry, struct fuse_entry_out *o) | |||
47 | */ | 78 | */ |
48 | void fuse_invalidate_attr(struct inode *inode) | 79 | void fuse_invalidate_attr(struct inode *inode) |
49 | { | 80 | { |
50 | get_fuse_inode(inode)->i_time = jiffies - 1; | 81 | get_fuse_inode(inode)->i_time = 0; |
51 | } | 82 | } |
52 | 83 | ||
53 | /* | 84 | /* |
@@ -60,7 +91,7 @@ void fuse_invalidate_attr(struct inode *inode) | |||
60 | */ | 91 | */ |
61 | static void fuse_invalidate_entry_cache(struct dentry *entry) | 92 | static void fuse_invalidate_entry_cache(struct dentry *entry) |
62 | { | 93 | { |
63 | entry->d_time = jiffies - 1; | 94 | fuse_dentry_settime(entry, 0); |
64 | } | 95 | } |
65 | 96 | ||
66 | /* | 97 | /* |
@@ -102,7 +133,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) | |||
102 | 133 | ||
103 | if (inode && is_bad_inode(inode)) | 134 | if (inode && is_bad_inode(inode)) |
104 | return 0; | 135 | return 0; |
105 | else if (time_after(jiffies, entry->d_time)) { | 136 | else if (fuse_dentry_time(entry) < get_jiffies_64()) { |
106 | int err; | 137 | int err; |
107 | struct fuse_entry_out outarg; | 138 | struct fuse_entry_out outarg; |
108 | struct fuse_conn *fc; | 139 | struct fuse_conn *fc; |
@@ -666,7 +697,7 @@ static int fuse_revalidate(struct dentry *entry) | |||
666 | if (!fuse_allow_task(fc, current)) | 697 | if (!fuse_allow_task(fc, current)) |
667 | return -EACCES; | 698 | return -EACCES; |
668 | if (get_node_id(inode) != FUSE_ROOT_ID && | 699 | if (get_node_id(inode) != FUSE_ROOT_ID && |
669 | time_before_eq(jiffies, fi->i_time)) | 700 | fi->i_time >= get_jiffies_64()) |
670 | return 0; | 701 | return 0; |
671 | 702 | ||
672 | return fuse_do_getattr(inode); | 703 | return fuse_do_getattr(inode); |
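
Note: the fuse/dir.c changes switch dentry and attribute timeouts from 32-bit jiffies to u64, using 0 (instead of "jiffies - 1") to mean "already invalid". On 32-bit architectures d_time is an unsigned long and can only hold the low half, so the high half is parked in d_fsdata. A self-contained illustration of that round trip -- plain userspace C with a stand-in struct, mirroring fuse_dentry_settime()/fuse_dentry_time() above:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the 32-bit dentry fields used by the patch. */
struct fake_dentry {
	uint32_t d_time;	/* unsigned long on a 32-bit arch */
	void *d_fsdata;
};

static void settime(struct fake_dentry *d, uint64_t t)
{
	d->d_time = (uint32_t)t;			/* low 32 bits */
	d->d_fsdata = (void *)(uintptr_t)(t >> 32);	/* high 32 bits */
}

static uint64_t gettime(const struct fake_dentry *d)
{
	return (uint64_t)d->d_time +
	       ((uint64_t)(uintptr_t)d->d_fsdata << 32);
}

int main(void)
{
	struct fake_dentry d;
	uint64_t t = 0x100000010ULL;	/* > 32 bits, like get_jiffies_64() */

	settime(&d, t);
	assert(gettime(&d) == t);
	printf("round-tripped %#llx\n", (unsigned long long)gettime(&d));
	return 0;
}

time_to_jiffies() likewise returns 0 when the server asks for no caching at all, so a zero timeout uniformly means "revalidate now".
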
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 63614ed16336..5c4fcd1dbf59 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -395,14 +395,16 @@ static int fuse_readpages(struct file *file, struct address_space *mapping, | |||
395 | struct fuse_readpages_data data; | 395 | struct fuse_readpages_data data; |
396 | int err; | 396 | int err; |
397 | 397 | ||
398 | err = -EIO; | ||
398 | if (is_bad_inode(inode)) | 399 | if (is_bad_inode(inode)) |
399 | return -EIO; | 400 | goto clean_pages_up; |
400 | 401 | ||
401 | data.file = file; | 402 | data.file = file; |
402 | data.inode = inode; | 403 | data.inode = inode; |
403 | data.req = fuse_get_req(fc); | 404 | data.req = fuse_get_req(fc); |
405 | err = PTR_ERR(data.req); | ||
404 | if (IS_ERR(data.req)) | 406 | if (IS_ERR(data.req)) |
405 | return PTR_ERR(data.req); | 407 | goto clean_pages_up; |
406 | 408 | ||
407 | err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); | 409 | err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); |
408 | if (!err) { | 410 | if (!err) { |
@@ -412,6 +414,10 @@ static int fuse_readpages(struct file *file, struct address_space *mapping, | |||
412 | fuse_put_request(fc, data.req); | 414 | fuse_put_request(fc, data.req); |
413 | } | 415 | } |
414 | return err; | 416 | return err; |
417 | |||
418 | clean_pages_up: | ||
419 | put_pages_list(pages); | ||
420 | return err; | ||
415 | } | 421 | } |
416 | 422 | ||
417 | static size_t fuse_send_write(struct fuse_req *req, struct file *file, | 423 | static size_t fuse_send_write(struct fuse_req *req, struct file *file, |
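
Note: the fuse_readpages() error handling above fixes a leak: ->readpages() is handed a list of not-yet-added pages, and bailing out early without put_pages_list() would simply lose them. It also uses the common idiom of loading PTR_ERR() into err before the IS_ERR() test so both failure cases can share one exit label. In outline (names taken from the hunk):

	data.req = fuse_get_req(fc);
	err = PTR_ERR(data.req);	/* only meaningful if IS_ERR() below */
	if (IS_ERR(data.req))
		goto clean_pages_up;
	...
clean_pages_up:
	put_pages_list(pages);		/* drop the pages we never submitted */
	return err;
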
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 0dbf96621841..69c7750d55b8 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h | |||
@@ -59,7 +59,7 @@ struct fuse_inode { | |||
59 | struct fuse_req *forget_req; | 59 | struct fuse_req *forget_req; |
60 | 60 | ||
61 | /** Time in jiffies until the file attributes are valid */ | 61 | /** Time in jiffies until the file attributes are valid */ |
62 | unsigned long i_time; | 62 | u64 i_time; |
63 | }; | 63 | }; |
64 | 64 | ||
65 | /** FUSE specific file data */ | 65 | /** FUSE specific file data */ |
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index dcaaabd3b9c4..7d25092262ae 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -51,7 +51,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb) | |||
51 | return NULL; | 51 | return NULL; |
52 | 52 | ||
53 | fi = get_fuse_inode(inode); | 53 | fi = get_fuse_inode(inode); |
54 | fi->i_time = jiffies - 1; | 54 | fi->i_time = 0; |
55 | fi->nodeid = 0; | 55 | fi->nodeid = 0; |
56 | fi->nlookup = 0; | 56 | fi->nlookup = 0; |
57 | fi->forget_req = fuse_request_alloc(); | 57 | fi->forget_req = fuse_request_alloc(); |
diff --git a/fs/inotify_user.c b/fs/inotify_user.c index f2386442adee..017cb0f134d6 100644 --- a/fs/inotify_user.c +++ b/fs/inotify_user.c | |||
@@ -187,7 +187,7 @@ static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie, | |||
187 | { | 187 | { |
188 | struct inotify_kernel_event *kevent; | 188 | struct inotify_kernel_event *kevent; |
189 | 189 | ||
190 | kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL); | 190 | kevent = kmem_cache_alloc(event_cachep, GFP_NOFS); |
191 | if (unlikely(!kevent)) | 191 | if (unlikely(!kevent)) |
192 | return NULL; | 192 | return NULL; |
193 | 193 | ||
diff --git a/fs/ioprio.c b/fs/ioprio.c index 93aa5715f224..78b1deae3fa2 100644 --- a/fs/ioprio.c +++ b/fs/ioprio.c | |||
@@ -44,6 +44,9 @@ static int set_task_ioprio(struct task_struct *task, int ioprio) | |||
44 | task->ioprio = ioprio; | 44 | task->ioprio = ioprio; |
45 | 45 | ||
46 | ioc = task->io_context; | 46 | ioc = task->io_context; |
47 | /* see wmb() in current_io_context() */ | ||
48 | smp_read_barrier_depends(); | ||
49 | |||
47 | if (ioc && ioc->set_ioprio) | 50 | if (ioc && ioc->set_ioprio) |
48 | ioc->set_ioprio(ioc, ioprio); | 51 | ioc->set_ioprio(ioc, ioprio); |
49 | 52 | ||
@@ -111,9 +114,9 @@ asmlinkage long sys_ioprio_set(int which, int who, int ioprio) | |||
111 | continue; | 114 | continue; |
112 | ret = set_task_ioprio(p, ioprio); | 115 | ret = set_task_ioprio(p, ioprio); |
113 | if (ret) | 116 | if (ret) |
114 | break; | 117 | goto free_uid; |
115 | } while_each_thread(g, p); | 118 | } while_each_thread(g, p); |
116 | 119 | free_uid: | |
117 | if (who) | 120 | if (who) |
118 | free_uid(user); | 121 | free_uid(user); |
119 | break; | 122 | break; |
@@ -137,6 +140,29 @@ out: | |||
137 | return ret; | 140 | return ret; |
138 | } | 141 | } |
139 | 142 | ||
143 | int ioprio_best(unsigned short aprio, unsigned short bprio) | ||
144 | { | ||
145 | unsigned short aclass = IOPRIO_PRIO_CLASS(aprio); | ||
146 | unsigned short bclass = IOPRIO_PRIO_CLASS(bprio); | ||
147 | |||
148 | if (!ioprio_valid(aprio)) | ||
149 | return bprio; | ||
150 | if (!ioprio_valid(bprio)) | ||
151 | return aprio; | ||
152 | |||
153 | if (aclass == IOPRIO_CLASS_NONE) | ||
154 | aclass = IOPRIO_CLASS_BE; | ||
155 | if (bclass == IOPRIO_CLASS_NONE) | ||
156 | bclass = IOPRIO_CLASS_BE; | ||
157 | |||
158 | if (aclass == bclass) | ||
159 | return min(aprio, bprio); | ||
160 | if (aclass > bclass) | ||
161 | return bprio; | ||
162 | else | ||
163 | return aprio; | ||
164 | } | ||
165 | |||
140 | asmlinkage long sys_ioprio_get(int which, int who) | 166 | asmlinkage long sys_ioprio_get(int which, int who) |
141 | { | 167 | { |
142 | struct task_struct *g, *p; | 168 | struct task_struct *g, *p; |
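
Note: ioprio_best() added above merges two I/O priorities by picking the stronger one: an invalid value defers to the other, IOPRIO_CLASS_NONE is treated as best-effort, equal classes compare on the per-class level, and otherwise the numerically lower class (RT < BE < IDLE) wins. The small standalone program below mirrors the function so the merge can be exercised in userspace; the IOPRIO_* encoding is copied from include/linux/ioprio.h as I understand it (class in the bits above IOPRIO_CLASS_SHIFT), so treat the exact shift as an assumption.

#include <stdio.h>

/* Mirrors the encoding in include/linux/ioprio.h of this era:
 * priority class in the top bits, per-class data in the low bits. */
#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_VALUE(cls, data)	(((cls) << IOPRIO_CLASS_SHIFT) | (data))
#define IOPRIO_PRIO_CLASS(mask)		((mask) >> IOPRIO_CLASS_SHIFT)

enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };

#define ioprio_valid(mask)	(IOPRIO_PRIO_CLASS(mask) != IOPRIO_CLASS_NONE)

/* Same logic as the ioprio_best() added above. */
static unsigned short ioprio_best(unsigned short aprio, unsigned short bprio)
{
	unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
	unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);

	if (!ioprio_valid(aprio))
		return bprio;
	if (!ioprio_valid(bprio))
		return aprio;

	if (aclass == IOPRIO_CLASS_NONE)
		aclass = IOPRIO_CLASS_BE;
	if (bclass == IOPRIO_CLASS_NONE)
		bclass = IOPRIO_CLASS_BE;

	if (aclass == bclass)
		return aprio < bprio ? aprio : bprio;
	return aclass > bclass ? bprio : aprio;
}

int main(void)
{
	unsigned short rt4 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4);
	unsigned short be2 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 2);

	/* RT beats BE regardless of the per-class level. */
	printf("best = %#x (rt4 = %#x)\n", ioprio_best(rt4, be2), rt4);
	return 0;
}

Running it prints best = 0x2004, i.e. the realtime priority wins over best-effort regardless of the per-class level.
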
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c index 0971814c38b8..42da60784311 100644 --- a/fs/jbd/commit.c +++ b/fs/jbd/commit.c | |||
@@ -261,7 +261,7 @@ void journal_commit_transaction(journal_t *journal) | |||
261 | struct buffer_head *bh = jh2bh(jh); | 261 | struct buffer_head *bh = jh2bh(jh); |
262 | 262 | ||
263 | jbd_lock_bh_state(bh); | 263 | jbd_lock_bh_state(bh); |
264 | kfree(jh->b_committed_data); | 264 | jbd_slab_free(jh->b_committed_data, bh->b_size); |
265 | jh->b_committed_data = NULL; | 265 | jh->b_committed_data = NULL; |
266 | jbd_unlock_bh_state(bh); | 266 | jbd_unlock_bh_state(bh); |
267 | } | 267 | } |
@@ -745,14 +745,14 @@ restart_loop: | |||
745 | * Otherwise, we can just throw away the frozen data now. | 745 | * Otherwise, we can just throw away the frozen data now. |
746 | */ | 746 | */ |
747 | if (jh->b_committed_data) { | 747 | if (jh->b_committed_data) { |
748 | kfree(jh->b_committed_data); | 748 | jbd_slab_free(jh->b_committed_data, bh->b_size); |
749 | jh->b_committed_data = NULL; | 749 | jh->b_committed_data = NULL; |
750 | if (jh->b_frozen_data) { | 750 | if (jh->b_frozen_data) { |
751 | jh->b_committed_data = jh->b_frozen_data; | 751 | jh->b_committed_data = jh->b_frozen_data; |
752 | jh->b_frozen_data = NULL; | 752 | jh->b_frozen_data = NULL; |
753 | } | 753 | } |
754 | } else if (jh->b_frozen_data) { | 754 | } else if (jh->b_frozen_data) { |
755 | kfree(jh->b_frozen_data); | 755 | jbd_slab_free(jh->b_frozen_data, bh->b_size); |
756 | jh->b_frozen_data = NULL; | 756 | jh->b_frozen_data = NULL; |
757 | } | 757 | } |
758 | 758 | ||
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index 8c9b28dff119..f66724ce443a 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c | |||
@@ -84,6 +84,7 @@ EXPORT_SYMBOL(journal_force_commit); | |||
84 | 84 | ||
85 | static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *); | 85 | static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *); |
86 | static void __journal_abort_soft (journal_t *journal, int errno); | 86 | static void __journal_abort_soft (journal_t *journal, int errno); |
87 | static int journal_create_jbd_slab(size_t slab_size); | ||
87 | 88 | ||
88 | /* | 89 | /* |
89 | * Helper function used to manage commit timeouts | 90 | * Helper function used to manage commit timeouts |
@@ -328,10 +329,10 @@ repeat: | |||
328 | char *tmp; | 329 | char *tmp; |
329 | 330 | ||
330 | jbd_unlock_bh_state(bh_in); | 331 | jbd_unlock_bh_state(bh_in); |
331 | tmp = jbd_rep_kmalloc(bh_in->b_size, GFP_NOFS); | 332 | tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS); |
332 | jbd_lock_bh_state(bh_in); | 333 | jbd_lock_bh_state(bh_in); |
333 | if (jh_in->b_frozen_data) { | 334 | if (jh_in->b_frozen_data) { |
334 | kfree(tmp); | 335 | jbd_slab_free(tmp, bh_in->b_size); |
335 | goto repeat; | 336 | goto repeat; |
336 | } | 337 | } |
337 | 338 | ||
@@ -1069,17 +1070,17 @@ static int load_superblock(journal_t *journal) | |||
1069 | int journal_load(journal_t *journal) | 1070 | int journal_load(journal_t *journal) |
1070 | { | 1071 | { |
1071 | int err; | 1072 | int err; |
1073 | journal_superblock_t *sb; | ||
1072 | 1074 | ||
1073 | err = load_superblock(journal); | 1075 | err = load_superblock(journal); |
1074 | if (err) | 1076 | if (err) |
1075 | return err; | 1077 | return err; |
1076 | 1078 | ||
1079 | sb = journal->j_superblock; | ||
1077 | /* If this is a V2 superblock, then we have to check the | 1080 | /* If this is a V2 superblock, then we have to check the |
1078 | * features flags on it. */ | 1081 | * features flags on it. */ |
1079 | 1082 | ||
1080 | if (journal->j_format_version >= 2) { | 1083 | if (journal->j_format_version >= 2) { |
1081 | journal_superblock_t *sb = journal->j_superblock; | ||
1082 | |||
1083 | if ((sb->s_feature_ro_compat & | 1084 | if ((sb->s_feature_ro_compat & |
1084 | ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) || | 1085 | ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) || |
1085 | (sb->s_feature_incompat & | 1086 | (sb->s_feature_incompat & |
@@ -1090,6 +1091,13 @@ int journal_load(journal_t *journal) | |||
1090 | } | 1091 | } |
1091 | } | 1092 | } |
1092 | 1093 | ||
1094 | /* | ||
1095 | * Create a slab for this blocksize | ||
1096 | */ | ||
1097 | err = journal_create_jbd_slab(cpu_to_be32(sb->s_blocksize)); | ||
1098 | if (err) | ||
1099 | return err; | ||
1100 | |||
1093 | /* Let the recovery code check whether it needs to recover any | 1101 | /* Let the recovery code check whether it needs to recover any |
1094 | * data from the journal. */ | 1102 | * data from the journal. */ |
1095 | if (journal_recover(journal)) | 1103 | if (journal_recover(journal)) |
@@ -1612,6 +1620,77 @@ void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry) | |||
1612 | } | 1620 | } |
1613 | 1621 | ||
1614 | /* | 1622 | /* |
1623 | * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed | ||
1624 | * and allocate frozen and commit buffers from these slabs. | ||
1625 | * | ||
1626 | * Reason for doing this is to avoid, SLAB_DEBUG - since it could | ||
1627 | * cause bh to cross page boundary. | ||
1628 | */ | ||
1629 | |||
1630 | #define JBD_MAX_SLABS 5 | ||
1631 | #define JBD_SLAB_INDEX(size) (size >> 11) | ||
1632 | |||
1633 | static kmem_cache_t *jbd_slab[JBD_MAX_SLABS]; | ||
1634 | static const char *jbd_slab_names[JBD_MAX_SLABS] = { | ||
1635 | "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k" | ||
1636 | }; | ||
1637 | |||
1638 | static void journal_destroy_jbd_slabs(void) | ||
1639 | { | ||
1640 | int i; | ||
1641 | |||
1642 | for (i = 0; i < JBD_MAX_SLABS; i++) { | ||
1643 | if (jbd_slab[i]) | ||
1644 | kmem_cache_destroy(jbd_slab[i]); | ||
1645 | jbd_slab[i] = NULL; | ||
1646 | } | ||
1647 | } | ||
1648 | |||
1649 | static int journal_create_jbd_slab(size_t slab_size) | ||
1650 | { | ||
1651 | int i = JBD_SLAB_INDEX(slab_size); | ||
1652 | |||
1653 | BUG_ON(i >= JBD_MAX_SLABS); | ||
1654 | |||
1655 | /* | ||
1656 | * Check if we already have a slab created for this size | ||
1657 | */ | ||
1658 | if (jbd_slab[i]) | ||
1659 | return 0; | ||
1660 | |||
1661 | /* | ||
1662 | * Create a slab and force alignment to be same as slabsize - | ||
1663 | * this will make sure that allocations won't cross the page | ||
1664 | * boundary. | ||
1665 | */ | ||
1666 | jbd_slab[i] = kmem_cache_create(jbd_slab_names[i], | ||
1667 | slab_size, slab_size, 0, NULL, NULL); | ||
1668 | if (!jbd_slab[i]) { | ||
1669 | printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n"); | ||
1670 | return -ENOMEM; | ||
1671 | } | ||
1672 | return 0; | ||
1673 | } | ||
1674 | |||
1675 | void * jbd_slab_alloc(size_t size, gfp_t flags) | ||
1676 | { | ||
1677 | int idx; | ||
1678 | |||
1679 | idx = JBD_SLAB_INDEX(size); | ||
1680 | BUG_ON(jbd_slab[idx] == NULL); | ||
1681 | return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL); | ||
1682 | } | ||
1683 | |||
1684 | void jbd_slab_free(void *ptr, size_t size) | ||
1685 | { | ||
1686 | int idx; | ||
1687 | |||
1688 | idx = JBD_SLAB_INDEX(size); | ||
1689 | BUG_ON(jbd_slab[idx] == NULL); | ||
1690 | kmem_cache_free(jbd_slab[idx], ptr); | ||
1691 | } | ||
1692 | |||
1693 | /* | ||
1615 | * Journal_head storage management | 1694 | * Journal_head storage management |
1616 | */ | 1695 | */ |
1617 | static kmem_cache_t *journal_head_cache; | 1696 | static kmem_cache_t *journal_head_cache; |
@@ -1799,13 +1878,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh) | |||
1799 | printk(KERN_WARNING "%s: freeing " | 1878 | printk(KERN_WARNING "%s: freeing " |
1800 | "b_frozen_data\n", | 1879 | "b_frozen_data\n", |
1801 | __FUNCTION__); | 1880 | __FUNCTION__); |
1802 | kfree(jh->b_frozen_data); | 1881 | jbd_slab_free(jh->b_frozen_data, bh->b_size); |
1803 | } | 1882 | } |
1804 | if (jh->b_committed_data) { | 1883 | if (jh->b_committed_data) { |
1805 | printk(KERN_WARNING "%s: freeing " | 1884 | printk(KERN_WARNING "%s: freeing " |
1806 | "b_committed_data\n", | 1885 | "b_committed_data\n", |
1807 | __FUNCTION__); | 1886 | __FUNCTION__); |
1808 | kfree(jh->b_committed_data); | 1887 | jbd_slab_free(jh->b_committed_data, bh->b_size); |
1809 | } | 1888 | } |
1810 | bh->b_private = NULL; | 1889 | bh->b_private = NULL; |
1811 | jh->b_bh = NULL; /* debug, really */ | 1890 | jh->b_bh = NULL; /* debug, really */ |
@@ -1961,6 +2040,7 @@ static void journal_destroy_caches(void) | |||
1961 | journal_destroy_revoke_caches(); | 2040 | journal_destroy_revoke_caches(); |
1962 | journal_destroy_journal_head_cache(); | 2041 | journal_destroy_journal_head_cache(); |
1963 | journal_destroy_handle_cache(); | 2042 | journal_destroy_handle_cache(); |
2043 | journal_destroy_jbd_slabs(); | ||
1964 | } | 2044 | } |
1965 | 2045 | ||
1966 | static int __init journal_init(void) | 2046 | static int __init journal_init(void) |
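
Note: the new jbd slab machinery above indexes its cache array with JBD_SLAB_INDEX(size) = size >> 11, which is why jbd_slab_names[] has five slots with a NULL placeholder at index 3: 1k, 2k and 4k blocks map to indices 0-2, nothing maps to 3, and 8k maps to 4. A short check of that arithmetic (plain userspace C, illustrative only):

#include <stdio.h>

#define JBD_SLAB_INDEX(size)	((size) >> 11)

int main(void)
{
	/* Matches jbd_slab_names[]: "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k" */
	unsigned int sizes[] = { 1024, 2048, 4096, 8192 };
	for (unsigned int i = 0; i < 4; i++)
		printf("%u -> index %u\n", sizes[i], JBD_SLAB_INDEX(sizes[i]));
	return 0;
}

journal_load() creates the slab for the journal's blocksize up front, which is why jbd_slab_alloc()/jbd_slab_free() can BUG_ON() a missing slab instead of allocating lazily.
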
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index 508b2ea91f43..f5169a96260e 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c | |||
@@ -666,8 +666,9 @@ repeat: | |||
666 | if (!frozen_buffer) { | 666 | if (!frozen_buffer) { |
667 | JBUFFER_TRACE(jh, "allocate memory for buffer"); | 667 | JBUFFER_TRACE(jh, "allocate memory for buffer"); |
668 | jbd_unlock_bh_state(bh); | 668 | jbd_unlock_bh_state(bh); |
669 | frozen_buffer = jbd_kmalloc(jh2bh(jh)->b_size, | 669 | frozen_buffer = |
670 | GFP_NOFS); | 670 | jbd_slab_alloc(jh2bh(jh)->b_size, |
671 | GFP_NOFS); | ||
671 | if (!frozen_buffer) { | 672 | if (!frozen_buffer) { |
672 | printk(KERN_EMERG | 673 | printk(KERN_EMERG |
673 | "%s: OOM for frozen_buffer\n", | 674 | "%s: OOM for frozen_buffer\n", |
@@ -726,7 +727,7 @@ done: | |||
726 | 727 | ||
727 | out: | 728 | out: |
728 | if (unlikely(frozen_buffer)) /* It's usually NULL */ | 729 | if (unlikely(frozen_buffer)) /* It's usually NULL */ |
729 | kfree(frozen_buffer); | 730 | jbd_slab_free(frozen_buffer, bh->b_size); |
730 | 731 | ||
731 | JBUFFER_TRACE(jh, "exit"); | 732 | JBUFFER_TRACE(jh, "exit"); |
732 | return error; | 733 | return error; |
@@ -879,7 +880,7 @@ int journal_get_undo_access(handle_t *handle, struct buffer_head *bh) | |||
879 | 880 | ||
880 | repeat: | 881 | repeat: |
881 | if (!jh->b_committed_data) { | 882 | if (!jh->b_committed_data) { |
882 | committed_data = jbd_kmalloc(jh2bh(jh)->b_size, GFP_NOFS); | 883 | committed_data = jbd_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS); |
883 | if (!committed_data) { | 884 | if (!committed_data) { |
884 | printk(KERN_EMERG "%s: No memory for committed data\n", | 885 | printk(KERN_EMERG "%s: No memory for committed data\n", |
885 | __FUNCTION__); | 886 | __FUNCTION__); |
@@ -906,7 +907,7 @@ repeat: | |||
906 | out: | 907 | out: |
907 | journal_put_journal_head(jh); | 908 | journal_put_journal_head(jh); |
908 | if (unlikely(committed_data)) | 909 | if (unlikely(committed_data)) |
909 | kfree(committed_data); | 910 | jbd_slab_free(committed_data, bh->b_size); |
910 | return err; | 911 | return err; |
911 | } | 912 | } |
912 | 913 | ||
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 43e3f566aad6..a223cf4faa9b 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c | |||
@@ -168,16 +168,15 @@ void jfs_dirty_inode(struct inode *inode) | |||
168 | set_cflag(COMMIT_Dirty, inode); | 168 | set_cflag(COMMIT_Dirty, inode); |
169 | } | 169 | } |
170 | 170 | ||
171 | static int | 171 | int jfs_get_block(struct inode *ip, sector_t lblock, |
172 | jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks, | 172 | struct buffer_head *bh_result, int create) |
173 | struct buffer_head *bh_result, int create) | ||
174 | { | 173 | { |
175 | s64 lblock64 = lblock; | 174 | s64 lblock64 = lblock; |
176 | int rc = 0; | 175 | int rc = 0; |
177 | xad_t xad; | 176 | xad_t xad; |
178 | s64 xaddr; | 177 | s64 xaddr; |
179 | int xflag; | 178 | int xflag; |
180 | s32 xlen = max_blocks; | 179 | s32 xlen = bh_result->b_size >> ip->i_blkbits; |
181 | 180 | ||
182 | /* | 181 | /* |
183 | * Take appropriate lock on inode | 182 | * Take appropriate lock on inode |
@@ -188,7 +187,7 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks, | |||
188 | IREAD_LOCK(ip); | 187 | IREAD_LOCK(ip); |
189 | 188 | ||
190 | if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) && | 189 | if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) && |
191 | (!xtLookup(ip, lblock64, max_blocks, &xflag, &xaddr, &xlen, 0)) && | 190 | (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) && |
192 | xaddr) { | 191 | xaddr) { |
193 | if (xflag & XAD_NOTRECORDED) { | 192 | if (xflag & XAD_NOTRECORDED) { |
194 | if (!create) | 193 | if (!create) |
@@ -255,13 +254,6 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks, | |||
255 | return rc; | 254 | return rc; |
256 | } | 255 | } |
257 | 256 | ||
258 | static int jfs_get_block(struct inode *ip, sector_t lblock, | ||
259 | struct buffer_head *bh_result, int create) | ||
260 | { | ||
261 | return jfs_get_blocks(ip, lblock, bh_result->b_size >> ip->i_blkbits, | ||
262 | bh_result, create); | ||
263 | } | ||
264 | |||
265 | static int jfs_writepage(struct page *page, struct writeback_control *wbc) | 257 | static int jfs_writepage(struct page *page, struct writeback_control *wbc) |
266 | { | 258 | { |
267 | return nobh_writepage(page, jfs_get_block, wbc); | 259 | return nobh_writepage(page, jfs_get_block, wbc); |
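
Note: with jfs_get_blocks() folded into jfs_get_block() above, the number of blocks to map now comes from the buffer head itself: callers that want multi-block mappings (direct I/O, mpage read-ahead) communicate the request size through bh_result->b_size. Illustrative numbers only:

	/* 4 KiB filesystem blocks (i_blkbits == 12), caller asking to map
	   up to 64 KiB in one go: */
	bh_result->b_size = 65536;			/* set by the caller */
	xlen = bh_result->b_size >> ip->i_blkbits;	/* 65536 >> 12 = 16 */
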
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h index b5c7da6190dc..1fc48df670c8 100644 --- a/fs/jfs/jfs_inode.h +++ b/fs/jfs/jfs_inode.h | |||
@@ -32,6 +32,7 @@ extern void jfs_truncate_nolock(struct inode *, loff_t); | |||
32 | extern void jfs_free_zero_link(struct inode *); | 32 | extern void jfs_free_zero_link(struct inode *); |
33 | extern struct dentry *jfs_get_parent(struct dentry *dentry); | 33 | extern struct dentry *jfs_get_parent(struct dentry *dentry); |
34 | extern void jfs_set_inode_flags(struct inode *); | 34 | extern void jfs_set_inode_flags(struct inode *); |
35 | extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int); | ||
35 | 36 | ||
36 | extern const struct address_space_operations jfs_aops; | 37 | extern const struct address_space_operations jfs_aops; |
37 | extern struct inode_operations jfs_dir_inode_operations; | 38 | extern struct inode_operations jfs_dir_inode_operations; |
diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 4f6cfebc82db..143bcd1d5eaa 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/moduleparam.h> | 26 | #include <linux/moduleparam.h> |
27 | #include <linux/kthread.h> | 27 | #include <linux/kthread.h> |
28 | #include <linux/posix_acl.h> | 28 | #include <linux/posix_acl.h> |
29 | #include <linux/buffer_head.h> | ||
29 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
30 | #include <linux/seq_file.h> | 31 | #include <linux/seq_file.h> |
31 | 32 | ||
@@ -298,7 +299,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize, | |||
298 | break; | 299 | break; |
299 | } | 300 | } |
300 | 301 | ||
301 | #if defined(CONFIG_QUOTA) | 302 | #ifdef CONFIG_QUOTA |
302 | case Opt_quota: | 303 | case Opt_quota: |
303 | case Opt_usrquota: | 304 | case Opt_usrquota: |
304 | *flag |= JFS_USRQUOTA; | 305 | *flag |= JFS_USRQUOTA; |
@@ -597,7 +598,7 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs) | |||
597 | if (sbi->flag & JFS_NOINTEGRITY) | 598 | if (sbi->flag & JFS_NOINTEGRITY) |
598 | seq_puts(seq, ",nointegrity"); | 599 | seq_puts(seq, ",nointegrity"); |
599 | 600 | ||
600 | #if defined(CONFIG_QUOTA) | 601 | #ifdef CONFIG_QUOTA |
601 | if (sbi->flag & JFS_USRQUOTA) | 602 | if (sbi->flag & JFS_USRQUOTA) |
602 | seq_puts(seq, ",usrquota"); | 603 | seq_puts(seq, ",usrquota"); |
603 | 604 | ||
@@ -608,6 +609,113 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs) | |||
608 | return 0; | 609 | return 0; |
609 | } | 610 | } |
610 | 611 | ||
612 | #ifdef CONFIG_QUOTA | ||
613 | |||
614 | /* Read data from quotafile - avoid pagecache and such because we cannot afford | ||
615 | * acquiring the locks... As quota files are never truncated and quota code | ||
616 | * itself serializes the operations (and no one else should touch the files) | ||
617 | * we don't have to be afraid of races */ | ||
618 | static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data, | ||
619 | size_t len, loff_t off) | ||
620 | { | ||
621 | struct inode *inode = sb_dqopt(sb)->files[type]; | ||
622 | sector_t blk = off >> sb->s_blocksize_bits; | ||
623 | int err = 0; | ||
624 | int offset = off & (sb->s_blocksize - 1); | ||
625 | int tocopy; | ||
626 | size_t toread; | ||
627 | struct buffer_head tmp_bh; | ||
628 | struct buffer_head *bh; | ||
629 | loff_t i_size = i_size_read(inode); | ||
630 | |||
631 | if (off > i_size) | ||
632 | return 0; | ||
633 | if (off+len > i_size) | ||
634 | len = i_size-off; | ||
635 | toread = len; | ||
636 | while (toread > 0) { | ||
637 | tocopy = sb->s_blocksize - offset < toread ? | ||
638 | sb->s_blocksize - offset : toread; | ||
639 | |||
640 | tmp_bh.b_state = 0; | ||
641 | tmp_bh.b_size = 1 << inode->i_blkbits; | ||
642 | err = jfs_get_block(inode, blk, &tmp_bh, 0); | ||
643 | if (err) | ||
644 | return err; | ||
645 | if (!buffer_mapped(&tmp_bh)) /* A hole? */ | ||
646 | memset(data, 0, tocopy); | ||
647 | else { | ||
648 | bh = sb_bread(sb, tmp_bh.b_blocknr); | ||
649 | if (!bh) | ||
650 | return -EIO; | ||
651 | memcpy(data, bh->b_data+offset, tocopy); | ||
652 | brelse(bh); | ||
653 | } | ||
654 | offset = 0; | ||
655 | toread -= tocopy; | ||
656 | data += tocopy; | ||
657 | blk++; | ||
658 | } | ||
659 | return len; | ||
660 | } | ||
661 | |||
662 | /* Write to quotafile */ | ||
663 | static ssize_t jfs_quota_write(struct super_block *sb, int type, | ||
664 | const char *data, size_t len, loff_t off) | ||
665 | { | ||
666 | struct inode *inode = sb_dqopt(sb)->files[type]; | ||
667 | sector_t blk = off >> sb->s_blocksize_bits; | ||
668 | int err = 0; | ||
669 | int offset = off & (sb->s_blocksize - 1); | ||
670 | int tocopy; | ||
671 | size_t towrite = len; | ||
672 | struct buffer_head tmp_bh; | ||
673 | struct buffer_head *bh; | ||
674 | |||
675 | mutex_lock(&inode->i_mutex); | ||
676 | while (towrite > 0) { | ||
677 | tocopy = sb->s_blocksize - offset < towrite ? | ||
678 | sb->s_blocksize - offset : towrite; | ||
679 | |||
680 | tmp_bh.b_state = 0; | ||
681 | tmp_bh.b_size = 1 << inode->i_blkbits; | ||
682 | err = jfs_get_block(inode, blk, &tmp_bh, 1); | ||
683 | if (err) | ||
684 | goto out; | ||
685 | if (offset || tocopy != sb->s_blocksize) | ||
686 | bh = sb_bread(sb, tmp_bh.b_blocknr); | ||
687 | else | ||
688 | bh = sb_getblk(sb, tmp_bh.b_blocknr); | ||
689 | if (!bh) { | ||
690 | err = -EIO; | ||
691 | goto out; | ||
692 | } | ||
693 | lock_buffer(bh); | ||
694 | memcpy(bh->b_data+offset, data, tocopy); | ||
695 | flush_dcache_page(bh->b_page); | ||
696 | set_buffer_uptodate(bh); | ||
697 | mark_buffer_dirty(bh); | ||
698 | unlock_buffer(bh); | ||
699 | brelse(bh); | ||
700 | offset = 0; | ||
701 | towrite -= tocopy; | ||
702 | data += tocopy; | ||
703 | blk++; | ||
704 | } | ||
705 | out: | ||
706 | if (len == towrite) | ||
707 | return err; | ||
708 | if (inode->i_size < off+len-towrite) | ||
709 | i_size_write(inode, off+len-towrite); | ||
710 | inode->i_version++; | ||
711 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; | ||
712 | mark_inode_dirty(inode); | ||
713 | mutex_unlock(&inode->i_mutex); | ||
714 | return len - towrite; | ||
715 | } | ||
716 | |||
717 | #endif | ||
718 | |||
611 | static struct super_operations jfs_super_operations = { | 719 | static struct super_operations jfs_super_operations = { |
612 | .alloc_inode = jfs_alloc_inode, | 720 | .alloc_inode = jfs_alloc_inode, |
613 | .destroy_inode = jfs_destroy_inode, | 721 | .destroy_inode = jfs_destroy_inode, |
@@ -621,7 +729,11 @@ static struct super_operations jfs_super_operations = { | |||
621 | .unlockfs = jfs_unlockfs, | 729 | .unlockfs = jfs_unlockfs, |
622 | .statfs = jfs_statfs, | 730 | .statfs = jfs_statfs, |
623 | .remount_fs = jfs_remount, | 731 | .remount_fs = jfs_remount, |
624 | .show_options = jfs_show_options | 732 | .show_options = jfs_show_options, |
733 | #ifdef CONFIG_QUOTA | ||
734 | .quota_read = jfs_quota_read, | ||
735 | .quota_write = jfs_quota_write, | ||
736 | #endif | ||
625 | }; | 737 | }; |
626 | 738 | ||
627 | static struct export_operations jfs_export_operations = { | 739 | static struct export_operations jfs_export_operations = { |
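
jfs_quota_read() and jfs_quota_write() above walk the request one filesystem block at a time: the starting block is off >> s_blocksize_bits, the offset within that block is off & (s_blocksize - 1), and each pass copies min(blocksize - offset, remaining). A standalone sketch of just that splitting loop, using a flat in-memory buffer in place of sb_bread():

    #include <stdio.h>
    #include <string.h>

    /* Sketch only: mirrors the blk/offset/tocopy arithmetic of the quota
     * helpers above, reading from a flat in-memory "device" instead of
     * buffer heads. Block size and data are illustrative. */
    #define BLKSIZE 1024

    static char device[8 * BLKSIZE];          /* pretend disk */

    static size_t quota_like_read(char *data, size_t len, long long off)
    {
        long long blk = off / BLKSIZE;        /* off >> s_blocksize_bits */
        size_t offset = off % BLKSIZE;        /* off & (s_blocksize - 1)  */
        size_t toread = len;

        while (toread > 0) {
            size_t tocopy = BLKSIZE - offset < toread ?
                            BLKSIZE - offset : toread;

            memcpy(data, device + blk * BLKSIZE + offset, tocopy);
            offset = 0;                       /* only the first block is partial */
            toread -= tocopy;
            data += tocopy;
            blk++;
        }
        return len;
    }

    int main(void)
    {
        char buf[3000];

        memset(device, 'q', sizeof(device));
        printf("read %zu bytes starting at byte 500\n",
               quota_like_read(buf, sizeof(buf), 500));
        return 0;
    }
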
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index baf5ae513481..c9d419703cf3 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c | |||
@@ -638,9 +638,6 @@ static void nlmsvc_grant_callback(struct rpc_task *task, void *data) | |||
638 | if (task->tk_status < 0) { | 638 | if (task->tk_status < 0) { |
639 | /* RPC error: Re-insert for retransmission */ | 639 | /* RPC error: Re-insert for retransmission */ |
640 | timeout = 10 * HZ; | 640 | timeout = 10 * HZ; |
641 | } else if (block->b_done) { | ||
642 | /* Block already removed, kill it for real */ | ||
643 | timeout = 0; | ||
644 | } else { | 641 | } else { |
645 | /* Call was successful, now wait for client callback */ | 642 | /* Call was successful, now wait for client callback */ |
646 | timeout = 60 * HZ; | 643 | timeout = 60 * HZ; |
@@ -709,13 +706,10 @@ nlmsvc_retry_blocked(void) | |||
709 | break; | 706 | break; |
710 | if (time_after(block->b_when,jiffies)) | 707 | if (time_after(block->b_when,jiffies)) |
711 | break; | 708 | break; |
712 | dprintk("nlmsvc_retry_blocked(%p, when=%ld, done=%d)\n", | 709 | dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n", |
713 | block, block->b_when, block->b_done); | 710 | block, block->b_when); |
714 | kref_get(&block->b_count); | 711 | kref_get(&block->b_count); |
715 | if (block->b_done) | 712 | nlmsvc_grant_blocked(block); |
716 | nlmsvc_unlink_block(block); | ||
717 | else | ||
718 | nlmsvc_grant_blocked(block); | ||
719 | nlmsvc_release_block(block); | 713 | nlmsvc_release_block(block); |
720 | } | 714 | } |
721 | 715 | ||
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index 2a4df9b3779a..01b4db9e5466 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c | |||
@@ -237,19 +237,22 @@ static int | |||
237 | nlm_traverse_files(struct nlm_host *host, int action) | 237 | nlm_traverse_files(struct nlm_host *host, int action) |
238 | { | 238 | { |
239 | struct nlm_file *file, **fp; | 239 | struct nlm_file *file, **fp; |
240 | int i; | 240 | int i, ret = 0; |
241 | 241 | ||
242 | mutex_lock(&nlm_file_mutex); | 242 | mutex_lock(&nlm_file_mutex); |
243 | for (i = 0; i < FILE_NRHASH; i++) { | 243 | for (i = 0; i < FILE_NRHASH; i++) { |
244 | fp = nlm_files + i; | 244 | fp = nlm_files + i; |
245 | while ((file = *fp) != NULL) { | 245 | while ((file = *fp) != NULL) { |
246 | file->f_count++; | ||
247 | mutex_unlock(&nlm_file_mutex); | ||
248 | |||
246 | /* Traverse locks, blocks and shares of this file | 249 | /* Traverse locks, blocks and shares of this file |
247 | * and update file->f_locks count */ | 250 | * and update file->f_locks count */ |
248 | if (nlm_inspect_file(host, file, action)) { | 251 | if (nlm_inspect_file(host, file, action)) |
249 | mutex_unlock(&nlm_file_mutex); | 252 | ret = 1; |
250 | return 1; | ||
251 | } | ||
252 | 253 | ||
254 | mutex_lock(&nlm_file_mutex); | ||
255 | file->f_count--; | ||
253 | /* No more references to this file. Let go of it. */ | 256 | /* No more references to this file. Let go of it. */ |
254 | if (!file->f_blocks && !file->f_locks | 257 | if (!file->f_blocks && !file->f_locks |
255 | && !file->f_shares && !file->f_count) { | 258 | && !file->f_shares && !file->f_count) { |
@@ -262,7 +265,7 @@ nlm_traverse_files(struct nlm_host *host, int action) | |||
262 | } | 265 | } |
263 | } | 266 | } |
264 | mutex_unlock(&nlm_file_mutex); | 267 | mutex_unlock(&nlm_file_mutex); |
265 | return 0; | 268 | return ret; |
266 | } | 269 | } |
267 | 270 | ||
268 | /* | 271 | /* |
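
The svcsubs.c change pins each file with file->f_count++ before dropping nlm_file_mutex, performs the possibly-blocking traversal unlocked, then re-takes the mutex and drops the reference before deciding whether the file may be freed. A minimal pthread sketch of that reference-across-a-lock-drop pattern (illustrative types, not lockd code):

    #include <pthread.h>
    #include <stdio.h>

    /* Sketch only: hold a refcount while the list lock is dropped so the
     * object cannot be freed under us, the way nlm_traverse_files() now
     * bumps file->f_count around the unlocked nlm_inspect_file() call. */
    struct obj {
        int refcount;
        int work_done;
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct obj the_obj = { .refcount = 1 };

    static void inspect(struct obj *o)
    {
        o->work_done = 1;                   /* stands in for blocking work */
    }

    int main(void)
    {
        struct obj *o = &the_obj;

        pthread_mutex_lock(&list_lock);
        o->refcount++;                      /* pin before unlocking */
        pthread_mutex_unlock(&list_lock);

        inspect(o);                         /* may sleep; lock not held */

        pthread_mutex_lock(&list_lock);
        o->refcount--;                      /* unpin with the lock held */
        if (o->refcount == 0)
            printf("object would be freed here\n");
        else
            printf("object still referenced (refcount=%d)\n", o->refcount);
        pthread_mutex_unlock(&list_lock);
        return 0;
    }
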
diff --git a/fs/locks.c b/fs/locks.c index b0b41a64e10b..d7c53392cac1 100644 --- a/fs/locks.c +++ b/fs/locks.c | |||
@@ -1421,8 +1421,9 @@ static int __setlease(struct file *filp, long arg, struct file_lock **flp) | |||
1421 | if (!leases_enable) | 1421 | if (!leases_enable) |
1422 | goto out; | 1422 | goto out; |
1423 | 1423 | ||
1424 | error = lease_alloc(filp, arg, &fl); | 1424 | error = -ENOMEM; |
1425 | if (error) | 1425 | fl = locks_alloc_lock(); |
1426 | if (fl == NULL) | ||
1426 | goto out; | 1427 | goto out; |
1427 | 1428 | ||
1428 | locks_copy_lock(fl, lease); | 1429 | locks_copy_lock(fl, lease); |
@@ -1430,6 +1431,7 @@ static int __setlease(struct file *filp, long arg, struct file_lock **flp) | |||
1430 | locks_insert_lock(before, fl); | 1431 | locks_insert_lock(before, fl); |
1431 | 1432 | ||
1432 | *flp = fl; | 1433 | *flp = fl; |
1434 | error = 0; | ||
1433 | out: | 1435 | out: |
1434 | return error; | 1436 | return error; |
1435 | } | 1437 | } |
diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 9ea91c5eeb7b..330ff9fc7cf0 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c | |||
@@ -204,6 +204,8 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) | |||
204 | /* | 204 | /* |
205 | * Allocate the buffer map to keep the superblock small. | 205 | * Allocate the buffer map to keep the superblock small. |
206 | */ | 206 | */ |
207 | if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) | ||
208 | goto out_illegal_sb; | ||
207 | i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh); | 209 | i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh); |
208 | map = kmalloc(i, GFP_KERNEL); | 210 | map = kmalloc(i, GFP_KERNEL); |
209 | if (!map) | 211 | if (!map) |
@@ -263,7 +265,7 @@ out_no_root: | |||
263 | 265 | ||
264 | out_no_bitmap: | 266 | out_no_bitmap: |
265 | printk("MINIX-fs: bad superblock or unable to read bitmaps\n"); | 267 | printk("MINIX-fs: bad superblock or unable to read bitmaps\n"); |
266 | out_freemap: | 268 | out_freemap: |
267 | for (i = 0; i < sbi->s_imap_blocks; i++) | 269 | for (i = 0; i < sbi->s_imap_blocks; i++) |
268 | brelse(sbi->s_imap[i]); | 270 | brelse(sbi->s_imap[i]); |
269 | for (i = 0; i < sbi->s_zmap_blocks; i++) | 271 | for (i = 0; i < sbi->s_zmap_blocks; i++) |
@@ -276,11 +278,16 @@ out_no_map: | |||
276 | printk("MINIX-fs: can't allocate map\n"); | 278 | printk("MINIX-fs: can't allocate map\n"); |
277 | goto out_release; | 279 | goto out_release; |
278 | 280 | ||
281 | out_illegal_sb: | ||
282 | if (!silent) | ||
283 | printk("MINIX-fs: bad superblock\n"); | ||
284 | goto out_release; | ||
285 | |||
279 | out_no_fs: | 286 | out_no_fs: |
280 | if (!silent) | 287 | if (!silent) |
281 | printk("VFS: Can't find a Minix or Minix V2 filesystem " | 288 | printk("VFS: Can't find a Minix or Minix V2 filesystem " |
282 | "on device %s\n", s->s_id); | 289 | "on device %s\n", s->s_id); |
283 | out_release: | 290 | out_release: |
284 | brelse(bh); | 291 | brelse(bh); |
285 | goto out; | 292 | goto out; |
286 | 293 | ||
@@ -290,7 +297,7 @@ out_bad_hblock: | |||
290 | 297 | ||
291 | out_bad_sb: | 298 | out_bad_sb: |
292 | printk("MINIX-fs: unable to read superblock\n"); | 299 | printk("MINIX-fs: unable to read superblock\n"); |
293 | out: | 300 | out: |
294 | s->s_fs_info = NULL; | 301 | s->s_fs_info = NULL; |
295 | kfree(sbi); | 302 | kfree(sbi); |
296 | return -EINVAL; | 303 | return -EINVAL; |
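
The minix hunk above rejects a superblock that claims zero inode-map or zone-map blocks before sizing the kmalloc() for the map array, rather than trusting the on-disk counts and failing later. A small sketch of validating untrusted counts before allocation (field names are invented):

    #include <stdio.h>
    #include <stdlib.h>

    /* Sketch only: sanity-check counts read from disk before using them to
     * size an allocation, as the new out_illegal_sb path does. */
    struct fake_sb {
        unsigned short imap_blocks;
        unsigned short zmap_blocks;
    };

    static void **alloc_map(const struct fake_sb *sb)
    {
        if (sb->imap_blocks == 0 || sb->zmap_blocks == 0)
            return NULL;                            /* "bad superblock" */
        return calloc(sb->imap_blocks + sb->zmap_blocks, sizeof(void *));
    }

    int main(void)
    {
        struct fake_sb good = { .imap_blocks = 2, .zmap_blocks = 3 };
        struct fake_sb bad  = { .imap_blocks = 0, .zmap_blocks = 3 };
        void **map;

        map = alloc_map(&good);
        printf("good sb: map %sallocated\n", map ? "" : "not ");
        free(map);

        map = alloc_map(&bad);
        printf("bad sb:  map %sallocated\n", map ? "" : "not ");
        free(map);
        return 0;
    }
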
diff --git a/fs/namei.c b/fs/namei.c index e01070d7bf58..432d6bc6fab0 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -159,7 +159,7 @@ char * getname(const char __user * filename) | |||
159 | #ifdef CONFIG_AUDITSYSCALL | 159 | #ifdef CONFIG_AUDITSYSCALL |
160 | void putname(const char *name) | 160 | void putname(const char *name) |
161 | { | 161 | { |
162 | if (unlikely(current->audit_context)) | 162 | if (unlikely(!audit_dummy_context())) |
163 | audit_putname(name); | 163 | audit_putname(name); |
164 | else | 164 | else |
165 | __putname(name); | 165 | __putname(name); |
@@ -227,10 +227,10 @@ int generic_permission(struct inode *inode, int mask, | |||
227 | 227 | ||
228 | int permission(struct inode *inode, int mask, struct nameidata *nd) | 228 | int permission(struct inode *inode, int mask, struct nameidata *nd) |
229 | { | 229 | { |
230 | umode_t mode = inode->i_mode; | ||
230 | int retval, submask; | 231 | int retval, submask; |
231 | 232 | ||
232 | if (mask & MAY_WRITE) { | 233 | if (mask & MAY_WRITE) { |
233 | umode_t mode = inode->i_mode; | ||
234 | 234 | ||
235 | /* | 235 | /* |
236 | * Nobody gets write access to a read-only fs. | 236 | * Nobody gets write access to a read-only fs. |
@@ -247,6 +247,13 @@ int permission(struct inode *inode, int mask, struct nameidata *nd) | |||
247 | } | 247 | } |
248 | 248 | ||
249 | 249 | ||
250 | /* | ||
251 | * MAY_EXEC on regular files requires special handling: We override | ||
252 | * filesystem execute permissions if the mode bits aren't set. | ||
253 | */ | ||
254 | if ((mask & MAY_EXEC) && S_ISREG(mode) && !(mode & S_IXUGO)) | ||
255 | return -EACCES; | ||
256 | |||
250 | /* Ordinary permission routines do not understand MAY_APPEND. */ | 257 | /* Ordinary permission routines do not understand MAY_APPEND. */ |
251 | submask = mask & ~MAY_APPEND; | 258 | submask = mask & ~MAY_APPEND; |
252 | if (inode->i_op && inode->i_op->permission) | 259 | if (inode->i_op && inode->i_op->permission) |
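
The permission() change refuses MAY_EXEC on a regular file unless at least one execute bit is set in i_mode, before any filesystem-specific ->permission() hook is consulted. The check is plain mode-bit arithmetic, sketched standalone below (the MAY_EXEC value is illustrative):

    #include <stdio.h>
    #include <sys/stat.h>

    #define MAY_EXEC 1          /* illustrative stand-in for the VFS flag */

    /* Sketch only: the new early return in permission(); exec on a regular
     * file needs some x bit (S_IXUSR | S_IXGRP | S_IXOTH). */
    static int may_exec_check(mode_t mode, int mask)
    {
        if ((mask & MAY_EXEC) && S_ISREG(mode) &&
            !(mode & (S_IXUSR | S_IXGRP | S_IXOTH)))
            return -1;          /* -EACCES in the kernel */
        return 0;
    }

    int main(void)
    {
        printf("rw-r--r-- exec: %d\n", may_exec_check(S_IFREG | 0644, MAY_EXEC));
        printf("rwxr-xr-x exec: %d\n", may_exec_check(S_IFREG | 0755, MAY_EXEC));
        printf("directory exec: %d\n", may_exec_check(S_IFDIR | 0644, MAY_EXEC));
        return 0;
    }
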
@@ -1125,7 +1132,7 @@ static int fastcall do_path_lookup(int dfd, const char *name, | |||
1125 | retval = link_path_walk(name, nd); | 1132 | retval = link_path_walk(name, nd); |
1126 | out: | 1133 | out: |
1127 | if (likely(retval == 0)) { | 1134 | if (likely(retval == 0)) { |
1128 | if (unlikely(current->audit_context && nd && nd->dentry && | 1135 | if (unlikely(!audit_dummy_context() && nd && nd->dentry && |
1129 | nd->dentry->d_inode)) | 1136 | nd->dentry->d_inode)) |
1130 | audit_inode(name, nd->dentry->d_inode); | 1137 | audit_inode(name, nd->dentry->d_inode); |
1131 | } | 1138 | } |
@@ -1357,7 +1364,7 @@ static int may_delete(struct inode *dir,struct dentry *victim,int isdir) | |||
1357 | return -ENOENT; | 1364 | return -ENOENT; |
1358 | 1365 | ||
1359 | BUG_ON(victim->d_parent->d_inode != dir); | 1366 | BUG_ON(victim->d_parent->d_inode != dir); |
1360 | audit_inode_child(victim->d_name.name, victim->d_inode, dir->i_ino); | 1367 | audit_inode_child(victim->d_name.name, victim->d_inode, dir); |
1361 | 1368 | ||
1362 | error = permission(dir,MAY_WRITE | MAY_EXEC, NULL); | 1369 | error = permission(dir,MAY_WRITE | MAY_EXEC, NULL); |
1363 | if (error) | 1370 | if (error) |
@@ -1659,6 +1666,7 @@ do_last: | |||
1659 | * It already exists. | 1666 | * It already exists. |
1660 | */ | 1667 | */ |
1661 | mutex_unlock(&dir->d_inode->i_mutex); | 1668 | mutex_unlock(&dir->d_inode->i_mutex); |
1669 | audit_inode_update(path.dentry->d_inode); | ||
1662 | 1670 | ||
1663 | error = -EEXIST; | 1671 | error = -EEXIST; |
1664 | if (flag & O_EXCL) | 1672 | if (flag & O_EXCL) |
@@ -1669,6 +1677,7 @@ do_last: | |||
1669 | if (flag & O_NOFOLLOW) | 1677 | if (flag & O_NOFOLLOW) |
1670 | goto exit_dput; | 1678 | goto exit_dput; |
1671 | } | 1679 | } |
1680 | |||
1672 | error = -ENOENT; | 1681 | error = -ENOENT; |
1673 | if (!path.dentry->d_inode) | 1682 | if (!path.dentry->d_inode) |
1674 | goto exit_dput; | 1683 | goto exit_dput; |
@@ -1765,6 +1774,8 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir) | |||
1765 | if (nd->last_type != LAST_NORM) | 1774 | if (nd->last_type != LAST_NORM) |
1766 | goto fail; | 1775 | goto fail; |
1767 | nd->flags &= ~LOOKUP_PARENT; | 1776 | nd->flags &= ~LOOKUP_PARENT; |
1777 | nd->flags |= LOOKUP_CREATE; | ||
1778 | nd->intent.open.flags = O_EXCL; | ||
1768 | 1779 | ||
1769 | /* | 1780 | /* |
1770 | * Do the final lookup. | 1781 | * Do the final lookup. |
diff --git a/fs/nfs/file.c b/fs/nfs/file.c index cc2b874ad5a4..48e892880d5b 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c | |||
@@ -312,7 +312,13 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset) | |||
312 | 312 | ||
313 | static int nfs_release_page(struct page *page, gfp_t gfp) | 313 | static int nfs_release_page(struct page *page, gfp_t gfp) |
314 | { | 314 | { |
315 | return !nfs_wb_page(page->mapping->host, page); | 315 | if (gfp & __GFP_FS) |
316 | return !nfs_wb_page(page->mapping->host, page); | ||
317 | else | ||
318 | /* | ||
319 | * Avoid deadlock on nfs_wait_on_request(). | ||
320 | */ | ||
321 | return 0; | ||
316 | } | 322 | } |
317 | 323 | ||
318 | const struct address_space_operations nfs_file_aops = { | 324 | const struct address_space_operations nfs_file_aops = { |
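
nfs_release_page() now flushes the page only when the caller's gfp mask includes __GFP_FS: a memory reclaimer that is not allowed to re-enter the filesystem must not be made to wait on NFS writeback it could itself be blocking. The shape of that guard as a standalone sketch, with an invented flag value:

    #include <stdio.h>

    /* Sketch only: gate potentially blocking filesystem work on the
     * allocation-context flags, as nfs_release_page() does with __GFP_FS.
     * The flag value below is made up for the demo. */
    #define DEMO_GFP_FS 0x80u

    static int fake_writeback(void) { return 0; }   /* 0 == success */

    static int release_page(unsigned int gfp, int (*writeback)(void))
    {
        if (gfp & DEMO_GFP_FS)
            return !writeback();   /* safe to wait on the write */
        return 0;                  /* refuse: caller may not re-enter the fs */
    }

    int main(void)
    {
        printf("with FS flag:    released=%d\n",
               release_page(DEMO_GFP_FS, fake_writeback));
        printf("without FS flag: released=%d\n",
               release_page(0, fake_writeback));
        return 0;
    }
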
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index b81e7ed3c902..07a5dd57646e 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c | |||
@@ -130,9 +130,7 @@ nfs_idmap_delete(struct nfs4_client *clp) | |||
130 | 130 | ||
131 | if (!idmap) | 131 | if (!idmap) |
132 | return; | 132 | return; |
133 | dput(idmap->idmap_dentry); | 133 | rpc_unlink(idmap->idmap_dentry); |
134 | idmap->idmap_dentry = NULL; | ||
135 | rpc_unlink(idmap->idmap_path); | ||
136 | clp->cl_idmap = NULL; | 134 | clp->cl_idmap = NULL; |
137 | kfree(idmap); | 135 | kfree(idmap); |
138 | } | 136 | } |
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index 19b98ca468eb..86b3169c8cac 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c | |||
@@ -51,7 +51,7 @@ char *nfs_path(const char *base, const struct dentry *dentry, | |||
51 | namelen = dentry->d_name.len; | 51 | namelen = dentry->d_name.len; |
52 | buflen -= namelen + 1; | 52 | buflen -= namelen + 1; |
53 | if (buflen < 0) | 53 | if (buflen < 0) |
54 | goto Elong; | 54 | goto Elong_unlock; |
55 | end -= namelen; | 55 | end -= namelen; |
56 | memcpy(end, dentry->d_name.name, namelen); | 56 | memcpy(end, dentry->d_name.name, namelen); |
57 | *--end = '/'; | 57 | *--end = '/'; |
@@ -68,6 +68,8 @@ char *nfs_path(const char *base, const struct dentry *dentry, | |||
68 | end -= namelen; | 68 | end -= namelen; |
69 | memcpy(end, base, namelen); | 69 | memcpy(end, base, namelen); |
70 | return end; | 70 | return end; |
71 | Elong_unlock: | ||
72 | spin_unlock(&dcache_lock); | ||
71 | Elong: | 73 | Elong: |
72 | return ERR_PTR(-ENAMETOOLONG); | 74 | return ERR_PTR(-ENAMETOOLONG); |
73 | } | 75 | } |
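
The nfs_path() fix adds an Elong_unlock label so that the early "name too long" exit, taken while dcache_lock is still held, releases the lock, while the later Elong exit does not. A small pthread sketch of that unlock-on-error idiom:

    #include <pthread.h>
    #include <stdio.h>

    /* Sketch only: exits taken while the lock is held must release it,
     * exits taken after it is dropped must not, which is exactly what the
     * new Elong_unlock label arranges. */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int build_path(int too_long_while_locked)
    {
        int err = 0;

        pthread_mutex_lock(&lock);
        if (too_long_while_locked) {
            err = -36;                  /* -ENAMETOOLONG */
            goto out_unlock;
        }
        pthread_mutex_unlock(&lock);

        /* ... work that runs without the lock ... */
        return 0;

    out_unlock:
        pthread_mutex_unlock(&lock);
        return err;
    }

    int main(void)
    {
        printf("ok path:    %d\n", build_path(0));
        printf("error path: %d\n", build_path(1));
        return 0;
    }
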
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e6ee97f19d81..153898e1331f 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -2668,7 +2668,7 @@ out: | |||
2668 | nfs4_set_cached_acl(inode, acl); | 2668 | nfs4_set_cached_acl(inode, acl); |
2669 | } | 2669 | } |
2670 | 2670 | ||
2671 | static inline ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) | 2671 | static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) |
2672 | { | 2672 | { |
2673 | struct page *pages[NFS4ACL_MAXPAGES]; | 2673 | struct page *pages[NFS4ACL_MAXPAGES]; |
2674 | struct nfs_getaclargs args = { | 2674 | struct nfs_getaclargs args = { |
@@ -2721,6 +2721,19 @@ out_free: | |||
2721 | return ret; | 2721 | return ret; |
2722 | } | 2722 | } |
2723 | 2723 | ||
2724 | static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) | ||
2725 | { | ||
2726 | struct nfs4_exception exception = { }; | ||
2727 | ssize_t ret; | ||
2728 | do { | ||
2729 | ret = __nfs4_get_acl_uncached(inode, buf, buflen); | ||
2730 | if (ret >= 0) | ||
2731 | break; | ||
2732 | ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); | ||
2733 | } while (exception.retry); | ||
2734 | return ret; | ||
2735 | } | ||
2736 | |||
2724 | static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) | 2737 | static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) |
2725 | { | 2738 | { |
2726 | struct nfs_server *server = NFS_SERVER(inode); | 2739 | struct nfs_server *server = NFS_SERVER(inode); |
@@ -2737,7 +2750,7 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) | |||
2737 | return nfs4_get_acl_uncached(inode, buf, buflen); | 2750 | return nfs4_get_acl_uncached(inode, buf, buflen); |
2738 | } | 2751 | } |
2739 | 2752 | ||
2740 | static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) | 2753 | static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) |
2741 | { | 2754 | { |
2742 | struct nfs_server *server = NFS_SERVER(inode); | 2755 | struct nfs_server *server = NFS_SERVER(inode); |
2743 | struct page *pages[NFS4ACL_MAXPAGES]; | 2756 | struct page *pages[NFS4ACL_MAXPAGES]; |
@@ -2763,6 +2776,18 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen | |||
2763 | return ret; | 2776 | return ret; |
2764 | } | 2777 | } |
2765 | 2778 | ||
2779 | static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) | ||
2780 | { | ||
2781 | struct nfs4_exception exception = { }; | ||
2782 | int err; | ||
2783 | do { | ||
2784 | err = nfs4_handle_exception(NFS_SERVER(inode), | ||
2785 | __nfs4_proc_set_acl(inode, buf, buflen), | ||
2786 | &exception); | ||
2787 | } while (exception.retry); | ||
2788 | return err; | ||
2789 | } | ||
2790 | |||
2766 | static int | 2791 | static int |
2767 | nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server) | 2792 | nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server) |
2768 | { | 2793 | { |
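
Both new wrappers follow the same shape: call the uncached helper, and on failure hand the error to nfs4_handle_exception(), which decides whether the caller should retry (exception.retry) or give up with a mapped error. A generic sketch of that retry loop, with a toy error handler standing in for the NFSv4 one:

    #include <stdio.h>

    /* Sketch only: the do/while (exception.retry) pattern used by the new
     * nfs4_get_acl_uncached() and nfs4_proc_set_acl() wrappers. The
     * "fail twice, then succeed" behaviour below is invented for the demo. */
    struct exception {
        int retry;
    };

    static int attempts;

    static int do_operation(void)
    {
        return (++attempts < 3) ? -11 : 0;   /* -EAGAIN twice, then ok */
    }

    static int handle_exception(int err, struct exception *exc)
    {
        if (err == -11) {            /* retryable: ask the caller to loop */
            exc->retry = 1;
            return 0;
        }
        exc->retry = 0;              /* fatal: pass the error through */
        return err;
    }

    int main(void)
    {
        struct exception exc = { 0 };
        int err;

        do {
            err = do_operation();
            if (err == 0)
                break;
            err = handle_exception(err, &exc);
        } while (exc.retry);

        printf("finished after %d attempts, err=%d\n", attempts, err);
        return 0;
    }
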
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 1750d996f49f..730ec8fb31c6 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
@@ -3355,7 +3355,7 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n | |||
3355 | struct kvec *iov = rcvbuf->head; | 3355 | struct kvec *iov = rcvbuf->head; |
3356 | unsigned int nr, pglen = rcvbuf->page_len; | 3356 | unsigned int nr, pglen = rcvbuf->page_len; |
3357 | uint32_t *end, *entry, *p, *kaddr; | 3357 | uint32_t *end, *entry, *p, *kaddr; |
3358 | uint32_t len, attrlen; | 3358 | uint32_t len, attrlen, xlen; |
3359 | int hdrlen, recvd, status; | 3359 | int hdrlen, recvd, status; |
3360 | 3360 | ||
3361 | status = decode_op_hdr(xdr, OP_READDIR); | 3361 | status = decode_op_hdr(xdr, OP_READDIR); |
@@ -3377,10 +3377,10 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n | |||
3377 | 3377 | ||
3378 | BUG_ON(pglen + readdir->pgbase > PAGE_CACHE_SIZE); | 3378 | BUG_ON(pglen + readdir->pgbase > PAGE_CACHE_SIZE); |
3379 | kaddr = p = (uint32_t *) kmap_atomic(page, KM_USER0); | 3379 | kaddr = p = (uint32_t *) kmap_atomic(page, KM_USER0); |
3380 | end = (uint32_t *) ((char *)p + pglen + readdir->pgbase); | 3380 | end = p + ((pglen + readdir->pgbase) >> 2); |
3381 | entry = p; | 3381 | entry = p; |
3382 | for (nr = 0; *p++; nr++) { | 3382 | for (nr = 0; *p++; nr++) { |
3383 | if (p + 3 > end) | 3383 | if (end - p < 3) |
3384 | goto short_pkt; | 3384 | goto short_pkt; |
3385 | dprintk("cookie = %Lu, ", *((unsigned long long *)p)); | 3385 | dprintk("cookie = %Lu, ", *((unsigned long long *)p)); |
3386 | p += 2; /* cookie */ | 3386 | p += 2; /* cookie */ |
@@ -3389,18 +3389,19 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n | |||
3389 | printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)\n", len); | 3389 | printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)\n", len); |
3390 | goto err_unmap; | 3390 | goto err_unmap; |
3391 | } | 3391 | } |
3392 | dprintk("filename = %*s\n", len, (char *)p); | 3392 | xlen = XDR_QUADLEN(len); |
3393 | p += XDR_QUADLEN(len); | 3393 | if (end - p < xlen + 1) |
3394 | if (p + 1 > end) | ||
3395 | goto short_pkt; | 3394 | goto short_pkt; |
3395 | dprintk("filename = %*s\n", len, (char *)p); | ||
3396 | p += xlen; | ||
3396 | len = ntohl(*p++); /* bitmap length */ | 3397 | len = ntohl(*p++); /* bitmap length */ |
3397 | p += len; | 3398 | if (end - p < len + 1) |
3398 | if (p + 1 > end) | ||
3399 | goto short_pkt; | 3399 | goto short_pkt; |
3400 | p += len; | ||
3400 | attrlen = XDR_QUADLEN(ntohl(*p++)); | 3401 | attrlen = XDR_QUADLEN(ntohl(*p++)); |
3401 | p += attrlen; /* attributes */ | 3402 | if (end - p < attrlen + 2) |
3402 | if (p + 2 > end) | ||
3403 | goto short_pkt; | 3403 | goto short_pkt; |
3404 | p += attrlen; /* attributes */ | ||
3404 | entry = p; | 3405 | entry = p; |
3405 | } | 3406 | } |
3406 | if (!nr && (entry[0] != 0 || entry[1] == 0)) | 3407 | if (!nr && (entry[0] != 0 || entry[1] == 0)) |
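
The decode_readdir() rework replaces tests of the form "p + n > end", which can wrap when n comes straight off the wire, with "end - p < n", and only advances p once the remaining space has been checked; XDR_QUADLEN() rounds a byte length up to 32-bit words. Both pieces in a standalone sketch:

    #include <stdio.h>
    #include <stdint.h>

    /* Sketch only: safe "space left" test and XDR word rounding, as used by
     * the reworked readdir decoder. Buffer contents are illustrative. */
    #define XDR_QUADLEN(l) (((l) + 3) >> 2)   /* bytes -> 32-bit words, rounded up */

    static int consume(const uint32_t **pp, const uint32_t *end, uint32_t nwords)
    {
        const uint32_t *p = *pp;

        if ((uint32_t)(end - p) < nwords)     /* never form p + nwords first */
            return -1;                        /* "short packet" */
        *pp = p + nwords;
        return 0;
    }

    int main(void)
    {
        uint32_t buf[8] = { 0 };
        const uint32_t *p = buf, *end = buf + 8;
        uint32_t name_len = 10;               /* pretend on-the-wire length */

        printf("name uses %u words\n", XDR_QUADLEN(name_len));
        printf("consume name: %d\n", consume(&p, end, XDR_QUADLEN(name_len)));
        printf("consume huge: %d\n", consume(&p, end, 0x40000000u));
        return 0;
    }
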
diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 52bf634260a1..da9cf11c326f 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c | |||
@@ -63,7 +63,7 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount) | |||
63 | return p; | 63 | return p; |
64 | } | 64 | } |
65 | 65 | ||
66 | void nfs_readdata_free(struct nfs_read_data *p) | 66 | static void nfs_readdata_free(struct nfs_read_data *p) |
67 | { | 67 | { |
68 | if (p && (p->pagevec != &p->page_array[0])) | 68 | if (p && (p->pagevec != &p->page_array[0])) |
69 | kfree(p->pagevec); | 69 | kfree(p->pagevec); |
@@ -116,10 +116,17 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data) | |||
116 | pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; | 116 | pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; |
117 | base &= ~PAGE_CACHE_MASK; | 117 | base &= ~PAGE_CACHE_MASK; |
118 | pglen = PAGE_CACHE_SIZE - base; | 118 | pglen = PAGE_CACHE_SIZE - base; |
119 | if (pglen < remainder) | 119 | for (;;) { |
120 | if (remainder <= pglen) { | ||
121 | memclear_highpage_flush(*pages, base, remainder); | ||
122 | break; | ||
123 | } | ||
120 | memclear_highpage_flush(*pages, base, pglen); | 124 | memclear_highpage_flush(*pages, base, pglen); |
121 | else | 125 | pages++; |
122 | memclear_highpage_flush(*pages, base, remainder); | 126 | remainder -= pglen; |
127 | pglen = PAGE_CACHE_SIZE; | ||
128 | base = 0; | ||
129 | } | ||
123 | } | 130 | } |
124 | 131 | ||
125 | /* | 132 | /* |
@@ -476,6 +483,8 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data) | |||
476 | unsigned int base = data->args.pgbase; | 483 | unsigned int base = data->args.pgbase; |
477 | struct page **pages; | 484 | struct page **pages; |
478 | 485 | ||
486 | if (data->res.eof) | ||
487 | count = data->args.count; | ||
479 | if (unlikely(count == 0)) | 488 | if (unlikely(count == 0)) |
480 | return; | 489 | return; |
481 | pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; | 490 | pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; |
@@ -483,11 +492,7 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data) | |||
483 | count += base; | 492 | count += base; |
484 | for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) | 493 | for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) |
485 | SetPageUptodate(*pages); | 494 | SetPageUptodate(*pages); |
486 | /* | 495 | if (count != 0) |
487 | * Was this an eof or a short read? If the latter, don't mark the page | ||
488 | * as uptodate yet. | ||
489 | */ | ||
490 | if (count > 0 && (data->res.eof || data->args.count == data->res.count)) | ||
491 | SetPageUptodate(*pages); | 496 | SetPageUptodate(*pages); |
492 | } | 497 | } |
493 | 498 | ||
@@ -502,6 +507,8 @@ static void nfs_readpage_set_pages_error(struct nfs_read_data *data) | |||
502 | count += base; | 507 | count += base; |
503 | for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) | 508 | for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) |
504 | SetPageError(*pages); | 509 | SetPageError(*pages); |
510 | if (count != 0) | ||
511 | SetPageError(*pages); | ||
505 | } | 512 | } |
506 | 513 | ||
507 | /* | 514 | /* |
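
The truncate-uninitialised-page fix replaces a single memclear, which could only cover one page, with a loop that zeroes the tail of the first page, any whole pages in the middle, and the head of the last page until the remainder is exhausted. The same loop structure in a userspace sketch, with an array of byte pages standing in for the page vector:

    #include <stdio.h>
    #include <string.h>

    #define PAGE 4096

    /* Sketch only: walk (base, remainder) across fixed-size pages and clear
     * each chunk, mirroring the new for(;;) loop in
     * nfs_readpage_truncate_uninitialised_page(). */
    static void clear_range(unsigned char pages[][PAGE], unsigned int page,
                            unsigned int base, unsigned int remainder)
    {
        unsigned int pglen = PAGE - base;

        for (;;) {
            if (remainder <= pglen) {
                memset(&pages[page][base], 0, remainder);
                break;
            }
            memset(&pages[page][base], 0, pglen);
            page++;
            remainder -= pglen;
            pglen = PAGE;
            base = 0;
        }
    }

    int main(void)
    {
        static unsigned char pages[4][PAGE];

        memset(pages, 0xff, sizeof(pages));
        clear_range(pages, 0, 1000, 2 * PAGE);   /* spans three pages */

        printf("page0[999]=%u page0[1000]=%u page2[999]=%u page2[1000]=%u\n",
               pages[0][999], pages[0][1000], pages[2][999], pages[2][1000]);
        return 0;
    }
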
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 86bac6a5008e..50774991f8d5 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -137,7 +137,7 @@ struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount) | |||
137 | return p; | 137 | return p; |
138 | } | 138 | } |
139 | 139 | ||
140 | void nfs_writedata_free(struct nfs_write_data *p) | 140 | static void nfs_writedata_free(struct nfs_write_data *p) |
141 | { | 141 | { |
142 | if (p && (p->pagevec != &p->page_array[0])) | 142 | if (p && (p->pagevec != &p->page_array[0])) |
143 | kfree(p->pagevec); | 143 | kfree(p->pagevec); |
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index ecc439d2565f..501d83884530 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c | |||
@@ -187,6 +187,11 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access) | |||
187 | goto out; | 187 | goto out; |
188 | } | 188 | } |
189 | 189 | ||
190 | /* Set user creds for this exportpoint */ | ||
191 | error = nfserrno(nfsd_setuser(rqstp, exp)); | ||
192 | if (error) | ||
193 | goto out; | ||
194 | |||
190 | /* | 195 | /* |
191 | * Look up the dentry using the NFS file handle. | 196 | * Look up the dentry using the NFS file handle. |
192 | */ | 197 | */ |
@@ -241,16 +246,17 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access) | |||
241 | dprintk("nfsd: fh_verify - just checking\n"); | 246 | dprintk("nfsd: fh_verify - just checking\n"); |
242 | dentry = fhp->fh_dentry; | 247 | dentry = fhp->fh_dentry; |
243 | exp = fhp->fh_export; | 248 | exp = fhp->fh_export; |
249 | /* Set user creds for this exportpoint; necessary even | ||
250 | * in the "just checking" case because this may be a | ||
251 | * filehandle that was created by fh_compose, and that | ||
252 | * is about to be used in another nfsv4 compound | ||
253 | * operation */ | ||
254 | error = nfserrno(nfsd_setuser(rqstp, exp)); | ||
255 | if (error) | ||
256 | goto out; | ||
244 | } | 257 | } |
245 | cache_get(&exp->h); | 258 | cache_get(&exp->h); |
246 | 259 | ||
247 | /* Set user creds for this exportpoint; necessary even in the "just | ||
248 | * checking" case because this may be a filehandle that was created by | ||
249 | * fh_compose, and that is about to be used in another nfsv4 compound | ||
250 | * operation */ | ||
251 | error = nfserrno(nfsd_setuser(rqstp, exp)); | ||
252 | if (error) | ||
253 | goto out; | ||
254 | 260 | ||
255 | error = nfsd_mode_check(rqstp, dentry->d_inode->i_mode, type); | 261 | error = nfsd_mode_check(rqstp, dentry->d_inode->i_mode, type); |
256 | if (error) | 262 | if (error) |
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 1b8346dd0572..9503240ef0e5 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -2375,7 +2375,6 @@ leave: | |||
2375 | mlog(0, "returning %d\n", ret); | 2375 | mlog(0, "returning %d\n", ret); |
2376 | return ret; | 2376 | return ret; |
2377 | } | 2377 | } |
2378 | EXPORT_SYMBOL_GPL(dlm_migrate_lockres); | ||
2379 | 2378 | ||
2380 | int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) | 2379 | int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) |
2381 | { | 2380 | { |
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c index b0c3134f4f70..37be4b2e0d4a 100644 --- a/fs/ocfs2/dlm/dlmunlock.c +++ b/fs/ocfs2/dlm/dlmunlock.c | |||
@@ -155,7 +155,7 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, | |||
155 | else | 155 | else |
156 | status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions); | 156 | status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions); |
157 | 157 | ||
158 | if (status != DLM_NORMAL) | 158 | if (status != DLM_NORMAL && (status != DLM_CANCELGRANT || !master_node)) |
159 | goto leave; | 159 | goto leave; |
160 | 160 | ||
161 | /* By now this has been masked out of cancel requests. */ | 161 | /* By now this has been masked out of cancel requests. */ |
@@ -183,8 +183,7 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, | |||
183 | spin_lock(&lock->spinlock); | 183 | spin_lock(&lock->spinlock); |
184 | /* if the master told us the lock was already granted, | 184 | /* if the master told us the lock was already granted, |
185 | * let the ast handle all of these actions */ | 185 | * let the ast handle all of these actions */ |
186 | if (status == DLM_NORMAL && | 186 | if (status == DLM_CANCELGRANT) { |
187 | lksb->status == DLM_CANCELGRANT) { | ||
188 | actions &= ~(DLM_UNLOCK_REMOVE_LOCK| | 187 | actions &= ~(DLM_UNLOCK_REMOVE_LOCK| |
189 | DLM_UNLOCK_REGRANT_LOCK| | 188 | DLM_UNLOCK_REGRANT_LOCK| |
190 | DLM_UNLOCK_CLEAR_CONVERT_TYPE); | 189 | DLM_UNLOCK_CLEAR_CONVERT_TYPE); |
@@ -349,14 +348,9 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, | |||
349 | vec, veclen, owner, &status); | 348 | vec, veclen, owner, &status); |
350 | if (tmpret >= 0) { | 349 | if (tmpret >= 0) { |
351 | // successfully sent and received | 350 | // successfully sent and received |
352 | if (status == DLM_CANCELGRANT) | 351 | if (status == DLM_FORWARD) |
353 | ret = DLM_NORMAL; | ||
354 | else if (status == DLM_FORWARD) { | ||
355 | mlog(0, "master was in-progress. retry\n"); | 352 | mlog(0, "master was in-progress. retry\n"); |
356 | ret = DLM_FORWARD; | 353 | ret = status; |
357 | } else | ||
358 | ret = status; | ||
359 | lksb->status = status; | ||
360 | } else { | 354 | } else { |
361 | mlog_errno(tmpret); | 355 | mlog_errno(tmpret); |
362 | if (dlm_is_host_down(tmpret)) { | 356 | if (dlm_is_host_down(tmpret)) { |
@@ -372,7 +366,6 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, | |||
372 | /* something bad. this will BUG in ocfs2 */ | 366 | /* something bad. this will BUG in ocfs2 */ |
373 | ret = dlm_err_to_dlm_status(tmpret); | 367 | ret = dlm_err_to_dlm_status(tmpret); |
374 | } | 368 | } |
375 | lksb->status = ret; | ||
376 | } | 369 | } |
377 | 370 | ||
378 | return ret; | 371 | return ret; |
@@ -483,6 +476,10 @@ int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data) | |||
483 | 476 | ||
484 | /* lock was found on queue */ | 477 | /* lock was found on queue */ |
485 | lksb = lock->lksb; | 478 | lksb = lock->lksb; |
479 | if (flags & (LKM_VALBLK|LKM_PUT_LVB) && | ||
480 | lock->ml.type != LKM_EXMODE) | ||
481 | flags &= ~(LKM_VALBLK|LKM_PUT_LVB); | ||
482 | |||
486 | /* unlockast only called on originating node */ | 483 | /* unlockast only called on originating node */ |
487 | if (flags & LKM_PUT_LVB) { | 484 | if (flags & LKM_PUT_LVB) { |
488 | lksb->flags |= DLM_LKSB_PUT_LVB; | 485 | lksb->flags |= DLM_LKSB_PUT_LVB; |
@@ -507,11 +504,8 @@ not_found: | |||
507 | "cookie=%u:%llu\n", | 504 | "cookie=%u:%llu\n", |
508 | dlm_get_lock_cookie_node(unlock->cookie), | 505 | dlm_get_lock_cookie_node(unlock->cookie), |
509 | dlm_get_lock_cookie_seq(unlock->cookie)); | 506 | dlm_get_lock_cookie_seq(unlock->cookie)); |
510 | else { | 507 | else |
511 | /* send the lksb->status back to the other node */ | ||
512 | status = lksb->status; | ||
513 | dlm_lock_put(lock); | 508 | dlm_lock_put(lock); |
514 | } | ||
515 | 509 | ||
516 | leave: | 510 | leave: |
517 | if (res) | 511 | if (res) |
@@ -533,26 +527,22 @@ static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm, | |||
533 | 527 | ||
534 | if (dlm_lock_on_list(&res->blocked, lock)) { | 528 | if (dlm_lock_on_list(&res->blocked, lock)) { |
535 | /* cancel this outright */ | 529 | /* cancel this outright */ |
536 | lksb->status = DLM_NORMAL; | ||
537 | status = DLM_NORMAL; | 530 | status = DLM_NORMAL; |
538 | *actions = (DLM_UNLOCK_CALL_AST | | 531 | *actions = (DLM_UNLOCK_CALL_AST | |
539 | DLM_UNLOCK_REMOVE_LOCK); | 532 | DLM_UNLOCK_REMOVE_LOCK); |
540 | } else if (dlm_lock_on_list(&res->converting, lock)) { | 533 | } else if (dlm_lock_on_list(&res->converting, lock)) { |
541 | /* cancel the request, put back on granted */ | 534 | /* cancel the request, put back on granted */ |
542 | lksb->status = DLM_NORMAL; | ||
543 | status = DLM_NORMAL; | 535 | status = DLM_NORMAL; |
544 | *actions = (DLM_UNLOCK_CALL_AST | | 536 | *actions = (DLM_UNLOCK_CALL_AST | |
545 | DLM_UNLOCK_REMOVE_LOCK | | 537 | DLM_UNLOCK_REMOVE_LOCK | |
546 | DLM_UNLOCK_REGRANT_LOCK | | 538 | DLM_UNLOCK_REGRANT_LOCK | |
547 | DLM_UNLOCK_CLEAR_CONVERT_TYPE); | 539 | DLM_UNLOCK_CLEAR_CONVERT_TYPE); |
548 | } else if (dlm_lock_on_list(&res->granted, lock)) { | 540 | } else if (dlm_lock_on_list(&res->granted, lock)) { |
549 | /* too late, already granted. DLM_CANCELGRANT */ | 541 | /* too late, already granted. */ |
550 | lksb->status = DLM_CANCELGRANT; | 542 | status = DLM_CANCELGRANT; |
551 | status = DLM_NORMAL; | ||
552 | *actions = DLM_UNLOCK_CALL_AST; | 543 | *actions = DLM_UNLOCK_CALL_AST; |
553 | } else { | 544 | } else { |
554 | mlog(ML_ERROR, "lock to cancel is not on any list!\n"); | 545 | mlog(ML_ERROR, "lock to cancel is not on any list!\n"); |
555 | lksb->status = DLM_IVLOCKID; | ||
556 | status = DLM_IVLOCKID; | 546 | status = DLM_IVLOCKID; |
557 | *actions = 0; | 547 | *actions = 0; |
558 | } | 548 | } |
@@ -569,13 +559,11 @@ static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm, | |||
569 | 559 | ||
570 | /* unlock request */ | 560 | /* unlock request */ |
571 | if (!dlm_lock_on_list(&res->granted, lock)) { | 561 | if (!dlm_lock_on_list(&res->granted, lock)) { |
572 | lksb->status = DLM_DENIED; | ||
573 | status = DLM_DENIED; | 562 | status = DLM_DENIED; |
574 | dlm_error(status); | 563 | dlm_error(status); |
575 | *actions = 0; | 564 | *actions = 0; |
576 | } else { | 565 | } else { |
577 | /* unlock granted lock */ | 566 | /* unlock granted lock */ |
578 | lksb->status = DLM_NORMAL; | ||
579 | status = DLM_NORMAL; | 567 | status = DLM_NORMAL; |
580 | *actions = (DLM_UNLOCK_FREE_LOCK | | 568 | *actions = (DLM_UNLOCK_FREE_LOCK | |
581 | DLM_UNLOCK_CALL_AST | | 569 | DLM_UNLOCK_CALL_AST | |
@@ -632,6 +620,8 @@ retry: | |||
632 | 620 | ||
633 | spin_lock(&res->spinlock); | 621 | spin_lock(&res->spinlock); |
634 | is_master = (res->owner == dlm->node_num); | 622 | is_master = (res->owner == dlm->node_num); |
623 | if (flags & LKM_VALBLK && lock->ml.type != LKM_EXMODE) | ||
624 | flags &= ~LKM_VALBLK; | ||
635 | spin_unlock(&res->spinlock); | 625 | spin_unlock(&res->spinlock); |
636 | 626 | ||
637 | if (is_master) { | 627 | if (is_master) { |
@@ -665,7 +655,7 @@ retry: | |||
665 | } | 655 | } |
666 | 656 | ||
667 | if (call_ast) { | 657 | if (call_ast) { |
668 | mlog(0, "calling unlockast(%p, %d)\n", data, lksb->status); | 658 | mlog(0, "calling unlockast(%p, %d)\n", data, status); |
669 | if (is_master) { | 659 | if (is_master) { |
670 | /* it is possible that there is one last bast | 660 | /* it is possible that there is one last bast |
671 | * pending. make sure it is flushed, then | 661 | * pending. make sure it is flushed, then |
@@ -677,9 +667,12 @@ retry: | |||
677 | wait_event(dlm->ast_wq, | 667 | wait_event(dlm->ast_wq, |
678 | dlm_lock_basts_flushed(dlm, lock)); | 668 | dlm_lock_basts_flushed(dlm, lock)); |
679 | } | 669 | } |
680 | (*unlockast)(data, lksb->status); | 670 | (*unlockast)(data, status); |
681 | } | 671 | } |
682 | 672 | ||
673 | if (status == DLM_CANCELGRANT) | ||
674 | status = DLM_NORMAL; | ||
675 | |||
683 | if (status == DLM_NORMAL) { | 676 | if (status == DLM_NORMAL) { |
684 | mlog(0, "kicking the thread\n"); | 677 | mlog(0, "kicking the thread\n"); |
685 | dlm_kick_thread(dlm, res); | 678 | dlm_kick_thread(dlm, res); |
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index 0d1973ea32b0..1f17a4d08287 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c | |||
@@ -840,6 +840,12 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb, | |||
840 | 840 | ||
841 | mlog(0, "Allocating %u clusters for a new window.\n", | 841 | mlog(0, "Allocating %u clusters for a new window.\n", |
842 | ocfs2_local_alloc_window_bits(osb)); | 842 | ocfs2_local_alloc_window_bits(osb)); |
843 | |||
844 | /* Instruct the allocation code to try the most recently used | ||
845 | * cluster group. We'll re-record the group used this pass | ||
846 | * below. */ | ||
847 | ac->ac_last_group = osb->la_last_gd; | ||
848 | |||
843 | /* we used the generic suballoc reserve function, but we set | 849 | /* we used the generic suballoc reserve function, but we set |
844 | * everything up nicely, so there's no reason why we can't use | 850 | * everything up nicely, so there's no reason why we can't use |
845 | * the more specific cluster api to claim bits. */ | 851 | * the more specific cluster api to claim bits. */ |
@@ -852,6 +858,8 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb, | |||
852 | goto bail; | 858 | goto bail; |
853 | } | 859 | } |
854 | 860 | ||
861 | osb->la_last_gd = ac->ac_last_group; | ||
862 | |||
855 | la->la_bm_off = cpu_to_le32(cluster_off); | 863 | la->la_bm_off = cpu_to_le32(cluster_off); |
856 | alloc->id1.bitmap1.i_total = cpu_to_le32(cluster_count); | 864 | alloc->id1.bitmap1.i_total = cpu_to_le32(cluster_count); |
857 | /* just in case... In the future when we find space ourselves, | 865 | /* just in case... In the future when we find space ourselves, |
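
The local-alloc change seeds ac->ac_last_group with the most recently used cluster group (osb->la_last_gd) before reserving a new window and records whichever group the reservation actually used afterwards; the suballocator changes further down then try that hinted group before falling back to the chain search. A toy sketch of the "remember the last group that worked" strategy (group layout invented):

    #include <stdio.h>

    /* Sketch only: try the remembered group first, fall back to a scan, and
     * record whichever group satisfied the request, the idea behind
     * osb->la_last_gd / ac->ac_last_group. */
    #define NGROUPS 4

    static int free_bits[NGROUPS] = { 0, 5, 200, 200 };
    static int last_group;                     /* persists across allocations */

    static int claim(int want)
    {
        int g = last_group;

        if (free_bits[g] < want) {             /* hint failed: scan */
            for (g = 0; g < NGROUPS; g++)
                if (free_bits[g] >= want)
                    break;
            if (g == NGROUPS)
                return -1;                     /* -ENOSPC */
        }
        free_bits[g] -= want;
        last_group = g;                        /* re-record the hint */
        return g;
    }

    int main(void)
    {
        printf("first alloc from group %d\n", claim(50));
        printf("second alloc from group %d (hint reused)\n", claim(50));
        printf("third alloc from group %d (hint exhausted, rescan)\n", claim(150));
        return 0;
    }
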
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index cd4a6f253d13..0462a7f4e21b 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
@@ -197,7 +197,6 @@ struct ocfs2_super | |||
197 | struct ocfs2_node_map recovery_map; | 197 | struct ocfs2_node_map recovery_map; |
198 | struct ocfs2_node_map umount_map; | 198 | struct ocfs2_node_map umount_map; |
199 | 199 | ||
200 | u32 num_clusters; | ||
201 | u64 root_blkno; | 200 | u64 root_blkno; |
202 | u64 system_dir_blkno; | 201 | u64 system_dir_blkno; |
203 | u64 bitmap_blkno; | 202 | u64 bitmap_blkno; |
@@ -237,6 +236,7 @@ struct ocfs2_super | |||
237 | 236 | ||
238 | enum ocfs2_local_alloc_state local_alloc_state; | 237 | enum ocfs2_local_alloc_state local_alloc_state; |
239 | struct buffer_head *local_alloc_bh; | 238 | struct buffer_head *local_alloc_bh; |
239 | u64 la_last_gd; | ||
240 | 240 | ||
241 | /* Next two fields are for local node slot recovery during | 241 | /* Next two fields are for local node slot recovery during |
242 | * mount. */ | 242 | * mount. */ |
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 195523090c87..9d91e66f51a9 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c | |||
@@ -70,12 +70,6 @@ static int ocfs2_block_group_search(struct inode *inode, | |||
70 | struct buffer_head *group_bh, | 70 | struct buffer_head *group_bh, |
71 | u32 bits_wanted, u32 min_bits, | 71 | u32 bits_wanted, u32 min_bits, |
72 | u16 *bit_off, u16 *bits_found); | 72 | u16 *bit_off, u16 *bits_found); |
73 | static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, | ||
74 | u32 bits_wanted, | ||
75 | u32 min_bits, | ||
76 | u16 *bit_off, | ||
77 | unsigned int *num_bits, | ||
78 | u64 *bg_blkno); | ||
79 | static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, | 73 | static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, |
80 | struct ocfs2_alloc_context *ac, | 74 | struct ocfs2_alloc_context *ac, |
81 | u32 bits_wanted, | 75 | u32 bits_wanted, |
@@ -85,11 +79,6 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, | |||
85 | u64 *bg_blkno); | 79 | u64 *bg_blkno); |
86 | static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh, | 80 | static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh, |
87 | int nr); | 81 | int nr); |
88 | static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb, | ||
89 | struct buffer_head *bg_bh, | ||
90 | unsigned int bits_wanted, | ||
91 | u16 *bit_off, | ||
92 | u16 *bits_found); | ||
93 | static inline int ocfs2_block_group_set_bits(struct ocfs2_journal_handle *handle, | 82 | static inline int ocfs2_block_group_set_bits(struct ocfs2_journal_handle *handle, |
94 | struct inode *alloc_inode, | 83 | struct inode *alloc_inode, |
95 | struct ocfs2_group_desc *bg, | 84 | struct ocfs2_group_desc *bg, |
@@ -143,6 +132,64 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl) | |||
143 | return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc); | 132 | return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc); |
144 | } | 133 | } |
145 | 134 | ||
135 | /* somewhat more expensive than our other checks, so use sparingly. */ | ||
136 | static int ocfs2_check_group_descriptor(struct super_block *sb, | ||
137 | struct ocfs2_dinode *di, | ||
138 | struct ocfs2_group_desc *gd) | ||
139 | { | ||
140 | unsigned int max_bits; | ||
141 | |||
142 | if (!OCFS2_IS_VALID_GROUP_DESC(gd)) { | ||
143 | OCFS2_RO_ON_INVALID_GROUP_DESC(sb, gd); | ||
144 | return -EIO; | ||
145 | } | ||
146 | |||
147 | if (di->i_blkno != gd->bg_parent_dinode) { | ||
148 | ocfs2_error(sb, "Group descriptor # %llu has bad parent " | ||
149 | "pointer (%llu, expected %llu)", | ||
150 | (unsigned long long)le64_to_cpu(gd->bg_blkno), | ||
151 | (unsigned long long)le64_to_cpu(gd->bg_parent_dinode), | ||
152 | (unsigned long long)le64_to_cpu(di->i_blkno)); | ||
153 | return -EIO; | ||
154 | } | ||
155 | |||
156 | max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * le16_to_cpu(di->id2.i_chain.cl_bpc); | ||
157 | if (le16_to_cpu(gd->bg_bits) > max_bits) { | ||
158 | ocfs2_error(sb, "Group descriptor # %llu has bit count of %u", | ||
159 | (unsigned long long)le64_to_cpu(gd->bg_blkno), | ||
160 | le16_to_cpu(gd->bg_bits)); | ||
161 | return -EIO; | ||
162 | } | ||
163 | |||
164 | if (le16_to_cpu(gd->bg_chain) >= | ||
165 | le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) { | ||
166 | ocfs2_error(sb, "Group descriptor # %llu has bad chain %u", | ||
167 | (unsigned long long)le64_to_cpu(gd->bg_blkno), | ||
168 | le16_to_cpu(gd->bg_chain)); | ||
169 | return -EIO; | ||
170 | } | ||
171 | |||
172 | if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) { | ||
173 | ocfs2_error(sb, "Group descriptor # %llu has bit count %u but " | ||
174 | "claims that %u are free", | ||
175 | (unsigned long long)le64_to_cpu(gd->bg_blkno), | ||
176 | le16_to_cpu(gd->bg_bits), | ||
177 | le16_to_cpu(gd->bg_free_bits_count)); | ||
178 | return -EIO; | ||
179 | } | ||
180 | |||
181 | if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) { | ||
182 | ocfs2_error(sb, "Group descriptor # %llu has bit count %u but " | ||
183 | "max bitmap bits of %u", | ||
184 | (unsigned long long)le64_to_cpu(gd->bg_blkno), | ||
185 | le16_to_cpu(gd->bg_bits), | ||
186 | 8 * le16_to_cpu(gd->bg_size)); | ||
187 | return -EIO; | ||
188 | } | ||
189 | |||
190 | return 0; | ||
191 | } | ||
192 | |||
146 | static int ocfs2_block_group_fill(struct ocfs2_journal_handle *handle, | 193 | static int ocfs2_block_group_fill(struct ocfs2_journal_handle *handle, |
147 | struct inode *alloc_inode, | 194 | struct inode *alloc_inode, |
148 | struct buffer_head *bg_bh, | 195 | struct buffer_head *bg_bh, |
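
ocfs2_check_group_descriptor() above cross-checks a group descriptor against its owning dinode: the parent pointer must match, bg_bits cannot exceed cl_cpg * cl_bpc, the chain index must be below cl_next_free_rec, the free count cannot exceed the bit count, and the bit count must fit inside the on-disk bitmap (8 * bg_size). The same rules over simplified host-endian structs, as a standalone sketch:

    #include <stdio.h>
    #include <stdint.h>

    /* Sketch only: the validation rules of ocfs2_check_group_descriptor()
     * applied to simplified structs (the real code works on little-endian
     * on-disk fields and reports via ocfs2_error()). */
    struct toy_dinode {
        uint64_t blkno;
        uint16_t cl_cpg, cl_bpc, cl_next_free_rec;
    };

    struct toy_group_desc {
        uint64_t blkno, parent_dinode;
        uint16_t bits, chain, free_bits_count, size;
    };

    static int check_group_descriptor(const struct toy_dinode *di,
                                      const struct toy_group_desc *gd)
    {
        unsigned int max_bits = (unsigned int)di->cl_cpg * di->cl_bpc;

        if (gd->parent_dinode != di->blkno)
            return -1;                          /* bad parent pointer */
        if (gd->bits > max_bits)
            return -2;                          /* more bits than a group can hold */
        if (gd->chain >= di->cl_next_free_rec)
            return -3;                          /* chain index out of range */
        if (gd->free_bits_count > gd->bits)
            return -4;                          /* free count exceeds total */
        if (gd->bits > 8u * gd->size)
            return -5;                          /* bitmap too small for bit count */
        return 0;
    }

    int main(void)
    {
        struct toy_dinode di = { .blkno = 100, .cl_cpg = 4, .cl_bpc = 64,
                                 .cl_next_free_rec = 2 };
        struct toy_group_desc ok  = { .blkno = 8, .parent_dinode = 100, .bits = 256,
                                      .chain = 1, .free_bits_count = 10, .size = 32 };
        struct toy_group_desc bad = ok;

        bad.free_bits_count = 300;              /* claims more free than exist */
        printf("ok descriptor:  %d\n", check_group_descriptor(&di, &ok));
        printf("bad descriptor: %d\n", check_group_descriptor(&di, &bad));
        return 0;
    }
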
@@ -663,6 +710,7 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh, | |||
663 | static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb, | 710 | static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb, |
664 | struct buffer_head *bg_bh, | 711 | struct buffer_head *bg_bh, |
665 | unsigned int bits_wanted, | 712 | unsigned int bits_wanted, |
713 | unsigned int total_bits, | ||
666 | u16 *bit_off, | 714 | u16 *bit_off, |
667 | u16 *bits_found) | 715 | u16 *bits_found) |
668 | { | 716 | { |
@@ -679,10 +727,8 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb, | |||
679 | found = start = best_offset = best_size = 0; | 727 | found = start = best_offset = best_size = 0; |
680 | bitmap = bg->bg_bitmap; | 728 | bitmap = bg->bg_bitmap; |
681 | 729 | ||
682 | while((offset = ocfs2_find_next_zero_bit(bitmap, | 730 | while((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) != -1) { |
683 | le16_to_cpu(bg->bg_bits), | 731 | if (offset == total_bits) |
684 | start)) != -1) { | ||
685 | if (offset == le16_to_cpu(bg->bg_bits)) | ||
686 | break; | 732 | break; |
687 | 733 | ||
688 | if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) { | 734 | if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) { |
@@ -911,14 +957,35 @@ static int ocfs2_cluster_group_search(struct inode *inode, | |||
911 | { | 957 | { |
912 | int search = -ENOSPC; | 958 | int search = -ENOSPC; |
913 | int ret; | 959 | int ret; |
914 | struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) group_bh->b_data; | 960 | struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *) group_bh->b_data; |
915 | u16 tmp_off, tmp_found; | 961 | u16 tmp_off, tmp_found; |
962 | unsigned int max_bits, gd_cluster_off; | ||
916 | 963 | ||
917 | BUG_ON(!ocfs2_is_cluster_bitmap(inode)); | 964 | BUG_ON(!ocfs2_is_cluster_bitmap(inode)); |
918 | 965 | ||
919 | if (bg->bg_free_bits_count) { | 966 | if (gd->bg_free_bits_count) { |
967 | max_bits = le16_to_cpu(gd->bg_bits); | ||
968 | |||
969 | /* Tail groups in cluster bitmaps which aren't cpg | ||
970 | * aligned are prone to partial extension by a failed | ||
971 | * fs resize. If the file system resize never got to | ||
972 | * update the dinode cluster count, then we don't want | ||
973 | * to trust any clusters past it, regardless of what | ||
974 | * the group descriptor says. */ | ||
975 | gd_cluster_off = ocfs2_blocks_to_clusters(inode->i_sb, | ||
976 | le64_to_cpu(gd->bg_blkno)); | ||
977 | if ((gd_cluster_off + max_bits) > | ||
978 | OCFS2_I(inode)->ip_clusters) { | ||
979 | max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off; | ||
980 | mlog(0, "Desc %llu, bg_bits %u, clusters %u, use %u\n", | ||
981 | (unsigned long long)le64_to_cpu(gd->bg_blkno), | ||
982 | le16_to_cpu(gd->bg_bits), | ||
983 | OCFS2_I(inode)->ip_clusters, max_bits); | ||
984 | } | ||
985 | |||
920 | ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), | 986 | ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), |
921 | group_bh, bits_wanted, | 987 | group_bh, bits_wanted, |
988 | max_bits, | ||
922 | &tmp_off, &tmp_found); | 989 | &tmp_off, &tmp_found); |
923 | if (ret) | 990 | if (ret) |
924 | return ret; | 991 | return ret; |
@@ -951,17 +1018,109 @@ static int ocfs2_block_group_search(struct inode *inode, | |||
951 | if (bg->bg_free_bits_count) | 1018 | if (bg->bg_free_bits_count) |
952 | ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), | 1019 | ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), |
953 | group_bh, bits_wanted, | 1020 | group_bh, bits_wanted, |
1021 | le16_to_cpu(bg->bg_bits), | ||
954 | bit_off, bits_found); | 1022 | bit_off, bits_found); |
955 | 1023 | ||
956 | return ret; | 1024 | return ret; |
957 | } | 1025 | } |
958 | 1026 | ||
1027 | static int ocfs2_alloc_dinode_update_counts(struct inode *inode, | ||
1028 | struct ocfs2_journal_handle *handle, | ||
1029 | struct buffer_head *di_bh, | ||
1030 | u32 num_bits, | ||
1031 | u16 chain) | ||
1032 | { | ||
1033 | int ret; | ||
1034 | u32 tmp_used; | ||
1035 | struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data; | ||
1036 | struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain; | ||
1037 | |||
1038 | ret = ocfs2_journal_access(handle, inode, di_bh, | ||
1039 | OCFS2_JOURNAL_ACCESS_WRITE); | ||
1040 | if (ret < 0) { | ||
1041 | mlog_errno(ret); | ||
1042 | goto out; | ||
1043 | } | ||
1044 | |||
1045 | tmp_used = le32_to_cpu(di->id1.bitmap1.i_used); | ||
1046 | di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used); | ||
1047 | le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits); | ||
1048 | |||
1049 | ret = ocfs2_journal_dirty(handle, di_bh); | ||
1050 | if (ret < 0) | ||
1051 | mlog_errno(ret); | ||
1052 | |||
1053 | out: | ||
1054 | return ret; | ||
1055 | } | ||
1056 | |||
1057 | static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac, | ||
1058 | u32 bits_wanted, | ||
1059 | u32 min_bits, | ||
1060 | u16 *bit_off, | ||
1061 | unsigned int *num_bits, | ||
1062 | u64 gd_blkno, | ||
1063 | u16 *bits_left) | ||
1064 | { | ||
1065 | int ret; | ||
1066 | u16 found; | ||
1067 | struct buffer_head *group_bh = NULL; | ||
1068 | struct ocfs2_group_desc *gd; | ||
1069 | struct inode *alloc_inode = ac->ac_inode; | ||
1070 | struct ocfs2_journal_handle *handle = ac->ac_handle; | ||
1071 | |||
1072 | ret = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb), gd_blkno, | ||
1073 | &group_bh, OCFS2_BH_CACHED, alloc_inode); | ||
1074 | if (ret < 0) { | ||
1075 | mlog_errno(ret); | ||
1076 | return ret; | ||
1077 | } | ||
1078 | |||
1079 | gd = (struct ocfs2_group_desc *) group_bh->b_data; | ||
1080 | if (!OCFS2_IS_VALID_GROUP_DESC(gd)) { | ||
1081 | OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, gd); | ||
1082 | ret = -EIO; | ||
1083 | goto out; | ||
1084 | } | ||
1085 | |||
1086 | ret = ac->ac_group_search(alloc_inode, group_bh, bits_wanted, min_bits, | ||
1087 | bit_off, &found); | ||
1088 | if (ret < 0) { | ||
1089 | if (ret != -ENOSPC) | ||
1090 | mlog_errno(ret); | ||
1091 | goto out; | ||
1092 | } | ||
1093 | |||
1094 | *num_bits = found; | ||
1095 | |||
1096 | ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh, | ||
1097 | *num_bits, | ||
1098 | le16_to_cpu(gd->bg_chain)); | ||
1099 | if (ret < 0) { | ||
1100 | mlog_errno(ret); | ||
1101 | goto out; | ||
1102 | } | ||
1103 | |||
1104 | ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh, | ||
1105 | *bit_off, *num_bits); | ||
1106 | if (ret < 0) | ||
1107 | mlog_errno(ret); | ||
1108 | |||
1109 | *bits_left = le16_to_cpu(gd->bg_free_bits_count); | ||
1110 | |||
1111 | out: | ||
1112 | brelse(group_bh); | ||
1113 | |||
1114 | return ret; | ||
1115 | } | ||
1116 | |||
959 | static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, | 1117 | static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, |
960 | u32 bits_wanted, | 1118 | u32 bits_wanted, |
961 | u32 min_bits, | 1119 | u32 min_bits, |
962 | u16 *bit_off, | 1120 | u16 *bit_off, |
963 | unsigned int *num_bits, | 1121 | unsigned int *num_bits, |
964 | u64 *bg_blkno) | 1122 | u64 *bg_blkno, |
1123 | u16 *bits_left) | ||
965 | { | 1124 | { |
966 | int status; | 1125 | int status; |
967 | u16 chain, tmp_bits; | 1126 | u16 chain, tmp_bits; |
@@ -988,9 +1147,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, | |||
988 | goto bail; | 1147 | goto bail; |
989 | } | 1148 | } |
990 | bg = (struct ocfs2_group_desc *) group_bh->b_data; | 1149 | bg = (struct ocfs2_group_desc *) group_bh->b_data; |
991 | if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { | 1150 | status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg); |
992 | OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg); | 1151 | if (status) { |
993 | status = -EIO; | 1152 | mlog_errno(status); |
994 | goto bail; | 1153 | goto bail; |
995 | } | 1154 | } |
996 | 1155 | ||
@@ -1018,9 +1177,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, | |||
1018 | goto bail; | 1177 | goto bail; |
1019 | } | 1178 | } |
1020 | bg = (struct ocfs2_group_desc *) group_bh->b_data; | 1179 | bg = (struct ocfs2_group_desc *) group_bh->b_data; |
1021 | if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { | 1180 | status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg); |
1022 | OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg); | 1181 | if (status) { |
1023 | status = -EIO; | 1182 | mlog_errno(status); |
1024 | goto bail; | 1183 | goto bail; |
1025 | } | 1184 | } |
1026 | } | 1185 | } |
@@ -1099,6 +1258,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, | |||
1099 | (unsigned long long)fe->i_blkno); | 1258 | (unsigned long long)fe->i_blkno); |
1100 | 1259 | ||
1101 | *bg_blkno = le64_to_cpu(bg->bg_blkno); | 1260 | *bg_blkno = le64_to_cpu(bg->bg_blkno); |
1261 | *bits_left = le16_to_cpu(bg->bg_free_bits_count); | ||
1102 | bail: | 1262 | bail: |
1103 | if (group_bh) | 1263 | if (group_bh) |
1104 | brelse(group_bh); | 1264 | brelse(group_bh); |
@@ -1120,6 +1280,8 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, | |||
1120 | { | 1280 | { |
1121 | int status; | 1281 | int status; |
1122 | u16 victim, i; | 1282 | u16 victim, i; |
1283 | u16 bits_left = 0; | ||
1284 | u64 hint_blkno = ac->ac_last_group; | ||
1123 | struct ocfs2_chain_list *cl; | 1285 | struct ocfs2_chain_list *cl; |
1124 | struct ocfs2_dinode *fe; | 1286 | struct ocfs2_dinode *fe; |
1125 | 1287 | ||
@@ -1146,6 +1308,28 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, | |||
1146 | goto bail; | 1308 | goto bail; |
1147 | } | 1309 | } |
1148 | 1310 | ||
1311 | if (hint_blkno) { | ||
1312 | /* Attempt to short-circuit the usual search mechanism | ||
1313 | * by jumping straight to the most recently used | ||
1314 | * allocation group. This helps us maintain some | ||
1315 | * contiguousness across allocations. */ | ||
1316 | status = ocfs2_search_one_group(ac, bits_wanted, min_bits, | ||
1317 | bit_off, num_bits, | ||
1318 | hint_blkno, &bits_left); | ||
1319 | if (!status) { | ||
1320 | /* Be careful to update *bg_blkno here as the | ||
1321 | * caller is expecting it to be filled in, and | ||
1322 | * ocfs2_search_one_group() won't do that for | ||
1323 | * us. */ | ||
1324 | *bg_blkno = hint_blkno; | ||
1325 | goto set_hint; | ||
1326 | } | ||
1327 | if (status < 0 && status != -ENOSPC) { | ||
1328 | mlog_errno(status); | ||
1329 | goto bail; | ||
1330 | } | ||
1331 | } | ||
1332 | |||
1149 | cl = (struct ocfs2_chain_list *) &fe->id2.i_chain; | 1333 | cl = (struct ocfs2_chain_list *) &fe->id2.i_chain; |
1150 | 1334 | ||
1151 | victim = ocfs2_find_victim_chain(cl); | 1335 | victim = ocfs2_find_victim_chain(cl); |
@@ -1153,9 +1337,9 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, | |||
1153 | ac->ac_allow_chain_relink = 1; | 1337 | ac->ac_allow_chain_relink = 1; |
1154 | 1338 | ||
1155 | status = ocfs2_search_chain(ac, bits_wanted, min_bits, bit_off, | 1339 | status = ocfs2_search_chain(ac, bits_wanted, min_bits, bit_off, |
1156 | num_bits, bg_blkno); | 1340 | num_bits, bg_blkno, &bits_left); |
1157 | if (!status) | 1341 | if (!status) |
1158 | goto bail; | 1342 | goto set_hint; |
1159 | if (status < 0 && status != -ENOSPC) { | 1343 | if (status < 0 && status != -ENOSPC) { |
1160 | mlog_errno(status); | 1344 | mlog_errno(status); |
1161 | goto bail; | 1345 | goto bail; |
@@ -1177,8 +1361,8 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, | |||
1177 | 1361 | ||
1178 | ac->ac_chain = i; | 1362 | ac->ac_chain = i; |
1179 | status = ocfs2_search_chain(ac, bits_wanted, min_bits, | 1363 | status = ocfs2_search_chain(ac, bits_wanted, min_bits, |
1180 | bit_off, num_bits, | 1364 | bit_off, num_bits, bg_blkno, |
1181 | bg_blkno); | 1365 | &bits_left); |
1182 | if (!status) | 1366 | if (!status) |
1183 | break; | 1367 | break; |
1184 | if (status < 0 && status != -ENOSPC) { | 1368 | if (status < 0 && status != -ENOSPC) { |
@@ -1186,8 +1370,19 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, | |||
1186 | goto bail; | 1370 | goto bail; |
1187 | } | 1371 | } |
1188 | } | 1372 | } |
1189 | bail: | ||
1190 | 1373 | ||
1374 | set_hint: | ||
1375 | if (status != -ENOSPC) { | ||
1376 | /* If the next search of this group is not likely to | ||
1377 | * yield a suitable extent, then we reset the last | ||
1378 | * group hint so as to not waste a disk read */ | ||
1379 | if (bits_left < min_bits) | ||
1380 | ac->ac_last_group = 0; | ||
1381 | else | ||
1382 | ac->ac_last_group = *bg_blkno; | ||
1383 | } | ||
1384 | |||
1385 | bail: | ||
1191 | mlog_exit(status); | 1386 | mlog_exit(status); |
1192 | return status; | 1387 | return status; |
1193 | } | 1388 | } |
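
The ocfs2 hunks above cache the most recently used allocation group in ac->ac_last_group, try that group before walking the chains, and clear the hint once the group is unlikely to satisfy the next minimum request. The following is only a minimal userspace sketch of that hint pattern; the group array, free counters and min_bits threshold are invented for illustration and are not ocfs2 structures.

#include <stdio.h>

#define NGROUPS 4

struct group { unsigned free; };          /* toy allocation group */

struct alloc_ctx {
        struct group *groups[NGROUPS];
        int last_group;                   /* hint: last group used, -1 if none */
};

/* Take 'want' units, preferring the hinted group so consecutive
 * allocations stay close together. */
static int claim_bits(struct alloc_ctx *ac, unsigned want, unsigned min_bits)
{
        int i = (ac->last_group >= 0) ? ac->last_group : 0;
        int tried_hint = (ac->last_group >= 0);
        int g = -1;

        if (tried_hint && ac->groups[i]->free >= want)
                g = i;                          /* short-circuit: hint worked */
        else
                for (i = 0; i < NGROUPS; i++)   /* fall back to a full search */
                        if (ac->groups[i]->free >= want) { g = i; break; }

        if (g < 0)
                return -1;                      /* -ENOSPC analogue */

        ac->groups[g]->free -= want;
        /* Reset the hint if the next search of this group is unlikely to
         * succeed, so the next caller does not waste a read on it. */
        ac->last_group = (ac->groups[g]->free < min_bits) ? -1 : g;
        return g;
}

int main(void)
{
        struct group a = { 8 }, b = { 64 };
        struct alloc_ctx ac = { { &a, &b, &a, &b }, -1 };

        printf("got group %d\n", claim_bits(&ac, 16, 4));  /* full search picks b */
        printf("got group %d\n", claim_bits(&ac, 16, 4));  /* hint hits b again */
        return 0;
}
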
@@ -1341,7 +1536,7 @@ int ocfs2_claim_clusters(struct ocfs2_super *osb, | |||
1341 | { | 1536 | { |
1342 | int status; | 1537 | int status; |
1343 | unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given; | 1538 | unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given; |
1344 | u64 bg_blkno; | 1539 | u64 bg_blkno = 0; |
1345 | u16 bg_bit_off; | 1540 | u16 bg_bit_off; |
1346 | 1541 | ||
1347 | mlog_entry_void(); | 1542 | mlog_entry_void(); |
@@ -1494,9 +1689,9 @@ static int ocfs2_free_suballoc_bits(struct ocfs2_journal_handle *handle, | |||
1494 | } | 1689 | } |
1495 | 1690 | ||
1496 | group = (struct ocfs2_group_desc *) group_bh->b_data; | 1691 | group = (struct ocfs2_group_desc *) group_bh->b_data; |
1497 | if (!OCFS2_IS_VALID_GROUP_DESC(group)) { | 1692 | status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, group); |
1498 | OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, group); | 1693 | if (status) { |
1499 | status = -EIO; | 1694 | mlog_errno(status); |
1500 | goto bail; | 1695 | goto bail; |
1501 | } | 1696 | } |
1502 | BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits)); | 1697 | BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits)); |
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index a76c82a7ceac..c787838d1052 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h | |||
@@ -49,6 +49,8 @@ struct ocfs2_alloc_context { | |||
49 | u16 ac_chain; | 49 | u16 ac_chain; |
50 | int ac_allow_chain_relink; | 50 | int ac_allow_chain_relink; |
51 | group_search_t *ac_group_search; | 51 | group_search_t *ac_group_search; |
52 | |||
53 | u64 ac_last_group; | ||
52 | }; | 54 | }; |
53 | 55 | ||
54 | void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac); | 56 | void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac); |
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 382706a67ffd..d17e33e66a1e 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -1442,8 +1442,13 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
1442 | 1442 | ||
1443 | osb->bitmap_blkno = OCFS2_I(inode)->ip_blkno; | 1443 | osb->bitmap_blkno = OCFS2_I(inode)->ip_blkno; |
1444 | 1444 | ||
1445 | /* We don't have a cluster lock on the bitmap here because | ||
1446 | * we're only interested in static information and the extra | ||
1447 | * complexity at mount time isn't worth it. Don't pass the | ||
1448 | * inode in to the read function though as we don't want it to | ||
1449 | * be put in the cache. */ | ||
1445 | status = ocfs2_read_block(osb, osb->bitmap_blkno, &bitmap_bh, 0, | 1450 | status = ocfs2_read_block(osb, osb->bitmap_blkno, &bitmap_bh, 0, |
1446 | inode); | 1451 | NULL); |
1447 | iput(inode); | 1452 | iput(inode); |
1448 | if (status < 0) { | 1453 | if (status < 0) { |
1449 | mlog_errno(status); | 1454 | mlog_errno(status); |
@@ -1452,7 +1457,6 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
1452 | 1457 | ||
1453 | di = (struct ocfs2_dinode *) bitmap_bh->b_data; | 1458 | di = (struct ocfs2_dinode *) bitmap_bh->b_data; |
1454 | osb->bitmap_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg); | 1459 | osb->bitmap_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg); |
1455 | osb->num_clusters = le32_to_cpu(di->id1.bitmap1.i_total); | ||
1456 | brelse(bitmap_bh); | 1460 | brelse(bitmap_bh); |
1457 | mlog(0, "cluster bitmap inode: %llu, clusters per group: %u\n", | 1461 | mlog(0, "cluster bitmap inode: %llu, clusters per group: %u\n", |
1458 | (unsigned long long)osb->bitmap_blkno, osb->bitmap_cpg); | 1462 | (unsigned long long)osb->bitmap_blkno, osb->bitmap_cpg); |
diff --git a/fs/partitions/Kconfig b/fs/partitions/Kconfig index c9a478099281..e478f1941831 100644 --- a/fs/partitions/Kconfig +++ b/fs/partitions/Kconfig | |||
@@ -99,7 +99,7 @@ config IBM_PARTITION | |||
99 | 99 | ||
100 | config MAC_PARTITION | 100 | config MAC_PARTITION |
101 | bool "Macintosh partition map support" if PARTITION_ADVANCED | 101 | bool "Macintosh partition map support" if PARTITION_ADVANCED |
102 | default y if MAC | 102 | default y if (MAC || PPC_PMAC) |
103 | help | 103 | help |
104 | Say Y here if you would like to use hard disks under Linux which | 104 | Say Y here if you would like to use hard disks under Linux which |
105 | were partitioned on a Macintosh. | 105 | were partitioned on a Macintosh. |
diff --git a/fs/partitions/sun.c b/fs/partitions/sun.c index abe91ca03edf..0a5927c806ca 100644 --- a/fs/partitions/sun.c +++ b/fs/partitions/sun.c | |||
@@ -74,7 +74,7 @@ int sun_partition(struct parsed_partitions *state, struct block_device *bdev) | |||
74 | spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect); | 74 | spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect); |
75 | for (i = 0; i < 8; i++, p++) { | 75 | for (i = 0; i < 8; i++, p++) { |
76 | unsigned long st_sector; | 76 | unsigned long st_sector; |
77 | int num_sectors; | 77 | unsigned int num_sectors; |
78 | 78 | ||
79 | st_sector = be32_to_cpu(p->start_cylinder) * spc; | 79 | st_sector = be32_to_cpu(p->start_cylinder) * spc; |
80 | num_sectors = be32_to_cpu(p->num_sectors); | 80 | num_sectors = be32_to_cpu(p->num_sectors); |
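
The sun.c change widens num_sectors from int to unsigned int so partition sizes above 2^31 sectors are not misread as negative values. A small standalone illustration of the difference, using an arbitrary example value rather than a real Sun disk label:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* A 32-bit on-disk sector count larger than INT_MAX, e.g. a
         * partition of roughly 1.2 TB with 512-byte sectors (made-up value). */
        uint32_t on_disk = 2500000000u;

        int as_signed = (int)on_disk;            /* implementation-defined, typically negative */
        unsigned int as_unsigned = on_disk;      /* keeps the full value */

        printf("signed:   %d\n", as_signed);
        printf("unsigned: %u sectors (%llu bytes)\n",
               as_unsigned, (unsigned long long)as_unsigned * 512);
        return 0;
}
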
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index 9f2cfc30f9cf..942156225447 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c | |||
@@ -169,7 +169,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off, | |||
169 | "Mapped: %8lu kB\n" | 169 | "Mapped: %8lu kB\n" |
170 | "Slab: %8lu kB\n" | 170 | "Slab: %8lu kB\n" |
171 | "PageTables: %8lu kB\n" | 171 | "PageTables: %8lu kB\n" |
172 | "NFS Unstable: %8lu kB\n" | 172 | "NFS_Unstable: %8lu kB\n" |
173 | "Bounce: %8lu kB\n" | 173 | "Bounce: %8lu kB\n" |
174 | "CommitLimit: %8lu kB\n" | 174 | "CommitLimit: %8lu kB\n" |
175 | "Committed_AS: %8lu kB\n" | 175 | "Committed_AS: %8lu kB\n" |
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index f318b58510fd..1627edd50810 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c | |||
@@ -48,8 +48,8 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp) | |||
48 | return 0; | 48 | return 0; |
49 | } | 49 | } |
50 | 50 | ||
51 | reiserfs_write_lock(inode->i_sb); | ||
52 | mutex_lock(&inode->i_mutex); | 51 | mutex_lock(&inode->i_mutex); |
52 | reiserfs_write_lock(inode->i_sb); | ||
53 | /* freeing preallocation only involves relogging blocks that | 53 | /* freeing preallocation only involves relogging blocks that |
54 | * are already in the current transaction. preallocation gets | 54 | * are already in the current transaction. preallocation gets |
55 | * freed at the end of each transaction, so it is impossible for | 55 | * freed at the end of each transaction, so it is impossible for |
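
Both this reiserfs hunk and the ioctl.c hunk further down move mutex_lock(&inode->i_mutex) ahead of reiserfs_write_lock() so every path takes the two locks in the same order. A generic userspace sketch of why consistent ordering matters, with pthread mutexes standing in for the kernel locks (this is not reiserfs code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t inode_mutex = PTHREAD_MUTEX_INITIALIZER; /* stands in for i_mutex */
static pthread_mutex_t fs_lock     = PTHREAD_MUTEX_INITIALIZER; /* stands in for the fs write lock */

/* Every code path acquires inode_mutex first, then fs_lock.  If one path
 * took them in the opposite order, two threads could each hold one lock
 * and wait forever for the other (ABBA deadlock). */
static void do_release_work(void)
{
        pthread_mutex_lock(&inode_mutex);
        pthread_mutex_lock(&fs_lock);

        /* ... work that needs both locks ... */

        pthread_mutex_unlock(&fs_lock);
        pthread_mutex_unlock(&inode_mutex);
}

int main(void)
{
        do_release_work();
        puts("done");
        return 0;
}
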
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 12dfdcfbee3d..52f1e2136546 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c | |||
@@ -39,14 +39,10 @@ void reiserfs_delete_inode(struct inode *inode) | |||
39 | 39 | ||
40 | /* The = 0 happens when we abort creating a new inode for some reason like lack of space.. */ | 40 | /* The = 0 happens when we abort creating a new inode for some reason like lack of space.. */ |
41 | if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) { /* also handles bad_inode case */ | 41 | if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) { /* also handles bad_inode case */ |
42 | mutex_lock(&inode->i_mutex); | ||
43 | |||
44 | reiserfs_delete_xattrs(inode); | 42 | reiserfs_delete_xattrs(inode); |
45 | 43 | ||
46 | if (journal_begin(&th, inode->i_sb, jbegin_count)) { | 44 | if (journal_begin(&th, inode->i_sb, jbegin_count)) |
47 | mutex_unlock(&inode->i_mutex); | ||
48 | goto out; | 45 | goto out; |
49 | } | ||
50 | reiserfs_update_inode_transaction(inode); | 46 | reiserfs_update_inode_transaction(inode); |
51 | 47 | ||
52 | err = reiserfs_delete_object(&th, inode); | 48 | err = reiserfs_delete_object(&th, inode); |
@@ -57,12 +53,8 @@ void reiserfs_delete_inode(struct inode *inode) | |||
57 | if (!err) | 53 | if (!err) |
58 | DQUOT_FREE_INODE(inode); | 54 | DQUOT_FREE_INODE(inode); |
59 | 55 | ||
60 | if (journal_end(&th, inode->i_sb, jbegin_count)) { | 56 | if (journal_end(&th, inode->i_sb, jbegin_count)) |
61 | mutex_unlock(&inode->i_mutex); | ||
62 | goto out; | 57 | goto out; |
63 | } | ||
64 | |||
65 | mutex_unlock(&inode->i_mutex); | ||
66 | 58 | ||
67 | /* check return value from reiserfs_delete_object after | 59 | /* check return value from reiserfs_delete_object after |
68 | * ending the transaction | 60 | * ending the transaction |
@@ -2348,6 +2340,7 @@ static int reiserfs_write_full_page(struct page *page, | |||
2348 | unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT; | 2340 | unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT; |
2349 | int error = 0; | 2341 | int error = 0; |
2350 | unsigned long block; | 2342 | unsigned long block; |
2343 | sector_t last_block; | ||
2351 | struct buffer_head *head, *bh; | 2344 | struct buffer_head *head, *bh; |
2352 | int partial = 0; | 2345 | int partial = 0; |
2353 | int nr = 0; | 2346 | int nr = 0; |
@@ -2395,10 +2388,19 @@ static int reiserfs_write_full_page(struct page *page, | |||
2395 | } | 2388 | } |
2396 | bh = head; | 2389 | bh = head; |
2397 | block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits); | 2390 | block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits); |
2391 | last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; | ||
2398 | /* first map all the buffers, logging any direct items we find */ | 2392 | /* first map all the buffers, logging any direct items we find */ |
2399 | do { | 2393 | do { |
2400 | if ((checked || buffer_dirty(bh)) && (!buffer_mapped(bh) || | 2394 | if (block > last_block) { |
2401 | (buffer_mapped(bh) | 2395 | /* |
2396 | * This can happen when the block size is less than | ||
2397 | * the page size. The corresponding bytes in the page | ||
2398 | * were zero filled above | ||
2399 | */ | ||
2400 | clear_buffer_dirty(bh); | ||
2401 | set_buffer_uptodate(bh); | ||
2402 | } else if ((checked || buffer_dirty(bh)) && | ||
2403 | (!buffer_mapped(bh) || (buffer_mapped(bh) | ||
2402 | && bh->b_blocknr == | 2404 | && bh->b_blocknr == |
2403 | 0))) { | 2405 | 0))) { |
2404 | /* not mapped yet, or it points to a direct item, search | 2406 | /* not mapped yet, or it points to a direct item, search |
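
The write_full_page hunk computes last_block from i_size and skips buffers past it, which only matters when the block size is smaller than the page size. A standalone sketch of that arithmetic with invented sizes (the values here are examples, not anything read from reiserfs):

#include <stdio.h>

int main(void)
{
        unsigned long long i_size = 5000;   /* made-up file size in bytes */
        unsigned blkbits = 10;              /* 1 KiB blocks */
        unsigned page_shift = 12;           /* 4 KiB pages */
        unsigned long page_index = 1;       /* second page of the file */

        /* Index of the last block that actually holds file data. */
        unsigned long long last_block = (i_size - 1) >> blkbits;       /* = 4 */

        /* First block covered by this page. */
        unsigned long long block = (unsigned long long)page_index
                                   << (page_shift - blkbits);          /* = 4 */

        unsigned blocks_per_page = 1u << (page_shift - blkbits);
        for (unsigned i = 0; i < blocks_per_page; i++, block++) {
                if (block > last_block)
                        printf("block %llu: past EOF, just mark it clean/uptodate\n", block);
                else
                        printf("block %llu: map and write it\n", block);
        }
        return 0;
}
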
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c index 745c88100895..a986b5e1e288 100644 --- a/fs/reiserfs/ioctl.c +++ b/fs/reiserfs/ioctl.c | |||
@@ -116,12 +116,12 @@ static int reiserfs_unpack(struct inode *inode, struct file *filp) | |||
116 | if (REISERFS_I(inode)->i_flags & i_nopack_mask) { | 116 | if (REISERFS_I(inode)->i_flags & i_nopack_mask) { |
117 | return 0; | 117 | return 0; |
118 | } | 118 | } |
119 | reiserfs_write_lock(inode->i_sb); | ||
120 | 119 | ||
121 | /* we need to make sure nobody is changing the file size beneath | 120 | /* we need to make sure nobody is changing the file size beneath |
122 | ** us | 121 | ** us |
123 | */ | 122 | */ |
124 | mutex_lock(&inode->i_mutex); | 123 | mutex_lock(&inode->i_mutex); |
124 | reiserfs_write_lock(inode->i_sb); | ||
125 | 125 | ||
126 | write_from = inode->i_size & (blocksize - 1); | 126 | write_from = inode->i_size & (blocksize - 1); |
127 | /* if we are on a block boundary, we are already unpacked. */ | 127 | /* if we are on a block boundary, we are already unpacked. */ |
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 39fedaa88a0c..d935fb9394e3 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c | |||
@@ -424,7 +424,7 @@ int xattr_readdir(struct file *file, filldir_t filler, void *buf) | |||
424 | int res = -ENOTDIR; | 424 | int res = -ENOTDIR; |
425 | if (!file->f_op || !file->f_op->readdir) | 425 | if (!file->f_op || !file->f_op->readdir) |
426 | goto out; | 426 | goto out; |
427 | mutex_lock(&inode->i_mutex); | 427 | mutex_lock_nested(&inode->i_mutex, I_MUTEX_XATTR); |
428 | // down(&inode->i_zombie); | 428 | // down(&inode->i_zombie); |
429 | res = -ENOENT; | 429 | res = -ENOENT; |
430 | if (!IS_DEADDIR(inode)) { | 430 | if (!IS_DEADDIR(inode)) { |
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c index 3873c672cb4c..33323473e3c4 100644 --- a/fs/udf/ialloc.c +++ b/fs/udf/ialloc.c | |||
@@ -75,6 +75,12 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err) | |||
75 | } | 75 | } |
76 | *err = -ENOSPC; | 76 | *err = -ENOSPC; |
77 | 77 | ||
78 | UDF_I_UNIQUE(inode) = 0; | ||
79 | UDF_I_LENEXTENTS(inode) = 0; | ||
80 | UDF_I_NEXT_ALLOC_BLOCK(inode) = 0; | ||
81 | UDF_I_NEXT_ALLOC_GOAL(inode) = 0; | ||
82 | UDF_I_STRAT4096(inode) = 0; | ||
83 | |||
78 | block = udf_new_block(dir->i_sb, NULL, UDF_I_LOCATION(dir).partitionReferenceNum, | 84 | block = udf_new_block(dir->i_sb, NULL, UDF_I_LOCATION(dir).partitionReferenceNum, |
79 | start, err); | 85 | start, err); |
80 | if (*err) | 86 | if (*err) |
@@ -84,11 +90,6 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err) | |||
84 | } | 90 | } |
85 | 91 | ||
86 | mutex_lock(&sbi->s_alloc_mutex); | 92 | mutex_lock(&sbi->s_alloc_mutex); |
87 | UDF_I_UNIQUE(inode) = 0; | ||
88 | UDF_I_LENEXTENTS(inode) = 0; | ||
89 | UDF_I_NEXT_ALLOC_BLOCK(inode) = 0; | ||
90 | UDF_I_NEXT_ALLOC_GOAL(inode) = 0; | ||
91 | UDF_I_STRAT4096(inode) = 0; | ||
92 | if (UDF_SB_LVIDBH(sb)) | 93 | if (UDF_SB_LVIDBH(sb)) |
93 | { | 94 | { |
94 | struct logicalVolHeaderDesc *lvhd; | 95 | struct logicalVolHeaderDesc *lvhd; |
diff --git a/fs/udf/super.c b/fs/udf/super.c index 4df822c881b6..fcce1a21a51b 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c | |||
@@ -115,6 +115,13 @@ static struct inode *udf_alloc_inode(struct super_block *sb) | |||
115 | ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, SLAB_KERNEL); | 115 | ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, SLAB_KERNEL); |
116 | if (!ei) | 116 | if (!ei) |
117 | return NULL; | 117 | return NULL; |
118 | |||
119 | ei->i_unique = 0; | ||
120 | ei->i_lenExtents = 0; | ||
121 | ei->i_next_alloc_block = 0; | ||
122 | ei->i_next_alloc_goal = 0; | ||
123 | ei->i_strat4096 = 0; | ||
124 | |||
118 | return &ei->vfs_inode; | 125 | return &ei->vfs_inode; |
119 | } | 126 | } |
120 | 127 | ||
@@ -1652,7 +1659,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) | |||
1652 | iput(inode); | 1659 | iput(inode); |
1653 | goto error_out; | 1660 | goto error_out; |
1654 | } | 1661 | } |
1655 | sb->s_maxbytes = MAX_LFS_FILESIZE; | 1662 | sb->s_maxbytes = 1<<30; |
1656 | return 0; | 1663 | return 0; |
1657 | 1664 | ||
1658 | error_out: | 1665 | error_out: |
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c index e1b0e8cfecb4..0abd66ce36ea 100644 --- a/fs/udf/truncate.c +++ b/fs/udf/truncate.c | |||
@@ -239,37 +239,51 @@ void udf_truncate_extents(struct inode * inode) | |||
239 | { | 239 | { |
240 | if (offset) | 240 | if (offset) |
241 | { | 241 | { |
242 | extoffset -= adsize; | 242 | /* |
243 | etype = udf_next_aext(inode, &bloc, &extoffset, &eloc, &elen, &bh, 1); | 243 | * OK, there is no extent covering inode->i_size and |
244 | if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) | 244 | * no extent above inode->i_size => truncate is |
245 | { | 245 | * extending the file by 'offset'. |
246 | extoffset -= adsize; | 246 | */ |
247 | elen = EXT_NOT_RECORDED_NOT_ALLOCATED | (elen + offset); | 247 | if ((!bh && extoffset == udf_file_entry_alloc_offset(inode)) || |
248 | udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 0); | 248 | (bh && extoffset == sizeof(struct allocExtDesc))) { |
249 | /* File has no extents at all! */ | ||
250 | memset(&eloc, 0x00, sizeof(kernel_lb_addr)); | ||
251 | elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset; | ||
252 | udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1); | ||
249 | } | 253 | } |
250 | else if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) | 254 | else { |
251 | { | ||
252 | kernel_lb_addr neloc = { 0, 0 }; | ||
253 | extoffset -= adsize; | 255 | extoffset -= adsize; |
254 | nelen = EXT_NOT_RECORDED_NOT_ALLOCATED | | 256 | etype = udf_next_aext(inode, &bloc, &extoffset, &eloc, &elen, &bh, 1); |
255 | ((elen + offset + inode->i_sb->s_blocksize - 1) & | 257 | if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) |
256 | ~(inode->i_sb->s_blocksize - 1)); | 258 | { |
257 | udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1); | 259 | extoffset -= adsize; |
258 | udf_add_aext(inode, &bloc, &extoffset, eloc, (etype << 30) | elen, &bh, 1); | 260 | elen = EXT_NOT_RECORDED_NOT_ALLOCATED | (elen + offset); |
259 | } | 261 | udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 0); |
260 | else | 262 | } |
261 | { | 263 | else if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) |
262 | if (elen & (inode->i_sb->s_blocksize - 1)) | ||
263 | { | 264 | { |
265 | kernel_lb_addr neloc = { 0, 0 }; | ||
264 | extoffset -= adsize; | 266 | extoffset -= adsize; |
265 | elen = EXT_RECORDED_ALLOCATED | | 267 | nelen = EXT_NOT_RECORDED_NOT_ALLOCATED | |
266 | ((elen + inode->i_sb->s_blocksize - 1) & | 268 | ((elen + offset + inode->i_sb->s_blocksize - 1) & |
267 | ~(inode->i_sb->s_blocksize - 1)); | 269 | ~(inode->i_sb->s_blocksize - 1)); |
268 | udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 1); | 270 | udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1); |
271 | udf_add_aext(inode, &bloc, &extoffset, eloc, (etype << 30) | elen, &bh, 1); | ||
272 | } | ||
273 | else | ||
274 | { | ||
275 | if (elen & (inode->i_sb->s_blocksize - 1)) | ||
276 | { | ||
277 | extoffset -= adsize; | ||
278 | elen = EXT_RECORDED_ALLOCATED | | ||
279 | ((elen + inode->i_sb->s_blocksize - 1) & | ||
280 | ~(inode->i_sb->s_blocksize - 1)); | ||
281 | udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 1); | ||
282 | } | ||
283 | memset(&eloc, 0x00, sizeof(kernel_lb_addr)); | ||
284 | elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset; | ||
285 | udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1); | ||
269 | } | 286 | } |
270 | memset(&eloc, 0x00, sizeof(kernel_lb_addr)); | ||
271 | elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset; | ||
272 | udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1); | ||
273 | } | 287 | } |
274 | } | 288 | } |
275 | } | 289 | } |
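
Several branches in the truncate hunk above round an extent length up to the filesystem block size with the usual power-of-two mask, (len + bs - 1) & ~(bs - 1). A quick standalone check of that expression with an example block size:

#include <stdio.h>

/* Round len up to the next multiple of bs; bs must be a power of two. */
static unsigned long round_up_pow2(unsigned long len, unsigned long bs)
{
        return (len + bs - 1) & ~(bs - 1);
}

int main(void)
{
        unsigned long bs = 2048;                     /* example block size */
        unsigned long lens[] = { 1, 2047, 2048, 2049, 5000 };

        for (unsigned i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
                printf("%5lu -> %5lu\n", lens[i], round_up_pow2(lens[i], bs));
        return 0;
}
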
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index b01804baa120..b82381475779 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c | |||
@@ -248,7 +248,7 @@ static void ufs_change_blocknr(struct inode *inode, unsigned int baseblk, | |||
248 | 248 | ||
249 | if (likely(cur_index != index)) { | 249 | if (likely(cur_index != index)) { |
250 | page = ufs_get_locked_page(mapping, index); | 250 | page = ufs_get_locked_page(mapping, index); |
251 | if (IS_ERR(page)) | 251 | if (!page || IS_ERR(page)) /* it was truncated or EIO */ |
252 | continue; | 252 | continue; |
253 | } else | 253 | } else |
254 | page = locked_page; | 254 | page = locked_page; |
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index e7c8615beb65..30c6e8a9446c 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c | |||
@@ -169,18 +169,20 @@ static void ufs_clear_frag(struct inode *inode, struct buffer_head *bh) | |||
169 | 169 | ||
170 | static struct buffer_head * | 170 | static struct buffer_head * |
171 | ufs_clear_frags(struct inode *inode, sector_t beg, | 171 | ufs_clear_frags(struct inode *inode, sector_t beg, |
172 | unsigned int n) | 172 | unsigned int n, sector_t want) |
173 | { | 173 | { |
174 | struct buffer_head *res, *bh; | 174 | struct buffer_head *res = NULL, *bh; |
175 | sector_t end = beg + n; | 175 | sector_t end = beg + n; |
176 | 176 | ||
177 | res = sb_getblk(inode->i_sb, beg); | 177 | for (; beg < end; ++beg) { |
178 | ufs_clear_frag(inode, res); | ||
179 | for (++beg; beg < end; ++beg) { | ||
180 | bh = sb_getblk(inode->i_sb, beg); | 178 | bh = sb_getblk(inode->i_sb, beg); |
181 | ufs_clear_frag(inode, bh); | 179 | ufs_clear_frag(inode, bh); |
182 | brelse(bh); | 180 | if (want != beg) |
181 | brelse(bh); | ||
182 | else | ||
183 | res = bh; | ||
183 | } | 184 | } |
185 | BUG_ON(!res); | ||
184 | return res; | 186 | return res; |
185 | } | 187 | } |
186 | 188 | ||
@@ -265,7 +267,9 @@ repeat: | |||
265 | lastfrag = ufsi->i_lastfrag; | 267 | lastfrag = ufsi->i_lastfrag; |
266 | 268 | ||
267 | } | 269 | } |
268 | goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb; | 270 | tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]); |
271 | if (tmp) | ||
272 | goal = tmp + uspi->s_fpb; | ||
269 | tmp = ufs_new_fragments (inode, p, fragment - blockoff, | 273 | tmp = ufs_new_fragments (inode, p, fragment - blockoff, |
270 | goal, required + blockoff, | 274 | goal, required + blockoff, |
271 | err, locked_page); | 275 | err, locked_page); |
@@ -277,13 +281,15 @@ repeat: | |||
277 | tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff), | 281 | tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff), |
278 | fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff), | 282 | fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff), |
279 | err, locked_page); | 283 | err, locked_page); |
280 | } | 284 | } else /* (lastblock > block) */ { |
281 | /* | 285 | /* |
282 | * We will allocate new block before last allocated block | 286 | * We will allocate new block before last allocated block |
283 | */ | 287 | */ |
284 | else /* (lastblock > block) */ { | 288 | if (block) { |
285 | if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1]))) | 289 | tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[block-1]); |
286 | goal = tmp + uspi->s_fpb; | 290 | if (tmp) |
291 | goal = tmp + uspi->s_fpb; | ||
292 | } | ||
287 | tmp = ufs_new_fragments(inode, p, fragment - blockoff, | 293 | tmp = ufs_new_fragments(inode, p, fragment - blockoff, |
288 | goal, uspi->s_fpb, err, locked_page); | 294 | goal, uspi->s_fpb, err, locked_page); |
289 | } | 295 | } |
@@ -296,7 +302,7 @@ repeat: | |||
296 | } | 302 | } |
297 | 303 | ||
298 | if (!phys) { | 304 | if (!phys) { |
299 | result = ufs_clear_frags(inode, tmp + blockoff, required); | 305 | result = ufs_clear_frags(inode, tmp, required, tmp + blockoff); |
300 | } else { | 306 | } else { |
301 | *phys = tmp + blockoff; | 307 | *phys = tmp + blockoff; |
302 | result = NULL; | 308 | result = NULL; |
@@ -383,7 +389,7 @@ repeat: | |||
383 | } | 389 | } |
384 | } | 390 | } |
385 | 391 | ||
386 | if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1]) + uspi->s_fpb)) | 392 | if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1]))) |
387 | goal = tmp + uspi->s_fpb; | 393 | goal = tmp + uspi->s_fpb; |
388 | else | 394 | else |
389 | goal = bh->b_blocknr + uspi->s_fpb; | 395 | goal = bh->b_blocknr + uspi->s_fpb; |
@@ -397,7 +403,8 @@ repeat: | |||
397 | 403 | ||
398 | 404 | ||
399 | if (!phys) { | 405 | if (!phys) { |
400 | result = ufs_clear_frags(inode, tmp + blockoff, uspi->s_fpb); | 406 | result = ufs_clear_frags(inode, tmp, uspi->s_fpb, |
407 | tmp + blockoff); | ||
401 | } else { | 408 | } else { |
402 | *phys = tmp + blockoff; | 409 | *phys = tmp + blockoff; |
403 | *new = 1; | 410 | *new = 1; |
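
ufs_clear_frags() now clears every fragment in the range but keeps a reference only to the one the caller asked for, releasing the rest as it goes. Below is a toy sketch of the same keep-one, drop-the-rest loop over reference-counted objects; the get/put/clear helpers and the pool are invented for the example and are not UFS code.

#include <stdio.h>
#include <assert.h>

struct buf { unsigned long nr; int refs; int cleared; };

static struct buf pool[16];

static struct buf *get_buf(unsigned long nr) { pool[nr].nr = nr; pool[nr].refs++; return &pool[nr]; }
static void put_buf(struct buf *b) { b->refs--; }
static void clear_buf(struct buf *b) { b->cleared = 1; }

/* Clear blocks [beg, beg+n); return the one at 'want' still referenced. */
static struct buf *clear_range(unsigned long beg, unsigned n, unsigned long want)
{
        struct buf *res = NULL, *b;
        unsigned long end = beg + n;

        for (; beg < end; ++beg) {
                b = get_buf(beg);
                clear_buf(b);
                if (want != beg)
                        put_buf(b);     /* not the one the caller wants: drop it */
                else
                        res = b;        /* hand this reference back to the caller */
        }
        assert(res != NULL);            /* 'want' must lie inside the range */
        return res;
}

int main(void)
{
        struct buf *b = clear_range(3, 4, 5);
        printf("kept block %lu, refs=%d, cleared=%d\n", b->nr, b->refs, b->cleared);
        put_buf(b);
        return 0;
}
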
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index abd5f23a426d..d344b411e261 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c | |||
@@ -129,7 +129,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry, | |||
129 | struct inode * inode; | 129 | struct inode * inode; |
130 | 130 | ||
131 | if (l > sb->s_blocksize) | 131 | if (l > sb->s_blocksize) |
132 | goto out; | 132 | goto out_notlocked; |
133 | 133 | ||
134 | lock_kernel(); | 134 | lock_kernel(); |
135 | inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO); | 135 | inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO); |
@@ -155,6 +155,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry, | |||
155 | err = ufs_add_nondir(dentry, inode); | 155 | err = ufs_add_nondir(dentry, inode); |
156 | out: | 156 | out: |
157 | unlock_kernel(); | 157 | unlock_kernel(); |
158 | out_notlocked: | ||
158 | return err; | 159 | return err; |
159 | 160 | ||
160 | out_fail: | 161 | out_fail: |
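
The ufs_symlink fix adds an out_notlocked label so the early length check can bail out before lock_kernel() without running the unlock. The same staged-cleanup shape is sketched below with a plain mutex standing in for the BKL; the function and its checks are invented for illustration.

#include <pthread.h>
#include <stdio.h>
#include <errno.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Staged error handling: each failure jumps only past the cleanup steps
 * that have not been set up yet.  A failure before the lock is taken
 * must not reach the unlock at 'out'. */
static int make_symlink(unsigned len, unsigned blocksize)
{
        int err = -ENAMETOOLONG;

        if (len > blocksize)
                goto out_notlocked;     /* nothing locked yet */

        pthread_mutex_lock(&big_lock);

        err = -ENOSPC;
        if (blocksize == 0)             /* stand-in for an allocation failure */
                goto out;

        /* ... write the link target, add the directory entry ... */
        err = 0;
out:
        pthread_mutex_unlock(&big_lock);
out_notlocked:
        return err;
}

int main(void)
{
        printf("short name: %d\n", make_symlink(10, 4096));
        printf("long name:  %d\n", make_symlink(9000, 4096));
        return 0;
}
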
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c index c9b55872079b..ea11d04c41a0 100644 --- a/fs/ufs/truncate.c +++ b/fs/ufs/truncate.c | |||
@@ -375,17 +375,15 @@ static int ufs_alloc_lastblock(struct inode *inode) | |||
375 | int err = 0; | 375 | int err = 0; |
376 | struct address_space *mapping = inode->i_mapping; | 376 | struct address_space *mapping = inode->i_mapping; |
377 | struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi; | 377 | struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi; |
378 | struct ufs_inode_info *ufsi = UFS_I(inode); | ||
379 | unsigned lastfrag, i, end; | 378 | unsigned lastfrag, i, end; |
380 | struct page *lastpage; | 379 | struct page *lastpage; |
381 | struct buffer_head *bh; | 380 | struct buffer_head *bh; |
382 | 381 | ||
383 | lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift; | 382 | lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift; |
384 | 383 | ||
385 | if (!lastfrag) { | 384 | if (!lastfrag) |
386 | ufsi->i_lastfrag = 0; | ||
387 | goto out; | 385 | goto out; |
388 | } | 386 | |
389 | lastfrag--; | 387 | lastfrag--; |
390 | 388 | ||
391 | lastpage = ufs_get_locked_page(mapping, lastfrag >> | 389 | lastpage = ufs_get_locked_page(mapping, lastfrag >> |
@@ -400,25 +398,25 @@ static int ufs_alloc_lastblock(struct inode *inode) | |||
400 | for (i = 0; i < end; ++i) | 398 | for (i = 0; i < end; ++i) |
401 | bh = bh->b_this_page; | 399 | bh = bh->b_this_page; |
402 | 400 | ||
403 | if (!buffer_mapped(bh)) { | 401 | |
404 | err = ufs_getfrag_block(inode, lastfrag, bh, 1); | 402 | err = ufs_getfrag_block(inode, lastfrag, bh, 1); |
405 | 403 | ||
406 | if (unlikely(err)) | 404 | if (unlikely(err)) |
407 | goto out_unlock; | 405 | goto out_unlock; |
408 | 406 | ||
409 | if (buffer_new(bh)) { | 407 | if (buffer_new(bh)) { |
410 | clear_buffer_new(bh); | 408 | clear_buffer_new(bh); |
411 | unmap_underlying_metadata(bh->b_bdev, | 409 | unmap_underlying_metadata(bh->b_bdev, |
412 | bh->b_blocknr); | 410 | bh->b_blocknr); |
413 | /* | 411 | /* |
414 | * we do not zeroize the fragment, because | 412 | * we do not zeroize the fragment, because |
415 | * if it is mapped to a hole, it already contains zeroes | 413 | * if it is mapped to a hole, it already contains zeroes |
416 | */ | 414 | */ |
417 | set_buffer_uptodate(bh); | 415 | set_buffer_uptodate(bh); |
418 | mark_buffer_dirty(bh); | 416 | mark_buffer_dirty(bh); |
419 | set_page_dirty(lastpage); | 417 | set_page_dirty(lastpage); |
420 | } | ||
421 | } | 418 | } |
419 | |||
422 | out_unlock: | 420 | out_unlock: |
423 | ufs_put_locked_page(lastpage); | 421 | ufs_put_locked_page(lastpage); |
424 | out: | 422 | out: |
@@ -440,23 +438,11 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size) | |||
440 | if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) | 438 | if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) |
441 | return -EPERM; | 439 | return -EPERM; |
442 | 440 | ||
443 | if (inode->i_size > old_i_size) { | 441 | err = ufs_alloc_lastblock(inode); |
444 | /* | ||
445 | * if we expand file we should care about | ||
446 | * allocation of block for last byte first of all | ||
447 | */ | ||
448 | err = ufs_alloc_lastblock(inode); | ||
449 | 442 | ||
450 | if (err) { | 443 | if (err) { |
451 | i_size_write(inode, old_i_size); | 444 | i_size_write(inode, old_i_size); |
452 | goto out; | 445 | goto out; |
453 | } | ||
454 | /* | ||
455 | * go away, because of we expand file, and we do not | ||
456 | * need free blocks, and zeroizes page | ||
457 | */ | ||
458 | lock_kernel(); | ||
459 | goto almost_end; | ||
460 | } | 446 | } |
461 | 447 | ||
462 | block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block); | 448 | block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block); |
@@ -477,21 +463,8 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size) | |||
477 | yield(); | 463 | yield(); |
478 | } | 464 | } |
479 | 465 | ||
480 | if (inode->i_size < old_i_size) { | ||
481 | /* | ||
482 | * now we should have enough space | ||
483 | * to allocate block for last byte | ||
484 | */ | ||
485 | err = ufs_alloc_lastblock(inode); | ||
486 | if (err) | ||
487 | /* | ||
488 | * looks like all the same - we have no space, | ||
489 | * but we truncate file already | ||
490 | */ | ||
491 | inode->i_size = (ufsi->i_lastfrag - 1) * uspi->s_fsize; | ||
492 | } | ||
493 | almost_end: | ||
494 | inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; | 466 | inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; |
467 | ufsi->i_lastfrag = DIRECT_FRAGMENT; | ||
495 | unlock_kernel(); | 468 | unlock_kernel(); |
496 | mark_inode_dirty(inode); | 469 | mark_inode_dirty(inode); |
497 | out: | 470 | out: |
diff --git a/fs/ufs/util.c b/fs/ufs/util.c index 337cf2c46d10..22f820a9b15c 100644 --- a/fs/ufs/util.c +++ b/fs/ufs/util.c | |||
@@ -251,12 +251,12 @@ struct page *ufs_get_locked_page(struct address_space *mapping, | |||
251 | { | 251 | { |
252 | struct page *page; | 252 | struct page *page; |
253 | 253 | ||
254 | try_again: | ||
255 | page = find_lock_page(mapping, index); | 254 | page = find_lock_page(mapping, index); |
256 | if (!page) { | 255 | if (!page) { |
257 | page = read_cache_page(mapping, index, | 256 | page = read_cache_page(mapping, index, |
258 | (filler_t*)mapping->a_ops->readpage, | 257 | (filler_t*)mapping->a_ops->readpage, |
259 | NULL); | 258 | NULL); |
259 | |||
260 | if (IS_ERR(page)) { | 260 | if (IS_ERR(page)) { |
261 | printk(KERN_ERR "ufs_change_blocknr: " | 261 | printk(KERN_ERR "ufs_change_blocknr: " |
262 | "read_cache_page error: ino %lu, index: %lu\n", | 262 | "read_cache_page error: ino %lu, index: %lu\n", |
@@ -266,6 +266,14 @@ try_again: | |||
266 | 266 | ||
267 | lock_page(page); | 267 | lock_page(page); |
268 | 268 | ||
269 | if (unlikely(page->mapping == NULL)) { | ||
270 | /* Truncate got there first */ | ||
271 | unlock_page(page); | ||
272 | page_cache_release(page); | ||
273 | page = NULL; | ||
274 | goto out; | ||
275 | } | ||
276 | |||
269 | if (!PageUptodate(page) || PageError(page)) { | 277 | if (!PageUptodate(page) || PageError(page)) { |
270 | unlock_page(page); | 278 | unlock_page(page); |
271 | page_cache_release(page); | 279 | page_cache_release(page); |
@@ -275,15 +283,8 @@ try_again: | |||
275 | mapping->host->i_ino, index); | 283 | mapping->host->i_ino, index); |
276 | 284 | ||
277 | page = ERR_PTR(-EIO); | 285 | page = ERR_PTR(-EIO); |
278 | goto out; | ||
279 | } | 286 | } |
280 | } | 287 | } |
281 | |||
282 | if (unlikely(!page->mapping || !page_has_buffers(page))) { | ||
283 | unlock_page(page); | ||
284 | page_cache_release(page); | ||
285 | goto try_again;/*we really need these buffers*/ | ||
286 | } | ||
287 | out: | 288 | out: |
288 | return page; | 289 | return page; |
289 | } | 290 | } |
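
ufs_get_locked_page() now re-checks page->mapping after taking the page lock, since truncate may have detached the page between the lookup and the lock. The general look-up, lock, then revalidate pattern can be sketched in userspace like this; the cache node and its fields are invented for the example.

#include <pthread.h>
#include <stdio.h>

struct node {
        pthread_mutex_t lock;
        int valid;               /* cleared by a concurrent "truncate" */
        int data;
};

/* Look an object up, lock it, then confirm it is still attached.  Its
 * state may change between the unlocked lookup and taking the lock, so
 * the check has to be repeated under the lock. */
static struct node *get_locked(struct node *n)
{
        if (!n)
                return NULL;

        pthread_mutex_lock(&n->lock);
        if (!n->valid) {
                /* Somebody invalidated it first: back off empty-handed. */
                pthread_mutex_unlock(&n->lock);
                return NULL;
        }
        return n;                /* returned locked and known-valid */
}

int main(void)
{
        struct node a = { PTHREAD_MUTEX_INITIALIZER, 1, 42 };
        struct node *n = get_locked(&a);

        if (n) {
                printf("data = %d\n", n->data);
                pthread_mutex_unlock(&n->lock);
        }
        return 0;
}
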
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index ceda3a2859d2..7858703ed84c 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h | |||
@@ -246,8 +246,8 @@ extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *); | |||
246 | #define BUF_BUSY XBF_DONT_BLOCK | 246 | #define BUF_BUSY XBF_DONT_BLOCK |
247 | 247 | ||
248 | #define XFS_BUF_BFLAGS(bp) ((bp)->b_flags) | 248 | #define XFS_BUF_BFLAGS(bp) ((bp)->b_flags) |
249 | #define XFS_BUF_ZEROFLAGS(bp) \ | 249 | #define XFS_BUF_ZEROFLAGS(bp) ((bp)->b_flags &= \ |
250 | ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI)) | 250 | ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED)) |
251 | 251 | ||
252 | #define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE) | 252 | #define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE) |
253 | #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE) | 253 | #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE) |
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 9bdef9d51900..4754f342a5d3 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c | |||
@@ -314,6 +314,13 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp) | |||
314 | return; | 314 | return; |
315 | } | 315 | } |
316 | 316 | ||
317 | if (xfs_readonly_buftarg(mp->m_ddev_targp)) { | ||
318 | xfs_fs_cmn_err(CE_NOTE, mp, | ||
319 | "Disabling barriers, underlying device is readonly"); | ||
320 | mp->m_flags &= ~XFS_MOUNT_BARRIER; | ||
321 | return; | ||
322 | } | ||
323 | |||
317 | error = xfs_barrier_test(mp); | 324 | error = xfs_barrier_test(mp); |
318 | if (error) { | 325 | if (error) { |
319 | xfs_fs_cmn_err(CE_NOTE, mp, | 326 | xfs_fs_cmn_err(CE_NOTE, mp, |
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c index e95e99f7168f..f137856c3261 100644 --- a/fs/xfs/quota/xfs_qm_bhv.c +++ b/fs/xfs/quota/xfs_qm_bhv.c | |||
@@ -217,17 +217,24 @@ xfs_qm_statvfs( | |||
217 | return 0; | 217 | return 0; |
218 | dp = &dqp->q_core; | 218 | dp = &dqp->q_core; |
219 | 219 | ||
220 | limit = dp->d_blk_softlimit ? dp->d_blk_softlimit : dp->d_blk_hardlimit; | 220 | limit = dp->d_blk_softlimit ? |
221 | be64_to_cpu(dp->d_blk_softlimit) : | ||
222 | be64_to_cpu(dp->d_blk_hardlimit); | ||
221 | if (limit && statp->f_blocks > limit) { | 223 | if (limit && statp->f_blocks > limit) { |
222 | statp->f_blocks = limit; | 224 | statp->f_blocks = limit; |
223 | statp->f_bfree = (statp->f_blocks > dp->d_bcount) ? | 225 | statp->f_bfree = |
224 | (statp->f_blocks - dp->d_bcount) : 0; | 226 | (statp->f_blocks > be64_to_cpu(dp->d_bcount)) ? |
227 | (statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0; | ||
225 | } | 228 | } |
226 | limit = dp->d_ino_softlimit ? dp->d_ino_softlimit : dp->d_ino_hardlimit; | 229 | |
230 | limit = dp->d_ino_softlimit ? | ||
231 | be64_to_cpu(dp->d_ino_softlimit) : | ||
232 | be64_to_cpu(dp->d_ino_hardlimit); | ||
227 | if (limit && statp->f_files > limit) { | 233 | if (limit && statp->f_files > limit) { |
228 | statp->f_files = limit; | 234 | statp->f_files = limit; |
229 | statp->f_ffree = (statp->f_files > dp->d_icount) ? | 235 | statp->f_ffree = |
230 | (statp->f_ffree - dp->d_icount) : 0; | 236 | (statp->f_files > be64_to_cpu(dp->d_icount)) ? |
237 | (statp->f_ffree - be64_to_cpu(dp->d_icount)) : 0; | ||
231 | } | 238 | } |
232 | 239 | ||
233 | xfs_qm_dqput(dqp); | 240 | xfs_qm_dqput(dqp); |
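
The quota hunk converts the on-disk big-endian limits with be64_to_cpu() before comparing them with in-memory counters. Outside the kernel the equivalent conversion can be written by hand; a small sketch with a made-up on-disk value follows.

#include <stdio.h>
#include <stdint.h>

/* Interpret 8 bytes stored big-endian (as on disk) as a host-order value. */
static uint64_t be64_to_host(const unsigned char b[8])
{
        uint64_t v = 0;
        for (int i = 0; i < 8; i++)
                v = (v << 8) | b[i];
        return v;
}

int main(void)
{
        /* On-disk soft limit of 4096 blocks, stored big-endian. */
        unsigned char d_blk_softlimit[8] = { 0, 0, 0, 0, 0, 0, 0x10, 0 };

        uint64_t limit = be64_to_host(d_blk_softlimit);
        uint64_t f_blocks = 10000;               /* made-up statfs value */

        /* Comparing the raw bytes as a host integer would be wrong on
         * little-endian machines; compare only after conversion. */
        if (limit && f_blocks > limit)
                f_blocks = limit;

        printf("limit=%llu, clamped f_blocks=%llu\n",
               (unsigned long long)limit, (unsigned long long)f_blocks);
        return 0;
}
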
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index eef6763f3a67..d2bbcd882a69 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c | |||
@@ -1835,40 +1835,47 @@ xfs_alloc_fix_freelist( | |||
1835 | &agbp))) | 1835 | &agbp))) |
1836 | return error; | 1836 | return error; |
1837 | if (!pag->pagf_init) { | 1837 | if (!pag->pagf_init) { |
1838 | ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK); | ||
1839 | ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING)); | ||
1838 | args->agbp = NULL; | 1840 | args->agbp = NULL; |
1839 | return 0; | 1841 | return 0; |
1840 | } | 1842 | } |
1841 | } else | 1843 | } else |
1842 | agbp = NULL; | 1844 | agbp = NULL; |
1843 | 1845 | ||
1844 | /* If this is a metadata preferred pag and we are user data | 1846 | /* |
1847 | * If this is a metadata preferred pag and we are user data | ||
1845 | * then try somewhere else if we are not being asked to | 1848 | * then try somewhere else if we are not being asked to |
1846 | * try harder at this point | 1849 | * try harder at this point |
1847 | */ | 1850 | */ |
1848 | if (pag->pagf_metadata && args->userdata && flags) { | 1851 | if (pag->pagf_metadata && args->userdata && |
1852 | (flags & XFS_ALLOC_FLAG_TRYLOCK)) { | ||
1853 | ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING)); | ||
1849 | args->agbp = NULL; | 1854 | args->agbp = NULL; |
1850 | return 0; | 1855 | return 0; |
1851 | } | 1856 | } |
1852 | 1857 | ||
1853 | need = XFS_MIN_FREELIST_PAG(pag, mp); | 1858 | if (!(flags & XFS_ALLOC_FLAG_FREEING)) { |
1854 | delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0; | 1859 | need = XFS_MIN_FREELIST_PAG(pag, mp); |
1855 | /* | 1860 | delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0; |
1856 | * If it looks like there isn't a long enough extent, or enough | 1861 | /* |
1857 | * total blocks, reject it. | 1862 | * If it looks like there isn't a long enough extent, or enough |
1858 | */ | 1863 | * total blocks, reject it. |
1859 | longest = (pag->pagf_longest > delta) ? | 1864 | */ |
1860 | (pag->pagf_longest - delta) : | 1865 | longest = (pag->pagf_longest > delta) ? |
1861 | (pag->pagf_flcount > 0 || pag->pagf_longest > 0); | 1866 | (pag->pagf_longest - delta) : |
1862 | if (args->minlen + args->alignment + args->minalignslop - 1 > longest || | 1867 | (pag->pagf_flcount > 0 || pag->pagf_longest > 0); |
1863 | (!(flags & XFS_ALLOC_FLAG_FREEING) && | 1868 | if ((args->minlen + args->alignment + args->minalignslop - 1) > |
1864 | (int)(pag->pagf_freeblks + pag->pagf_flcount - | 1869 | longest || |
1865 | need - args->total) < | 1870 | ((int)(pag->pagf_freeblks + pag->pagf_flcount - |
1866 | (int)args->minleft)) { | 1871 | need - args->total) < (int)args->minleft)) { |
1867 | if (agbp) | 1872 | if (agbp) |
1868 | xfs_trans_brelse(tp, agbp); | 1873 | xfs_trans_brelse(tp, agbp); |
1869 | args->agbp = NULL; | 1874 | args->agbp = NULL; |
1870 | return 0; | 1875 | return 0; |
1876 | } | ||
1871 | } | 1877 | } |
1878 | |||
1872 | /* | 1879 | /* |
1873 | * Get the a.g. freespace buffer. | 1880 | * Get the a.g. freespace buffer. |
1874 | * Can fail if we're not blocking on locks, and it's held. | 1881 | * Can fail if we're not blocking on locks, and it's held. |
@@ -1878,6 +1885,8 @@ xfs_alloc_fix_freelist( | |||
1878 | &agbp))) | 1885 | &agbp))) |
1879 | return error; | 1886 | return error; |
1880 | if (agbp == NULL) { | 1887 | if (agbp == NULL) { |
1888 | ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK); | ||
1889 | ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING)); | ||
1881 | args->agbp = NULL; | 1890 | args->agbp = NULL; |
1882 | return 0; | 1891 | return 0; |
1883 | } | 1892 | } |
@@ -1887,22 +1896,24 @@ xfs_alloc_fix_freelist( | |||
1887 | */ | 1896 | */ |
1888 | agf = XFS_BUF_TO_AGF(agbp); | 1897 | agf = XFS_BUF_TO_AGF(agbp); |
1889 | need = XFS_MIN_FREELIST(agf, mp); | 1898 | need = XFS_MIN_FREELIST(agf, mp); |
1890 | delta = need > be32_to_cpu(agf->agf_flcount) ? | ||
1891 | (need - be32_to_cpu(agf->agf_flcount)) : 0; | ||
1892 | /* | 1899 | /* |
1893 | * If there isn't enough total or single-extent, reject it. | 1900 | * If there isn't enough total or single-extent, reject it. |
1894 | */ | 1901 | */ |
1895 | longest = be32_to_cpu(agf->agf_longest); | 1902 | if (!(flags & XFS_ALLOC_FLAG_FREEING)) { |
1896 | longest = (longest > delta) ? (longest - delta) : | 1903 | delta = need > be32_to_cpu(agf->agf_flcount) ? |
1897 | (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0); | 1904 | (need - be32_to_cpu(agf->agf_flcount)) : 0; |
1898 | if (args->minlen + args->alignment + args->minalignslop - 1 > longest || | 1905 | longest = be32_to_cpu(agf->agf_longest); |
1899 | (!(flags & XFS_ALLOC_FLAG_FREEING) && | 1906 | longest = (longest > delta) ? (longest - delta) : |
1900 | (int)(be32_to_cpu(agf->agf_freeblks) + | 1907 | (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0); |
1901 | be32_to_cpu(agf->agf_flcount) - need - args->total) < | 1908 | if ((args->minlen + args->alignment + args->minalignslop - 1) > |
1902 | (int)args->minleft)) { | 1909 | longest || |
1903 | xfs_trans_brelse(tp, agbp); | 1910 | ((int)(be32_to_cpu(agf->agf_freeblks) + |
1904 | args->agbp = NULL; | 1911 | be32_to_cpu(agf->agf_flcount) - need - args->total) < |
1905 | return 0; | 1912 | (int)args->minleft)) { |
1913 | xfs_trans_brelse(tp, agbp); | ||
1914 | args->agbp = NULL; | ||
1915 | return 0; | ||
1916 | } | ||
1906 | } | 1917 | } |
1907 | /* | 1918 | /* |
1908 | * Make the freelist shorter if it's too long. | 1919 | * Make the freelist shorter if it's too long. |
@@ -1950,12 +1961,11 @@ xfs_alloc_fix_freelist( | |||
1950 | * on a completely full ag. | 1961 | * on a completely full ag. |
1951 | */ | 1962 | */ |
1952 | if (targs.agbno == NULLAGBLOCK) { | 1963 | if (targs.agbno == NULLAGBLOCK) { |
1953 | if (!(flags & XFS_ALLOC_FLAG_FREEING)) { | 1964 | if (flags & XFS_ALLOC_FLAG_FREEING) |
1954 | xfs_trans_brelse(tp, agflbp); | 1965 | break; |
1955 | args->agbp = NULL; | 1966 | xfs_trans_brelse(tp, agflbp); |
1956 | return 0; | 1967 | args->agbp = NULL; |
1957 | } | 1968 | return 0; |
1958 | break; | ||
1959 | } | 1969 | } |
1960 | /* | 1970 | /* |
1961 | * Put each allocated block on the list. | 1971 | * Put each allocated block on the list. |
@@ -2442,31 +2452,26 @@ xfs_free_extent( | |||
2442 | xfs_fsblock_t bno, /* starting block number of extent */ | 2452 | xfs_fsblock_t bno, /* starting block number of extent */ |
2443 | xfs_extlen_t len) /* length of extent */ | 2453 | xfs_extlen_t len) /* length of extent */ |
2444 | { | 2454 | { |
2445 | #ifdef DEBUG | 2455 | xfs_alloc_arg_t args; |
2446 | xfs_agf_t *agf; /* a.g. freespace header */ | ||
2447 | #endif | ||
2448 | xfs_alloc_arg_t args; /* allocation argument structure */ | ||
2449 | int error; | 2456 | int error; |
2450 | 2457 | ||
2451 | ASSERT(len != 0); | 2458 | ASSERT(len != 0); |
2459 | memset(&args, 0, sizeof(xfs_alloc_arg_t)); | ||
2452 | args.tp = tp; | 2460 | args.tp = tp; |
2453 | args.mp = tp->t_mountp; | 2461 | args.mp = tp->t_mountp; |
2454 | args.agno = XFS_FSB_TO_AGNO(args.mp, bno); | 2462 | args.agno = XFS_FSB_TO_AGNO(args.mp, bno); |
2455 | ASSERT(args.agno < args.mp->m_sb.sb_agcount); | 2463 | ASSERT(args.agno < args.mp->m_sb.sb_agcount); |
2456 | args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno); | 2464 | args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno); |
2457 | args.alignment = 1; | ||
2458 | args.minlen = args.minleft = args.minalignslop = 0; | ||
2459 | down_read(&args.mp->m_peraglock); | 2465 | down_read(&args.mp->m_peraglock); |
2460 | args.pag = &args.mp->m_perag[args.agno]; | 2466 | args.pag = &args.mp->m_perag[args.agno]; |
2461 | if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING))) | 2467 | if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING))) |
2462 | goto error0; | 2468 | goto error0; |
2463 | #ifdef DEBUG | 2469 | #ifdef DEBUG |
2464 | ASSERT(args.agbp != NULL); | 2470 | ASSERT(args.agbp != NULL); |
2465 | agf = XFS_BUF_TO_AGF(args.agbp); | 2471 | ASSERT((args.agbno + len) <= |
2466 | ASSERT(args.agbno + len <= be32_to_cpu(agf->agf_length)); | 2472 | be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)); |
2467 | #endif | 2473 | #endif |
2468 | error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, | 2474 | error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0); |
2469 | len, 0); | ||
2470 | error0: | 2475 | error0: |
2471 | up_read(&args.mp->m_peraglock); | 2476 | up_read(&args.mp->m_peraglock); |
2472 | return error; | 2477 | return error; |
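
xfs_free_extent() now memsets the whole xfs_alloc_arg_t instead of assigning a handful of fields by hand, so anything the callee reads later starts out as zero. The shape of that change in a standalone sketch, using an invented argument struct rather than the real XFS one:

#include <stdio.h>
#include <string.h>

/* Invented stand-in for an argument structure with many fields. */
struct alloc_args {
        unsigned agno;
        unsigned agbno;
        unsigned minlen, minleft, minalignslop, alignment;
};

int main(void)
{
        struct alloc_args args;

        /* Zero everything first, then fill in only the fields this call
         * actually cares about; everything else reads as 0 rather than
         * stack garbage. */
        memset(&args, 0, sizeof(args));
        args.agno = 3;
        args.agbno = 128;

        printf("minlen=%u alignment=%u\n", args.minlen, args.alignment);
        return 0;
}
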
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 3a6137539064..bf46fae303af 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c | |||
@@ -4993,7 +4993,7 @@ xfs_bmapi( | |||
4993 | bma.firstblock = *firstblock; | 4993 | bma.firstblock = *firstblock; |
4994 | bma.alen = alen; | 4994 | bma.alen = alen; |
4995 | bma.off = aoff; | 4995 | bma.off = aoff; |
4996 | bma.conv = (flags & XFS_BMAPI_CONVERT); | 4996 | bma.conv = !!(flags & XFS_BMAPI_CONVERT); |
4997 | bma.wasdel = wasdelay; | 4997 | bma.wasdel = wasdelay; |
4998 | bma.minlen = minlen; | 4998 | bma.minlen = minlen; |
4999 | bma.low = flist->xbf_low; | 4999 | bma.low = flist->xbf_low; |
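
The one-line xfs_bmap.c change uses !! to collapse the masked flag to 0 or 1 before storing it, which matters when the destination is a narrow field that would otherwise truncate the mask bit away. A tiny demonstration with an invented flag value (FLAG_CONVERT and the char field are examples, not the real XFS definitions):

#include <stdio.h>

#define FLAG_CONVERT 0x100       /* example flag bit above the low byte */

struct bma {
        unsigned char conv;      /* narrow field: plain assignment would drop bit 8 */
};

int main(void)
{
        int flags = FLAG_CONVERT;
        struct bma a, b;

        a.conv = (flags & FLAG_CONVERT);     /* 0x100 truncated to 0: flag lost */
        b.conv = !!(flags & FLAG_CONVERT);   /* normalized to 1: flag preserved */

        printf("plain=%u, normalized=%u\n", a.conv, b.conv);
        return 0;
}
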
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 86c1bf0bba9e..1f8ecff8553a 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -334,10 +334,9 @@ xfs_itobp( | |||
334 | #if !defined(__KERNEL__) | 334 | #if !defined(__KERNEL__) |
335 | ni = 0; | 335 | ni = 0; |
336 | #elif defined(DEBUG) | 336 | #elif defined(DEBUG) |
337 | ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : | 337 | ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog; |
338 | (BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog); | ||
339 | #else /* usual case */ | 338 | #else /* usual case */ |
340 | ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : 1; | 339 | ni = 1; |
341 | #endif | 340 | #endif |
342 | 341 | ||
343 | for (i = 0; i < ni; i++) { | 342 | for (i = 0; i < ni; i++) { |
@@ -348,11 +347,15 @@ xfs_itobp( | |||
348 | (i << mp->m_sb.sb_inodelog)); | 347 | (i << mp->m_sb.sb_inodelog)); |
349 | di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC && | 348 | di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC && |
350 | XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT)); | 349 | XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT)); |
351 | if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP, | 350 | if (unlikely(XFS_TEST_ERROR(!di_ok, mp, |
352 | XFS_RANDOM_ITOBP_INOTOBP))) { | 351 | XFS_ERRTAG_ITOBP_INOTOBP, |
352 | XFS_RANDOM_ITOBP_INOTOBP))) { | ||
353 | if (imap_flags & XFS_IMAP_BULKSTAT) { | ||
354 | xfs_trans_brelse(tp, bp); | ||
355 | return XFS_ERROR(EINVAL); | ||
356 | } | ||
353 | #ifdef DEBUG | 357 | #ifdef DEBUG |
354 | if (!(imap_flags & XFS_IMAP_BULKSTAT)) | 358 | cmn_err(CE_ALERT, |
355 | cmn_err(CE_ALERT, | ||
356 | "Device %s - bad inode magic/vsn " | 359 | "Device %s - bad inode magic/vsn " |
357 | "daddr %lld #%d (magic=%x)", | 360 | "daddr %lld #%d (magic=%x)", |
358 | XFS_BUFTARG_NAME(mp->m_ddev_targp), | 361 | XFS_BUFTARG_NAME(mp->m_ddev_targp), |
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index e730328636c3..21ac1a67e3e0 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
@@ -1413,7 +1413,7 @@ xlog_sync(xlog_t *log, | |||
1413 | ops = iclog->ic_header.h_num_logops; | 1413 | ops = iclog->ic_header.h_num_logops; |
1414 | INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops); | 1414 | INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops); |
1415 | 1415 | ||
1416 | bp = iclog->ic_bp; | 1416 | bp = iclog->ic_bp; |
1417 | ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); | 1417 | ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); |
1418 | XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); | 1418 | XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); |
1419 | XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT))); | 1419 | XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT))); |
@@ -1430,15 +1430,14 @@ xlog_sync(xlog_t *log, | |||
1430 | } | 1430 | } |
1431 | XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count); | 1431 | XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count); |
1432 | XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */ | 1432 | XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */ |
1433 | XFS_BUF_ZEROFLAGS(bp); | ||
1433 | XFS_BUF_BUSY(bp); | 1434 | XFS_BUF_BUSY(bp); |
1434 | XFS_BUF_ASYNC(bp); | 1435 | XFS_BUF_ASYNC(bp); |
1435 | /* | 1436 | /* |
1436 | * Do an ordered write for the log block. | 1437 | * Do an ordered write for the log block. |
1437 | * | 1438 | * It's unnecessary to flush the first split block in the log wrap case. |
1438 | * It may not be needed to flush the first split block in the log wrap | ||
1439 | * case, but do it anyways to be safe -AK | ||
1440 | */ | 1439 | */ |
1441 | if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) | 1440 | if (!split && (log->l_mp->m_flags & XFS_MOUNT_BARRIER)) |
1442 | XFS_BUF_ORDERED(bp); | 1441 | XFS_BUF_ORDERED(bp); |
1443 | 1442 | ||
1444 | ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); | 1443 | ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); |
@@ -1460,7 +1459,7 @@ xlog_sync(xlog_t *log, | |||
1460 | return error; | 1459 | return error; |
1461 | } | 1460 | } |
1462 | if (split) { | 1461 | if (split) { |
1463 | bp = iclog->ic_log->l_xbuf; | 1462 | bp = iclog->ic_log->l_xbuf; |
1464 | ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == | 1463 | ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == |
1465 | (unsigned long)1); | 1464 | (unsigned long)1); |
1466 | XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); | 1465 | XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); |
@@ -1468,6 +1467,7 @@ xlog_sync(xlog_t *log, | |||
1468 | XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+ | 1467 | XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+ |
1469 | (__psint_t)count), split); | 1468 | (__psint_t)count), split); |
1470 | XFS_BUF_SET_FSPRIVATE(bp, iclog); | 1469 | XFS_BUF_SET_FSPRIVATE(bp, iclog); |
1470 | XFS_BUF_ZEROFLAGS(bp); | ||
1471 | XFS_BUF_BUSY(bp); | 1471 | XFS_BUF_BUSY(bp); |
1472 | XFS_BUF_ASYNC(bp); | 1472 | XFS_BUF_ASYNC(bp); |
1473 | if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) | 1473 | if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) |
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index 6c96391f3f1a..b427d220a169 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c | |||
@@ -515,7 +515,7 @@ xfs_mount( | |||
515 | if (error) | 515 | if (error) |
516 | goto error2; | 516 | goto error2; |
517 | 517 | ||
518 | if ((mp->m_flags & XFS_MOUNT_BARRIER) && !(vfsp->vfs_flag & VFS_RDONLY)) | 518 | if (mp->m_flags & XFS_MOUNT_BARRIER) |
519 | xfs_mountfs_check_barriers(mp); | 519 | xfs_mountfs_check_barriers(mp); |
520 | 520 | ||
521 | error = XFS_IOINIT(vfsp, args, flags); | 521 | error = XFS_IOINIT(vfsp, args, flags); |