aboutsummaryrefslogtreecommitdiffstats
path: root/fs
diff options
context:
space:
mode:
Diffstat (limited to 'fs')
-rw-r--r--fs/adfs/super.c2
-rw-r--r--fs/block_dev.c114
-rw-r--r--fs/cifs/CHANGES10
-rw-r--r--fs/cifs/README2
-rw-r--r--fs/cifs/cifsencrypt.c3
-rw-r--r--fs/cifs/cifsfs.c6
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h18
-rw-r--r--fs/cifs/cifsproto.h4
-rw-r--r--fs/cifs/cifssmb.c28
-rw-r--r--fs/cifs/connect.c32
-rw-r--r--fs/cifs/dir.c4
-rw-r--r--fs/cifs/file.c97
-rw-r--r--fs/cifs/netmisc.c1
-rw-r--r--fs/cifs/readdir.c2
-rw-r--r--fs/cifs/sess.c2
-rw-r--r--fs/cifs/smberr.h1
-rw-r--r--fs/cifs/transport.c618
-rw-r--r--fs/cifs/xattr.c6
-rw-r--r--fs/eventpoll.c4
-rw-r--r--fs/exec.c10
-rw-r--r--fs/ext2/super.c2
-rw-r--r--fs/ext3/balloc.c6
-rw-r--r--fs/fuse/file.c10
-rw-r--r--fs/ioprio.c30
-rw-r--r--fs/jbd/commit.c6
-rw-r--r--fs/jbd/journal.c92
-rw-r--r--fs/jbd/transaction.c9
-rw-r--r--fs/jfs/inode.c16
-rw-r--r--fs/jfs/jfs_inode.h1
-rw-r--r--fs/jfs/super.c118
-rw-r--r--fs/lockd/svcsubs.c15
-rw-r--r--fs/locks.c6
-rw-r--r--fs/minix/inode.c13
-rw-r--r--fs/namei.c11
-rw-r--r--fs/nfs/file.c8
-rw-r--r--fs/nfs/idmap.c4
-rw-r--r--fs/nfs/nfs4proc.c29
-rw-r--r--fs/nfs/nfs4xdr.c21
-rw-r--r--fs/nfs/read.c23
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c1
-rw-r--r--fs/ocfs2/dlm/dlmunlock.c43
-rw-r--r--fs/ocfs2/localalloc.c8
-rw-r--r--fs/ocfs2/ocfs2.h2
-rw-r--r--fs/ocfs2/suballoc.c261
-rw-r--r--fs/ocfs2/suballoc.h2
-rw-r--r--fs/ocfs2/super.c8
-rw-r--r--fs/partitions/sun.c2
-rw-r--r--fs/proc/proc_misc.c2
-rw-r--r--fs/reiserfs/xattr.c2
-rw-r--r--fs/udf/super.c9
-rw-r--r--fs/udf/truncate.c64
-rw-r--r--fs/ufs/inode.c35
-rw-r--r--fs/ufs/truncate.c77
-rw-r--r--fs/xfs/xfs_alloc.c103
-rw-r--r--fs/xfs/xfs_bmap.c2
56 files changed, 1398 insertions, 609 deletions
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index ba1c88af49fe..82011019494c 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -308,7 +308,7 @@ static struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_di
308 if (adfs_checkmap(sb, dm)) 308 if (adfs_checkmap(sb, dm))
309 return dm; 309 return dm;
310 310
311 adfs_error(sb, NULL, "map corrupted"); 311 adfs_error(sb, "map corrupted");
312 312
313error_free: 313error_free:
314 while (--zone >= 0) 314 while (--zone >= 0)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 37534573960b..045f98854f14 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -884,6 +884,61 @@ void bd_set_size(struct block_device *bdev, loff_t size)
884} 884}
885EXPORT_SYMBOL(bd_set_size); 885EXPORT_SYMBOL(bd_set_size);
886 886
887static int __blkdev_put(struct block_device *bdev, unsigned int subclass)
888{
889 int ret = 0;
890 struct inode *bd_inode = bdev->bd_inode;
891 struct gendisk *disk = bdev->bd_disk;
892
893 mutex_lock_nested(&bdev->bd_mutex, subclass);
894 lock_kernel();
895 if (!--bdev->bd_openers) {
896 sync_blockdev(bdev);
897 kill_bdev(bdev);
898 }
899 if (bdev->bd_contains == bdev) {
900 if (disk->fops->release)
901 ret = disk->fops->release(bd_inode, NULL);
902 } else {
903 mutex_lock_nested(&bdev->bd_contains->bd_mutex,
904 subclass + 1);
905 bdev->bd_contains->bd_part_count--;
906 mutex_unlock(&bdev->bd_contains->bd_mutex);
907 }
908 if (!bdev->bd_openers) {
909 struct module *owner = disk->fops->owner;
910
911 put_disk(disk);
912 module_put(owner);
913
914 if (bdev->bd_contains != bdev) {
915 kobject_put(&bdev->bd_part->kobj);
916 bdev->bd_part = NULL;
917 }
918 bdev->bd_disk = NULL;
919 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
920 if (bdev != bdev->bd_contains)
921 __blkdev_put(bdev->bd_contains, subclass + 1);
922 bdev->bd_contains = NULL;
923 }
924 unlock_kernel();
925 mutex_unlock(&bdev->bd_mutex);
926 bdput(bdev);
927 return ret;
928}
929
930int blkdev_put(struct block_device *bdev)
931{
932 return __blkdev_put(bdev, BD_MUTEX_NORMAL);
933}
934EXPORT_SYMBOL(blkdev_put);
935
936int blkdev_put_partition(struct block_device *bdev)
937{
938 return __blkdev_put(bdev, BD_MUTEX_PARTITION);
939}
940EXPORT_SYMBOL(blkdev_put_partition);
941
887static int 942static int
888blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags); 943blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags);
889 944
@@ -980,7 +1035,7 @@ out_first:
980 bdev->bd_disk = NULL; 1035 bdev->bd_disk = NULL;
981 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; 1036 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
982 if (bdev != bdev->bd_contains) 1037 if (bdev != bdev->bd_contains)
983 blkdev_put(bdev->bd_contains); 1038 __blkdev_put(bdev->bd_contains, BD_MUTEX_WHOLE);
984 bdev->bd_contains = NULL; 1039 bdev->bd_contains = NULL;
985 put_disk(disk); 1040 put_disk(disk);
986 module_put(owner); 1041 module_put(owner);
@@ -1079,63 +1134,6 @@ static int blkdev_open(struct inode * inode, struct file * filp)
1079 return res; 1134 return res;
1080} 1135}
1081 1136
1082static int __blkdev_put(struct block_device *bdev, unsigned int subclass)
1083{
1084 int ret = 0;
1085 struct inode *bd_inode = bdev->bd_inode;
1086 struct gendisk *disk = bdev->bd_disk;
1087
1088 mutex_lock_nested(&bdev->bd_mutex, subclass);
1089 lock_kernel();
1090 if (!--bdev->bd_openers) {
1091 sync_blockdev(bdev);
1092 kill_bdev(bdev);
1093 }
1094 if (bdev->bd_contains == bdev) {
1095 if (disk->fops->release)
1096 ret = disk->fops->release(bd_inode, NULL);
1097 } else {
1098 mutex_lock_nested(&bdev->bd_contains->bd_mutex,
1099 subclass + 1);
1100 bdev->bd_contains->bd_part_count--;
1101 mutex_unlock(&bdev->bd_contains->bd_mutex);
1102 }
1103 if (!bdev->bd_openers) {
1104 struct module *owner = disk->fops->owner;
1105
1106 put_disk(disk);
1107 module_put(owner);
1108
1109 if (bdev->bd_contains != bdev) {
1110 kobject_put(&bdev->bd_part->kobj);
1111 bdev->bd_part = NULL;
1112 }
1113 bdev->bd_disk = NULL;
1114 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
1115 if (bdev != bdev->bd_contains)
1116 __blkdev_put(bdev->bd_contains, subclass + 1);
1117 bdev->bd_contains = NULL;
1118 }
1119 unlock_kernel();
1120 mutex_unlock(&bdev->bd_mutex);
1121 bdput(bdev);
1122 return ret;
1123}
1124
1125int blkdev_put(struct block_device *bdev)
1126{
1127 return __blkdev_put(bdev, BD_MUTEX_NORMAL);
1128}
1129
1130EXPORT_SYMBOL(blkdev_put);
1131
1132int blkdev_put_partition(struct block_device *bdev)
1133{
1134 return __blkdev_put(bdev, BD_MUTEX_PARTITION);
1135}
1136
1137EXPORT_SYMBOL(blkdev_put_partition);
1138
1139static int blkdev_close(struct inode * inode, struct file * filp) 1137static int blkdev_close(struct inode * inode, struct file * filp)
1140{ 1138{
1141 struct block_device *bdev = I_BDEV(filp->f_mapping->host); 1139 struct block_device *bdev = I_BDEV(filp->f_mapping->host);
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index a61d17ed1827..0feb3bd49cb8 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,13 @@
1Version 1.45
2------------
3Do not time out lockw calls when using posix extensions. Do not
4time out requests if server still responding reasonably fast
5on requests on other threads. Improve POSIX locking emulation,
6(lock cancel now works, and unlock of merged range works even
7to Windows servers now). Fix oops on mount to lanman servers
8(win9x, os/2 etc.) when null password. Do not send listxattr
9(SMB to query all EAs) if nouser_xattr specified.
10
1Version 1.44 11Version 1.44
2------------ 12------------
3Rewritten sessionsetup support, including support for legacy SMB 13Rewritten sessionsetup support, including support for legacy SMB
diff --git a/fs/cifs/README b/fs/cifs/README
index 7986d0d97ace..5f0e1bd64fee 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -408,7 +408,7 @@ A partial list of the supported mount options follows:
408 user_xattr Allow getting and setting user xattrs as OS/2 EAs (extended 408 user_xattr Allow getting and setting user xattrs as OS/2 EAs (extended
409 attributes) to the server (default) e.g. via setfattr 409 attributes) to the server (default) e.g. via setfattr
410 and getfattr utilities. 410 and getfattr utilities.
411 nouser_xattr Do not allow getfattr/setfattr to get/set xattrs 411 nouser_xattr Do not allow getfattr/setfattr to get/set/list xattrs
412 mapchars Translate six of the seven reserved characters (not backslash) 412 mapchars Translate six of the seven reserved characters (not backslash)
413 *?<>|: 413 *?<>|:
414 to the remap range (above 0xF000), which also 414 to the remap range (above 0xF000), which also
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index a89efaf78a26..4bc250b2d9fc 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -277,7 +277,8 @@ void calc_lanman_hash(struct cifsSesInfo * ses, char * lnm_session_key)
277 return; 277 return;
278 278
279 memset(password_with_pad, 0, CIFS_ENCPWD_SIZE); 279 memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
280 strncpy(password_with_pad, ses->password, CIFS_ENCPWD_SIZE); 280 if(ses->password)
281 strncpy(password_with_pad, ses->password, CIFS_ENCPWD_SIZE);
281 282
282 if((ses->server->secMode & SECMODE_PW_ENCRYPT) == 0) 283 if((ses->server->secMode & SECMODE_PW_ENCRYPT) == 0)
283 if(extended_security & CIFSSEC_MAY_PLNTXT) { 284 if(extended_security & CIFSSEC_MAY_PLNTXT) {
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index c28ede599946..3cd750029be2 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -402,7 +402,6 @@ static struct quotactl_ops cifs_quotactl_ops = {
402}; 402};
403#endif 403#endif
404 404
405#ifdef CONFIG_CIFS_EXPERIMENTAL
406static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags) 405static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags)
407{ 406{
408 struct cifs_sb_info *cifs_sb; 407 struct cifs_sb_info *cifs_sb;
@@ -422,7 +421,7 @@ static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags)
422 tcon->tidStatus = CifsExiting; 421 tcon->tidStatus = CifsExiting;
423 up(&tcon->tconSem); 422 up(&tcon->tconSem);
424 423
425 /* cancel_brl_requests(tcon); */ 424 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
426 /* cancel_notify_requests(tcon); */ 425 /* cancel_notify_requests(tcon); */
427 if(tcon->ses && tcon->ses->server) 426 if(tcon->ses && tcon->ses->server)
428 { 427 {
@@ -438,7 +437,6 @@ static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags)
438 437
439 return; 438 return;
440} 439}
441#endif
442 440
443static int cifs_remount(struct super_block *sb, int *flags, char *data) 441static int cifs_remount(struct super_block *sb, int *flags, char *data)
444{ 442{
@@ -457,9 +455,7 @@ struct super_operations cifs_super_ops = {
457 unless later we add lazy close of inodes or unless the kernel forgets to call 455 unless later we add lazy close of inodes or unless the kernel forgets to call
458 us with the same number of releases (closes) as opens */ 456 us with the same number of releases (closes) as opens */
459 .show_options = cifs_show_options, 457 .show_options = cifs_show_options,
460#ifdef CONFIG_CIFS_EXPERIMENTAL
461 .umount_begin = cifs_umount_begin, 458 .umount_begin = cifs_umount_begin,
462#endif
463 .remount_fs = cifs_remount, 459 .remount_fs = cifs_remount,
464}; 460};
465 461
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 8f75c6f24701..39ee8ef3bdeb 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -100,5 +100,5 @@ extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
100extern ssize_t cifs_listxattr(struct dentry *, char *, size_t); 100extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
101extern int cifs_ioctl (struct inode * inode, struct file * filep, 101extern int cifs_ioctl (struct inode * inode, struct file * filep,
102 unsigned int command, unsigned long arg); 102 unsigned int command, unsigned long arg);
103#define CIFS_VERSION "1.44" 103#define CIFS_VERSION "1.45"
104#endif /* _CIFSFS_H */ 104#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 6d7cf5f3bc0b..b24006c47df1 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright (C) International Business Machines Corp., 2002,2006 4 * Copyright (C) International Business Machines Corp., 2002,2006
5 * Author(s): Steve French (sfrench@us.ibm.com) 5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * Jeremy Allison (jra@samba.org)
6 * 7 *
7 * This library is free software; you can redistribute it and/or modify 8 * This library is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser General Public License as published 9 * it under the terms of the GNU Lesser General Public License as published
@@ -158,7 +159,8 @@ struct TCP_Server_Info {
158 /* 16th byte of RFC1001 workstation name is always null */ 159 /* 16th byte of RFC1001 workstation name is always null */
159 char workstation_RFC1001_name[SERVER_NAME_LEN_WITH_NULL]; 160 char workstation_RFC1001_name[SERVER_NAME_LEN_WITH_NULL];
160 __u32 sequence_number; /* needed for CIFS PDU signature */ 161 __u32 sequence_number; /* needed for CIFS PDU signature */
161 char mac_signing_key[CIFS_SESS_KEY_SIZE + 16]; 162 char mac_signing_key[CIFS_SESS_KEY_SIZE + 16];
163 unsigned long lstrp; /* when we got last response from this server */
162}; 164};
163 165
164/* 166/*
@@ -266,14 +268,14 @@ struct cifsTconInfo {
266}; 268};
267 269
268/* 270/*
269 * This info hangs off the cifsFileInfo structure. This is used to track 271 * This info hangs off the cifsFileInfo structure, pointed to by llist.
270 * byte stream locks on the file 272 * This is used to track byte stream locks on the file
271 */ 273 */
272struct cifsLockInfo { 274struct cifsLockInfo {
273 struct cifsLockInfo *next; 275 struct list_head llist; /* pointer to next cifsLockInfo */
274 int start; 276 __u64 offset;
275 int length; 277 __u64 length;
276 int type; 278 __u8 type;
277}; 279};
278 280
279/* 281/*
@@ -304,6 +306,8 @@ struct cifsFileInfo {
304 /* lock scope id (0 if none) */ 306 /* lock scope id (0 if none) */
305 struct file * pfile; /* needed for writepage */ 307 struct file * pfile; /* needed for writepage */
306 struct inode * pInode; /* needed for oplock break */ 308 struct inode * pInode; /* needed for oplock break */
309 struct semaphore lock_sem;
310 struct list_head llist; /* list of byte range locks we have. */
307 unsigned closePend:1; /* file is marked to close */ 311 unsigned closePend:1; /* file is marked to close */
308 unsigned invalidHandle:1; /* file closed via session abend */ 312 unsigned invalidHandle:1; /* file closed via session abend */
309 atomic_t wrtPending; /* handle in use - defer close */ 313 atomic_t wrtPending; /* handle in use - defer close */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index a5ddc62d6fe6..b35c55c3c8bb 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -50,6 +50,10 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
50extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *, 50extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *,
51 struct kvec *, int /* nvec to send */, 51 struct kvec *, int /* nvec to send */,
52 int * /* type of buf returned */ , const int long_op); 52 int * /* type of buf returned */ , const int long_op);
53extern int SendReceiveBlockingLock(const unsigned int /* xid */ , struct cifsTconInfo *,
54 struct smb_hdr * /* input */ ,
55 struct smb_hdr * /* out */ ,
56 int * /* bytes returned */);
53extern int checkSMBhdr(struct smb_hdr *smb, __u16 mid); 57extern int checkSMBhdr(struct smb_hdr *smb, __u16 mid);
54extern int checkSMB(struct smb_hdr *smb, __u16 mid, int length); 58extern int checkSMB(struct smb_hdr *smb, __u16 mid, int length);
55extern int is_valid_oplock_break(struct smb_hdr *smb, struct TCP_Server_Info *); 59extern int is_valid_oplock_break(struct smb_hdr *smb, struct TCP_Server_Info *);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 19678c575dfc..075d8fb3d376 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -477,7 +477,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
477 /* BB get server time for time conversions and add 477 /* BB get server time for time conversions and add
478 code to use it and timezone since this is not UTC */ 478 code to use it and timezone since this is not UTC */
479 479
480 if (rsp->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) { 480 if (rsp->EncryptionKeyLength == cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) {
481 memcpy(server->cryptKey, rsp->EncryptionKey, 481 memcpy(server->cryptKey, rsp->EncryptionKey,
482 CIFS_CRYPTO_KEY_SIZE); 482 CIFS_CRYPTO_KEY_SIZE);
483 } else if (server->secMode & SECMODE_PW_ENCRYPT) { 483 } else if (server->secMode & SECMODE_PW_ENCRYPT) {
@@ -1460,8 +1460,13 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
1460 pSMB->hdr.smb_buf_length += count; 1460 pSMB->hdr.smb_buf_length += count;
1461 pSMB->ByteCount = cpu_to_le16(count); 1461 pSMB->ByteCount = cpu_to_le16(count);
1462 1462
1463 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, 1463 if (waitFlag) {
1464 rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
1465 (struct smb_hdr *) pSMBr, &bytes_returned);
1466 } else {
1467 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
1464 (struct smb_hdr *) pSMBr, &bytes_returned, timeout); 1468 (struct smb_hdr *) pSMBr, &bytes_returned, timeout);
1469 }
1465 cifs_stats_inc(&tcon->num_locks); 1470 cifs_stats_inc(&tcon->num_locks);
1466 if (rc) { 1471 if (rc) {
1467 cFYI(1, ("Send error in Lock = %d", rc)); 1472 cFYI(1, ("Send error in Lock = %d", rc));
@@ -1484,6 +1489,7 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
1484 char *data_offset; 1489 char *data_offset;
1485 struct cifs_posix_lock *parm_data; 1490 struct cifs_posix_lock *parm_data;
1486 int rc = 0; 1491 int rc = 0;
1492 int timeout = 0;
1487 int bytes_returned = 0; 1493 int bytes_returned = 0;
1488 __u16 params, param_offset, offset, byte_count, count; 1494 __u16 params, param_offset, offset, byte_count, count;
1489 1495
@@ -1503,7 +1509,6 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
1503 pSMB->MaxSetupCount = 0; 1509 pSMB->MaxSetupCount = 0;
1504 pSMB->Reserved = 0; 1510 pSMB->Reserved = 0;
1505 pSMB->Flags = 0; 1511 pSMB->Flags = 0;
1506 pSMB->Timeout = 0;
1507 pSMB->Reserved2 = 0; 1512 pSMB->Reserved2 = 0;
1508 param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; 1513 param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
1509 offset = param_offset + params; 1514 offset = param_offset + params;
@@ -1529,8 +1534,13 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
1529 (((char *) &pSMB->hdr.Protocol) + offset); 1534 (((char *) &pSMB->hdr.Protocol) + offset);
1530 1535
1531 parm_data->lock_type = cpu_to_le16(lock_type); 1536 parm_data->lock_type = cpu_to_le16(lock_type);
1532 if(waitFlag) 1537 if(waitFlag) {
1538 timeout = 3; /* blocking operation, no timeout */
1533 parm_data->lock_flags = cpu_to_le16(1); 1539 parm_data->lock_flags = cpu_to_le16(1);
1540 pSMB->Timeout = cpu_to_le32(-1);
1541 } else
1542 pSMB->Timeout = 0;
1543
1534 parm_data->pid = cpu_to_le32(current->tgid); 1544 parm_data->pid = cpu_to_le32(current->tgid);
1535 parm_data->start = cpu_to_le64(pLockData->fl_start); 1545 parm_data->start = cpu_to_le64(pLockData->fl_start);
1536 parm_data->length = cpu_to_le64(len); /* normalize negative numbers */ 1546 parm_data->length = cpu_to_le64(len); /* normalize negative numbers */
@@ -1541,8 +1551,14 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
1541 pSMB->Reserved4 = 0; 1551 pSMB->Reserved4 = 0;
1542 pSMB->hdr.smb_buf_length += byte_count; 1552 pSMB->hdr.smb_buf_length += byte_count;
1543 pSMB->ByteCount = cpu_to_le16(byte_count); 1553 pSMB->ByteCount = cpu_to_le16(byte_count);
1544 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, 1554 if (waitFlag) {
1545 (struct smb_hdr *) pSMBr, &bytes_returned, 0); 1555 rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
1556 (struct smb_hdr *) pSMBr, &bytes_returned);
1557 } else {
1558 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
1559 (struct smb_hdr *) pSMBr, &bytes_returned, timeout);
1560 }
1561
1546 if (rc) { 1562 if (rc) {
1547 cFYI(1, ("Send error in Posix Lock = %d", rc)); 1563 cFYI(1, ("Send error in Posix Lock = %d", rc));
1548 } else if (get_flag) { 1564 } else if (get_flag) {
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 876eb9ef85fe..5d394c726860 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -182,6 +182,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
182 182
183 while ((server->tcpStatus != CifsExiting) && (server->tcpStatus != CifsGood)) 183 while ((server->tcpStatus != CifsExiting) && (server->tcpStatus != CifsGood))
184 { 184 {
185 try_to_freeze();
185 if(server->protocolType == IPV6) { 186 if(server->protocolType == IPV6) {
186 rc = ipv6_connect(&server->addr.sockAddr6,&server->ssocket); 187 rc = ipv6_connect(&server->addr.sockAddr6,&server->ssocket);
187 } else { 188 } else {
@@ -612,6 +613,10 @@ multi_t2_fnd:
612#ifdef CONFIG_CIFS_STATS2 613#ifdef CONFIG_CIFS_STATS2
613 mid_entry->when_received = jiffies; 614 mid_entry->when_received = jiffies;
614#endif 615#endif
616 /* so we do not time out requests to server
617 which is still responding (since server could
618 be busy but not dead) */
619 server->lstrp = jiffies;
615 break; 620 break;
616 } 621 }
617 } 622 }
@@ -1266,33 +1271,35 @@ find_unc(__be32 new_target_ip_addr, char *uncName, char *userName)
1266 1271
1267 read_lock(&GlobalSMBSeslock); 1272 read_lock(&GlobalSMBSeslock);
1268 list_for_each(tmp, &GlobalTreeConnectionList) { 1273 list_for_each(tmp, &GlobalTreeConnectionList) {
1269 cFYI(1, ("Next tcon - ")); 1274 cFYI(1, ("Next tcon"));
1270 tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList); 1275 tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList);
1271 if (tcon->ses) { 1276 if (tcon->ses) {
1272 if (tcon->ses->server) { 1277 if (tcon->ses->server) {
1273 cFYI(1, 1278 cFYI(1,
1274 (" old ip addr: %x == new ip %x ?", 1279 ("old ip addr: %x == new ip %x ?",
1275 tcon->ses->server->addr.sockAddr.sin_addr. 1280 tcon->ses->server->addr.sockAddr.sin_addr.
1276 s_addr, new_target_ip_addr)); 1281 s_addr, new_target_ip_addr));
1277 if (tcon->ses->server->addr.sockAddr.sin_addr. 1282 if (tcon->ses->server->addr.sockAddr.sin_addr.
1278 s_addr == new_target_ip_addr) { 1283 s_addr == new_target_ip_addr) {
1279 /* BB lock tcon and server and tcp session and increment use count here? */ 1284 /* BB lock tcon, server and tcp session and increment use count here? */
1280 /* found a match on the TCP session */ 1285 /* found a match on the TCP session */
1281 /* BB check if reconnection needed */ 1286 /* BB check if reconnection needed */
1282 cFYI(1,("Matched ip, old UNC: %s == new: %s ?", 1287 cFYI(1,("IP match, old UNC: %s new: %s",
1283 tcon->treeName, uncName)); 1288 tcon->treeName, uncName));
1284 if (strncmp 1289 if (strncmp
1285 (tcon->treeName, uncName, 1290 (tcon->treeName, uncName,
1286 MAX_TREE_SIZE) == 0) { 1291 MAX_TREE_SIZE) == 0) {
1287 cFYI(1, 1292 cFYI(1,
1288 ("Matched UNC, old user: %s == new: %s ?", 1293 ("and old usr: %s new: %s",
1289 tcon->treeName, uncName)); 1294 tcon->treeName, uncName));
1290 if (strncmp 1295 if (strncmp
1291 (tcon->ses->userName, 1296 (tcon->ses->userName,
1292 userName, 1297 userName,
1293 MAX_USERNAME_SIZE) == 0) { 1298 MAX_USERNAME_SIZE) == 0) {
1294 read_unlock(&GlobalSMBSeslock); 1299 read_unlock(&GlobalSMBSeslock);
1295 return tcon;/* also matched user (smb session)*/ 1300 /* matched smb session
1301 (user name */
1302 return tcon;
1296 } 1303 }
1297 } 1304 }
1298 } 1305 }
@@ -1969,7 +1976,18 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
1969 } 1976 }
1970 1977
1971 cFYI(1,("Negotiate caps 0x%x",(int)cap)); 1978 cFYI(1,("Negotiate caps 0x%x",(int)cap));
1972 1979#ifdef CONFIG_CIFS_DEBUG2
1980 if(cap & CIFS_UNIX_FCNTL_CAP)
1981 cFYI(1,("FCNTL cap"));
1982 if(cap & CIFS_UNIX_EXTATTR_CAP)
1983 cFYI(1,("EXTATTR cap"));
1984 if(cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
1985 cFYI(1,("POSIX path cap"));
1986 if(cap & CIFS_UNIX_XATTR_CAP)
1987 cFYI(1,("XATTR cap"));
1988 if(cap & CIFS_UNIX_POSIX_ACL_CAP)
1989 cFYI(1,("POSIX ACL cap"));
1990#endif /* CIFS_DEBUG2 */
1973 if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) { 1991 if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
1974 cFYI(1,("setting capabilities failed")); 1992 cFYI(1,("setting capabilities failed"));
1975 } 1993 }
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index ba4cbe9b0684..914239d53634 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -267,6 +267,10 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
267 pCifsFile->invalidHandle = FALSE; 267 pCifsFile->invalidHandle = FALSE;
268 pCifsFile->closePend = FALSE; 268 pCifsFile->closePend = FALSE;
269 init_MUTEX(&pCifsFile->fh_sem); 269 init_MUTEX(&pCifsFile->fh_sem);
270 init_MUTEX(&pCifsFile->lock_sem);
271 INIT_LIST_HEAD(&pCifsFile->llist);
272 atomic_set(&pCifsFile->wrtPending,0);
273
270 /* set the following in open now 274 /* set the following in open now
271 pCifsFile->pfile = file; */ 275 pCifsFile->pfile = file; */
272 write_lock(&GlobalSMBSeslock); 276 write_lock(&GlobalSMBSeslock);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 944d2b9e092d..e9c5ba9084fc 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -5,6 +5,7 @@
5 * 5 *
6 * Copyright (C) International Business Machines Corp., 2002,2003 6 * Copyright (C) International Business Machines Corp., 2002,2003
7 * Author(s): Steve French (sfrench@us.ibm.com) 7 * Author(s): Steve French (sfrench@us.ibm.com)
8 * Jeremy Allison (jra@samba.org)
8 * 9 *
9 * This library is free software; you can redistribute it and/or modify 10 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published 11 * it under the terms of the GNU Lesser General Public License as published
@@ -47,6 +48,8 @@ static inline struct cifsFileInfo *cifs_init_private(
47 private_data->netfid = netfid; 48 private_data->netfid = netfid;
48 private_data->pid = current->tgid; 49 private_data->pid = current->tgid;
49 init_MUTEX(&private_data->fh_sem); 50 init_MUTEX(&private_data->fh_sem);
51 init_MUTEX(&private_data->lock_sem);
52 INIT_LIST_HEAD(&private_data->llist);
50 private_data->pfile = file; /* needed for writepage */ 53 private_data->pfile = file; /* needed for writepage */
51 private_data->pInode = inode; 54 private_data->pInode = inode;
52 private_data->invalidHandle = FALSE; 55 private_data->invalidHandle = FALSE;
@@ -473,6 +476,8 @@ int cifs_close(struct inode *inode, struct file *file)
473 cifs_sb = CIFS_SB(inode->i_sb); 476 cifs_sb = CIFS_SB(inode->i_sb);
474 pTcon = cifs_sb->tcon; 477 pTcon = cifs_sb->tcon;
475 if (pSMBFile) { 478 if (pSMBFile) {
479 struct cifsLockInfo *li, *tmp;
480
476 pSMBFile->closePend = TRUE; 481 pSMBFile->closePend = TRUE;
477 if (pTcon) { 482 if (pTcon) {
478 /* no sense reconnecting to close a file that is 483 /* no sense reconnecting to close a file that is
@@ -496,6 +501,16 @@ int cifs_close(struct inode *inode, struct file *file)
496 pSMBFile->netfid); 501 pSMBFile->netfid);
497 } 502 }
498 } 503 }
504
505 /* Delete any outstanding lock records.
506 We'll lose them when the file is closed anyway. */
507 down(&pSMBFile->lock_sem);
508 list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
509 list_del(&li->llist);
510 kfree(li);
511 }
512 up(&pSMBFile->lock_sem);
513
499 write_lock(&GlobalSMBSeslock); 514 write_lock(&GlobalSMBSeslock);
500 list_del(&pSMBFile->flist); 515 list_del(&pSMBFile->flist);
501 list_del(&pSMBFile->tlist); 516 list_del(&pSMBFile->tlist);
@@ -570,6 +585,21 @@ int cifs_closedir(struct inode *inode, struct file *file)
570 return rc; 585 return rc;
571} 586}
572 587
588static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
589 __u64 offset, __u8 lockType)
590{
591 struct cifsLockInfo *li = kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
592 if (li == NULL)
593 return -ENOMEM;
594 li->offset = offset;
595 li->length = len;
596 li->type = lockType;
597 down(&fid->lock_sem);
598 list_add(&li->llist, &fid->llist);
599 up(&fid->lock_sem);
600 return 0;
601}
602
573int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) 603int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
574{ 604{
575 int rc, xid; 605 int rc, xid;
@@ -581,6 +611,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
581 struct cifsTconInfo *pTcon; 611 struct cifsTconInfo *pTcon;
582 __u16 netfid; 612 __u16 netfid;
583 __u8 lockType = LOCKING_ANDX_LARGE_FILES; 613 __u8 lockType = LOCKING_ANDX_LARGE_FILES;
614 int posix_locking;
584 615
585 length = 1 + pfLock->fl_end - pfLock->fl_start; 616 length = 1 + pfLock->fl_end - pfLock->fl_start;
586 rc = -EACCES; 617 rc = -EACCES;
@@ -639,15 +670,14 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
639 } 670 }
640 netfid = ((struct cifsFileInfo *)file->private_data)->netfid; 671 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
641 672
673 posix_locking = (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
674 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability));
642 675
643 /* BB add code here to normalize offset and length to 676 /* BB add code here to normalize offset and length to
644 account for negative length which we can not accept over the 677 account for negative length which we can not accept over the
645 wire */ 678 wire */
646 if (IS_GETLK(cmd)) { 679 if (IS_GETLK(cmd)) {
647 if(experimEnabled && 680 if(posix_locking) {
648 (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
649 (CIFS_UNIX_FCNTL_CAP &
650 le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
651 int posix_lock_type; 681 int posix_lock_type;
652 if(lockType & LOCKING_ANDX_SHARED_LOCK) 682 if(lockType & LOCKING_ANDX_SHARED_LOCK)
653 posix_lock_type = CIFS_RDLCK; 683 posix_lock_type = CIFS_RDLCK;
@@ -683,10 +713,15 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
683 FreeXid(xid); 713 FreeXid(xid);
684 return rc; 714 return rc;
685 } 715 }
686 if (experimEnabled && 716
687 (cifs_sb->tcon->ses->capabilities & CAP_UNIX) && 717 if (!numLock && !numUnlock) {
688 (CIFS_UNIX_FCNTL_CAP & 718 /* if no lock or unlock then nothing
689 le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) { 719 to do since we do not know what it is */
720 FreeXid(xid);
721 return -EOPNOTSUPP;
722 }
723
724 if (posix_locking) {
690 int posix_lock_type; 725 int posix_lock_type;
691 if(lockType & LOCKING_ANDX_SHARED_LOCK) 726 if(lockType & LOCKING_ANDX_SHARED_LOCK)
692 posix_lock_type = CIFS_RDLCK; 727 posix_lock_type = CIFS_RDLCK;
@@ -695,18 +730,46 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
695 730
696 if(numUnlock == 1) 731 if(numUnlock == 1)
697 posix_lock_type = CIFS_UNLCK; 732 posix_lock_type = CIFS_UNLCK;
698 else if(numLock == 0) { 733
699 /* if no lock or unlock then nothing
700 to do since we do not know what it is */
701 FreeXid(xid);
702 return -EOPNOTSUPP;
703 }
704 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */, 734 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
705 length, pfLock, 735 length, pfLock,
706 posix_lock_type, wait_flag); 736 posix_lock_type, wait_flag);
707 } else 737 } else {
708 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start, 738 struct cifsFileInfo *fid = (struct cifsFileInfo *)file->private_data;
709 numUnlock, numLock, lockType, wait_flag); 739
740 if (numLock) {
741 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
742 0, numLock, lockType, wait_flag);
743
744 if (rc == 0) {
745 /* For Windows locks we must store them. */
746 rc = store_file_lock(fid, length,
747 pfLock->fl_start, lockType);
748 }
749 } else if (numUnlock) {
750 /* For each stored lock that this unlock overlaps
751 completely, unlock it. */
752 int stored_rc = 0;
753 struct cifsLockInfo *li, *tmp;
754
755 down(&fid->lock_sem);
756 list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
757 if (pfLock->fl_start <= li->offset &&
758 length >= li->length) {
759 stored_rc = CIFSSMBLock(xid, pTcon, netfid,
760 li->length, li->offset,
761 1, 0, li->type, FALSE);
762 if (stored_rc)
763 rc = stored_rc;
764
765 list_del(&li->llist);
766 kfree(li);
767 }
768 }
769 up(&fid->lock_sem);
770 }
771 }
772
710 if (pfLock->fl_flags & FL_POSIX) 773 if (pfLock->fl_flags & FL_POSIX)
711 posix_lock_file_wait(file, pfLock); 774 posix_lock_file_wait(file, pfLock);
712 FreeXid(xid); 775 FreeXid(xid);
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index b66eff5dc624..ce87550e918f 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -72,6 +72,7 @@ static const struct smb_to_posix_error mapping_table_ERRDOS[] = {
72 {ERRinvlevel,-EOPNOTSUPP}, 72 {ERRinvlevel,-EOPNOTSUPP},
73 {ERRdirnotempty, -ENOTEMPTY}, 73 {ERRdirnotempty, -ENOTEMPTY},
74 {ERRnotlocked, -ENOLCK}, 74 {ERRnotlocked, -ENOLCK},
75 {ERRcancelviolation, -ENOLCK},
75 {ERRalreadyexists, -EEXIST}, 76 {ERRalreadyexists, -EEXIST},
76 {ERRmoredata, -EOVERFLOW}, 77 {ERRmoredata, -EOVERFLOW},
77 {ERReasnotsupported,-EOPNOTSUPP}, 78 {ERReasnotsupported,-EOPNOTSUPP},
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 03bbcb377913..105761e3ba0e 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -556,7 +556,7 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
556 FIND_FILE_STANDARD_INFO * pFindData = 556 FIND_FILE_STANDARD_INFO * pFindData =
557 (FIND_FILE_STANDARD_INFO *)current_entry; 557 (FIND_FILE_STANDARD_INFO *)current_entry;
558 filename = &pFindData->FileName[0]; 558 filename = &pFindData->FileName[0];
559 len = le32_to_cpu(pFindData->FileNameLength); 559 len = pFindData->FileNameLength;
560 } else { 560 } else {
561 cFYI(1,("Unknown findfirst level %d",cfile->srch_inf.info_level)); 561 cFYI(1,("Unknown findfirst level %d",cfile->srch_inf.info_level));
562 } 562 }
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7202d534ef0b..d1705ab8136e 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -372,7 +372,7 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
372 372
373 /* no capabilities flags in old lanman negotiation */ 373 /* no capabilities flags in old lanman negotiation */
374 374
375 pSMB->old_req.PasswordLength = CIFS_SESS_KEY_SIZE; 375 pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE);
376 /* BB calculate hash with password */ 376 /* BB calculate hash with password */
377 /* and copy into bcc */ 377 /* and copy into bcc */
378 378
diff --git a/fs/cifs/smberr.h b/fs/cifs/smberr.h
index cd41c67ff8d3..212c3c296409 100644
--- a/fs/cifs/smberr.h
+++ b/fs/cifs/smberr.h
@@ -95,6 +95,7 @@
95#define ERRinvlevel 124 95#define ERRinvlevel 124
96#define ERRdirnotempty 145 96#define ERRdirnotempty 145
97#define ERRnotlocked 158 97#define ERRnotlocked 158
98#define ERRcancelviolation 173
98#define ERRalreadyexists 183 99#define ERRalreadyexists 183
99#define ERRbadpipe 230 100#define ERRbadpipe 230
100#define ERRpipebusy 231 101#define ERRpipebusy 231
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 17ba329e2b3d..48d47b46b1fb 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -3,7 +3,8 @@
3 * 3 *
4 * Copyright (C) International Business Machines Corp., 2002,2005 4 * Copyright (C) International Business Machines Corp., 2002,2005
5 * Author(s): Steve French (sfrench@us.ibm.com) 5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * 6 * Jeremy Allison (jra@samba.org) 2006.
7 *
7 * This library is free software; you can redistribute it and/or modify 8 * This library is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser General Public License as published 9 * it under the terms of the GNU Lesser General Public License as published
9 * by the Free Software Foundation; either version 2.1 of the License, or 10 * by the Free Software Foundation; either version 2.1 of the License, or
@@ -36,7 +37,7 @@ extern mempool_t *cifs_mid_poolp;
36extern kmem_cache_t *cifs_oplock_cachep; 37extern kmem_cache_t *cifs_oplock_cachep;
37 38
38static struct mid_q_entry * 39static struct mid_q_entry *
39AllocMidQEntry(struct smb_hdr *smb_buffer, struct cifsSesInfo *ses) 40AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses)
40{ 41{
41 struct mid_q_entry *temp; 42 struct mid_q_entry *temp;
42 43
@@ -203,6 +204,10 @@ smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
203 rc = 0; 204 rc = 0;
204 } 205 }
205 206
207 /* Don't want to modify the buffer as a
208 side effect of this call. */
209 smb_buffer->smb_buf_length = smb_buf_length;
210
206 return rc; 211 return rc;
207} 212}
208 213
@@ -217,6 +222,7 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
217 unsigned int len = iov[0].iov_len; 222 unsigned int len = iov[0].iov_len;
218 unsigned int total_len; 223 unsigned int total_len;
219 int first_vec = 0; 224 int first_vec = 0;
225 unsigned int smb_buf_length = smb_buffer->smb_buf_length;
220 226
221 if(ssocket == NULL) 227 if(ssocket == NULL)
222 return -ENOTSOCK; /* BB eventually add reconnect code here */ 228 return -ENOTSOCK; /* BB eventually add reconnect code here */
@@ -293,36 +299,15 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
293 } else 299 } else
294 rc = 0; 300 rc = 0;
295 301
302 /* Don't want to modify the buffer as a
303 side effect of this call. */
304 smb_buffer->smb_buf_length = smb_buf_length;
305
296 return rc; 306 return rc;
297} 307}
298 308
299int 309static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
300SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
301 struct kvec *iov, int n_vec, int * pRespBufType /* ret */,
302 const int long_op)
303{ 310{
304 int rc = 0;
305 unsigned int receive_len;
306 unsigned long timeout;
307 struct mid_q_entry *midQ;
308 struct smb_hdr *in_buf = iov[0].iov_base;
309
310 *pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */
311
312 if ((ses == NULL) || (ses->server == NULL)) {
313 cifs_small_buf_release(in_buf);
314 cERROR(1,("Null session"));
315 return -EIO;
316 }
317
318 if(ses->server->tcpStatus == CifsExiting) {
319 cifs_small_buf_release(in_buf);
320 return -ENOENT;
321 }
322
323 /* Ensure that we do not send more than 50 overlapping requests
324 to the same server. We may make this configurable later or
325 use ses->maxReq */
326 if(long_op == -1) { 311 if(long_op == -1) {
327 /* oplock breaks must not be held up */ 312 /* oplock breaks must not be held up */
328 atomic_inc(&ses->server->inFlight); 313 atomic_inc(&ses->server->inFlight);
@@ -345,53 +330,140 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
345 } else { 330 } else {
346 if(ses->server->tcpStatus == CifsExiting) { 331 if(ses->server->tcpStatus == CifsExiting) {
347 spin_unlock(&GlobalMid_Lock); 332 spin_unlock(&GlobalMid_Lock);
348 cifs_small_buf_release(in_buf);
349 return -ENOENT; 333 return -ENOENT;
350 } 334 }
351 335
352 /* can not count locking commands against total since 336 /* can not count locking commands against total since
353 they are allowed to block on server */ 337 they are allowed to block on server */
354 338
355 if(long_op < 3) {
356 /* update # of requests on the wire to server */ 339 /* update # of requests on the wire to server */
340 if (long_op < 3)
357 atomic_inc(&ses->server->inFlight); 341 atomic_inc(&ses->server->inFlight);
358 }
359 spin_unlock(&GlobalMid_Lock); 342 spin_unlock(&GlobalMid_Lock);
360 break; 343 break;
361 } 344 }
362 } 345 }
363 } 346 }
364 /* make sure that we sign in the same order that we send on this socket 347 return 0;
365 and avoid races inside tcp sendmsg code that could cause corruption 348}
366 of smb data */
367
368 down(&ses->server->tcpSem);
369 349
350static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf,
351 struct mid_q_entry **ppmidQ)
352{
370 if (ses->server->tcpStatus == CifsExiting) { 353 if (ses->server->tcpStatus == CifsExiting) {
371 rc = -ENOENT; 354 return -ENOENT;
372 goto out_unlock2;
373 } else if (ses->server->tcpStatus == CifsNeedReconnect) { 355 } else if (ses->server->tcpStatus == CifsNeedReconnect) {
374 cFYI(1,("tcp session dead - return to caller to retry")); 356 cFYI(1,("tcp session dead - return to caller to retry"));
375 rc = -EAGAIN; 357 return -EAGAIN;
376 goto out_unlock2;
377 } else if (ses->status != CifsGood) { 358 } else if (ses->status != CifsGood) {
378 /* check if SMB session is bad because we are setting it up */ 359 /* check if SMB session is bad because we are setting it up */
379 if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && 360 if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
380 (in_buf->Command != SMB_COM_NEGOTIATE)) { 361 (in_buf->Command != SMB_COM_NEGOTIATE)) {
381 rc = -EAGAIN; 362 return -EAGAIN;
382 goto out_unlock2;
383 } /* else ok - we are setting up session */ 363 } /* else ok - we are setting up session */
384 } 364 }
385 midQ = AllocMidQEntry(in_buf, ses); 365 *ppmidQ = AllocMidQEntry(in_buf, ses);
386 if (midQ == NULL) { 366 if (*ppmidQ == NULL) {
367 return -ENOMEM;
368 }
369 return 0;
370}
371
372static int wait_for_response(struct cifsSesInfo *ses,
373 struct mid_q_entry *midQ,
374 unsigned long timeout,
375 unsigned long time_to_wait)
376{
377 unsigned long curr_timeout;
378
379 for (;;) {
380 curr_timeout = timeout + jiffies;
381 wait_event(ses->server->response_q,
382 (!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
383 time_after(jiffies, curr_timeout) ||
384 ((ses->server->tcpStatus != CifsGood) &&
385 (ses->server->tcpStatus != CifsNew)));
386
387 if (time_after(jiffies, curr_timeout) &&
388 (midQ->midState == MID_REQUEST_SUBMITTED) &&
389 ((ses->server->tcpStatus == CifsGood) ||
390 (ses->server->tcpStatus == CifsNew))) {
391
392 unsigned long lrt;
393
394 /* We timed out. Is the server still
395 sending replies ? */
396 spin_lock(&GlobalMid_Lock);
397 lrt = ses->server->lstrp;
398 spin_unlock(&GlobalMid_Lock);
399
400 /* Calculate time_to_wait past last receive time.
401 Although we prefer not to time out if the
402 server is still responding - we will time
403 out if the server takes more than 15 (or 45
404 or 180) seconds to respond to this request
405 and has not responded to any request from
406 other threads on the client within 10 seconds */
407 lrt += time_to_wait;
408 if (time_after(jiffies, lrt)) {
409 /* No replies for time_to_wait. */
410 cERROR(1,("server not responding"));
411 return -1;
412 }
413 } else {
414 return 0;
415 }
416 }
417}
418
419int
420SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
421 struct kvec *iov, int n_vec, int * pRespBufType /* ret */,
422 const int long_op)
423{
424 int rc = 0;
425 unsigned int receive_len;
426 unsigned long timeout;
427 struct mid_q_entry *midQ;
428 struct smb_hdr *in_buf = iov[0].iov_base;
429
430 *pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */
431
432 if ((ses == NULL) || (ses->server == NULL)) {
433 cifs_small_buf_release(in_buf);
434 cERROR(1,("Null session"));
435 return -EIO;
436 }
437
438 if(ses->server->tcpStatus == CifsExiting) {
439 cifs_small_buf_release(in_buf);
440 return -ENOENT;
441 }
442
443 /* Ensure that we do not send more than 50 overlapping requests
444 to the same server. We may make this configurable later or
445 use ses->maxReq */
446
447 rc = wait_for_free_request(ses, long_op);
448 if (rc) {
449 cifs_small_buf_release(in_buf);
450 return rc;
451 }
452
453 /* make sure that we sign in the same order that we send on this socket
454 and avoid races inside tcp sendmsg code that could cause corruption
455 of smb data */
456
457 down(&ses->server->tcpSem);
458
459 rc = allocate_mid(ses, in_buf, &midQ);
460 if (rc) {
387 up(&ses->server->tcpSem); 461 up(&ses->server->tcpSem);
388 cifs_small_buf_release(in_buf); 462 cifs_small_buf_release(in_buf);
389 /* If not lock req, update # of requests on wire to server */ 463 /* Update # of requests on wire to server */
390 if(long_op < 3) { 464 atomic_dec(&ses->server->inFlight);
391 atomic_dec(&ses->server->inFlight); 465 wake_up(&ses->server->request_q);
392 wake_up(&ses->server->request_q); 466 return rc;
393 }
394 return -ENOMEM;
395 } 467 }
396 468
397 rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number); 469 rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
@@ -406,32 +478,23 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
406 atomic_dec(&ses->server->inSend); 478 atomic_dec(&ses->server->inSend);
407 midQ->when_sent = jiffies; 479 midQ->when_sent = jiffies;
408#endif 480#endif
409 if(rc < 0) { 481
410 DeleteMidQEntry(midQ); 482 up(&ses->server->tcpSem);
411 up(&ses->server->tcpSem); 483 cifs_small_buf_release(in_buf);
412 cifs_small_buf_release(in_buf); 484
413 /* If not lock req, update # of requests on wire to server */ 485 if(rc < 0)
414 if(long_op < 3) { 486 goto out;
415 atomic_dec(&ses->server->inFlight);
416 wake_up(&ses->server->request_q);
417 }
418 return rc;
419 } else {
420 up(&ses->server->tcpSem);
421 cifs_small_buf_release(in_buf);
422 }
423 487
424 if (long_op == -1) 488 if (long_op == -1)
425 goto cifs_no_response_exit2; 489 goto out;
426 else if (long_op == 2) /* writes past end of file can take loong time */ 490 else if (long_op == 2) /* writes past end of file can take loong time */
427 timeout = 180 * HZ; 491 timeout = 180 * HZ;
428 else if (long_op == 1) 492 else if (long_op == 1)
429 timeout = 45 * HZ; /* should be greater than 493 timeout = 45 * HZ; /* should be greater than
430 servers oplock break timeout (about 43 seconds) */ 494 servers oplock break timeout (about 43 seconds) */
431 else if (long_op > 2) { 495 else
432 timeout = MAX_SCHEDULE_TIMEOUT;
433 } else
434 timeout = 15 * HZ; 496 timeout = 15 * HZ;
497
435 /* wait for 15 seconds or until woken up due to response arriving or 498 /* wait for 15 seconds or until woken up due to response arriving or
436 due to last connection to this server being unmounted */ 499 due to last connection to this server being unmounted */
437 if (signal_pending(current)) { 500 if (signal_pending(current)) {
@@ -441,19 +504,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
441 } 504 }
442 505
443 /* No user interrupts in wait - wreaks havoc with performance */ 506 /* No user interrupts in wait - wreaks havoc with performance */
444 if(timeout != MAX_SCHEDULE_TIMEOUT) { 507 wait_for_response(ses, midQ, timeout, 10 * HZ);
445 timeout += jiffies;
446 wait_event(ses->server->response_q,
447 (!(midQ->midState & MID_REQUEST_SUBMITTED)) ||
448 time_after(jiffies, timeout) ||
449 ((ses->server->tcpStatus != CifsGood) &&
450 (ses->server->tcpStatus != CifsNew)));
451 } else {
452 wait_event(ses->server->response_q,
453 (!(midQ->midState & MID_REQUEST_SUBMITTED)) ||
454 ((ses->server->tcpStatus != CifsGood) &&
455 (ses->server->tcpStatus != CifsNew)));
456 }
457 508
458 spin_lock(&GlobalMid_Lock); 509 spin_lock(&GlobalMid_Lock);
459 if (midQ->resp_buf) { 510 if (midQ->resp_buf) {
@@ -481,11 +532,9 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
481 } 532 }
482 spin_unlock(&GlobalMid_Lock); 533 spin_unlock(&GlobalMid_Lock);
483 DeleteMidQEntry(midQ); 534 DeleteMidQEntry(midQ);
484 /* If not lock req, update # of requests on wire to server */ 535 /* Update # of requests on wire to server */
485 if(long_op < 3) { 536 atomic_dec(&ses->server->inFlight);
486 atomic_dec(&ses->server->inFlight); 537 wake_up(&ses->server->request_q);
487 wake_up(&ses->server->request_q);
488 }
489 return rc; 538 return rc;
490 } 539 }
491 540
@@ -536,24 +585,12 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
536 cFYI(1,("Bad MID state?")); 585 cFYI(1,("Bad MID state?"));
537 } 586 }
538 } 587 }
539cifs_no_response_exit2:
540 DeleteMidQEntry(midQ);
541
542 if(long_op < 3) {
543 atomic_dec(&ses->server->inFlight);
544 wake_up(&ses->server->request_q);
545 }
546 588
547 return rc; 589out:
548 590
549out_unlock2: 591 DeleteMidQEntry(midQ);
550 up(&ses->server->tcpSem); 592 atomic_dec(&ses->server->inFlight);
551 cifs_small_buf_release(in_buf); 593 wake_up(&ses->server->request_q);
552 /* If not lock req, update # of requests on wire to server */
553 if(long_op < 3) {
554 atomic_dec(&ses->server->inFlight);
555 wake_up(&ses->server->request_q);
556 }
557 594
558 return rc; 595 return rc;
559} 596}
@@ -583,85 +620,34 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
583 /* Ensure that we do not send more than 50 overlapping requests 620 /* Ensure that we do not send more than 50 overlapping requests
584 to the same server. We may make this configurable later or 621 to the same server. We may make this configurable later or
585 use ses->maxReq */ 622 use ses->maxReq */
586 if(long_op == -1) {
587 /* oplock breaks must not be held up */
588 atomic_inc(&ses->server->inFlight);
589 } else {
590 spin_lock(&GlobalMid_Lock);
591 while(1) {
592 if(atomic_read(&ses->server->inFlight) >=
593 cifs_max_pending){
594 spin_unlock(&GlobalMid_Lock);
595#ifdef CONFIG_CIFS_STATS2
596 atomic_inc(&ses->server->num_waiters);
597#endif
598 wait_event(ses->server->request_q,
599 atomic_read(&ses->server->inFlight)
600 < cifs_max_pending);
601#ifdef CONFIG_CIFS_STATS2
602 atomic_dec(&ses->server->num_waiters);
603#endif
604 spin_lock(&GlobalMid_Lock);
605 } else {
606 if(ses->server->tcpStatus == CifsExiting) {
607 spin_unlock(&GlobalMid_Lock);
608 return -ENOENT;
609 }
610 623
611 /* can not count locking commands against total since 624 rc = wait_for_free_request(ses, long_op);
612 they are allowed to block on server */ 625 if (rc)
613 626 return rc;
614 if(long_op < 3) { 627
615 /* update # of requests on the wire to server */
616 atomic_inc(&ses->server->inFlight);
617 }
618 spin_unlock(&GlobalMid_Lock);
619 break;
620 }
621 }
622 }
623 /* make sure that we sign in the same order that we send on this socket 628 /* make sure that we sign in the same order that we send on this socket
624 and avoid races inside tcp sendmsg code that could cause corruption 629 and avoid races inside tcp sendmsg code that could cause corruption
625 of smb data */ 630 of smb data */
626 631
627 down(&ses->server->tcpSem); 632 down(&ses->server->tcpSem);
628 633
629 if (ses->server->tcpStatus == CifsExiting) { 634 rc = allocate_mid(ses, in_buf, &midQ);
630 rc = -ENOENT; 635 if (rc) {
631 goto out_unlock;
632 } else if (ses->server->tcpStatus == CifsNeedReconnect) {
633 cFYI(1,("tcp session dead - return to caller to retry"));
634 rc = -EAGAIN;
635 goto out_unlock;
636 } else if (ses->status != CifsGood) {
637 /* check if SMB session is bad because we are setting it up */
638 if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
639 (in_buf->Command != SMB_COM_NEGOTIATE)) {
640 rc = -EAGAIN;
641 goto out_unlock;
642 } /* else ok - we are setting up session */
643 }
644 midQ = AllocMidQEntry(in_buf, ses);
645 if (midQ == NULL) {
646 up(&ses->server->tcpSem); 636 up(&ses->server->tcpSem);
647 /* If not lock req, update # of requests on wire to server */ 637 /* Update # of requests on wire to server */
648 if(long_op < 3) { 638 atomic_dec(&ses->server->inFlight);
649 atomic_dec(&ses->server->inFlight); 639 wake_up(&ses->server->request_q);
650 wake_up(&ses->server->request_q); 640 return rc;
651 }
652 return -ENOMEM;
653 } 641 }
654 642
655 if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { 643 if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
656 up(&ses->server->tcpSem);
657 cERROR(1, ("Illegal length, greater than maximum frame, %d", 644 cERROR(1, ("Illegal length, greater than maximum frame, %d",
658 in_buf->smb_buf_length)); 645 in_buf->smb_buf_length));
659 DeleteMidQEntry(midQ); 646 DeleteMidQEntry(midQ);
660 /* If not lock req, update # of requests on wire to server */ 647 up(&ses->server->tcpSem);
661 if(long_op < 3) { 648 /* Update # of requests on wire to server */
662 atomic_dec(&ses->server->inFlight); 649 atomic_dec(&ses->server->inFlight);
663 wake_up(&ses->server->request_q); 650 wake_up(&ses->server->request_q);
664 }
665 return -EIO; 651 return -EIO;
666 } 652 }
667 653
@@ -677,27 +663,19 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
677 atomic_dec(&ses->server->inSend); 663 atomic_dec(&ses->server->inSend);
678 midQ->when_sent = jiffies; 664 midQ->when_sent = jiffies;
679#endif 665#endif
680 if(rc < 0) { 666 up(&ses->server->tcpSem);
681 DeleteMidQEntry(midQ); 667
682 up(&ses->server->tcpSem); 668 if(rc < 0)
683 /* If not lock req, update # of requests on wire to server */ 669 goto out;
684 if(long_op < 3) { 670
685 atomic_dec(&ses->server->inFlight);
686 wake_up(&ses->server->request_q);
687 }
688 return rc;
689 } else
690 up(&ses->server->tcpSem);
691 if (long_op == -1) 671 if (long_op == -1)
692 goto cifs_no_response_exit; 672 goto out;
693 else if (long_op == 2) /* writes past end of file can take loong time */ 673 else if (long_op == 2) /* writes past end of file can take loong time */
694 timeout = 180 * HZ; 674 timeout = 180 * HZ;
695 else if (long_op == 1) 675 else if (long_op == 1)
696 timeout = 45 * HZ; /* should be greater than 676 timeout = 45 * HZ; /* should be greater than
697 servers oplock break timeout (about 43 seconds) */ 677 servers oplock break timeout (about 43 seconds) */
698 else if (long_op > 2) { 678 else
699 timeout = MAX_SCHEDULE_TIMEOUT;
700 } else
701 timeout = 15 * HZ; 679 timeout = 15 * HZ;
702 /* wait for 15 seconds or until woken up due to response arriving or 680 /* wait for 15 seconds or until woken up due to response arriving or
703 due to last connection to this server being unmounted */ 681 due to last connection to this server being unmounted */
@@ -708,19 +686,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
708 } 686 }
709 687
710 /* No user interrupts in wait - wreaks havoc with performance */ 688 /* No user interrupts in wait - wreaks havoc with performance */
711 if(timeout != MAX_SCHEDULE_TIMEOUT) { 689 wait_for_response(ses, midQ, timeout, 10 * HZ);
712 timeout += jiffies;
713 wait_event(ses->server->response_q,
714 (!(midQ->midState & MID_REQUEST_SUBMITTED)) ||
715 time_after(jiffies, timeout) ||
716 ((ses->server->tcpStatus != CifsGood) &&
717 (ses->server->tcpStatus != CifsNew)));
718 } else {
719 wait_event(ses->server->response_q,
720 (!(midQ->midState & MID_REQUEST_SUBMITTED)) ||
721 ((ses->server->tcpStatus != CifsGood) &&
722 (ses->server->tcpStatus != CifsNew)));
723 }
724 690
725 spin_lock(&GlobalMid_Lock); 691 spin_lock(&GlobalMid_Lock);
726 if (midQ->resp_buf) { 692 if (midQ->resp_buf) {
@@ -748,11 +714,9 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
748 } 714 }
749 spin_unlock(&GlobalMid_Lock); 715 spin_unlock(&GlobalMid_Lock);
750 DeleteMidQEntry(midQ); 716 DeleteMidQEntry(midQ);
751 /* If not lock req, update # of requests on wire to server */ 717 /* Update # of requests on wire to server */
752 if(long_op < 3) { 718 atomic_dec(&ses->server->inFlight);
753 atomic_dec(&ses->server->inFlight); 719 wake_up(&ses->server->request_q);
754 wake_up(&ses->server->request_q);
755 }
756 return rc; 720 return rc;
757 } 721 }
758 722
@@ -799,23 +763,253 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
799 cERROR(1,("Bad MID state?")); 763 cERROR(1,("Bad MID state?"));
800 } 764 }
801 } 765 }
802cifs_no_response_exit: 766
767out:
768
803 DeleteMidQEntry(midQ); 769 DeleteMidQEntry(midQ);
770 atomic_dec(&ses->server->inFlight);
771 wake_up(&ses->server->request_q);
804 772
805 if(long_op < 3) { 773 return rc;
806 atomic_dec(&ses->server->inFlight); 774}
807 wake_up(&ses->server->request_q); 775
808 } 776/* Send an NT_CANCEL SMB to cause the POSIX blocking lock to return. */
777
778static int
779send_nt_cancel(struct cifsTconInfo *tcon, struct smb_hdr *in_buf,
780 struct mid_q_entry *midQ)
781{
782 int rc = 0;
783 struct cifsSesInfo *ses = tcon->ses;
784 __u16 mid = in_buf->Mid;
809 785
786 header_assemble(in_buf, SMB_COM_NT_CANCEL, tcon, 0);
787 in_buf->Mid = mid;
788 down(&ses->server->tcpSem);
789 rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
790 if (rc) {
791 up(&ses->server->tcpSem);
792 return rc;
793 }
794 rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
795 (struct sockaddr *) &(ses->server->addr.sockAddr));
796 up(&ses->server->tcpSem);
810 return rc; 797 return rc;
798}
799
800/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
801 blocking lock to return. */
802
803static int
804send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon,
805 struct smb_hdr *in_buf,
806 struct smb_hdr *out_buf)
807{
808 int bytes_returned;
809 struct cifsSesInfo *ses = tcon->ses;
810 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
811
812 /* We just modify the current in_buf to change
813 the type of lock from LOCKING_ANDX_SHARED_LOCK
814 or LOCKING_ANDX_EXCLUSIVE_LOCK to
815 LOCKING_ANDX_CANCEL_LOCK. */
816
817 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
818 pSMB->Timeout = 0;
819 pSMB->hdr.Mid = GetNextMid(ses->server);
820
821 return SendReceive(xid, ses, in_buf, out_buf,
822 &bytes_returned, 0);
823}
811 824
812out_unlock: 825int
826SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
827 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
828 int *pbytes_returned)
829{
830 int rc = 0;
831 int rstart = 0;
832 unsigned int receive_len;
833 struct mid_q_entry *midQ;
834 struct cifsSesInfo *ses;
835
836 if (tcon == NULL || tcon->ses == NULL) {
837 cERROR(1,("Null smb session"));
838 return -EIO;
839 }
840 ses = tcon->ses;
841
842 if(ses->server == NULL) {
843 cERROR(1,("Null tcp session"));
844 return -EIO;
845 }
846
847 if(ses->server->tcpStatus == CifsExiting)
848 return -ENOENT;
849
850 /* Ensure that we do not send more than 50 overlapping requests
851 to the same server. We may make this configurable later or
852 use ses->maxReq */
853
854 rc = wait_for_free_request(ses, 3);
855 if (rc)
856 return rc;
857
858 /* make sure that we sign in the same order that we send on this socket
859 and avoid races inside tcp sendmsg code that could cause corruption
860 of smb data */
861
862 down(&ses->server->tcpSem);
863
864 rc = allocate_mid(ses, in_buf, &midQ);
865 if (rc) {
866 up(&ses->server->tcpSem);
867 return rc;
868 }
869
870 if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
871 up(&ses->server->tcpSem);
872 cERROR(1, ("Illegal length, greater than maximum frame, %d",
873 in_buf->smb_buf_length));
874 DeleteMidQEntry(midQ);
875 return -EIO;
876 }
877
878 rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
879
880 midQ->midState = MID_REQUEST_SUBMITTED;
881#ifdef CONFIG_CIFS_STATS2
882 atomic_inc(&ses->server->inSend);
883#endif
884 rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
885 (struct sockaddr *) &(ses->server->addr.sockAddr));
886#ifdef CONFIG_CIFS_STATS2
887 atomic_dec(&ses->server->inSend);
888 midQ->when_sent = jiffies;
889#endif
813 up(&ses->server->tcpSem); 890 up(&ses->server->tcpSem);
814 /* If not lock req, update # of requests on wire to server */ 891
815 if(long_op < 3) { 892 if(rc < 0) {
816 atomic_dec(&ses->server->inFlight); 893 DeleteMidQEntry(midQ);
817 wake_up(&ses->server->request_q); 894 return rc;
895 }
896
897 /* Wait for a reply - allow signals to interrupt. */
898 rc = wait_event_interruptible(ses->server->response_q,
899 (!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
900 ((ses->server->tcpStatus != CifsGood) &&
901 (ses->server->tcpStatus != CifsNew)));
902
903 /* Were we interrupted by a signal ? */
904 if ((rc == -ERESTARTSYS) &&
905 (midQ->midState == MID_REQUEST_SUBMITTED) &&
906 ((ses->server->tcpStatus == CifsGood) ||
907 (ses->server->tcpStatus == CifsNew))) {
908
909 if (in_buf->Command == SMB_COM_TRANSACTION2) {
910 /* POSIX lock. We send a NT_CANCEL SMB to cause the
911 blocking lock to return. */
912
913 rc = send_nt_cancel(tcon, in_buf, midQ);
914 if (rc) {
915 DeleteMidQEntry(midQ);
916 return rc;
917 }
918 } else {
919 /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
920 to cause the blocking lock to return. */
921
922 rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
923
924 /* If we get -ENOLCK back the lock may have
925 already been removed. Don't exit in this case. */
926 if (rc && rc != -ENOLCK) {
927 DeleteMidQEntry(midQ);
928 return rc;
929 }
930 }
931
932 /* Wait 5 seconds for the response. */
933 if (wait_for_response(ses, midQ, 5 * HZ, 5 * HZ)==0) {
934 /* We got the response - restart system call. */
935 rstart = 1;
936 }
937 }
938
939 spin_lock(&GlobalMid_Lock);
940 if (midQ->resp_buf) {
941 spin_unlock(&GlobalMid_Lock);
942 receive_len = midQ->resp_buf->smb_buf_length;
943 } else {
944 cERROR(1,("No response for cmd %d mid %d",
945 midQ->command, midQ->mid));
946 if(midQ->midState == MID_REQUEST_SUBMITTED) {
947 if(ses->server->tcpStatus == CifsExiting)
948 rc = -EHOSTDOWN;
949 else {
950 ses->server->tcpStatus = CifsNeedReconnect;
951 midQ->midState = MID_RETRY_NEEDED;
952 }
953 }
954
955 if (rc != -EHOSTDOWN) {
956 if(midQ->midState == MID_RETRY_NEEDED) {
957 rc = -EAGAIN;
958 cFYI(1,("marking request for retry"));
959 } else {
960 rc = -EIO;
961 }
962 }
963 spin_unlock(&GlobalMid_Lock);
964 DeleteMidQEntry(midQ);
965 return rc;
818 } 966 }
967
968 if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
969 cERROR(1, ("Frame too large received. Length: %d Xid: %d",
970 receive_len, xid));
971 rc = -EIO;
972 } else { /* rcvd frame is ok */
973
974 if (midQ->resp_buf && out_buf
975 && (midQ->midState == MID_RESPONSE_RECEIVED)) {
976 out_buf->smb_buf_length = receive_len;
977 memcpy((char *)out_buf + 4,
978 (char *)midQ->resp_buf + 4,
979 receive_len);
980
981 dump_smb(out_buf, 92);
982 /* convert the length into a more usable form */
983 if((receive_len > 24) &&
984 (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
985 SECMODE_SIGN_ENABLED))) {
986 rc = cifs_verify_signature(out_buf,
987 ses->server->mac_signing_key,
988 midQ->sequence_number+1);
989 if(rc) {
990 cERROR(1,("Unexpected SMB signature"));
991 /* BB FIXME add code to kill session */
992 }
993 }
994
995 *pbytes_returned = out_buf->smb_buf_length;
996
997 /* BB special case reconnect tid and uid here? */
998 rc = map_smb_to_linux_error(out_buf);
819 999
1000 /* convert ByteCount if necessary */
1001 if (receive_len >=
1002 sizeof (struct smb_hdr) -
1003 4 /* do not count RFC1001 header */ +
1004 (2 * out_buf->WordCount) + 2 /* bcc */ )
1005 BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
1006 } else {
1007 rc = -EIO;
1008 cERROR(1,("Bad MID state?"));
1009 }
1010 }
1011 DeleteMidQEntry(midQ);
1012 if (rstart && rc == -EACCES)
1013 return -ERESTARTSYS;
820 return rc; 1014 return rc;
821} 1015}
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 7754d641775e..067648b7179b 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -330,11 +330,15 @@ ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size)
330 sb = direntry->d_inode->i_sb; 330 sb = direntry->d_inode->i_sb;
331 if(sb == NULL) 331 if(sb == NULL)
332 return -EIO; 332 return -EIO;
333 xid = GetXid();
334 333
335 cifs_sb = CIFS_SB(sb); 334 cifs_sb = CIFS_SB(sb);
336 pTcon = cifs_sb->tcon; 335 pTcon = cifs_sb->tcon;
337 336
337 if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
338 return -EOPNOTSUPP;
339
340 xid = GetXid();
341
338 full_path = build_path_from_dentry(direntry); 342 full_path = build_path_from_dentry(direntry);
339 if(full_path == NULL) { 343 if(full_path == NULL) {
340 FreeXid(xid); 344 FreeXid(xid);
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 19ffb043abbc..3a3567433b92 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1168,7 +1168,7 @@ static int ep_unlink(struct eventpoll *ep, struct epitem *epi)
1168eexit_1: 1168eexit_1:
1169 1169
1170 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n", 1170 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n",
1171 current, ep, epi->file, error)); 1171 current, ep, epi->ffd.file, error));
1172 1172
1173 return error; 1173 return error;
1174} 1174}
@@ -1236,7 +1236,7 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
1236 struct eventpoll *ep = epi->ep; 1236 struct eventpoll *ep = epi->ep;
1237 1237
1238 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n", 1238 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
1239 current, epi->file, epi, ep)); 1239 current, epi->ffd.file, epi, ep));
1240 1240
1241 write_lock_irqsave(&ep->lock, flags); 1241 write_lock_irqsave(&ep->lock, flags);
1242 1242
diff --git a/fs/exec.c b/fs/exec.c
index 8344ba73a2a6..54135df2a966 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -486,8 +486,6 @@ struct file *open_exec(const char *name)
486 if (!(nd.mnt->mnt_flags & MNT_NOEXEC) && 486 if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
487 S_ISREG(inode->i_mode)) { 487 S_ISREG(inode->i_mode)) {
488 int err = vfs_permission(&nd, MAY_EXEC); 488 int err = vfs_permission(&nd, MAY_EXEC);
489 if (!err && !(inode->i_mode & 0111))
490 err = -EACCES;
491 file = ERR_PTR(err); 489 file = ERR_PTR(err);
492 if (!err) { 490 if (!err) {
493 file = nameidata_to_filp(&nd, O_RDONLY); 491 file = nameidata_to_filp(&nd, O_RDONLY);
@@ -753,7 +751,7 @@ no_thread_group:
753 751
754 write_lock_irq(&tasklist_lock); 752 write_lock_irq(&tasklist_lock);
755 spin_lock(&oldsighand->siglock); 753 spin_lock(&oldsighand->siglock);
756 spin_lock(&newsighand->siglock); 754 spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);
757 755
758 rcu_assign_pointer(current->sighand, newsighand); 756 rcu_assign_pointer(current->sighand, newsighand);
759 recalc_sigpending(); 757 recalc_sigpending();
@@ -922,12 +920,6 @@ int prepare_binprm(struct linux_binprm *bprm)
922 int retval; 920 int retval;
923 921
924 mode = inode->i_mode; 922 mode = inode->i_mode;
925 /*
926 * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
927 * generic_permission lets a non-executable through
928 */
929 if (!(mode & 0111)) /* with at least _one_ execute bit set */
930 return -EACCES;
931 if (bprm->file->f_op == NULL) 923 if (bprm->file->f_op == NULL)
932 return -EACCES; 924 return -EACCES;
933 925
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index f2702cda9779..681dea8f9532 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -775,7 +775,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
775 if (EXT2_INODE_SIZE(sb) == 0) 775 if (EXT2_INODE_SIZE(sb) == 0)
776 goto cantfind_ext2; 776 goto cantfind_ext2;
777 sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb); 777 sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
778 if (sbi->s_inodes_per_block == 0) 778 if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
779 goto cantfind_ext2; 779 goto cantfind_ext2;
780 sbi->s_itb_per_group = sbi->s_inodes_per_group / 780 sbi->s_itb_per_group = sbi->s_inodes_per_group /
781 sbi->s_inodes_per_block; 781 sbi->s_inodes_per_block;
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index a504a40d6d29..063d994bda0b 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -1269,12 +1269,12 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
1269 goal = le32_to_cpu(es->s_first_data_block); 1269 goal = le32_to_cpu(es->s_first_data_block);
1270 group_no = (goal - le32_to_cpu(es->s_first_data_block)) / 1270 group_no = (goal - le32_to_cpu(es->s_first_data_block)) /
1271 EXT3_BLOCKS_PER_GROUP(sb); 1271 EXT3_BLOCKS_PER_GROUP(sb);
1272 goal_group = group_no;
1273retry_alloc:
1272 gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); 1274 gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
1273 if (!gdp) 1275 if (!gdp)
1274 goto io_error; 1276 goto io_error;
1275 1277
1276 goal_group = group_no;
1277retry:
1278 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); 1278 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1279 /* 1279 /*
1280 * if there is not enough free blocks to make a new resevation 1280 * if there is not enough free blocks to make a new resevation
@@ -1349,7 +1349,7 @@ retry:
1349 if (my_rsv) { 1349 if (my_rsv) {
1350 my_rsv = NULL; 1350 my_rsv = NULL;
1351 group_no = goal_group; 1351 group_no = goal_group;
1352 goto retry; 1352 goto retry_alloc;
1353 } 1353 }
1354 /* No space left on the device */ 1354 /* No space left on the device */
1355 *errp = -ENOSPC; 1355 *errp = -ENOSPC;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 63614ed16336..5c4fcd1dbf59 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -395,14 +395,16 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
395 struct fuse_readpages_data data; 395 struct fuse_readpages_data data;
396 int err; 396 int err;
397 397
398 err = -EIO;
398 if (is_bad_inode(inode)) 399 if (is_bad_inode(inode))
399 return -EIO; 400 goto clean_pages_up;
400 401
401 data.file = file; 402 data.file = file;
402 data.inode = inode; 403 data.inode = inode;
403 data.req = fuse_get_req(fc); 404 data.req = fuse_get_req(fc);
405 err = PTR_ERR(data.req);
404 if (IS_ERR(data.req)) 406 if (IS_ERR(data.req))
405 return PTR_ERR(data.req); 407 goto clean_pages_up;
406 408
407 err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); 409 err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
408 if (!err) { 410 if (!err) {
@@ -412,6 +414,10 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
412 fuse_put_request(fc, data.req); 414 fuse_put_request(fc, data.req);
413 } 415 }
414 return err; 416 return err;
417
418clean_pages_up:
419 put_pages_list(pages);
420 return err;
415} 421}
416 422
417static size_t fuse_send_write(struct fuse_req *req, struct file *file, 423static size_t fuse_send_write(struct fuse_req *req, struct file *file,
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 93aa5715f224..78b1deae3fa2 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -44,6 +44,9 @@ static int set_task_ioprio(struct task_struct *task, int ioprio)
44 task->ioprio = ioprio; 44 task->ioprio = ioprio;
45 45
46 ioc = task->io_context; 46 ioc = task->io_context;
47 /* see wmb() in current_io_context() */
48 smp_read_barrier_depends();
49
47 if (ioc && ioc->set_ioprio) 50 if (ioc && ioc->set_ioprio)
48 ioc->set_ioprio(ioc, ioprio); 51 ioc->set_ioprio(ioc, ioprio);
49 52
@@ -111,9 +114,9 @@ asmlinkage long sys_ioprio_set(int which, int who, int ioprio)
111 continue; 114 continue;
112 ret = set_task_ioprio(p, ioprio); 115 ret = set_task_ioprio(p, ioprio);
113 if (ret) 116 if (ret)
114 break; 117 goto free_uid;
115 } while_each_thread(g, p); 118 } while_each_thread(g, p);
116 119free_uid:
117 if (who) 120 if (who)
118 free_uid(user); 121 free_uid(user);
119 break; 122 break;
@@ -137,6 +140,29 @@ out:
137 return ret; 140 return ret;
138} 141}
139 142
143int ioprio_best(unsigned short aprio, unsigned short bprio)
144{
145 unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
146 unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
147
148 if (!ioprio_valid(aprio))
149 return bprio;
150 if (!ioprio_valid(bprio))
151 return aprio;
152
153 if (aclass == IOPRIO_CLASS_NONE)
154 aclass = IOPRIO_CLASS_BE;
155 if (bclass == IOPRIO_CLASS_NONE)
156 bclass = IOPRIO_CLASS_BE;
157
158 if (aclass == bclass)
159 return min(aprio, bprio);
160 if (aclass > bclass)
161 return bprio;
162 else
163 return aprio;
164}
165
140asmlinkage long sys_ioprio_get(int which, int who) 166asmlinkage long sys_ioprio_get(int which, int who)
141{ 167{
142 struct task_struct *g, *p; 168 struct task_struct *g, *p;
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 0971814c38b8..42da60784311 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -261,7 +261,7 @@ void journal_commit_transaction(journal_t *journal)
261 struct buffer_head *bh = jh2bh(jh); 261 struct buffer_head *bh = jh2bh(jh);
262 262
263 jbd_lock_bh_state(bh); 263 jbd_lock_bh_state(bh);
264 kfree(jh->b_committed_data); 264 jbd_slab_free(jh->b_committed_data, bh->b_size);
265 jh->b_committed_data = NULL; 265 jh->b_committed_data = NULL;
266 jbd_unlock_bh_state(bh); 266 jbd_unlock_bh_state(bh);
267 } 267 }
@@ -745,14 +745,14 @@ restart_loop:
745 * Otherwise, we can just throw away the frozen data now. 745 * Otherwise, we can just throw away the frozen data now.
746 */ 746 */
747 if (jh->b_committed_data) { 747 if (jh->b_committed_data) {
748 kfree(jh->b_committed_data); 748 jbd_slab_free(jh->b_committed_data, bh->b_size);
749 jh->b_committed_data = NULL; 749 jh->b_committed_data = NULL;
750 if (jh->b_frozen_data) { 750 if (jh->b_frozen_data) {
751 jh->b_committed_data = jh->b_frozen_data; 751 jh->b_committed_data = jh->b_frozen_data;
752 jh->b_frozen_data = NULL; 752 jh->b_frozen_data = NULL;
753 } 753 }
754 } else if (jh->b_frozen_data) { 754 } else if (jh->b_frozen_data) {
755 kfree(jh->b_frozen_data); 755 jbd_slab_free(jh->b_frozen_data, bh->b_size);
756 jh->b_frozen_data = NULL; 756 jh->b_frozen_data = NULL;
757 } 757 }
758 758
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 8c9b28dff119..f66724ce443a 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -84,6 +84,7 @@ EXPORT_SYMBOL(journal_force_commit);
84 84
85static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *); 85static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
86static void __journal_abort_soft (journal_t *journal, int errno); 86static void __journal_abort_soft (journal_t *journal, int errno);
87static int journal_create_jbd_slab(size_t slab_size);
87 88
88/* 89/*
89 * Helper function used to manage commit timeouts 90 * Helper function used to manage commit timeouts
@@ -328,10 +329,10 @@ repeat:
328 char *tmp; 329 char *tmp;
329 330
330 jbd_unlock_bh_state(bh_in); 331 jbd_unlock_bh_state(bh_in);
331 tmp = jbd_rep_kmalloc(bh_in->b_size, GFP_NOFS); 332 tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS);
332 jbd_lock_bh_state(bh_in); 333 jbd_lock_bh_state(bh_in);
333 if (jh_in->b_frozen_data) { 334 if (jh_in->b_frozen_data) {
334 kfree(tmp); 335 jbd_slab_free(tmp, bh_in->b_size);
335 goto repeat; 336 goto repeat;
336 } 337 }
337 338
@@ -1069,17 +1070,17 @@ static int load_superblock(journal_t *journal)
1069int journal_load(journal_t *journal) 1070int journal_load(journal_t *journal)
1070{ 1071{
1071 int err; 1072 int err;
1073 journal_superblock_t *sb;
1072 1074
1073 err = load_superblock(journal); 1075 err = load_superblock(journal);
1074 if (err) 1076 if (err)
1075 return err; 1077 return err;
1076 1078
1079 sb = journal->j_superblock;
1077 /* If this is a V2 superblock, then we have to check the 1080 /* If this is a V2 superblock, then we have to check the
1078 * features flags on it. */ 1081 * features flags on it. */
1079 1082
1080 if (journal->j_format_version >= 2) { 1083 if (journal->j_format_version >= 2) {
1081 journal_superblock_t *sb = journal->j_superblock;
1082
1083 if ((sb->s_feature_ro_compat & 1084 if ((sb->s_feature_ro_compat &
1084 ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) || 1085 ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) ||
1085 (sb->s_feature_incompat & 1086 (sb->s_feature_incompat &
@@ -1090,6 +1091,13 @@ int journal_load(journal_t *journal)
1090 } 1091 }
1091 } 1092 }
1092 1093
1094 /*
1095 * Create a slab for this blocksize
1096 */
1097 err = journal_create_jbd_slab(cpu_to_be32(sb->s_blocksize));
1098 if (err)
1099 return err;
1100
1093 /* Let the recovery code check whether it needs to recover any 1101 /* Let the recovery code check whether it needs to recover any
1094 * data from the journal. */ 1102 * data from the journal. */
1095 if (journal_recover(journal)) 1103 if (journal_recover(journal))
@@ -1612,6 +1620,77 @@ void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
1612} 1620}
1613 1621
1614/* 1622/*
1623 * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed
1624 * and allocate frozen and commit buffers from these slabs.
1625 *
1626 * Reason for doing this is to avoid, SLAB_DEBUG - since it could
1627 * cause bh to cross page boundary.
1628 */
1629
1630#define JBD_MAX_SLABS 5
1631#define JBD_SLAB_INDEX(size) (size >> 11)
1632
1633static kmem_cache_t *jbd_slab[JBD_MAX_SLABS];
1634static const char *jbd_slab_names[JBD_MAX_SLABS] = {
1635 "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
1636};
1637
1638static void journal_destroy_jbd_slabs(void)
1639{
1640 int i;
1641
1642 for (i = 0; i < JBD_MAX_SLABS; i++) {
1643 if (jbd_slab[i])
1644 kmem_cache_destroy(jbd_slab[i]);
1645 jbd_slab[i] = NULL;
1646 }
1647}
1648
1649static int journal_create_jbd_slab(size_t slab_size)
1650{
1651 int i = JBD_SLAB_INDEX(slab_size);
1652
1653 BUG_ON(i >= JBD_MAX_SLABS);
1654
1655 /*
1656 * Check if we already have a slab created for this size
1657 */
1658 if (jbd_slab[i])
1659 return 0;
1660
1661 /*
1662 * Create a slab and force alignment to be same as slabsize -
1663 * this will make sure that allocations won't cross the page
1664 * boundary.
1665 */
1666 jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
1667 slab_size, slab_size, 0, NULL, NULL);
1668 if (!jbd_slab[i]) {
1669 printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
1670 return -ENOMEM;
1671 }
1672 return 0;
1673}
1674
1675void * jbd_slab_alloc(size_t size, gfp_t flags)
1676{
1677 int idx;
1678
1679 idx = JBD_SLAB_INDEX(size);
1680 BUG_ON(jbd_slab[idx] == NULL);
1681 return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
1682}
1683
1684void jbd_slab_free(void *ptr, size_t size)
1685{
1686 int idx;
1687
1688 idx = JBD_SLAB_INDEX(size);
1689 BUG_ON(jbd_slab[idx] == NULL);
1690 kmem_cache_free(jbd_slab[idx], ptr);
1691}
1692
1693/*
1615 * Journal_head storage management 1694 * Journal_head storage management
1616 */ 1695 */
1617static kmem_cache_t *journal_head_cache; 1696static kmem_cache_t *journal_head_cache;
@@ -1799,13 +1878,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
1799 printk(KERN_WARNING "%s: freeing " 1878 printk(KERN_WARNING "%s: freeing "
1800 "b_frozen_data\n", 1879 "b_frozen_data\n",
1801 __FUNCTION__); 1880 __FUNCTION__);
1802 kfree(jh->b_frozen_data); 1881 jbd_slab_free(jh->b_frozen_data, bh->b_size);
1803 } 1882 }
1804 if (jh->b_committed_data) { 1883 if (jh->b_committed_data) {
1805 printk(KERN_WARNING "%s: freeing " 1884 printk(KERN_WARNING "%s: freeing "
1806 "b_committed_data\n", 1885 "b_committed_data\n",
1807 __FUNCTION__); 1886 __FUNCTION__);
1808 kfree(jh->b_committed_data); 1887 jbd_slab_free(jh->b_committed_data, bh->b_size);
1809 } 1888 }
1810 bh->b_private = NULL; 1889 bh->b_private = NULL;
1811 jh->b_bh = NULL; /* debug, really */ 1890 jh->b_bh = NULL; /* debug, really */
@@ -1961,6 +2040,7 @@ static void journal_destroy_caches(void)
1961 journal_destroy_revoke_caches(); 2040 journal_destroy_revoke_caches();
1962 journal_destroy_journal_head_cache(); 2041 journal_destroy_journal_head_cache();
1963 journal_destroy_handle_cache(); 2042 journal_destroy_handle_cache();
2043 journal_destroy_jbd_slabs();
1964} 2044}
1965 2045
1966static int __init journal_init(void) 2046static int __init journal_init(void)
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 508b2ea91f43..de2e4cbbf79a 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -666,8 +666,9 @@ repeat:
666 if (!frozen_buffer) { 666 if (!frozen_buffer) {
667 JBUFFER_TRACE(jh, "allocate memory for buffer"); 667 JBUFFER_TRACE(jh, "allocate memory for buffer");
668 jbd_unlock_bh_state(bh); 668 jbd_unlock_bh_state(bh);
669 frozen_buffer = jbd_kmalloc(jh2bh(jh)->b_size, 669 frozen_buffer =
670 GFP_NOFS); 670 jbd_slab_alloc(jh2bh(jh)->b_size,
671 GFP_NOFS);
671 if (!frozen_buffer) { 672 if (!frozen_buffer) {
672 printk(KERN_EMERG 673 printk(KERN_EMERG
673 "%s: OOM for frozen_buffer\n", 674 "%s: OOM for frozen_buffer\n",
@@ -879,7 +880,7 @@ int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
879 880
880repeat: 881repeat:
881 if (!jh->b_committed_data) { 882 if (!jh->b_committed_data) {
882 committed_data = jbd_kmalloc(jh2bh(jh)->b_size, GFP_NOFS); 883 committed_data = jbd_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
883 if (!committed_data) { 884 if (!committed_data) {
884 printk(KERN_EMERG "%s: No memory for committed data\n", 885 printk(KERN_EMERG "%s: No memory for committed data\n",
885 __FUNCTION__); 886 __FUNCTION__);
@@ -906,7 +907,7 @@ repeat:
906out: 907out:
907 journal_put_journal_head(jh); 908 journal_put_journal_head(jh);
908 if (unlikely(committed_data)) 909 if (unlikely(committed_data))
909 kfree(committed_data); 910 jbd_slab_free(committed_data, bh->b_size);
910 return err; 911 return err;
911} 912}
912 913
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 43e3f566aad6..a223cf4faa9b 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -168,16 +168,15 @@ void jfs_dirty_inode(struct inode *inode)
168 set_cflag(COMMIT_Dirty, inode); 168 set_cflag(COMMIT_Dirty, inode);
169} 169}
170 170
171static int 171int jfs_get_block(struct inode *ip, sector_t lblock,
172jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks, 172 struct buffer_head *bh_result, int create)
173 struct buffer_head *bh_result, int create)
174{ 173{
175 s64 lblock64 = lblock; 174 s64 lblock64 = lblock;
176 int rc = 0; 175 int rc = 0;
177 xad_t xad; 176 xad_t xad;
178 s64 xaddr; 177 s64 xaddr;
179 int xflag; 178 int xflag;
180 s32 xlen = max_blocks; 179 s32 xlen = bh_result->b_size >> ip->i_blkbits;
181 180
182 /* 181 /*
183 * Take appropriate lock on inode 182 * Take appropriate lock on inode
@@ -188,7 +187,7 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
188 IREAD_LOCK(ip); 187 IREAD_LOCK(ip);
189 188
190 if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) && 189 if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
191 (!xtLookup(ip, lblock64, max_blocks, &xflag, &xaddr, &xlen, 0)) && 190 (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) &&
192 xaddr) { 191 xaddr) {
193 if (xflag & XAD_NOTRECORDED) { 192 if (xflag & XAD_NOTRECORDED) {
194 if (!create) 193 if (!create)
@@ -255,13 +254,6 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
255 return rc; 254 return rc;
256} 255}
257 256
258static int jfs_get_block(struct inode *ip, sector_t lblock,
259 struct buffer_head *bh_result, int create)
260{
261 return jfs_get_blocks(ip, lblock, bh_result->b_size >> ip->i_blkbits,
262 bh_result, create);
263}
264
265static int jfs_writepage(struct page *page, struct writeback_control *wbc) 257static int jfs_writepage(struct page *page, struct writeback_control *wbc)
266{ 258{
267 return nobh_writepage(page, jfs_get_block, wbc); 259 return nobh_writepage(page, jfs_get_block, wbc);
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index b5c7da6190dc..1fc48df670c8 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -32,6 +32,7 @@ extern void jfs_truncate_nolock(struct inode *, loff_t);
32extern void jfs_free_zero_link(struct inode *); 32extern void jfs_free_zero_link(struct inode *);
33extern struct dentry *jfs_get_parent(struct dentry *dentry); 33extern struct dentry *jfs_get_parent(struct dentry *dentry);
34extern void jfs_set_inode_flags(struct inode *); 34extern void jfs_set_inode_flags(struct inode *);
35extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
35 36
36extern const struct address_space_operations jfs_aops; 37extern const struct address_space_operations jfs_aops;
37extern struct inode_operations jfs_dir_inode_operations; 38extern struct inode_operations jfs_dir_inode_operations;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 4f6cfebc82db..143bcd1d5eaa 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -26,6 +26,7 @@
26#include <linux/moduleparam.h> 26#include <linux/moduleparam.h>
27#include <linux/kthread.h> 27#include <linux/kthread.h>
28#include <linux/posix_acl.h> 28#include <linux/posix_acl.h>
29#include <linux/buffer_head.h>
29#include <asm/uaccess.h> 30#include <asm/uaccess.h>
30#include <linux/seq_file.h> 31#include <linux/seq_file.h>
31 32
@@ -298,7 +299,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
298 break; 299 break;
299 } 300 }
300 301
301#if defined(CONFIG_QUOTA) 302#ifdef CONFIG_QUOTA
302 case Opt_quota: 303 case Opt_quota:
303 case Opt_usrquota: 304 case Opt_usrquota:
304 *flag |= JFS_USRQUOTA; 305 *flag |= JFS_USRQUOTA;
@@ -597,7 +598,7 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
597 if (sbi->flag & JFS_NOINTEGRITY) 598 if (sbi->flag & JFS_NOINTEGRITY)
598 seq_puts(seq, ",nointegrity"); 599 seq_puts(seq, ",nointegrity");
599 600
600#if defined(CONFIG_QUOTA) 601#ifdef CONFIG_QUOTA
601 if (sbi->flag & JFS_USRQUOTA) 602 if (sbi->flag & JFS_USRQUOTA)
602 seq_puts(seq, ",usrquota"); 603 seq_puts(seq, ",usrquota");
603 604
@@ -608,6 +609,113 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
608 return 0; 609 return 0;
609} 610}
610 611
612#ifdef CONFIG_QUOTA
613
614/* Read data from quotafile - avoid pagecache and such because we cannot afford
615 * acquiring the locks... As quota files are never truncated and quota code
616 * itself serializes the operations (and noone else should touch the files)
617 * we don't have to be afraid of races */
618static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
619 size_t len, loff_t off)
620{
621 struct inode *inode = sb_dqopt(sb)->files[type];
622 sector_t blk = off >> sb->s_blocksize_bits;
623 int err = 0;
624 int offset = off & (sb->s_blocksize - 1);
625 int tocopy;
626 size_t toread;
627 struct buffer_head tmp_bh;
628 struct buffer_head *bh;
629 loff_t i_size = i_size_read(inode);
630
631 if (off > i_size)
632 return 0;
633 if (off+len > i_size)
634 len = i_size-off;
635 toread = len;
636 while (toread > 0) {
637 tocopy = sb->s_blocksize - offset < toread ?
638 sb->s_blocksize - offset : toread;
639
640 tmp_bh.b_state = 0;
641 tmp_bh.b_size = 1 << inode->i_blkbits;
642 err = jfs_get_block(inode, blk, &tmp_bh, 0);
643 if (err)
644 return err;
645 if (!buffer_mapped(&tmp_bh)) /* A hole? */
646 memset(data, 0, tocopy);
647 else {
648 bh = sb_bread(sb, tmp_bh.b_blocknr);
649 if (!bh)
650 return -EIO;
651 memcpy(data, bh->b_data+offset, tocopy);
652 brelse(bh);
653 }
654 offset = 0;
655 toread -= tocopy;
656 data += tocopy;
657 blk++;
658 }
659 return len;
660}
661
662/* Write to quotafile */
663static ssize_t jfs_quota_write(struct super_block *sb, int type,
664 const char *data, size_t len, loff_t off)
665{
666 struct inode *inode = sb_dqopt(sb)->files[type];
667 sector_t blk = off >> sb->s_blocksize_bits;
668 int err = 0;
669 int offset = off & (sb->s_blocksize - 1);
670 int tocopy;
671 size_t towrite = len;
672 struct buffer_head tmp_bh;
673 struct buffer_head *bh;
674
675 mutex_lock(&inode->i_mutex);
676 while (towrite > 0) {
677 tocopy = sb->s_blocksize - offset < towrite ?
678 sb->s_blocksize - offset : towrite;
679
680 tmp_bh.b_state = 0;
681 tmp_bh.b_size = 1 << inode->i_blkbits;
682 err = jfs_get_block(inode, blk, &tmp_bh, 1);
683 if (err)
684 goto out;
685 if (offset || tocopy != sb->s_blocksize)
686 bh = sb_bread(sb, tmp_bh.b_blocknr);
687 else
688 bh = sb_getblk(sb, tmp_bh.b_blocknr);
689 if (!bh) {
690 err = -EIO;
691 goto out;
692 }
693 lock_buffer(bh);
694 memcpy(bh->b_data+offset, data, tocopy);
695 flush_dcache_page(bh->b_page);
696 set_buffer_uptodate(bh);
697 mark_buffer_dirty(bh);
698 unlock_buffer(bh);
699 brelse(bh);
700 offset = 0;
701 towrite -= tocopy;
702 data += tocopy;
703 blk++;
704 }
705out:
706 if (len == towrite)
707 return err;
708 if (inode->i_size < off+len-towrite)
709 i_size_write(inode, off+len-towrite);
710 inode->i_version++;
711 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
712 mark_inode_dirty(inode);
713 mutex_unlock(&inode->i_mutex);
714 return len - towrite;
715}
716
717#endif
718
611static struct super_operations jfs_super_operations = { 719static struct super_operations jfs_super_operations = {
612 .alloc_inode = jfs_alloc_inode, 720 .alloc_inode = jfs_alloc_inode,
613 .destroy_inode = jfs_destroy_inode, 721 .destroy_inode = jfs_destroy_inode,
@@ -621,7 +729,11 @@ static struct super_operations jfs_super_operations = {
621 .unlockfs = jfs_unlockfs, 729 .unlockfs = jfs_unlockfs,
622 .statfs = jfs_statfs, 730 .statfs = jfs_statfs,
623 .remount_fs = jfs_remount, 731 .remount_fs = jfs_remount,
624 .show_options = jfs_show_options 732 .show_options = jfs_show_options,
733#ifdef CONFIG_QUOTA
734 .quota_read = jfs_quota_read,
735 .quota_write = jfs_quota_write,
736#endif
625}; 737};
626 738
627static struct export_operations jfs_export_operations = { 739static struct export_operations jfs_export_operations = {
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index 2a4df9b3779a..01b4db9e5466 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -237,19 +237,22 @@ static int
237nlm_traverse_files(struct nlm_host *host, int action) 237nlm_traverse_files(struct nlm_host *host, int action)
238{ 238{
239 struct nlm_file *file, **fp; 239 struct nlm_file *file, **fp;
240 int i; 240 int i, ret = 0;
241 241
242 mutex_lock(&nlm_file_mutex); 242 mutex_lock(&nlm_file_mutex);
243 for (i = 0; i < FILE_NRHASH; i++) { 243 for (i = 0; i < FILE_NRHASH; i++) {
244 fp = nlm_files + i; 244 fp = nlm_files + i;
245 while ((file = *fp) != NULL) { 245 while ((file = *fp) != NULL) {
246 file->f_count++;
247 mutex_unlock(&nlm_file_mutex);
248
246 /* Traverse locks, blocks and shares of this file 249 /* Traverse locks, blocks and shares of this file
247 * and update file->f_locks count */ 250 * and update file->f_locks count */
248 if (nlm_inspect_file(host, file, action)) { 251 if (nlm_inspect_file(host, file, action))
249 mutex_unlock(&nlm_file_mutex); 252 ret = 1;
250 return 1;
251 }
252 253
254 mutex_lock(&nlm_file_mutex);
255 file->f_count--;
253 /* No more references to this file. Let go of it. */ 256 /* No more references to this file. Let go of it. */
254 if (!file->f_blocks && !file->f_locks 257 if (!file->f_blocks && !file->f_locks
255 && !file->f_shares && !file->f_count) { 258 && !file->f_shares && !file->f_count) {
@@ -262,7 +265,7 @@ nlm_traverse_files(struct nlm_host *host, int action)
262 } 265 }
263 } 266 }
264 mutex_unlock(&nlm_file_mutex); 267 mutex_unlock(&nlm_file_mutex);
265 return 0; 268 return ret;
266} 269}
267 270
268/* 271/*
diff --git a/fs/locks.c b/fs/locks.c
index b0b41a64e10b..d7c53392cac1 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1421,8 +1421,9 @@ static int __setlease(struct file *filp, long arg, struct file_lock **flp)
1421 if (!leases_enable) 1421 if (!leases_enable)
1422 goto out; 1422 goto out;
1423 1423
1424 error = lease_alloc(filp, arg, &fl); 1424 error = -ENOMEM;
1425 if (error) 1425 fl = locks_alloc_lock();
1426 if (fl == NULL)
1426 goto out; 1427 goto out;
1427 1428
1428 locks_copy_lock(fl, lease); 1429 locks_copy_lock(fl, lease);
@@ -1430,6 +1431,7 @@ static int __setlease(struct file *filp, long arg, struct file_lock **flp)
1430 locks_insert_lock(before, fl); 1431 locks_insert_lock(before, fl);
1431 1432
1432 *flp = fl; 1433 *flp = fl;
1434 error = 0;
1433out: 1435out:
1434 return error; 1436 return error;
1435} 1437}
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 9ea91c5eeb7b..330ff9fc7cf0 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -204,6 +204,8 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
204 /* 204 /*
205 * Allocate the buffer map to keep the superblock small. 205 * Allocate the buffer map to keep the superblock small.
206 */ 206 */
207 if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
208 goto out_illegal_sb;
207 i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh); 209 i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh);
208 map = kmalloc(i, GFP_KERNEL); 210 map = kmalloc(i, GFP_KERNEL);
209 if (!map) 211 if (!map)
@@ -263,7 +265,7 @@ out_no_root:
263 265
264out_no_bitmap: 266out_no_bitmap:
265 printk("MINIX-fs: bad superblock or unable to read bitmaps\n"); 267 printk("MINIX-fs: bad superblock or unable to read bitmaps\n");
266 out_freemap: 268out_freemap:
267 for (i = 0; i < sbi->s_imap_blocks; i++) 269 for (i = 0; i < sbi->s_imap_blocks; i++)
268 brelse(sbi->s_imap[i]); 270 brelse(sbi->s_imap[i]);
269 for (i = 0; i < sbi->s_zmap_blocks; i++) 271 for (i = 0; i < sbi->s_zmap_blocks; i++)
@@ -276,11 +278,16 @@ out_no_map:
276 printk("MINIX-fs: can't allocate map\n"); 278 printk("MINIX-fs: can't allocate map\n");
277 goto out_release; 279 goto out_release;
278 280
281out_illegal_sb:
282 if (!silent)
283 printk("MINIX-fs: bad superblock\n");
284 goto out_release;
285
279out_no_fs: 286out_no_fs:
280 if (!silent) 287 if (!silent)
281 printk("VFS: Can't find a Minix or Minix V2 filesystem " 288 printk("VFS: Can't find a Minix or Minix V2 filesystem "
282 "on device %s\n", s->s_id); 289 "on device %s\n", s->s_id);
283 out_release: 290out_release:
284 brelse(bh); 291 brelse(bh);
285 goto out; 292 goto out;
286 293
@@ -290,7 +297,7 @@ out_bad_hblock:
290 297
291out_bad_sb: 298out_bad_sb:
292 printk("MINIX-fs: unable to read superblock\n"); 299 printk("MINIX-fs: unable to read superblock\n");
293 out: 300out:
294 s->s_fs_info = NULL; 301 s->s_fs_info = NULL;
295 kfree(sbi); 302 kfree(sbi);
296 return -EINVAL; 303 return -EINVAL;
diff --git a/fs/namei.c b/fs/namei.c
index 55a131230f94..432d6bc6fab0 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -227,10 +227,10 @@ int generic_permission(struct inode *inode, int mask,
227 227
228int permission(struct inode *inode, int mask, struct nameidata *nd) 228int permission(struct inode *inode, int mask, struct nameidata *nd)
229{ 229{
230 umode_t mode = inode->i_mode;
230 int retval, submask; 231 int retval, submask;
231 232
232 if (mask & MAY_WRITE) { 233 if (mask & MAY_WRITE) {
233 umode_t mode = inode->i_mode;
234 234
235 /* 235 /*
236 * Nobody gets write access to a read-only fs. 236 * Nobody gets write access to a read-only fs.
@@ -247,6 +247,13 @@ int permission(struct inode *inode, int mask, struct nameidata *nd)
247 } 247 }
248 248
249 249
250 /*
251 * MAY_EXEC on regular files requires special handling: We override
252 * filesystem execute permissions if the mode bits aren't set.
253 */
254 if ((mask & MAY_EXEC) && S_ISREG(mode) && !(mode & S_IXUGO))
255 return -EACCES;
256
250 /* Ordinary permission routines do not understand MAY_APPEND. */ 257 /* Ordinary permission routines do not understand MAY_APPEND. */
251 submask = mask & ~MAY_APPEND; 258 submask = mask & ~MAY_APPEND;
252 if (inode->i_op && inode->i_op->permission) 259 if (inode->i_op && inode->i_op->permission)
@@ -1767,6 +1774,8 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
1767 if (nd->last_type != LAST_NORM) 1774 if (nd->last_type != LAST_NORM)
1768 goto fail; 1775 goto fail;
1769 nd->flags &= ~LOOKUP_PARENT; 1776 nd->flags &= ~LOOKUP_PARENT;
1777 nd->flags |= LOOKUP_CREATE;
1778 nd->intent.open.flags = O_EXCL;
1770 1779
1771 /* 1780 /*
1772 * Do the final lookup. 1781 * Do the final lookup.
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index cc2b874ad5a4..48e892880d5b 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -312,7 +312,13 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset)
312 312
313static int nfs_release_page(struct page *page, gfp_t gfp) 313static int nfs_release_page(struct page *page, gfp_t gfp)
314{ 314{
315 return !nfs_wb_page(page->mapping->host, page); 315 if (gfp & __GFP_FS)
316 return !nfs_wb_page(page->mapping->host, page);
317 else
318 /*
319 * Avoid deadlock on nfs_wait_on_request().
320 */
321 return 0;
316} 322}
317 323
318const struct address_space_operations nfs_file_aops = { 324const struct address_space_operations nfs_file_aops = {
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index b81e7ed3c902..07a5dd57646e 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -130,9 +130,7 @@ nfs_idmap_delete(struct nfs4_client *clp)
130 130
131 if (!idmap) 131 if (!idmap)
132 return; 132 return;
133 dput(idmap->idmap_dentry); 133 rpc_unlink(idmap->idmap_dentry);
134 idmap->idmap_dentry = NULL;
135 rpc_unlink(idmap->idmap_path);
136 clp->cl_idmap = NULL; 134 clp->cl_idmap = NULL;
137 kfree(idmap); 135 kfree(idmap);
138} 136}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index e6ee97f19d81..153898e1331f 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2668,7 +2668,7 @@ out:
2668 nfs4_set_cached_acl(inode, acl); 2668 nfs4_set_cached_acl(inode, acl);
2669} 2669}
2670 2670
2671static inline ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 2671static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
2672{ 2672{
2673 struct page *pages[NFS4ACL_MAXPAGES]; 2673 struct page *pages[NFS4ACL_MAXPAGES];
2674 struct nfs_getaclargs args = { 2674 struct nfs_getaclargs args = {
@@ -2721,6 +2721,19 @@ out_free:
2721 return ret; 2721 return ret;
2722} 2722}
2723 2723
2724static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
2725{
2726 struct nfs4_exception exception = { };
2727 ssize_t ret;
2728 do {
2729 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
2730 if (ret >= 0)
2731 break;
2732 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
2733 } while (exception.retry);
2734 return ret;
2735}
2736
2724static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) 2737static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
2725{ 2738{
2726 struct nfs_server *server = NFS_SERVER(inode); 2739 struct nfs_server *server = NFS_SERVER(inode);
@@ -2737,7 +2750,7 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
2737 return nfs4_get_acl_uncached(inode, buf, buflen); 2750 return nfs4_get_acl_uncached(inode, buf, buflen);
2738} 2751}
2739 2752
2740static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 2753static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
2741{ 2754{
2742 struct nfs_server *server = NFS_SERVER(inode); 2755 struct nfs_server *server = NFS_SERVER(inode);
2743 struct page *pages[NFS4ACL_MAXPAGES]; 2756 struct page *pages[NFS4ACL_MAXPAGES];
@@ -2763,6 +2776,18 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
2763 return ret; 2776 return ret;
2764} 2777}
2765 2778
2779static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
2780{
2781 struct nfs4_exception exception = { };
2782 int err;
2783 do {
2784 err = nfs4_handle_exception(NFS_SERVER(inode),
2785 __nfs4_proc_set_acl(inode, buf, buflen),
2786 &exception);
2787 } while (exception.retry);
2788 return err;
2789}
2790
2766static int 2791static int
2767nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server) 2792nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
2768{ 2793{
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 1750d996f49f..730ec8fb31c6 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -3355,7 +3355,7 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
3355 struct kvec *iov = rcvbuf->head; 3355 struct kvec *iov = rcvbuf->head;
3356 unsigned int nr, pglen = rcvbuf->page_len; 3356 unsigned int nr, pglen = rcvbuf->page_len;
3357 uint32_t *end, *entry, *p, *kaddr; 3357 uint32_t *end, *entry, *p, *kaddr;
3358 uint32_t len, attrlen; 3358 uint32_t len, attrlen, xlen;
3359 int hdrlen, recvd, status; 3359 int hdrlen, recvd, status;
3360 3360
3361 status = decode_op_hdr(xdr, OP_READDIR); 3361 status = decode_op_hdr(xdr, OP_READDIR);
@@ -3377,10 +3377,10 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
3377 3377
3378 BUG_ON(pglen + readdir->pgbase > PAGE_CACHE_SIZE); 3378 BUG_ON(pglen + readdir->pgbase > PAGE_CACHE_SIZE);
3379 kaddr = p = (uint32_t *) kmap_atomic(page, KM_USER0); 3379 kaddr = p = (uint32_t *) kmap_atomic(page, KM_USER0);
3380 end = (uint32_t *) ((char *)p + pglen + readdir->pgbase); 3380 end = p + ((pglen + readdir->pgbase) >> 2);
3381 entry = p; 3381 entry = p;
3382 for (nr = 0; *p++; nr++) { 3382 for (nr = 0; *p++; nr++) {
3383 if (p + 3 > end) 3383 if (end - p < 3)
3384 goto short_pkt; 3384 goto short_pkt;
3385 dprintk("cookie = %Lu, ", *((unsigned long long *)p)); 3385 dprintk("cookie = %Lu, ", *((unsigned long long *)p));
3386 p += 2; /* cookie */ 3386 p += 2; /* cookie */
@@ -3389,18 +3389,19 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
3389 printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)\n", len); 3389 printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)\n", len);
3390 goto err_unmap; 3390 goto err_unmap;
3391 } 3391 }
3392 dprintk("filename = %*s\n", len, (char *)p); 3392 xlen = XDR_QUADLEN(len);
3393 p += XDR_QUADLEN(len); 3393 if (end - p < xlen + 1)
3394 if (p + 1 > end)
3395 goto short_pkt; 3394 goto short_pkt;
3395 dprintk("filename = %*s\n", len, (char *)p);
3396 p += xlen;
3396 len = ntohl(*p++); /* bitmap length */ 3397 len = ntohl(*p++); /* bitmap length */
3397 p += len; 3398 if (end - p < len + 1)
3398 if (p + 1 > end)
3399 goto short_pkt; 3399 goto short_pkt;
3400 p += len;
3400 attrlen = XDR_QUADLEN(ntohl(*p++)); 3401 attrlen = XDR_QUADLEN(ntohl(*p++));
3401 p += attrlen; /* attributes */ 3402 if (end - p < attrlen + 2)
3402 if (p + 2 > end)
3403 goto short_pkt; 3403 goto short_pkt;
3404 p += attrlen; /* attributes */
3404 entry = p; 3405 entry = p;
3405 } 3406 }
3406 if (!nr && (entry[0] != 0 || entry[1] == 0)) 3407 if (!nr && (entry[0] != 0 || entry[1] == 0))
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 65c0c5b32351..da9cf11c326f 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -116,10 +116,17 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
116 pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; 116 pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
117 base &= ~PAGE_CACHE_MASK; 117 base &= ~PAGE_CACHE_MASK;
118 pglen = PAGE_CACHE_SIZE - base; 118 pglen = PAGE_CACHE_SIZE - base;
119 if (pglen < remainder) 119 for (;;) {
120 if (remainder <= pglen) {
121 memclear_highpage_flush(*pages, base, remainder);
122 break;
123 }
120 memclear_highpage_flush(*pages, base, pglen); 124 memclear_highpage_flush(*pages, base, pglen);
121 else 125 pages++;
122 memclear_highpage_flush(*pages, base, remainder); 126 remainder -= pglen;
127 pglen = PAGE_CACHE_SIZE;
128 base = 0;
129 }
123} 130}
124 131
125/* 132/*
@@ -476,6 +483,8 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
476 unsigned int base = data->args.pgbase; 483 unsigned int base = data->args.pgbase;
477 struct page **pages; 484 struct page **pages;
478 485
486 if (data->res.eof)
487 count = data->args.count;
479 if (unlikely(count == 0)) 488 if (unlikely(count == 0))
480 return; 489 return;
481 pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; 490 pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
@@ -483,11 +492,7 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
483 count += base; 492 count += base;
484 for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) 493 for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
485 SetPageUptodate(*pages); 494 SetPageUptodate(*pages);
486 /* 495 if (count != 0)
487 * Was this an eof or a short read? If the latter, don't mark the page
488 * as uptodate yet.
489 */
490 if (count > 0 && (data->res.eof || data->args.count == data->res.count))
491 SetPageUptodate(*pages); 496 SetPageUptodate(*pages);
492} 497}
493 498
@@ -502,6 +507,8 @@ static void nfs_readpage_set_pages_error(struct nfs_read_data *data)
502 count += base; 507 count += base;
503 for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) 508 for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
504 SetPageError(*pages); 509 SetPageError(*pages);
510 if (count != 0)
511 SetPageError(*pages);
505} 512}
506 513
507/* 514/*
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 1b8346dd0572..9503240ef0e5 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2375,7 +2375,6 @@ leave:
2375 mlog(0, "returning %d\n", ret); 2375 mlog(0, "returning %d\n", ret);
2376 return ret; 2376 return ret;
2377} 2377}
2378EXPORT_SYMBOL_GPL(dlm_migrate_lockres);
2379 2378
2380int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) 2379int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2381{ 2380{
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index b0c3134f4f70..37be4b2e0d4a 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -155,7 +155,7 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
155 else 155 else
156 status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions); 156 status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions);
157 157
158 if (status != DLM_NORMAL) 158 if (status != DLM_NORMAL && (status != DLM_CANCELGRANT || !master_node))
159 goto leave; 159 goto leave;
160 160
161 /* By now this has been masked out of cancel requests. */ 161 /* By now this has been masked out of cancel requests. */
@@ -183,8 +183,7 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
183 spin_lock(&lock->spinlock); 183 spin_lock(&lock->spinlock);
184 /* if the master told us the lock was already granted, 184 /* if the master told us the lock was already granted,
185 * let the ast handle all of these actions */ 185 * let the ast handle all of these actions */
186 if (status == DLM_NORMAL && 186 if (status == DLM_CANCELGRANT) {
187 lksb->status == DLM_CANCELGRANT) {
188 actions &= ~(DLM_UNLOCK_REMOVE_LOCK| 187 actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
189 DLM_UNLOCK_REGRANT_LOCK| 188 DLM_UNLOCK_REGRANT_LOCK|
190 DLM_UNLOCK_CLEAR_CONVERT_TYPE); 189 DLM_UNLOCK_CLEAR_CONVERT_TYPE);
@@ -349,14 +348,9 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
349 vec, veclen, owner, &status); 348 vec, veclen, owner, &status);
350 if (tmpret >= 0) { 349 if (tmpret >= 0) {
351 // successfully sent and received 350 // successfully sent and received
352 if (status == DLM_CANCELGRANT) 351 if (status == DLM_FORWARD)
353 ret = DLM_NORMAL;
354 else if (status == DLM_FORWARD) {
355 mlog(0, "master was in-progress. retry\n"); 352 mlog(0, "master was in-progress. retry\n");
356 ret = DLM_FORWARD; 353 ret = status;
357 } else
358 ret = status;
359 lksb->status = status;
360 } else { 354 } else {
361 mlog_errno(tmpret); 355 mlog_errno(tmpret);
362 if (dlm_is_host_down(tmpret)) { 356 if (dlm_is_host_down(tmpret)) {
@@ -372,7 +366,6 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
372 /* something bad. this will BUG in ocfs2 */ 366 /* something bad. this will BUG in ocfs2 */
373 ret = dlm_err_to_dlm_status(tmpret); 367 ret = dlm_err_to_dlm_status(tmpret);
374 } 368 }
375 lksb->status = ret;
376 } 369 }
377 370
378 return ret; 371 return ret;
@@ -483,6 +476,10 @@ int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data)
483 476
484 /* lock was found on queue */ 477 /* lock was found on queue */
485 lksb = lock->lksb; 478 lksb = lock->lksb;
479 if (flags & (LKM_VALBLK|LKM_PUT_LVB) &&
480 lock->ml.type != LKM_EXMODE)
481 flags &= ~(LKM_VALBLK|LKM_PUT_LVB);
482
486 /* unlockast only called on originating node */ 483 /* unlockast only called on originating node */
487 if (flags & LKM_PUT_LVB) { 484 if (flags & LKM_PUT_LVB) {
488 lksb->flags |= DLM_LKSB_PUT_LVB; 485 lksb->flags |= DLM_LKSB_PUT_LVB;
@@ -507,11 +504,8 @@ not_found:
507 "cookie=%u:%llu\n", 504 "cookie=%u:%llu\n",
508 dlm_get_lock_cookie_node(unlock->cookie), 505 dlm_get_lock_cookie_node(unlock->cookie),
509 dlm_get_lock_cookie_seq(unlock->cookie)); 506 dlm_get_lock_cookie_seq(unlock->cookie));
510 else { 507 else
511 /* send the lksb->status back to the other node */
512 status = lksb->status;
513 dlm_lock_put(lock); 508 dlm_lock_put(lock);
514 }
515 509
516leave: 510leave:
517 if (res) 511 if (res)
@@ -533,26 +527,22 @@ static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
533 527
534 if (dlm_lock_on_list(&res->blocked, lock)) { 528 if (dlm_lock_on_list(&res->blocked, lock)) {
535 /* cancel this outright */ 529 /* cancel this outright */
536 lksb->status = DLM_NORMAL;
537 status = DLM_NORMAL; 530 status = DLM_NORMAL;
538 *actions = (DLM_UNLOCK_CALL_AST | 531 *actions = (DLM_UNLOCK_CALL_AST |
539 DLM_UNLOCK_REMOVE_LOCK); 532 DLM_UNLOCK_REMOVE_LOCK);
540 } else if (dlm_lock_on_list(&res->converting, lock)) { 533 } else if (dlm_lock_on_list(&res->converting, lock)) {
541 /* cancel the request, put back on granted */ 534 /* cancel the request, put back on granted */
542 lksb->status = DLM_NORMAL;
543 status = DLM_NORMAL; 535 status = DLM_NORMAL;
544 *actions = (DLM_UNLOCK_CALL_AST | 536 *actions = (DLM_UNLOCK_CALL_AST |
545 DLM_UNLOCK_REMOVE_LOCK | 537 DLM_UNLOCK_REMOVE_LOCK |
546 DLM_UNLOCK_REGRANT_LOCK | 538 DLM_UNLOCK_REGRANT_LOCK |
547 DLM_UNLOCK_CLEAR_CONVERT_TYPE); 539 DLM_UNLOCK_CLEAR_CONVERT_TYPE);
548 } else if (dlm_lock_on_list(&res->granted, lock)) { 540 } else if (dlm_lock_on_list(&res->granted, lock)) {
549 /* too late, already granted. DLM_CANCELGRANT */ 541 /* too late, already granted. */
550 lksb->status = DLM_CANCELGRANT; 542 status = DLM_CANCELGRANT;
551 status = DLM_NORMAL;
552 *actions = DLM_UNLOCK_CALL_AST; 543 *actions = DLM_UNLOCK_CALL_AST;
553 } else { 544 } else {
554 mlog(ML_ERROR, "lock to cancel is not on any list!\n"); 545 mlog(ML_ERROR, "lock to cancel is not on any list!\n");
555 lksb->status = DLM_IVLOCKID;
556 status = DLM_IVLOCKID; 546 status = DLM_IVLOCKID;
557 *actions = 0; 547 *actions = 0;
558 } 548 }
@@ -569,13 +559,11 @@ static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
569 559
570 /* unlock request */ 560 /* unlock request */
571 if (!dlm_lock_on_list(&res->granted, lock)) { 561 if (!dlm_lock_on_list(&res->granted, lock)) {
572 lksb->status = DLM_DENIED;
573 status = DLM_DENIED; 562 status = DLM_DENIED;
574 dlm_error(status); 563 dlm_error(status);
575 *actions = 0; 564 *actions = 0;
576 } else { 565 } else {
577 /* unlock granted lock */ 566 /* unlock granted lock */
578 lksb->status = DLM_NORMAL;
579 status = DLM_NORMAL; 567 status = DLM_NORMAL;
580 *actions = (DLM_UNLOCK_FREE_LOCK | 568 *actions = (DLM_UNLOCK_FREE_LOCK |
581 DLM_UNLOCK_CALL_AST | 569 DLM_UNLOCK_CALL_AST |
@@ -632,6 +620,8 @@ retry:
632 620
633 spin_lock(&res->spinlock); 621 spin_lock(&res->spinlock);
634 is_master = (res->owner == dlm->node_num); 622 is_master = (res->owner == dlm->node_num);
623 if (flags & LKM_VALBLK && lock->ml.type != LKM_EXMODE)
624 flags &= ~LKM_VALBLK;
635 spin_unlock(&res->spinlock); 625 spin_unlock(&res->spinlock);
636 626
637 if (is_master) { 627 if (is_master) {
@@ -665,7 +655,7 @@ retry:
665 } 655 }
666 656
667 if (call_ast) { 657 if (call_ast) {
668 mlog(0, "calling unlockast(%p, %d)\n", data, lksb->status); 658 mlog(0, "calling unlockast(%p, %d)\n", data, status);
669 if (is_master) { 659 if (is_master) {
670 /* it is possible that there is one last bast 660 /* it is possible that there is one last bast
671 * pending. make sure it is flushed, then 661 * pending. make sure it is flushed, then
@@ -677,9 +667,12 @@ retry:
677 wait_event(dlm->ast_wq, 667 wait_event(dlm->ast_wq,
678 dlm_lock_basts_flushed(dlm, lock)); 668 dlm_lock_basts_flushed(dlm, lock));
679 } 669 }
680 (*unlockast)(data, lksb->status); 670 (*unlockast)(data, status);
681 } 671 }
682 672
673 if (status == DLM_CANCELGRANT)
674 status = DLM_NORMAL;
675
683 if (status == DLM_NORMAL) { 676 if (status == DLM_NORMAL) {
684 mlog(0, "kicking the thread\n"); 677 mlog(0, "kicking the thread\n");
685 dlm_kick_thread(dlm, res); 678 dlm_kick_thread(dlm, res);
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 0d1973ea32b0..1f17a4d08287 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -840,6 +840,12 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
840 840
841 mlog(0, "Allocating %u clusters for a new window.\n", 841 mlog(0, "Allocating %u clusters for a new window.\n",
842 ocfs2_local_alloc_window_bits(osb)); 842 ocfs2_local_alloc_window_bits(osb));
843
844 /* Instruct the allocation code to try the most recently used
845 * cluster group. We'll re-record the group used this pass
846 * below. */
847 ac->ac_last_group = osb->la_last_gd;
848
843 /* we used the generic suballoc reserve function, but we set 849 /* we used the generic suballoc reserve function, but we set
844 * everything up nicely, so there's no reason why we can't use 850 * everything up nicely, so there's no reason why we can't use
845 * the more specific cluster api to claim bits. */ 851 * the more specific cluster api to claim bits. */
@@ -852,6 +858,8 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
852 goto bail; 858 goto bail;
853 } 859 }
854 860
861 osb->la_last_gd = ac->ac_last_group;
862
855 la->la_bm_off = cpu_to_le32(cluster_off); 863 la->la_bm_off = cpu_to_le32(cluster_off);
856 alloc->id1.bitmap1.i_total = cpu_to_le32(cluster_count); 864 alloc->id1.bitmap1.i_total = cpu_to_le32(cluster_count);
857 /* just in case... In the future when we find space ourselves, 865 /* just in case... In the future when we find space ourselves,
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index cd4a6f253d13..0462a7f4e21b 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -197,7 +197,6 @@ struct ocfs2_super
197 struct ocfs2_node_map recovery_map; 197 struct ocfs2_node_map recovery_map;
198 struct ocfs2_node_map umount_map; 198 struct ocfs2_node_map umount_map;
199 199
200 u32 num_clusters;
201 u64 root_blkno; 200 u64 root_blkno;
202 u64 system_dir_blkno; 201 u64 system_dir_blkno;
203 u64 bitmap_blkno; 202 u64 bitmap_blkno;
@@ -237,6 +236,7 @@ struct ocfs2_super
237 236
238 enum ocfs2_local_alloc_state local_alloc_state; 237 enum ocfs2_local_alloc_state local_alloc_state;
239 struct buffer_head *local_alloc_bh; 238 struct buffer_head *local_alloc_bh;
239 u64 la_last_gd;
240 240
241 /* Next two fields are for local node slot recovery during 241 /* Next two fields are for local node slot recovery during
242 * mount. */ 242 * mount. */
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 195523090c87..9d91e66f51a9 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -70,12 +70,6 @@ static int ocfs2_block_group_search(struct inode *inode,
70 struct buffer_head *group_bh, 70 struct buffer_head *group_bh,
71 u32 bits_wanted, u32 min_bits, 71 u32 bits_wanted, u32 min_bits,
72 u16 *bit_off, u16 *bits_found); 72 u16 *bit_off, u16 *bits_found);
73static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
74 u32 bits_wanted,
75 u32 min_bits,
76 u16 *bit_off,
77 unsigned int *num_bits,
78 u64 *bg_blkno);
79static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, 73static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
80 struct ocfs2_alloc_context *ac, 74 struct ocfs2_alloc_context *ac,
81 u32 bits_wanted, 75 u32 bits_wanted,
@@ -85,11 +79,6 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
85 u64 *bg_blkno); 79 u64 *bg_blkno);
86static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh, 80static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
87 int nr); 81 int nr);
88static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
89 struct buffer_head *bg_bh,
90 unsigned int bits_wanted,
91 u16 *bit_off,
92 u16 *bits_found);
93static inline int ocfs2_block_group_set_bits(struct ocfs2_journal_handle *handle, 82static inline int ocfs2_block_group_set_bits(struct ocfs2_journal_handle *handle,
94 struct inode *alloc_inode, 83 struct inode *alloc_inode,
95 struct ocfs2_group_desc *bg, 84 struct ocfs2_group_desc *bg,
@@ -143,6 +132,64 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
143 return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc); 132 return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc);
144} 133}
145 134
135/* somewhat more expensive than our other checks, so use sparingly. */
136static int ocfs2_check_group_descriptor(struct super_block *sb,
137 struct ocfs2_dinode *di,
138 struct ocfs2_group_desc *gd)
139{
140 unsigned int max_bits;
141
142 if (!OCFS2_IS_VALID_GROUP_DESC(gd)) {
143 OCFS2_RO_ON_INVALID_GROUP_DESC(sb, gd);
144 return -EIO;
145 }
146
147 if (di->i_blkno != gd->bg_parent_dinode) {
148 ocfs2_error(sb, "Group descriptor # %llu has bad parent "
149 "pointer (%llu, expected %llu)",
150 (unsigned long long)le64_to_cpu(gd->bg_blkno),
151 (unsigned long long)le64_to_cpu(gd->bg_parent_dinode),
152 (unsigned long long)le64_to_cpu(di->i_blkno));
153 return -EIO;
154 }
155
156 max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * le16_to_cpu(di->id2.i_chain.cl_bpc);
157 if (le16_to_cpu(gd->bg_bits) > max_bits) {
158 ocfs2_error(sb, "Group descriptor # %llu has bit count of %u",
159 (unsigned long long)le64_to_cpu(gd->bg_blkno),
160 le16_to_cpu(gd->bg_bits));
161 return -EIO;
162 }
163
164 if (le16_to_cpu(gd->bg_chain) >=
165 le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) {
166 ocfs2_error(sb, "Group descriptor # %llu has bad chain %u",
167 (unsigned long long)le64_to_cpu(gd->bg_blkno),
168 le16_to_cpu(gd->bg_chain));
169 return -EIO;
170 }
171
172 if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) {
173 ocfs2_error(sb, "Group descriptor # %llu has bit count %u but "
174 "claims that %u are free",
175 (unsigned long long)le64_to_cpu(gd->bg_blkno),
176 le16_to_cpu(gd->bg_bits),
177 le16_to_cpu(gd->bg_free_bits_count));
178 return -EIO;
179 }
180
181 if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) {
182 ocfs2_error(sb, "Group descriptor # %llu has bit count %u but "
183 "max bitmap bits of %u",
184 (unsigned long long)le64_to_cpu(gd->bg_blkno),
185 le16_to_cpu(gd->bg_bits),
186 8 * le16_to_cpu(gd->bg_size));
187 return -EIO;
188 }
189
190 return 0;
191}
192
146static int ocfs2_block_group_fill(struct ocfs2_journal_handle *handle, 193static int ocfs2_block_group_fill(struct ocfs2_journal_handle *handle,
147 struct inode *alloc_inode, 194 struct inode *alloc_inode,
148 struct buffer_head *bg_bh, 195 struct buffer_head *bg_bh,
@@ -663,6 +710,7 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
663static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb, 710static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
664 struct buffer_head *bg_bh, 711 struct buffer_head *bg_bh,
665 unsigned int bits_wanted, 712 unsigned int bits_wanted,
713 unsigned int total_bits,
666 u16 *bit_off, 714 u16 *bit_off,
667 u16 *bits_found) 715 u16 *bits_found)
668{ 716{
@@ -679,10 +727,8 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
679 found = start = best_offset = best_size = 0; 727 found = start = best_offset = best_size = 0;
680 bitmap = bg->bg_bitmap; 728 bitmap = bg->bg_bitmap;
681 729
682 while((offset = ocfs2_find_next_zero_bit(bitmap, 730 while((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) != -1) {
683 le16_to_cpu(bg->bg_bits), 731 if (offset == total_bits)
684 start)) != -1) {
685 if (offset == le16_to_cpu(bg->bg_bits))
686 break; 732 break;
687 733
688 if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) { 734 if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) {
@@ -911,14 +957,35 @@ static int ocfs2_cluster_group_search(struct inode *inode,
911{ 957{
912 int search = -ENOSPC; 958 int search = -ENOSPC;
913 int ret; 959 int ret;
914 struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) group_bh->b_data; 960 struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *) group_bh->b_data;
915 u16 tmp_off, tmp_found; 961 u16 tmp_off, tmp_found;
962 unsigned int max_bits, gd_cluster_off;
916 963
917 BUG_ON(!ocfs2_is_cluster_bitmap(inode)); 964 BUG_ON(!ocfs2_is_cluster_bitmap(inode));
918 965
919 if (bg->bg_free_bits_count) { 966 if (gd->bg_free_bits_count) {
967 max_bits = le16_to_cpu(gd->bg_bits);
968
969 /* Tail groups in cluster bitmaps which aren't cpg
970 * aligned are prone to partial extention by a failed
971 * fs resize. If the file system resize never got to
972 * update the dinode cluster count, then we don't want
973 * to trust any clusters past it, regardless of what
974 * the group descriptor says. */
975 gd_cluster_off = ocfs2_blocks_to_clusters(inode->i_sb,
976 le64_to_cpu(gd->bg_blkno));
977 if ((gd_cluster_off + max_bits) >
978 OCFS2_I(inode)->ip_clusters) {
979 max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off;
980 mlog(0, "Desc %llu, bg_bits %u, clusters %u, use %u\n",
981 (unsigned long long)le64_to_cpu(gd->bg_blkno),
982 le16_to_cpu(gd->bg_bits),
983 OCFS2_I(inode)->ip_clusters, max_bits);
984 }
985
920 ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), 986 ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
921 group_bh, bits_wanted, 987 group_bh, bits_wanted,
988 max_bits,
922 &tmp_off, &tmp_found); 989 &tmp_off, &tmp_found);
923 if (ret) 990 if (ret)
924 return ret; 991 return ret;
@@ -951,17 +1018,109 @@ static int ocfs2_block_group_search(struct inode *inode,
951 if (bg->bg_free_bits_count) 1018 if (bg->bg_free_bits_count)
952 ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), 1019 ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
953 group_bh, bits_wanted, 1020 group_bh, bits_wanted,
1021 le16_to_cpu(bg->bg_bits),
954 bit_off, bits_found); 1022 bit_off, bits_found);
955 1023
956 return ret; 1024 return ret;
957} 1025}
958 1026
1027static int ocfs2_alloc_dinode_update_counts(struct inode *inode,
1028 struct ocfs2_journal_handle *handle,
1029 struct buffer_head *di_bh,
1030 u32 num_bits,
1031 u16 chain)
1032{
1033 int ret;
1034 u32 tmp_used;
1035 struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
1036 struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain;
1037
1038 ret = ocfs2_journal_access(handle, inode, di_bh,
1039 OCFS2_JOURNAL_ACCESS_WRITE);
1040 if (ret < 0) {
1041 mlog_errno(ret);
1042 goto out;
1043 }
1044
1045 tmp_used = le32_to_cpu(di->id1.bitmap1.i_used);
1046 di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used);
1047 le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits);
1048
1049 ret = ocfs2_journal_dirty(handle, di_bh);
1050 if (ret < 0)
1051 mlog_errno(ret);
1052
1053out:
1054 return ret;
1055}
1056
1057static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
1058 u32 bits_wanted,
1059 u32 min_bits,
1060 u16 *bit_off,
1061 unsigned int *num_bits,
1062 u64 gd_blkno,
1063 u16 *bits_left)
1064{
1065 int ret;
1066 u16 found;
1067 struct buffer_head *group_bh = NULL;
1068 struct ocfs2_group_desc *gd;
1069 struct inode *alloc_inode = ac->ac_inode;
1070 struct ocfs2_journal_handle *handle = ac->ac_handle;
1071
1072 ret = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb), gd_blkno,
1073 &group_bh, OCFS2_BH_CACHED, alloc_inode);
1074 if (ret < 0) {
1075 mlog_errno(ret);
1076 return ret;
1077 }
1078
1079 gd = (struct ocfs2_group_desc *) group_bh->b_data;
1080 if (!OCFS2_IS_VALID_GROUP_DESC(gd)) {
1081 OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, gd);
1082 ret = -EIO;
1083 goto out;
1084 }
1085
1086 ret = ac->ac_group_search(alloc_inode, group_bh, bits_wanted, min_bits,
1087 bit_off, &found);
1088 if (ret < 0) {
1089 if (ret != -ENOSPC)
1090 mlog_errno(ret);
1091 goto out;
1092 }
1093
1094 *num_bits = found;
1095
1096 ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
1097 *num_bits,
1098 le16_to_cpu(gd->bg_chain));
1099 if (ret < 0) {
1100 mlog_errno(ret);
1101 goto out;
1102 }
1103
1104 ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh,
1105 *bit_off, *num_bits);
1106 if (ret < 0)
1107 mlog_errno(ret);
1108
1109 *bits_left = le16_to_cpu(gd->bg_free_bits_count);
1110
1111out:
1112 brelse(group_bh);
1113
1114 return ret;
1115}
1116
959static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, 1117static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
960 u32 bits_wanted, 1118 u32 bits_wanted,
961 u32 min_bits, 1119 u32 min_bits,
962 u16 *bit_off, 1120 u16 *bit_off,
963 unsigned int *num_bits, 1121 unsigned int *num_bits,
964 u64 *bg_blkno) 1122 u64 *bg_blkno,
1123 u16 *bits_left)
965{ 1124{
966 int status; 1125 int status;
967 u16 chain, tmp_bits; 1126 u16 chain, tmp_bits;
@@ -988,9 +1147,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
988 goto bail; 1147 goto bail;
989 } 1148 }
990 bg = (struct ocfs2_group_desc *) group_bh->b_data; 1149 bg = (struct ocfs2_group_desc *) group_bh->b_data;
991 if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { 1150 status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg);
992 OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg); 1151 if (status) {
993 status = -EIO; 1152 mlog_errno(status);
994 goto bail; 1153 goto bail;
995 } 1154 }
996 1155
@@ -1018,9 +1177,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
1018 goto bail; 1177 goto bail;
1019 } 1178 }
1020 bg = (struct ocfs2_group_desc *) group_bh->b_data; 1179 bg = (struct ocfs2_group_desc *) group_bh->b_data;
1021 if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { 1180 status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg);
1022 OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg); 1181 if (status) {
1023 status = -EIO; 1182 mlog_errno(status);
1024 goto bail; 1183 goto bail;
1025 } 1184 }
1026 } 1185 }
@@ -1099,6 +1258,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
1099 (unsigned long long)fe->i_blkno); 1258 (unsigned long long)fe->i_blkno);
1100 1259
1101 *bg_blkno = le64_to_cpu(bg->bg_blkno); 1260 *bg_blkno = le64_to_cpu(bg->bg_blkno);
1261 *bits_left = le16_to_cpu(bg->bg_free_bits_count);
1102bail: 1262bail:
1103 if (group_bh) 1263 if (group_bh)
1104 brelse(group_bh); 1264 brelse(group_bh);
@@ -1120,6 +1280,8 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
1120{ 1280{
1121 int status; 1281 int status;
1122 u16 victim, i; 1282 u16 victim, i;
1283 u16 bits_left = 0;
1284 u64 hint_blkno = ac->ac_last_group;
1123 struct ocfs2_chain_list *cl; 1285 struct ocfs2_chain_list *cl;
1124 struct ocfs2_dinode *fe; 1286 struct ocfs2_dinode *fe;
1125 1287
@@ -1146,6 +1308,28 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
1146 goto bail; 1308 goto bail;
1147 } 1309 }
1148 1310
1311 if (hint_blkno) {
1312 /* Attempt to short-circuit the usual search mechanism
1313 * by jumping straight to the most recently used
1314 * allocation group. This helps us mantain some
1315 * contiguousness across allocations. */
1316 status = ocfs2_search_one_group(ac, bits_wanted, min_bits,
1317 bit_off, num_bits,
1318 hint_blkno, &bits_left);
1319 if (!status) {
1320 /* Be careful to update *bg_blkno here as the
1321 * caller is expecting it to be filled in, and
1322 * ocfs2_search_one_group() won't do that for
1323 * us. */
1324 *bg_blkno = hint_blkno;
1325 goto set_hint;
1326 }
1327 if (status < 0 && status != -ENOSPC) {
1328 mlog_errno(status);
1329 goto bail;
1330 }
1331 }
1332
1149 cl = (struct ocfs2_chain_list *) &fe->id2.i_chain; 1333 cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
1150 1334
1151 victim = ocfs2_find_victim_chain(cl); 1335 victim = ocfs2_find_victim_chain(cl);
@@ -1153,9 +1337,9 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
1153 ac->ac_allow_chain_relink = 1; 1337 ac->ac_allow_chain_relink = 1;
1154 1338
1155 status = ocfs2_search_chain(ac, bits_wanted, min_bits, bit_off, 1339 status = ocfs2_search_chain(ac, bits_wanted, min_bits, bit_off,
1156 num_bits, bg_blkno); 1340 num_bits, bg_blkno, &bits_left);
1157 if (!status) 1341 if (!status)
1158 goto bail; 1342 goto set_hint;
1159 if (status < 0 && status != -ENOSPC) { 1343 if (status < 0 && status != -ENOSPC) {
1160 mlog_errno(status); 1344 mlog_errno(status);
1161 goto bail; 1345 goto bail;
@@ -1177,8 +1361,8 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
1177 1361
1178 ac->ac_chain = i; 1362 ac->ac_chain = i;
1179 status = ocfs2_search_chain(ac, bits_wanted, min_bits, 1363 status = ocfs2_search_chain(ac, bits_wanted, min_bits,
1180 bit_off, num_bits, 1364 bit_off, num_bits, bg_blkno,
1181 bg_blkno); 1365 &bits_left);
1182 if (!status) 1366 if (!status)
1183 break; 1367 break;
1184 if (status < 0 && status != -ENOSPC) { 1368 if (status < 0 && status != -ENOSPC) {
@@ -1186,8 +1370,19 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
1186 goto bail; 1370 goto bail;
1187 } 1371 }
1188 } 1372 }
1189bail:
1190 1373
1374set_hint:
1375 if (status != -ENOSPC) {
1376 /* If the next search of this group is not likely to
1377 * yield a suitable extent, then we reset the last
1378 * group hint so as to not waste a disk read */
1379 if (bits_left < min_bits)
1380 ac->ac_last_group = 0;
1381 else
1382 ac->ac_last_group = *bg_blkno;
1383 }
1384
1385bail:
1191 mlog_exit(status); 1386 mlog_exit(status);
1192 return status; 1387 return status;
1193} 1388}
@@ -1341,7 +1536,7 @@ int ocfs2_claim_clusters(struct ocfs2_super *osb,
1341{ 1536{
1342 int status; 1537 int status;
1343 unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given; 1538 unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given;
1344 u64 bg_blkno; 1539 u64 bg_blkno = 0;
1345 u16 bg_bit_off; 1540 u16 bg_bit_off;
1346 1541
1347 mlog_entry_void(); 1542 mlog_entry_void();
@@ -1494,9 +1689,9 @@ static int ocfs2_free_suballoc_bits(struct ocfs2_journal_handle *handle,
1494 } 1689 }
1495 1690
1496 group = (struct ocfs2_group_desc *) group_bh->b_data; 1691 group = (struct ocfs2_group_desc *) group_bh->b_data;
1497 if (!OCFS2_IS_VALID_GROUP_DESC(group)) { 1692 status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, group);
1498 OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, group); 1693 if (status) {
1499 status = -EIO; 1694 mlog_errno(status);
1500 goto bail; 1695 goto bail;
1501 } 1696 }
1502 BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits)); 1697 BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits));
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index a76c82a7ceac..c787838d1052 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -49,6 +49,8 @@ struct ocfs2_alloc_context {
49 u16 ac_chain; 49 u16 ac_chain;
50 int ac_allow_chain_relink; 50 int ac_allow_chain_relink;
51 group_search_t *ac_group_search; 51 group_search_t *ac_group_search;
52
53 u64 ac_last_group;
52}; 54};
53 55
54void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac); 56void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 382706a67ffd..d17e33e66a1e 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1442,8 +1442,13 @@ static int ocfs2_initialize_super(struct super_block *sb,
1442 1442
1443 osb->bitmap_blkno = OCFS2_I(inode)->ip_blkno; 1443 osb->bitmap_blkno = OCFS2_I(inode)->ip_blkno;
1444 1444
1445 /* We don't have a cluster lock on the bitmap here because
1446 * we're only interested in static information and the extra
1447 * complexity at mount time isn't worth it. Don't pass the
1448 * inode in to the read function though as we don't want it to
1449 * be put in the cache. */
1445 status = ocfs2_read_block(osb, osb->bitmap_blkno, &bitmap_bh, 0, 1450 status = ocfs2_read_block(osb, osb->bitmap_blkno, &bitmap_bh, 0,
1446 inode); 1451 NULL);
1447 iput(inode); 1452 iput(inode);
1448 if (status < 0) { 1453 if (status < 0) {
1449 mlog_errno(status); 1454 mlog_errno(status);
@@ -1452,7 +1457,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
1452 1457
1453 di = (struct ocfs2_dinode *) bitmap_bh->b_data; 1458 di = (struct ocfs2_dinode *) bitmap_bh->b_data;
1454 osb->bitmap_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg); 1459 osb->bitmap_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg);
1455 osb->num_clusters = le32_to_cpu(di->id1.bitmap1.i_total);
1456 brelse(bitmap_bh); 1460 brelse(bitmap_bh);
1457 mlog(0, "cluster bitmap inode: %llu, clusters per group: %u\n", 1461 mlog(0, "cluster bitmap inode: %llu, clusters per group: %u\n",
1458 (unsigned long long)osb->bitmap_blkno, osb->bitmap_cpg); 1462 (unsigned long long)osb->bitmap_blkno, osb->bitmap_cpg);
diff --git a/fs/partitions/sun.c b/fs/partitions/sun.c
index abe91ca03edf..0a5927c806ca 100644
--- a/fs/partitions/sun.c
+++ b/fs/partitions/sun.c
@@ -74,7 +74,7 @@ int sun_partition(struct parsed_partitions *state, struct block_device *bdev)
74 spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect); 74 spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect);
75 for (i = 0; i < 8; i++, p++) { 75 for (i = 0; i < 8; i++, p++) {
76 unsigned long st_sector; 76 unsigned long st_sector;
77 int num_sectors; 77 unsigned int num_sectors;
78 78
79 st_sector = be32_to_cpu(p->start_cylinder) * spc; 79 st_sector = be32_to_cpu(p->start_cylinder) * spc;
80 num_sectors = be32_to_cpu(p->num_sectors); 80 num_sectors = be32_to_cpu(p->num_sectors);
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 9f2cfc30f9cf..942156225447 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -169,7 +169,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
169 "Mapped: %8lu kB\n" 169 "Mapped: %8lu kB\n"
170 "Slab: %8lu kB\n" 170 "Slab: %8lu kB\n"
171 "PageTables: %8lu kB\n" 171 "PageTables: %8lu kB\n"
172 "NFS Unstable: %8lu kB\n" 172 "NFS_Unstable: %8lu kB\n"
173 "Bounce: %8lu kB\n" 173 "Bounce: %8lu kB\n"
174 "CommitLimit: %8lu kB\n" 174 "CommitLimit: %8lu kB\n"
175 "Committed_AS: %8lu kB\n" 175 "Committed_AS: %8lu kB\n"
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 39fedaa88a0c..d935fb9394e3 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -424,7 +424,7 @@ int xattr_readdir(struct file *file, filldir_t filler, void *buf)
424 int res = -ENOTDIR; 424 int res = -ENOTDIR;
425 if (!file->f_op || !file->f_op->readdir) 425 if (!file->f_op || !file->f_op->readdir)
426 goto out; 426 goto out;
427 mutex_lock(&inode->i_mutex); 427 mutex_lock_nested(&inode->i_mutex, I_MUTEX_XATTR);
428// down(&inode->i_zombie); 428// down(&inode->i_zombie);
429 res = -ENOENT; 429 res = -ENOENT;
430 if (!IS_DEADDIR(inode)) { 430 if (!IS_DEADDIR(inode)) {
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 4df822c881b6..fcce1a21a51b 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -115,6 +115,13 @@ static struct inode *udf_alloc_inode(struct super_block *sb)
115 ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, SLAB_KERNEL); 115 ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, SLAB_KERNEL);
116 if (!ei) 116 if (!ei)
117 return NULL; 117 return NULL;
118
119 ei->i_unique = 0;
120 ei->i_lenExtents = 0;
121 ei->i_next_alloc_block = 0;
122 ei->i_next_alloc_goal = 0;
123 ei->i_strat4096 = 0;
124
118 return &ei->vfs_inode; 125 return &ei->vfs_inode;
119} 126}
120 127
@@ -1652,7 +1659,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1652 iput(inode); 1659 iput(inode);
1653 goto error_out; 1660 goto error_out;
1654 } 1661 }
1655 sb->s_maxbytes = MAX_LFS_FILESIZE; 1662 sb->s_maxbytes = 1<<30;
1656 return 0; 1663 return 0;
1657 1664
1658error_out: 1665error_out:
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index e1b0e8cfecb4..0abd66ce36ea 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -239,37 +239,51 @@ void udf_truncate_extents(struct inode * inode)
239 { 239 {
240 if (offset) 240 if (offset)
241 { 241 {
242 extoffset -= adsize; 242 /*
243 etype = udf_next_aext(inode, &bloc, &extoffset, &eloc, &elen, &bh, 1); 243 * OK, there is no extent covering inode->i_size and
244 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) 244 * no extent above inode->i_size => truncate is
245 { 245 * extending the file by 'offset'.
246 extoffset -= adsize; 246 */
247 elen = EXT_NOT_RECORDED_NOT_ALLOCATED | (elen + offset); 247 if ((!bh && extoffset == udf_file_entry_alloc_offset(inode)) ||
248 udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 0); 248 (bh && extoffset == sizeof(struct allocExtDesc))) {
249 /* File has no extents at all! */
250 memset(&eloc, 0x00, sizeof(kernel_lb_addr));
251 elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset;
252 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1);
249 } 253 }
250 else if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) 254 else {
251 {
252 kernel_lb_addr neloc = { 0, 0 };
253 extoffset -= adsize; 255 extoffset -= adsize;
254 nelen = EXT_NOT_RECORDED_NOT_ALLOCATED | 256 etype = udf_next_aext(inode, &bloc, &extoffset, &eloc, &elen, &bh, 1);
255 ((elen + offset + inode->i_sb->s_blocksize - 1) & 257 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
256 ~(inode->i_sb->s_blocksize - 1)); 258 {
257 udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1); 259 extoffset -= adsize;
258 udf_add_aext(inode, &bloc, &extoffset, eloc, (etype << 30) | elen, &bh, 1); 260 elen = EXT_NOT_RECORDED_NOT_ALLOCATED | (elen + offset);
259 } 261 udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 0);
260 else 262 }
261 { 263 else if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
262 if (elen & (inode->i_sb->s_blocksize - 1))
263 { 264 {
265 kernel_lb_addr neloc = { 0, 0 };
264 extoffset -= adsize; 266 extoffset -= adsize;
265 elen = EXT_RECORDED_ALLOCATED | 267 nelen = EXT_NOT_RECORDED_NOT_ALLOCATED |
266 ((elen + inode->i_sb->s_blocksize - 1) & 268 ((elen + offset + inode->i_sb->s_blocksize - 1) &
267 ~(inode->i_sb->s_blocksize - 1)); 269 ~(inode->i_sb->s_blocksize - 1));
268 udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 1); 270 udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1);
271 udf_add_aext(inode, &bloc, &extoffset, eloc, (etype << 30) | elen, &bh, 1);
272 }
273 else
274 {
275 if (elen & (inode->i_sb->s_blocksize - 1))
276 {
277 extoffset -= adsize;
278 elen = EXT_RECORDED_ALLOCATED |
279 ((elen + inode->i_sb->s_blocksize - 1) &
280 ~(inode->i_sb->s_blocksize - 1));
281 udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 1);
282 }
283 memset(&eloc, 0x00, sizeof(kernel_lb_addr));
284 elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset;
285 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1);
269 } 286 }
270 memset(&eloc, 0x00, sizeof(kernel_lb_addr));
271 elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset;
272 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1);
273 } 287 }
274 } 288 }
275 } 289 }
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index e7c8615beb65..30c6e8a9446c 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -169,18 +169,20 @@ static void ufs_clear_frag(struct inode *inode, struct buffer_head *bh)
169 169
170static struct buffer_head * 170static struct buffer_head *
171ufs_clear_frags(struct inode *inode, sector_t beg, 171ufs_clear_frags(struct inode *inode, sector_t beg,
172 unsigned int n) 172 unsigned int n, sector_t want)
173{ 173{
174 struct buffer_head *res, *bh; 174 struct buffer_head *res = NULL, *bh;
175 sector_t end = beg + n; 175 sector_t end = beg + n;
176 176
177 res = sb_getblk(inode->i_sb, beg); 177 for (; beg < end; ++beg) {
178 ufs_clear_frag(inode, res);
179 for (++beg; beg < end; ++beg) {
180 bh = sb_getblk(inode->i_sb, beg); 178 bh = sb_getblk(inode->i_sb, beg);
181 ufs_clear_frag(inode, bh); 179 ufs_clear_frag(inode, bh);
182 brelse(bh); 180 if (want != beg)
181 brelse(bh);
182 else
183 res = bh;
183 } 184 }
185 BUG_ON(!res);
184 return res; 186 return res;
185} 187}
186 188
@@ -265,7 +267,9 @@ repeat:
265 lastfrag = ufsi->i_lastfrag; 267 lastfrag = ufsi->i_lastfrag;
266 268
267 } 269 }
268 goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb; 270 tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]);
271 if (tmp)
272 goal = tmp + uspi->s_fpb;
269 tmp = ufs_new_fragments (inode, p, fragment - blockoff, 273 tmp = ufs_new_fragments (inode, p, fragment - blockoff,
270 goal, required + blockoff, 274 goal, required + blockoff,
271 err, locked_page); 275 err, locked_page);
@@ -277,13 +281,15 @@ repeat:
277 tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff), 281 tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff),
278 fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff), 282 fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff),
279 err, locked_page); 283 err, locked_page);
280 } 284 } else /* (lastblock > block) */ {
281 /* 285 /*
282 * We will allocate new block before last allocated block 286 * We will allocate new block before last allocated block
283 */ 287 */
284 else /* (lastblock > block) */ { 288 if (block) {
285 if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1]))) 289 tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[block-1]);
286 goal = tmp + uspi->s_fpb; 290 if (tmp)
291 goal = tmp + uspi->s_fpb;
292 }
287 tmp = ufs_new_fragments(inode, p, fragment - blockoff, 293 tmp = ufs_new_fragments(inode, p, fragment - blockoff,
288 goal, uspi->s_fpb, err, locked_page); 294 goal, uspi->s_fpb, err, locked_page);
289 } 295 }
@@ -296,7 +302,7 @@ repeat:
296 } 302 }
297 303
298 if (!phys) { 304 if (!phys) {
299 result = ufs_clear_frags(inode, tmp + blockoff, required); 305 result = ufs_clear_frags(inode, tmp, required, tmp + blockoff);
300 } else { 306 } else {
301 *phys = tmp + blockoff; 307 *phys = tmp + blockoff;
302 result = NULL; 308 result = NULL;
@@ -383,7 +389,7 @@ repeat:
383 } 389 }
384 } 390 }
385 391
386 if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1]) + uspi->s_fpb)) 392 if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1])))
387 goal = tmp + uspi->s_fpb; 393 goal = tmp + uspi->s_fpb;
388 else 394 else
389 goal = bh->b_blocknr + uspi->s_fpb; 395 goal = bh->b_blocknr + uspi->s_fpb;
@@ -397,7 +403,8 @@ repeat:
397 403
398 404
399 if (!phys) { 405 if (!phys) {
400 result = ufs_clear_frags(inode, tmp + blockoff, uspi->s_fpb); 406 result = ufs_clear_frags(inode, tmp, uspi->s_fpb,
407 tmp + blockoff);
401 } else { 408 } else {
402 *phys = tmp + blockoff; 409 *phys = tmp + blockoff;
403 *new = 1; 410 *new = 1;
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index c9b55872079b..ea11d04c41a0 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -375,17 +375,15 @@ static int ufs_alloc_lastblock(struct inode *inode)
375 int err = 0; 375 int err = 0;
376 struct address_space *mapping = inode->i_mapping; 376 struct address_space *mapping = inode->i_mapping;
377 struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi; 377 struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
378 struct ufs_inode_info *ufsi = UFS_I(inode);
379 unsigned lastfrag, i, end; 378 unsigned lastfrag, i, end;
380 struct page *lastpage; 379 struct page *lastpage;
381 struct buffer_head *bh; 380 struct buffer_head *bh;
382 381
383 lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift; 382 lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift;
384 383
385 if (!lastfrag) { 384 if (!lastfrag)
386 ufsi->i_lastfrag = 0;
387 goto out; 385 goto out;
388 } 386
389 lastfrag--; 387 lastfrag--;
390 388
391 lastpage = ufs_get_locked_page(mapping, lastfrag >> 389 lastpage = ufs_get_locked_page(mapping, lastfrag >>
@@ -400,25 +398,25 @@ static int ufs_alloc_lastblock(struct inode *inode)
400 for (i = 0; i < end; ++i) 398 for (i = 0; i < end; ++i)
401 bh = bh->b_this_page; 399 bh = bh->b_this_page;
402 400
403 if (!buffer_mapped(bh)) { 401
404 err = ufs_getfrag_block(inode, lastfrag, bh, 1); 402 err = ufs_getfrag_block(inode, lastfrag, bh, 1);
405 403
406 if (unlikely(err)) 404 if (unlikely(err))
407 goto out_unlock; 405 goto out_unlock;
408 406
409 if (buffer_new(bh)) { 407 if (buffer_new(bh)) {
410 clear_buffer_new(bh); 408 clear_buffer_new(bh);
411 unmap_underlying_metadata(bh->b_bdev, 409 unmap_underlying_metadata(bh->b_bdev,
412 bh->b_blocknr); 410 bh->b_blocknr);
413 /* 411 /*
414 * we do not zeroize fragment, because of 412 * we do not zeroize fragment, because of
415 * if it mapped to hole, it already contains zeroes 413 * if it mapped to hole, it already contains zeroes
416 */ 414 */
417 set_buffer_uptodate(bh); 415 set_buffer_uptodate(bh);
418 mark_buffer_dirty(bh); 416 mark_buffer_dirty(bh);
419 set_page_dirty(lastpage); 417 set_page_dirty(lastpage);
420 }
421 } 418 }
419
422out_unlock: 420out_unlock:
423 ufs_put_locked_page(lastpage); 421 ufs_put_locked_page(lastpage);
424out: 422out:
@@ -440,23 +438,11 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size)
440 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 438 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
441 return -EPERM; 439 return -EPERM;
442 440
443 if (inode->i_size > old_i_size) { 441 err = ufs_alloc_lastblock(inode);
444 /*
445 * if we expand file we should care about
446 * allocation of block for last byte first of all
447 */
448 err = ufs_alloc_lastblock(inode);
449 442
450 if (err) { 443 if (err) {
451 i_size_write(inode, old_i_size); 444 i_size_write(inode, old_i_size);
452 goto out; 445 goto out;
453 }
454 /*
455 * go away, because of we expand file, and we do not
456 * need free blocks, and zeroizes page
457 */
458 lock_kernel();
459 goto almost_end;
460 } 446 }
461 447
462 block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block); 448 block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block);
@@ -477,21 +463,8 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size)
477 yield(); 463 yield();
478 } 464 }
479 465
480 if (inode->i_size < old_i_size) {
481 /*
482 * now we should have enough space
483 * to allocate block for last byte
484 */
485 err = ufs_alloc_lastblock(inode);
486 if (err)
487 /*
488 * looks like all the same - we have no space,
489 * but we truncate file already
490 */
491 inode->i_size = (ufsi->i_lastfrag - 1) * uspi->s_fsize;
492 }
493almost_end:
494 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; 466 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
467 ufsi->i_lastfrag = DIRECT_FRAGMENT;
495 unlock_kernel(); 468 unlock_kernel();
496 mark_inode_dirty(inode); 469 mark_inode_dirty(inode);
497out: 470out:
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index eef6763f3a67..d2bbcd882a69 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -1835,40 +1835,47 @@ xfs_alloc_fix_freelist(
1835 &agbp))) 1835 &agbp)))
1836 return error; 1836 return error;
1837 if (!pag->pagf_init) { 1837 if (!pag->pagf_init) {
1838 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
1839 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1838 args->agbp = NULL; 1840 args->agbp = NULL;
1839 return 0; 1841 return 0;
1840 } 1842 }
1841 } else 1843 } else
1842 agbp = NULL; 1844 agbp = NULL;
1843 1845
1844 /* If this is a metadata preferred pag and we are user data 1846 /*
1847 * If this is a metadata preferred pag and we are user data
1845 * then try somewhere else if we are not being asked to 1848 * then try somewhere else if we are not being asked to
1846 * try harder at this point 1849 * try harder at this point
1847 */ 1850 */
1848 if (pag->pagf_metadata && args->userdata && flags) { 1851 if (pag->pagf_metadata && args->userdata &&
1852 (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
1853 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1849 args->agbp = NULL; 1854 args->agbp = NULL;
1850 return 0; 1855 return 0;
1851 } 1856 }
1852 1857
1853 need = XFS_MIN_FREELIST_PAG(pag, mp); 1858 if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
1854 delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0; 1859 need = XFS_MIN_FREELIST_PAG(pag, mp);
1855 /* 1860 delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0;
1856 * If it looks like there isn't a long enough extent, or enough 1861 /*
1857 * total blocks, reject it. 1862 * If it looks like there isn't a long enough extent, or enough
1858 */ 1863 * total blocks, reject it.
1859 longest = (pag->pagf_longest > delta) ? 1864 */
1860 (pag->pagf_longest - delta) : 1865 longest = (pag->pagf_longest > delta) ?
1861 (pag->pagf_flcount > 0 || pag->pagf_longest > 0); 1866 (pag->pagf_longest - delta) :
1862 if (args->minlen + args->alignment + args->minalignslop - 1 > longest || 1867 (pag->pagf_flcount > 0 || pag->pagf_longest > 0);
1863 (!(flags & XFS_ALLOC_FLAG_FREEING) && 1868 if ((args->minlen + args->alignment + args->minalignslop - 1) >
1864 (int)(pag->pagf_freeblks + pag->pagf_flcount - 1869 longest ||
1865 need - args->total) < 1870 ((int)(pag->pagf_freeblks + pag->pagf_flcount -
1866 (int)args->minleft)) { 1871 need - args->total) < (int)args->minleft)) {
1867 if (agbp) 1872 if (agbp)
1868 xfs_trans_brelse(tp, agbp); 1873 xfs_trans_brelse(tp, agbp);
1869 args->agbp = NULL; 1874 args->agbp = NULL;
1870 return 0; 1875 return 0;
1876 }
1871 } 1877 }
1878
1872 /* 1879 /*
1873 * Get the a.g. freespace buffer. 1880 * Get the a.g. freespace buffer.
1874 * Can fail if we're not blocking on locks, and it's held. 1881 * Can fail if we're not blocking on locks, and it's held.
@@ -1878,6 +1885,8 @@ xfs_alloc_fix_freelist(
1878 &agbp))) 1885 &agbp)))
1879 return error; 1886 return error;
1880 if (agbp == NULL) { 1887 if (agbp == NULL) {
1888 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
1889 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1881 args->agbp = NULL; 1890 args->agbp = NULL;
1882 return 0; 1891 return 0;
1883 } 1892 }
@@ -1887,22 +1896,24 @@ xfs_alloc_fix_freelist(
1887 */ 1896 */
1888 agf = XFS_BUF_TO_AGF(agbp); 1897 agf = XFS_BUF_TO_AGF(agbp);
1889 need = XFS_MIN_FREELIST(agf, mp); 1898 need = XFS_MIN_FREELIST(agf, mp);
1890 delta = need > be32_to_cpu(agf->agf_flcount) ?
1891 (need - be32_to_cpu(agf->agf_flcount)) : 0;
1892 /* 1899 /*
1893 * If there isn't enough total or single-extent, reject it. 1900 * If there isn't enough total or single-extent, reject it.
1894 */ 1901 */
1895 longest = be32_to_cpu(agf->agf_longest); 1902 if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
1896 longest = (longest > delta) ? (longest - delta) : 1903 delta = need > be32_to_cpu(agf->agf_flcount) ?
1897 (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0); 1904 (need - be32_to_cpu(agf->agf_flcount)) : 0;
1898 if (args->minlen + args->alignment + args->minalignslop - 1 > longest || 1905 longest = be32_to_cpu(agf->agf_longest);
1899 (!(flags & XFS_ALLOC_FLAG_FREEING) && 1906 longest = (longest > delta) ? (longest - delta) :
1900 (int)(be32_to_cpu(agf->agf_freeblks) + 1907 (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
1901 be32_to_cpu(agf->agf_flcount) - need - args->total) < 1908 if ((args->minlen + args->alignment + args->minalignslop - 1) >
1902 (int)args->minleft)) { 1909 longest ||
1903 xfs_trans_brelse(tp, agbp); 1910 ((int)(be32_to_cpu(agf->agf_freeblks) +
1904 args->agbp = NULL; 1911 be32_to_cpu(agf->agf_flcount) - need - args->total) <
1905 return 0; 1912 (int)args->minleft)) {
1913 xfs_trans_brelse(tp, agbp);
1914 args->agbp = NULL;
1915 return 0;
1916 }
1906 } 1917 }
1907 /* 1918 /*
1908 * Make the freelist shorter if it's too long. 1919 * Make the freelist shorter if it's too long.
@@ -1950,12 +1961,11 @@ xfs_alloc_fix_freelist(
1950 * on a completely full ag. 1961 * on a completely full ag.
1951 */ 1962 */
1952 if (targs.agbno == NULLAGBLOCK) { 1963 if (targs.agbno == NULLAGBLOCK) {
1953 if (!(flags & XFS_ALLOC_FLAG_FREEING)) { 1964 if (flags & XFS_ALLOC_FLAG_FREEING)
1954 xfs_trans_brelse(tp, agflbp); 1965 break;
1955 args->agbp = NULL; 1966 xfs_trans_brelse(tp, agflbp);
1956 return 0; 1967 args->agbp = NULL;
1957 } 1968 return 0;
1958 break;
1959 } 1969 }
1960 /* 1970 /*
1961 * Put each allocated block on the list. 1971 * Put each allocated block on the list.
@@ -2442,31 +2452,26 @@ xfs_free_extent(
2442 xfs_fsblock_t bno, /* starting block number of extent */ 2452 xfs_fsblock_t bno, /* starting block number of extent */
2443 xfs_extlen_t len) /* length of extent */ 2453 xfs_extlen_t len) /* length of extent */
2444{ 2454{
2445#ifdef DEBUG 2455 xfs_alloc_arg_t args;
2446 xfs_agf_t *agf; /* a.g. freespace header */
2447#endif
2448 xfs_alloc_arg_t args; /* allocation argument structure */
2449 int error; 2456 int error;
2450 2457
2451 ASSERT(len != 0); 2458 ASSERT(len != 0);
2459 memset(&args, 0, sizeof(xfs_alloc_arg_t));
2452 args.tp = tp; 2460 args.tp = tp;
2453 args.mp = tp->t_mountp; 2461 args.mp = tp->t_mountp;
2454 args.agno = XFS_FSB_TO_AGNO(args.mp, bno); 2462 args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
2455 ASSERT(args.agno < args.mp->m_sb.sb_agcount); 2463 ASSERT(args.agno < args.mp->m_sb.sb_agcount);
2456 args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno); 2464 args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
2457 args.alignment = 1;
2458 args.minlen = args.minleft = args.minalignslop = 0;
2459 down_read(&args.mp->m_peraglock); 2465 down_read(&args.mp->m_peraglock);
2460 args.pag = &args.mp->m_perag[args.agno]; 2466 args.pag = &args.mp->m_perag[args.agno];
2461 if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING))) 2467 if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING)))
2462 goto error0; 2468 goto error0;
2463#ifdef DEBUG 2469#ifdef DEBUG
2464 ASSERT(args.agbp != NULL); 2470 ASSERT(args.agbp != NULL);
2465 agf = XFS_BUF_TO_AGF(args.agbp); 2471 ASSERT((args.agbno + len) <=
2466 ASSERT(args.agbno + len <= be32_to_cpu(agf->agf_length)); 2472 be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length));
2467#endif 2473#endif
2468 error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, 2474 error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
2469 len, 0);
2470error0: 2475error0:
2471 up_read(&args.mp->m_peraglock); 2476 up_read(&args.mp->m_peraglock);
2472 return error; 2477 return error;
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 3a6137539064..bf46fae303af 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -4993,7 +4993,7 @@ xfs_bmapi(
4993 bma.firstblock = *firstblock; 4993 bma.firstblock = *firstblock;
4994 bma.alen = alen; 4994 bma.alen = alen;
4995 bma.off = aoff; 4995 bma.off = aoff;
4996 bma.conv = (flags & XFS_BMAPI_CONVERT); 4996 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4997 bma.wasdel = wasdelay; 4997 bma.wasdel = wasdelay;
4998 bma.minlen = minlen; 4998 bma.minlen = minlen;
4999 bma.low = flist->xbf_low; 4999 bma.low = flist->xbf_low;