Diffstat (limited to 'fs')
-rw-r--r--  fs/adfs/map.c | 2
-rw-r--r--  fs/afs/cache.c | 12
-rw-r--r--  fs/afs/cell.c | 2
-rw-r--r--  fs/attr.c | 2
-rw-r--r--  fs/autofs4/root.c | 2
-rw-r--r--  fs/befs/ChangeLog | 10
-rw-r--r--  fs/befs/befs_fs_types.h | 2
-rw-r--r--  fs/befs/btree.c | 2
-rw-r--r--  fs/befs/linuxvfs.c | 2
-rw-r--r--  fs/binfmt_flat.c | 2
-rw-r--r--  fs/bio.c | 2
-rw-r--r--  fs/block_dev.c | 2
-rw-r--r--  fs/btrfs/ctree.h | 4
-rw-r--r--  fs/btrfs/disk-io.c | 4
-rw-r--r--  fs/btrfs/extent_map.c | 2
-rw-r--r--  fs/btrfs/file.c | 2
-rw-r--r--  fs/btrfs/free-space-cache.c | 84
-rw-r--r--  fs/btrfs/inode.c | 31
-rw-r--r--  fs/btrfs/ioctl.c | 8
-rw-r--r--  fs/btrfs/relocation.c | 2
-rw-r--r--  fs/btrfs/root-tree.c | 18
-rw-r--r--  fs/btrfs/super.c | 19
-rw-r--r--  fs/btrfs/transaction.c | 2
-rw-r--r--  fs/cachefiles/interface.c | 2
-rw-r--r--  fs/ceph/addr.c | 2
-rw-r--r--  fs/ceph/caps.c | 2
-rw-r--r--  fs/ceph/mds_client.c | 6
-rw-r--r--  fs/ceph/snap.c | 2
-rw-r--r--  fs/ceph/super.c | 2
-rw-r--r--  fs/cifs/AUTHORS | 2
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 2
-rw-r--r--  fs/cifs/cifssmb.c | 2
-rw-r--r--  fs/cifs/connect.c | 4
-rw-r--r--  fs/cifs/dir.c | 2
-rw-r--r--  fs/configfs/dir.c | 2
-rw-r--r--  fs/dlm/lock.c | 2
-rw-r--r--  fs/dlm/lowcomms.c | 2
-rw-r--r--  fs/dlm/recover.c | 2
-rw-r--r--  fs/ecryptfs/main.c | 4
-rw-r--r--  fs/eventpoll.c | 8
-rw-r--r--  fs/exofs/common.h | 4
-rw-r--r--  fs/ext2/balloc.c | 6
-rw-r--r--  fs/ext2/inode.c | 8
-rw-r--r--  fs/ext2/super.c | 2
-rw-r--r--  fs/ext2/xattr.c | 2
-rw-r--r--  fs/ext3/balloc.c | 10
-rw-r--r--  fs/ext3/inode.c | 8
-rw-r--r--  fs/ext3/resize.c | 2
-rw-r--r--  fs/ext3/super.c | 2
-rw-r--r--  fs/ext4/balloc.c | 2
-rw-r--r--  fs/ext4/ext4_jbd2.h | 4
-rw-r--r--  fs/ext4/extents.c | 10
-rw-r--r--  fs/ext4/fsync.c | 19
-rw-r--r--  fs/ext4/inode.c | 55
-rw-r--r--  fs/ext4/mballoc.c | 2
-rw-r--r--  fs/ext4/migrate.c | 2
-rw-r--r--  fs/ext4/super.c | 78
-rw-r--r--  fs/freevxfs/vxfs_fshead.c | 2
-rw-r--r--  fs/freevxfs/vxfs_lookup.c | 2
-rw-r--r--  fs/freevxfs/vxfs_olt.h | 2
-rw-r--r--  fs/fs-writeback.c | 2
-rw-r--r--  fs/fuse/file.c | 2
-rw-r--r--  fs/gfs2/bmap.c | 2
-rw-r--r--  fs/gfs2/glock.c | 2
-rw-r--r--  fs/gfs2/super.c | 2
-rw-r--r--  fs/inode.c | 9
-rw-r--r--  fs/jbd/commit.c | 2
-rw-r--r--  fs/jbd/journal.c | 4
-rw-r--r--  fs/jbd/revoke.c | 2
-rw-r--r--  fs/jbd/transaction.c | 2
-rw-r--r--  fs/jbd2/commit.c | 6
-rw-r--r--  fs/jbd2/journal.c | 7
-rw-r--r--  fs/jbd2/revoke.c | 2
-rw-r--r--  fs/jbd2/transaction.c | 2
-rw-r--r--  fs/jffs2/TODO | 2
-rw-r--r--  fs/jffs2/readinode.c | 2
-rw-r--r--  fs/jffs2/summary.c | 4
-rw-r--r--  fs/jffs2/wbuf.c | 2
-rw-r--r--  fs/jfs/jfs_dmap.c | 4
-rw-r--r--  fs/jfs/jfs_extent.c | 6
-rw-r--r--  fs/jfs/jfs_imap.c | 14
-rw-r--r--  fs/jfs/jfs_logmgr.h | 2
-rw-r--r--  fs/jfs/jfs_metapage.h | 2
-rw-r--r--  fs/jfs/jfs_txnmgr.c | 2
-rw-r--r--  fs/jfs/resize.c | 4
-rw-r--r--  fs/jfs/super.c | 2
-rw-r--r--  fs/logfs/dev_mtd.c | 2
-rw-r--r--  fs/logfs/dir.c | 2
-rw-r--r--  fs/logfs/readwrite.c | 2
-rw-r--r--  fs/mbcache.c | 2
-rw-r--r--  fs/namei.c | 2
-rw-r--r--  fs/ncpfs/inode.c | 2
-rw-r--r--  fs/nfs/callback_xdr.c | 2
-rw-r--r--  fs/nfs/file.c | 2
-rw-r--r--  fs/nfs/namespace.c | 58
-rw-r--r--  fs/nfs/nfs4filelayout.h | 2
-rw-r--r--  fs/nfs/nfs4proc.c | 4
-rw-r--r--  fs/nfs_common/nfsacl.c | 2
-rw-r--r--  fs/nfsd/lockd.c | 1
-rw-r--r--  fs/nfsd/nfs3xdr.c | 2
-rw-r--r--  fs/nfsd/nfs4state.c | 13
-rw-r--r--  fs/nfsd/nfsxdr.c | 2
-rw-r--r--  fs/nilfs2/file.c | 11
-rw-r--r--  fs/nilfs2/nilfs.h | 14
-rw-r--r--  fs/nilfs2/page.c | 2
-rw-r--r--  fs/notify/fanotify/fanotify_user.c | 2
-rw-r--r--  fs/notify/inotify/inotify_fsnotify.c | 3
-rw-r--r--  fs/notify/inotify/inotify_user.c | 39
-rw-r--r--  fs/notify/mark.c | 2
-rw-r--r--  fs/ntfs/attrib.c | 4
-rw-r--r--  fs/ntfs/compress.c | 2
-rw-r--r--  fs/ntfs/inode.c | 4
-rw-r--r--  fs/ntfs/layout.h | 12
-rw-r--r--  fs/ntfs/logfile.c | 2
-rw-r--r--  fs/ntfs/logfile.h | 2
-rw-r--r--  fs/ntfs/mft.c | 8
-rw-r--r--  fs/ntfs/runlist.c | 2
-rw-r--r--  fs/ntfs/super.c | 14
-rw-r--r--  fs/ocfs2/alloc.c | 2
-rw-r--r--  fs/ocfs2/aops.h | 2
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 2
-rw-r--r--  fs/ocfs2/cluster/quorum.c | 4
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 4
-rw-r--r--  fs/ocfs2/inode.c | 4
-rw-r--r--  fs/ocfs2/journal.c | 2
-rw-r--r--  fs/ocfs2/journal.h | 2
-rw-r--r--  fs/ocfs2/namei.c | 2
-rw-r--r--  fs/ocfs2/ocfs2_fs.h | 4
-rw-r--r--  fs/ocfs2/quota_global.c | 2
-rw-r--r--  fs/ocfs2/reservations.h | 2
-rw-r--r--  fs/ocfs2/stackglue.h | 2
-rw-r--r--  fs/ocfs2/suballoc.c | 4
-rw-r--r--  fs/ocfs2/super.c | 2
-rw-r--r--  fs/ocfs2/xattr.c | 4
-rw-r--r--  fs/partitions/check.c | 4
-rw-r--r--  fs/proc/base.c | 2
-rw-r--r--  fs/pstore/Kconfig | 2
-rw-r--r--  fs/quota/dquot.c | 15
-rw-r--r--  fs/reiserfs/journal.c | 4
-rw-r--r--  fs/reiserfs/lock.c | 2
-rw-r--r--  fs/reiserfs/super.c | 4
-rw-r--r--  fs/reiserfs/xattr.c | 2
-rw-r--r--  fs/squashfs/cache.c | 4
-rw-r--r--  fs/ubifs/Kconfig | 2
-rw-r--r--  fs/ubifs/budget.c | 2
-rw-r--r--  fs/ubifs/commit.c | 2
-rw-r--r--  fs/ubifs/debug.c | 63
-rw-r--r--  fs/ubifs/lpt.c | 7
-rw-r--r--  fs/ubifs/super.c | 3
-rw-r--r--  fs/ubifs/xattr.c | 4
-rw-r--r--  fs/ufs/inode.c | 2
-rw-r--r--  fs/ufs/super.c | 6
-rw-r--r--  fs/ufs/truncate.c | 1
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 28
-rw-r--r--  fs/xfs/linux-2.6/xfs_file.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_message.c | 27
-rw-r--r--  fs/xfs/linux-2.6/xfs_message.h | 24
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 129
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c | 230
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.h | 2
-rw-r--r--  fs/xfs/quota/xfs_dquot.c | 2
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 7
-rw-r--r--  fs/xfs/quota/xfs_qm.h | 5
-rw-r--r--  fs/xfs/quota/xfs_qm_bhv.c | 2
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c | 6
-rw-r--r--  fs/xfs/xfs_alloc.c | 30
-rw-r--r--  fs/xfs/xfs_buf_item.c | 2
-rw-r--r--  fs/xfs/xfs_inode.c | 2
-rw-r--r--  fs/xfs/xfs_inode.h | 4
-rw-r--r--  fs/xfs/xfs_inode_item.c | 67
-rw-r--r--  fs/xfs/xfs_itable.c | 2
-rw-r--r--  fs/xfs/xfs_log.c | 38
-rw-r--r--  fs/xfs/xfs_log_priv.h | 3
-rw-r--r--  fs/xfs/xfs_log_recover.c | 4
-rw-r--r--  fs/xfs/xfs_mount.h | 9
-rw-r--r--  fs/xfs/xfs_trans_ail.c | 421
-rw-r--r--  fs/xfs/xfs_trans_inode.c | 2
-rw-r--r--  fs/xfs/xfs_trans_priv.h | 22
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 4
182 files changed, 1142 insertions(+), 916 deletions(-)
diff --git a/fs/adfs/map.c b/fs/adfs/map.c
index d1a5932bb0f1..6935f05202ac 100644
--- a/fs/adfs/map.c
+++ b/fs/adfs/map.c
@@ -51,7 +51,7 @@ static DEFINE_RWLOCK(adfs_map_lock);
 
 /*
  * This is fun.  We need to load up to 19 bits from the map at an
- * arbitary bit alignment. (We're limited to 19 bits by F+ version 2).
+ * arbitrary bit alignment. (We're limited to 19 bits by F+ version 2).
  */
 #define GET_FRAG_ID(_map,_start,_idmask) \
 	({ \
diff --git a/fs/afs/cache.c b/fs/afs/cache.c
index 0fb315dd4d2a..577763c3d88b 100644
--- a/fs/afs/cache.c
+++ b/fs/afs/cache.c
@@ -98,7 +98,7 @@ static uint16_t afs_cell_cache_get_key(const void *cookie_netfs_data,
 }
 
 /*
- * provide new auxilliary cache data
+ * provide new auxiliary cache data
  */
 static uint16_t afs_cell_cache_get_aux(const void *cookie_netfs_data,
 				       void *buffer, uint16_t bufmax)
@@ -117,7 +117,7 @@ static uint16_t afs_cell_cache_get_aux(const void *cookie_netfs_data,
 }
 
 /*
- * check that the auxilliary data indicates that the entry is still valid
+ * check that the auxiliary data indicates that the entry is still valid
  */
 static enum fscache_checkaux afs_cell_cache_check_aux(void *cookie_netfs_data,
 						      const void *buffer,
@@ -150,7 +150,7 @@ static uint16_t afs_vlocation_cache_get_key(const void *cookie_netfs_data,
 }
 
 /*
- * provide new auxilliary cache data
+ * provide new auxiliary cache data
  */
 static uint16_t afs_vlocation_cache_get_aux(const void *cookie_netfs_data,
 					    void *buffer, uint16_t bufmax)
@@ -172,7 +172,7 @@ static uint16_t afs_vlocation_cache_get_aux(const void *cookie_netfs_data,
 }
 
 /*
- * check that the auxilliary data indicates that the entry is still valid
+ * check that the auxiliary data indicates that the entry is still valid
  */
 static
 enum fscache_checkaux afs_vlocation_cache_check_aux(void *cookie_netfs_data,
@@ -283,7 +283,7 @@ static void afs_vnode_cache_get_attr(const void *cookie_netfs_data,
 }
 
 /*
- * provide new auxilliary cache data
+ * provide new auxiliary cache data
  */
 static uint16_t afs_vnode_cache_get_aux(const void *cookie_netfs_data,
 					void *buffer, uint16_t bufmax)
@@ -309,7 +309,7 @@ static uint16_t afs_vnode_cache_get_aux(const void *cookie_netfs_data,
 }
 
 /*
- * check that the auxilliary data indicates that the entry is still valid
+ * check that the auxiliary data indicates that the entry is still valid
  */
 static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data,
 						       const void *buffer,
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 0d5eeadf6121..3c090b7555ea 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -293,7 +293,7 @@ struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz,
 	if (!cell) {
 		/* this should not happen unless user tries to mount
 		 * when root cell is not set. Return an impossibly
-		 * bizzare errno to alert the user. Things like
+		 * bizarre errno to alert the user. Things like
 		 * ENOENT might be "more appropriate" but they happen
 		 * for other reasons.
 		 */
diff --git a/fs/attr.c b/fs/attr.c
index 1007ed616314..91dbe2a107f2 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -128,7 +128,7 @@ EXPORT_SYMBOL(inode_newsize_ok);
  * setattr_copy must be called with i_mutex held.
  *
  * setattr_copy updates the inode's metadata with that specified
- * in attr. Noticably missing is inode size update, which is more complex
+ * in attr. Noticeably missing is inode size update, which is more complex
  * as it requires pagecache updates.
  *
  * The inode is not marked as dirty after this operation. The rationale is
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 96804a17bbd0..f55ae23b137e 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -612,7 +612,7 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
  * set the DMANAGED_AUTOMOUNT and DMANAGED_TRANSIT flags on the leaves
  * of the directory tree. There is no need to clear the automount flag
  * following a mount or restore it after an expire because these mounts
- * are always covered. However, it is neccessary to ensure that these
+ * are always covered. However, it is necessary to ensure that these
  * flags are clear on non-empty directories to avoid unnecessary calls
  * during path walks.
  */
diff --git a/fs/befs/ChangeLog b/fs/befs/ChangeLog
index ce8c787916be..75a461cfaca6 100644
--- a/fs/befs/ChangeLog
+++ b/fs/befs/ChangeLog
@@ -24,7 +24,7 @@ Version 0.9 (2002-03-14)
 
 Version 0.64 (2002-02-07)
 ==========
-* Did the string comparision really right this time (btree.c) [WD]
+* Did the string comparison really right this time (btree.c) [WD]
 
 * Fixed up some places where I assumed that a long int could hold
   a pointer value. (btree.c) [WD]
@@ -114,7 +114,7 @@ Version 0.6 (2001-12-15)
 	More flexible. Will soon be controllable at mount time
 	(see TODO). [WD]
 
-* Rewrote datastream positon lookups.
+* Rewrote datastream position lookups.
   (datastream.c) [WD]
 
 * Moved the TODO list to its own file.
@@ -150,7 +150,7 @@ Version 0.50 (2001-11-13)
 * Anton also told me that the blocksize is not allowed to be larger than
   the page size in linux, which is 4k i386. Oops. Added a test for
   (blocksize > PAGE_SIZE), and refuse to mount in that case. What this
-  practicaly means is that 8k blocksize volumes won't work without a major
+  practically means is that 8k blocksize volumes won't work without a major
   restructuring of the driver (or an alpha or other 64bit hardware). [WD]
 
 * Cleaned up the befs_count_blocks() function. Much smarter now.
@@ -183,7 +183,7 @@ Version 0.45 (2001-10-29)
   structures into the generic pointer fields of the public structures
   with kmalloc(). put_super and put_inode free them. This allows us not
   to have to touch the definitions of the public structures in
-  include/linux/fs.h. Also, befs_inode_info is huge (becuase of the
+  include/linux/fs.h. Also, befs_inode_info is huge (because of the
   symlink string). (super.c, inode.c, befs_fs.h) [WD]
 
 * Fixed a thinko that was corrupting file reads after the first block_run
@@ -404,7 +404,7 @@ Version 0.4 (2001-10-28)
 
 * Fixed compile errors on 2.4.1 kernel (WD)
 	Resolve rejected patches
-	Accomodate changed NLS interface (util.h)
+	Accommodate changed NLS interface (util.h)
 	Needed to include <linux/slab.h> in most files
 	Makefile changes
 	fs/Config.in changes
diff --git a/fs/befs/befs_fs_types.h b/fs/befs/befs_fs_types.h
index 7893eaa1e58c..eb557d9dc8be 100644
--- a/fs/befs/befs_fs_types.h
+++ b/fs/befs/befs_fs_types.h
@@ -234,7 +234,7 @@ typedef struct {
 } PACKED befs_btree_super;
 
 /*
- * Header stucture of each btree node
+ * Header structure of each btree node
  */
 typedef struct {
 	fs64 left;
diff --git a/fs/befs/btree.c b/fs/befs/btree.c
index 4202db7496cb..a66c9b1136e0 100644
--- a/fs/befs/btree.c
+++ b/fs/befs/btree.c
@@ -5,7 +5,7 @@
  *
  * Licensed under the GNU GPL. See the file COPYING for details.
  *
- * 2002-02-05: Sergey S. Kostyliov added binary search withing
+ * 2002-02-05: Sergey S. Kostyliov added binary search within
  *	btree nodes.
  *
  * Many thanks to:
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 06457ed8f3e7..54b8c28bebc8 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -734,7 +734,7 @@ parse_options(char *options, befs_mount_options * opts)
 
 /* This function has the responsibiltiy of getting the
  * filesystem ready for unmounting.
- * Basicly, we free everything that we allocated in
+ * Basically, we free everything that we allocated in
  * befs_read_inode
  */
 static void
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 811384bec8de..397d3057d336 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -717,7 +717,7 @@ static int load_flat_file(struct linux_binprm * bprm,
  * help simplify all this mumbo jumbo
  *
  * We've got two different sections of relocation entries.
- * The first is the GOT which resides at the begining of the data segment
+ * The first is the GOT which resides at the beginning of the data segment
  * and is terminated with a -1. This one can be relocated in place.
  * The second is the extra relocation entries tacked after the image's
  * data segment. These require a little more processing as the entry is
diff --git a/fs/bio.c b/fs/bio.c
index 4d6d4b6c2bf1..840a0d755248 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1436,7 +1436,7 @@ EXPORT_SYMBOL(bio_flush_dcache_pages);
  *   preferred way to end I/O on a bio, it takes care of clearing
  *   BIO_UPTODATE on error. @error is 0 on success, and and one of the
  *   established -Exxxx (-EIO, for instance) error values in case
- *   something went wrong. Noone should call bi_end_io() directly on a
+ *   something went wrong. No one should call bi_end_io() directly on a
  *   bio unless they own it and thus know that it has an end_io
  *   function.
  **/
diff --git a/fs/block_dev.c b/fs/block_dev.c
index c1511c674f53..5147bdd3b8e1 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -653,7 +653,7 @@ void bd_forget(struct inode *inode)
  * @whole: whole block device containing @bdev, may equal @bdev
  * @holder: holder trying to claim @bdev
  *
- * Test whther @bdev can be claimed by @holder.
+ * Test whether @bdev can be claimed by @holder.
  *
  * CONTEXT:
  * spin_lock(&bdev_lock).
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index d47ce8307854..3458b5725540 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1284,6 +1284,8 @@ struct btrfs_root {
 #define BTRFS_INODE_DIRSYNC		(1 << 10)
 #define BTRFS_INODE_COMPRESS		(1 << 11)
 
+#define BTRFS_INODE_ROOT_ITEM_INIT	(1 << 31)
+
 /* some macros to generate set/get funcs for the struct fields.  This
  * assumes there is a lefoo_to_cpu for every type, so lets make a simple
  * one for u8:
@@ -2359,6 +2361,8 @@ int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
 int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
 int btrfs_set_root_node(struct btrfs_root_item *item,
 			struct extent_buffer *node);
+void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
+
 /* dir-item.c */
 int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *root, const char *name,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index d7a7315bd031..8f1d44ba332f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1275,8 +1275,10 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
 	root->commit_root = btrfs_root_node(root);
 	BUG_ON(!root->node);
 out:
-	if (location->objectid != BTRFS_TREE_LOG_OBJECTID)
+	if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
 		root->ref_cows = 1;
+		btrfs_check_and_init_root_item(&root->root_item);
+	}
 
 	return root;
 }
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 2b6c12e983b3..a24a3f2fa13e 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -243,7 +243,7 @@ out:
  * Insert @em into @tree or perform a simple forward/backward merge with
  * existing mappings.  The extent_map struct passed in will be inserted
  * into the tree directly, with an additional reference taken, or a
- * reference dropped if the merge attempt was successfull.
+ * reference dropped if the merge attempt was successful.
  */
 int add_extent_mapping(struct extent_map_tree *tree,
 		       struct extent_map *em)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 656bc0a892b1..e621ea54a3fd 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -906,7 +906,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 	unsigned long last_index;
 	size_t num_written = 0;
 	int nrptrs;
-	int ret;
+	int ret = 0;
 
 	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
 		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 0037427d8a9d..f561c953205b 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -24,6 +24,7 @@
 #include "free-space-cache.h"
 #include "transaction.h"
 #include "disk-io.h"
+#include "extent_io.h"
 
 #define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
 #define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
@@ -81,6 +82,8 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
 		return ERR_PTR(-ENOENT);
 	}
 
+	inode->i_mapping->flags &= ~__GFP_FS;
+
 	spin_lock(&block_group->lock);
 	if (!root->fs_info->closing) {
 		block_group->inode = igrab(inode);
@@ -222,6 +225,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 	u64 num_entries;
 	u64 num_bitmaps;
 	u64 generation;
+	u64 used = btrfs_block_group_used(&block_group->item);
 	u32 cur_crc = ~(u32)0;
 	pgoff_t index = 0;
 	unsigned long first_page_offset;
@@ -467,6 +471,17 @@ next:
 		index++;
 	}
 
+	spin_lock(&block_group->tree_lock);
+	if (block_group->free_space != (block_group->key.offset - used -
+					block_group->bytes_super)) {
+		spin_unlock(&block_group->tree_lock);
+		printk(KERN_ERR "block group %llu has an wrong amount of free "
+		       "space\n", block_group->key.objectid);
+		ret = 0;
+		goto free_cache;
+	}
+	spin_unlock(&block_group->tree_lock);
+
 	ret = 1;
 out:
 	kfree(checksums);
@@ -495,8 +510,11 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	struct list_head *pos, *n;
 	struct page *page;
 	struct extent_state *cached_state = NULL;
+	struct btrfs_free_cluster *cluster = NULL;
+	struct extent_io_tree *unpin = NULL;
 	struct list_head bitmap_list;
 	struct btrfs_key key;
+	u64 start, end, len;
 	u64 bytes = 0;
 	u32 *crc, *checksums;
 	pgoff_t index = 0, last_index = 0;
@@ -505,6 +523,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	int entries = 0;
 	int bitmaps = 0;
 	int ret = 0;
+	bool next_page = false;
 
 	root = root->fs_info->tree_root;
 
@@ -551,6 +570,18 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	 */
 	first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64);
 
+	/* Get the cluster for this block_group if it exists */
+	if (!list_empty(&block_group->cluster_list))
+		cluster = list_entry(block_group->cluster_list.next,
+				     struct btrfs_free_cluster,
+				     block_group_list);
+
+	/*
+	 * We shouldn't have switched the pinned extents yet so this is the
+	 * right one
+	 */
+	unpin = root->fs_info->pinned_extents;
+
 	/*
 	 * Lock all pages first so we can lock the extent safely.
 	 *
@@ -580,6 +611,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
 			 0, &cached_state, GFP_NOFS);
 
+	/*
+	 * When searching for pinned extents, we need to start at our start
+	 * offset.
+	 */
+	start = block_group->key.objectid;
+
 	/* Write out the extent entries */
 	do {
 		struct btrfs_free_space_entry *entry;
@@ -587,6 +624,8 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 		unsigned long offset = 0;
 		unsigned long start_offset = 0;
 
+		next_page = false;
+
 		if (index == 0) {
 			start_offset = first_page_offset;
 			offset = start_offset;
@@ -598,7 +637,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 		entry = addr + start_offset;
 
 		memset(addr, 0, PAGE_CACHE_SIZE);
-		while (1) {
+		while (node && !next_page) {
 			struct btrfs_free_space *e;
 
 			e = rb_entry(node, struct btrfs_free_space, offset_index);
@@ -614,12 +653,49 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 				entry->type = BTRFS_FREE_SPACE_EXTENT;
 			}
 			node = rb_next(node);
-			if (!node)
-				break;
+			if (!node && cluster) {
+				node = rb_first(&cluster->root);
+				cluster = NULL;
+			}
 			offset += sizeof(struct btrfs_free_space_entry);
 			if (offset + sizeof(struct btrfs_free_space_entry) >=
 			    PAGE_CACHE_SIZE)
+				next_page = true;
+			entry++;
+		}
+
+		/*
+		 * We want to add any pinned extents to our free space cache
+		 * so we don't leak the space
+		 */
+		while (!next_page && (start < block_group->key.objectid +
+				      block_group->key.offset)) {
+			ret = find_first_extent_bit(unpin, start, &start, &end,
+						    EXTENT_DIRTY);
+			if (ret) {
+				ret = 0;
+				break;
+			}
+
+			/* This pinned extent is out of our range */
+			if (start >= block_group->key.objectid +
+			    block_group->key.offset)
 				break;
+
+			len = block_group->key.objectid +
+				block_group->key.offset - start;
+			len = min(len, end + 1 - start);
+
+			entries++;
+			entry->offset = cpu_to_le64(start);
+			entry->bytes = cpu_to_le64(len);
+			entry->type = BTRFS_FREE_SPACE_EXTENT;
+
+			start = end + 1;
+			offset += sizeof(struct btrfs_free_space_entry);
+			if (offset + sizeof(struct btrfs_free_space_entry) >=
+			    PAGE_CACHE_SIZE)
+				next_page = true;
 			entry++;
 		}
 		*crc = ~(u32)0;
@@ -650,7 +726,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 		page_cache_release(page);
 
 		index++;
-	} while (node);
+	} while (node || next_page);
 
 	/* Write out the bitmaps */
 	list_for_each_safe(pos, n, &bitmap_list) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 93c28a1d6bdc..5cc64ab9c485 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -112,6 +112,7 @@ static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
 static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
 				struct btrfs_root *root, struct inode *inode,
 				u64 start, size_t size, size_t compressed_size,
+				int compress_type,
 				struct page **compressed_pages)
 {
 	struct btrfs_key key;
@@ -126,12 +127,9 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
 	size_t cur_size = size;
 	size_t datasize;
 	unsigned long offset;
-	int compress_type = BTRFS_COMPRESS_NONE;
 
-	if (compressed_size && compressed_pages) {
-		compress_type = root->fs_info->compress_type;
+	if (compressed_size && compressed_pages)
 		cur_size = compressed_size;
-	}
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -221,7 +219,7 @@ fail:
 static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
 				 struct btrfs_root *root,
 				 struct inode *inode, u64 start, u64 end,
-				 size_t compressed_size,
+				 size_t compressed_size, int compress_type,
 				 struct page **compressed_pages)
 {
 	u64 isize = i_size_read(inode);
@@ -254,7 +252,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
 	inline_len = min_t(u64, isize, actual_end);
 	ret = insert_inline_extent(trans, root, inode, start,
 				   inline_len, compressed_size,
-				   compressed_pages);
+				   compress_type, compressed_pages);
 	BUG_ON(ret);
 	btrfs_delalloc_release_metadata(inode, end + 1 - start);
 	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
@@ -433,12 +431,13 @@ again:
 		 * to make an uncompressed inline extent.
 		 */
 		ret = cow_file_range_inline(trans, root, inode,
-					    start, end, 0, NULL);
+					    start, end, 0, 0, NULL);
 	} else {
 		/* try making a compressed inline extent */
 		ret = cow_file_range_inline(trans, root, inode,
 					    start, end,
-					    total_compressed, pages);
+					    total_compressed,
+					    compress_type, pages);
 	}
 	if (ret == 0) {
 		/*
@@ -792,7 +791,7 @@ static noinline int cow_file_range(struct inode *inode,
 	if (start == 0) {
 		/* lets try to make an inline extent */
 		ret = cow_file_range_inline(trans, root, inode,
-					    start, end, 0, NULL);
+					    start, end, 0, 0, NULL);
 		if (ret == 0) {
 			extent_clear_unlock_delalloc(inode,
 				     &BTRFS_I(inode)->io_tree,
@@ -2222,8 +2221,6 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
 		insert = 1;
 #endif
 		insert = 1;
-	} else {
-		WARN_ON(!BTRFS_I(inode)->orphan_meta_reserved);
 	}
 
 	if (!BTRFS_I(inode)->orphan_meta_reserved) {
@@ -2324,7 +2321,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
 
 	/*
 	 * if ret == 0 means we found what we were searching for, which
-	 * is weird, but possible, so only screw with path if we didnt
+	 * is weird, but possible, so only screw with path if we didn't
 	 * find the key and see if we have stuff that matches
 	 */
 	if (ret > 0) {
@@ -2537,8 +2534,6 @@ static void btrfs_read_locked_inode(struct inode *inode)
 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
 
 	alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
-	if (location.objectid == BTRFS_FREE_SPACE_OBJECTID)
-		inode->i_mapping->flags &= ~__GFP_FS;
 
 	/*
 	 * try to precache a NULL acl entry for files that don't have
@@ -6960,8 +6955,10 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	 * should cover the worst case number of items we'll modify.
 	 */
 	trans = btrfs_start_transaction(root, 20);
-	if (IS_ERR(trans))
-		return PTR_ERR(trans);
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		goto out_notrans;
+	}
 
 	btrfs_set_trans_block_group(trans, new_dir);
 
@@ -7061,7 +7058,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	}
 out_fail:
 	btrfs_end_transaction_throttle(trans, root);
-
+out_notrans:
 	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
 		up_read(&root->fs_info->subvol_sem);
 
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 7c07fe26b7cf..cfc264fefdb0 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -373,6 +373,10 @@ static noinline int create_subvol(struct btrfs_root *root,
 	inode_item->nbytes = cpu_to_le64(root->leafsize);
 	inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
 
+	root_item.flags = 0;
+	root_item.byte_limit = 0;
+	inode_item->flags = cpu_to_le64(BTRFS_INODE_ROOT_ITEM_INIT);
+
 	btrfs_set_root_bytenr(&root_item, leaf->start);
 	btrfs_set_root_generation(&root_item, trans->transid);
 	btrfs_set_root_level(&root_item, 0);
@@ -2436,8 +2440,10 @@ static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp
 		return PTR_ERR(trans);
 	transid = trans->transid;
 	ret = btrfs_commit_transaction_async(trans, root, 0);
-	if (ret)
+	if (ret) {
+		btrfs_end_transaction(trans, root);
 		return ret;
+	}
 
 	if (argp)
 		if (copy_to_user(argp, &transid, sizeof(transid)))
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 58250e09eb05..199a80134312 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2346,7 +2346,7 @@ struct btrfs_root *select_one_root(struct btrfs_trans_handle *trans,
 		root = next->root;
 		BUG_ON(!root);
 
-		/* no other choice for non-refernce counted tree */
+		/* no other choice for non-references counted tree */
 		if (!root->ref_cows)
 			return root;
 
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 29b2d7c930eb..6928bff62daa 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -473,3 +473,21 @@ again:
 	btrfs_free_path(path);
 	return 0;
 }
+
+/*
+ * Old btrfs forgets to init root_item->flags and root_item->byte_limit
+ * for subvolumes. To work around this problem, we steal a bit from
+ * root_item->inode_item->flags, and use it to indicate if those fields
+ * have been properly initialized.
+ */
+void btrfs_check_and_init_root_item(struct btrfs_root_item *root_item)
+{
+	u64 inode_flags = le64_to_cpu(root_item->inode.flags);
+
+	if (!(inode_flags & BTRFS_INODE_ROOT_ITEM_INIT)) {
+		inode_flags |= BTRFS_INODE_ROOT_ITEM_INIT;
+		root_item->inode.flags = cpu_to_le64(inode_flags);
+		root_item->flags = 0;
+		root_item->byte_limit = 0;
+	}
+}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 2edfc039f098..58e7de9cc90c 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -644,6 +644,7 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
 {
 	struct btrfs_root *root = btrfs_sb(vfs->mnt_sb);
 	struct btrfs_fs_info *info = root->fs_info;
+	char *compress_type;
 
 	if (btrfs_test_opt(root, DEGRADED))
 		seq_puts(seq, ",degraded");
@@ -662,8 +663,16 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
 	if (info->thread_pool_size != min_t(unsigned long,
 					    num_online_cpus() + 2, 8))
 		seq_printf(seq, ",thread_pool=%d", info->thread_pool_size);
-	if (btrfs_test_opt(root, COMPRESS))
-		seq_puts(seq, ",compress");
+	if (btrfs_test_opt(root, COMPRESS)) {
+		if (info->compress_type == BTRFS_COMPRESS_ZLIB)
+			compress_type = "zlib";
+		else
+			compress_type = "lzo";
+		if (btrfs_test_opt(root, FORCE_COMPRESS))
+			seq_printf(seq, ",compress-force=%s", compress_type);
+		else
+			seq_printf(seq, ",compress=%s", compress_type);
+	}
 	if (btrfs_test_opt(root, NOSSD))
 		seq_puts(seq, ",nossd");
 	if (btrfs_test_opt(root, SSD_SPREAD))
@@ -678,6 +687,12 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
 		seq_puts(seq, ",discard");
 	if (!(root->fs_info->sb->s_flags & MS_POSIXACL))
 		seq_puts(seq, ",noacl");
+	if (btrfs_test_opt(root, SPACE_CACHE))
+		seq_puts(seq, ",space_cache");
+	if (btrfs_test_opt(root, CLEAR_CACHE))
+		seq_puts(seq, ",clear_cache");
+	if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
+		seq_puts(seq, ",user_subvol_rm_allowed");
 	return 0;
 }
683 698
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index ce48eb59d615..5b158da7e0bb 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -197,6 +197,7 @@ again:
 
 	ret = join_transaction(root);
 	if (ret < 0) {
+		kmem_cache_free(btrfs_trans_handle_cachep, h);
 		if (type != TRANS_JOIN_NOLOCK)
 			mutex_unlock(&root->fs_info->trans_mutex);
 		return ERR_PTR(ret);
@@ -975,6 +976,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	record_root_in_trans(trans, root);
 	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
 	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
+	btrfs_check_and_init_root_item(new_root_item);
 
 	root_flags = btrfs_root_flags(new_root_item);
 	if (pending->readonly)
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 37fe101a4e0d..1064805e653b 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -197,7 +197,7 @@ struct fscache_object *cachefiles_grab_object(struct fscache_object *_object)
 }
 
 /*
- * update the auxilliary data for an object object on disk
+ * update the auxiliary data for an object object on disk
  */
 static void cachefiles_update_object(struct fscache_object *_object)
 {
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 37368ba2e67c..e159c529fd2b 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -24,7 +24,7 @@
  * context needs to be associated with the osd write during writeback.
  *
  * Similarly, struct ceph_inode_info maintains a set of counters to
- * count dirty pages on the inode. In the absense of snapshots,
+ * count dirty pages on the inode. In the absence of snapshots,
  * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
  *
  * When a snapshot is taken (that is, when the client receives
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 6b61ded701e1..5323c330bbf3 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -765,7 +765,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
 	if (touch) {
 		struct rb_node *q;
 
-		/* touch this + preceeding caps */
+		/* touch this + preceding caps */
 		__touch_cap(cap);
 		for (q = rb_first(&ci->i_caps); q != p;
 		     q = rb_next(q)) {
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index a1ee8fa3a8e7..f60b07b0feb0 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -3215,9 +3215,15 @@ void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
 {
 	struct ceph_mds_client *mdsc = fsc->mdsc;
 
+	dout("mdsc_destroy %p\n", mdsc);
 	ceph_mdsc_stop(mdsc);
+
+	/* flush out any connection work with references to us */
+	ceph_msgr_flush();
+
 	fsc->mdsc = NULL;
 	kfree(mdsc);
+	dout("mdsc_destroy %p done\n", mdsc);
 }
 
 
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 0aee66b92af3..e86ec1155f8f 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -342,7 +342,7 @@ static int build_snap_context(struct ceph_snap_realm *realm)
 	num = 0;
 	snapc->seq = realm->seq;
 	if (parent) {
-		/* include any of parent's snaps occuring _after_ my
+		/* include any of parent's snaps occurring _after_ my
 		   parent became my parent */
 		for (i = 0; i < parent->cached_context->num_snaps; i++)
 			if (parent->cached_context->snaps[i] >=
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index a9e78b4a258c..f2f77fd3c14c 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -353,7 +353,7 @@ static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
 
 	if (opt->name)
 		seq_printf(m, ",name=%s", opt->name);
-	if (opt->secret)
+	if (opt->key)
 		seq_puts(m, ",secret=<hidden>");
 
 	if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
diff --git a/fs/cifs/AUTHORS b/fs/cifs/AUTHORS
index 7f7fa3c302af..ea940b1db77b 100644
--- a/fs/cifs/AUTHORS
+++ b/fs/cifs/AUTHORS
@@ -35,7 +35,7 @@ Adrian Bunk (kcalloc cleanups)
 Miklos Szeredi
 Kazeon team for various fixes especially for 2.4 version.
 Asser Ferno (Change Notify support)
-Shaggy (Dave Kleikamp) for inumerable small fs suggestions and some good cleanup
+Shaggy (Dave Kleikamp) for innumerable small fs suggestions and some good cleanup
 Gunter Kukkukk (testing and suggestions for support of old servers)
 Igor Mammedov (DFS support)
 Jeff Layton (many, many fixes, as well as great work on the cifs Kerberos code)
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 0a265ad9e426..2b68ac57d97d 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -53,7 +53,7 @@ void cifs_dfs_release_automount_timer(void)
  *
  * Extracts sharename form full UNC.
  * i.e. strips from UNC trailing path that is not part of share
- * name and fixup missing '\' in the begining of DFS node refferal
+ * name and fixup missing '\' in the beginning of DFS node refferal
  * if necessary.
  * Returns pointer to share name on success or ERR_PTR on error.
  * Caller is responsible for freeing returned string.
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 904aa47e3515..2644a5d6cc67 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -5247,7 +5247,7 @@ cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset,
 	 * Samba server ignores set of file size to zero due to bugs in some
 	 * older clients, but we should be precise - we use SetFileSize to
 	 * set file size and do not want to truncate file size to zero
-	 * accidently as happened on one Samba server beta by putting
+	 * accidentally as happened on one Samba server beta by putting
 	 * zero instead of -1 here
 	 */
 	data_offset->EndOfFile = cpu_to_le64(NO_CHANGE_64);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 8d6c17ab593d..6e2b2addfc78 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1572,7 +1572,7 @@ match_security(struct TCP_Server_Info *server, struct smb_vol *vol)
 		return false;
 	}
 
-	/* now check if signing mode is acceptible */
+	/* now check if signing mode is acceptable */
 	if ((secFlags & CIFSSEC_MAY_SIGN) == 0 &&
 	    (server->secMode & SECMODE_SIGN_REQUIRED))
 		return false;
@@ -2933,7 +2933,7 @@ mount_fail_check:
 	if (mount_data != mount_data_global)
 		kfree(mount_data);
 	/* If find_unc succeeded then rc == 0 so we can not end */
-	/* up accidently freeing someone elses tcon struct */
+	/* up accidentally freeing someone elses tcon struct */
 	if (tcon)
 		cifs_put_tcon(tcon);
 	else if (pSesInfo)
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index dd5f22918c33..9ea65cf36714 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -189,7 +189,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
 			inode->i_sb, mode, oflags, &oplock, &fileHandle, xid);
 	/* EIO could indicate that (posix open) operation is not
 	   supported, despite what server claimed in capability
-	   negotation. EREMOTE indicates DFS junction, which is not
+	   negotiation. EREMOTE indicates DFS junction, which is not
 	   handled in posix open */
 
 	if (rc == 0) {
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 90ff3cb10de3..3313dd19f543 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -990,7 +990,7 @@ static int configfs_dump(struct configfs_dirent *sd, int level)
  * This describes these functions and their helpers.
  *
  * Allow another kernel system to depend on a config_item. If this
- * happens, the item cannot go away until the dependant can live without
+ * happens, the item cannot go away until the dependent can live without
  * it. The idea is to give client modules as simple an interface as
  * possible. When a system asks them to depend on an item, they just
  * call configfs_depend_item(). If the item is live and the client
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 04b8c449303f..56d6bfcc1e48 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -519,7 +519,7 @@ static void toss_rsb(struct kref *kref)
 	}
 }
 
-/* When all references to the rsb are gone it's transfered to
+/* When all references to the rsb are gone it's transferred to
    the tossed list for later disposal. */
 
 static void put_rsb(struct dlm_rsb *r)
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index bffa1e73b9a9..5e2c71f05e46 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -810,7 +810,7 @@ static int tcp_accept_from_sock(struct connection *con)
 
 	/*
 	 * Add it to the active queue in case we got data
-	 * beween processing the accept adding the socket
+	 * between processing the accept adding the socket
 	 * to the read_sockets list
 	 */
 	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index eda43f362616..14638235f7b2 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -304,7 +304,7 @@ static void set_master_lkbs(struct dlm_rsb *r)
 }
 
 /*
- * Propogate the new master nodeid to locks
+ * Propagate the new master nodeid to locks
  * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
  * The NEW_MASTER2 flag tells recover_lvb() and set_locks_purged() which
  * rsb's to consider.
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index c27c0ecf90bc..fdb2eb0ad09e 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -276,7 +276,7 @@ static void ecryptfs_init_mount_crypt_stat(
 /**
  * ecryptfs_parse_options
  * @sb: The ecryptfs super block
- * @options: The options pased to the kernel
+ * @options: The options passed to the kernel
  *
  * Parse mount options:
  * debug=N - ecryptfs_verbosity level for debug output
@@ -840,7 +840,7 @@ static int __init ecryptfs_init(void)
 	}
 	rc = ecryptfs_init_messaging();
 	if (rc) {
-		printk(KERN_ERR "Failure occured while attempting to "
+		printk(KERN_ERR "Failure occurred while attempting to "
 				"initialize the communications channel to "
 				"ecryptfsd\n");
 		goto out_destroy_kthread;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index ed38801b57a7..f9cfd168fbe2 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -181,7 +181,7 @@ struct eventpoll {
 
 	/*
 	 * This is a single linked list that chains all the "struct epitem" that
-	 * happened while transfering ready events to userspace w/out
+	 * happened while transferring ready events to userspace w/out
 	 * holding ->lock.
 	 */
 	struct epitem *ovflist;
@@ -606,7 +606,7 @@ static void ep_free(struct eventpoll *ep)
 	 * We do not need to hold "ep->mtx" here because the epoll file
 	 * is on the way to be removed and no one has references to it
 	 * anymore. The only hit might come from eventpoll_release_file() but
-	 * holding "epmutex" is sufficent here.
+	 * holding "epmutex" is sufficient here.
 	 */
 	mutex_lock(&epmutex);
 
@@ -720,7 +720,7 @@ void eventpoll_release_file(struct file *file)
720 /* 720 /*
721 * We don't want to get "file->f_lock" because it is not 721 * We don't want to get "file->f_lock" because it is not
722 * necessary. It is not necessary because we're in the "struct file" 722 * necessary. It is not necessary because we're in the "struct file"
723 * cleanup path, and this means that noone is using this file anymore. 723 * cleanup path, and this means that no one is using this file anymore.
724 * So, for example, epoll_ctl() cannot hit here since if we reach this 724 * So, for example, epoll_ctl() cannot hit here since if we reach this
725 * point, the file counter already went to zero and fget() would fail. 725 * point, the file counter already went to zero and fget() would fail.
726 * The only hit might come from ep_free() but by holding the mutex 726 * The only hit might come from ep_free() but by holding the mutex
@@ -1112,7 +1112,7 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
1112 * Trigger mode, we need to insert back inside 1112 * Trigger mode, we need to insert back inside
1113 * the ready list, so that the next call to 1113 * the ready list, so that the next call to
1114 * epoll_wait() will check again the events 1114 * epoll_wait() will check again the events
1115 * availability. At this point, noone can insert 1115 * availability. At this point, no one can insert
1116 * into ep->rdllist besides us. The epoll_ctl() 1116 * into ep->rdllist besides us. The epoll_ctl()
1117 * callers are locked out by 1117 * callers are locked out by
1118 * ep_scan_ready_list() holding "mtx" and the 1118 * ep_scan_ready_list() holding "mtx" and the
diff --git a/fs/exofs/common.h b/fs/exofs/common.h
index 5e74ad3d4009..3bbd46956d77 100644
--- a/fs/exofs/common.h
+++ b/fs/exofs/common.h
@@ -115,7 +115,7 @@ struct exofs_sb_stats {
115 * Describes the raid used in the FS. It is part of the device table. 115 * Describes the raid used in the FS. It is part of the device table.
116 * This here is taken from the pNFS-objects definition. In exofs we 116 * This here is taken from the pNFS-objects definition. In exofs we
117 * use one raid policy throughout the filesystem. (NOTE: the funny 117 * use one raid policy throughout the filesystem. (NOTE: the funny
118 * alignment at begining. We take care of it at exofs_device_table. 118 * alignment at beginning. We take care of it at exofs_device_table.)
119 */ 119 */
120struct exofs_dt_data_map { 120struct exofs_dt_data_map {
121 __le32 cb_num_comps; 121 __le32 cb_num_comps;
@@ -136,7 +136,7 @@ struct exofs_dt_device_info {
136 u8 systemid[OSD_SYSTEMID_LEN]; 136 u8 systemid[OSD_SYSTEMID_LEN];
137 __le64 long_name_offset; /* If !0 then offset-in-file */ 137 __le64 long_name_offset; /* If !0 then offset-in-file */
138 __le32 osdname_len; /* */ 138 __le32 osdname_len; /* */
139 u8 osdname[44]; /* Embbeded, Ususally an asci uuid */ 139 u8 osdname[44]; /* Embedded, Usually an ASCII uuid */
140} __packed; 140} __packed;
141 141
142/* 142/*
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 0d06f4e75699..8f44cef1b3ef 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -850,7 +850,7 @@ static int find_next_reservable_window(
850 rsv_window_remove(sb, my_rsv); 850 rsv_window_remove(sb, my_rsv);
851 851
852 /* 852 /*
853 * Let's book the whole avaliable window for now. We will check the 853 * Let's book the whole available window for now. We will check the
854 * disk bitmap later and then, if there are free blocks then we adjust 854 * disk bitmap later and then, if there are free blocks then we adjust
855 * the window size if it's larger than requested. 855 * the window size if it's larger than requested.
856 * Otherwise, we will remove this node from the tree next time 856 * Otherwise, we will remove this node from the tree next time
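
The hunk above fixes the comment describing ext2's book-then-trim strategy for reservation windows: claim the whole available window first, then shrink it after consulting the on-disk bitmap. A simplified sketch of the shape of that logic (all types and helpers hypothetical, not the ext2 code):

    struct window { unsigned long start, len; };

    static int book_window(struct window *w,
                           unsigned long avail_start, unsigned long avail_len,
                           unsigned long wanted, unsigned long free_blocks)
    {
            w->start = avail_start;
            w->len = avail_len;             /* book everything for now */
            if (!free_blocks)
                    return -1;              /* remove this node next pass */
            if (w->len > wanted)
                    w->len = wanted;        /* trim to the requested size */
            return 0;
    }
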
@@ -1357,9 +1357,9 @@ retry_alloc:
1357 goto allocated; 1357 goto allocated;
1358 } 1358 }
1359 /* 1359 /*
1360 * We may end up a bogus ealier ENOSPC error due to 1360 * We may end up with a bogus earlier ENOSPC error because
1361 * filesystem is "full" of reservations, but 1361 * the filesystem is "full" of reservations, but
1362 * there maybe indeed free blocks avaliable on disk 1362 * there may indeed be free blocks available on disk
1363 * In this case, we just forget about the reservations 1363 * In this case, we just forget about the reservations
1364 * just do block allocation as without reservations. 1364 * just do block allocation as without reservations.
1365 */ 1365 */
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index c47f706878b5..788e09a07f7e 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -305,7 +305,7 @@ static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
305 return ind->bh->b_blocknr; 305 return ind->bh->b_blocknr;
306 306
307 /* 307 /*
308 * It is going to be refered from inode itself? OK, just put it into 308 * It is going to be referred from inode itself? OK, just put it into
309 * the same cylinder group then. 309 * the same cylinder group then.
310 */ 310 */
311 bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group); 311 bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
@@ -913,7 +913,7 @@ static inline int all_zeroes(__le32 *p, __le32 *q)
913 * 913 *
914 * When we do truncate() we may have to clean the ends of several indirect 914 * When we do truncate() we may have to clean the ends of several indirect
915 * blocks but leave the blocks themselves alive. Block is partially 915 * blocks but leave the blocks themselves alive. Block is partially
916 * truncated if some data below the new i_size is refered from it (and 916 * truncated if some data below the new i_size is referred from it (and
917 * it is on the path to the first completely truncated data block, indeed). 917 * it is on the path to the first completely truncated data block, indeed).
918 * We have to free the top of that path along with everything to the right 918 * We have to free the top of that path along with everything to the right
919 * of the path. Since no allocation past the truncation point is possible 919 * of the path. Since no allocation past the truncation point is possible
@@ -990,7 +990,7 @@ no_top:
990 * @p: array of block numbers 990 * @p: array of block numbers
991 * @q: points immediately past the end of array 991 * @q: points immediately past the end of array
992 * 992 *
993 * We are freeing all blocks refered from that array (numbers are 993 * We are freeing all blocks referred from that array (numbers are
994 * stored as little-endian 32-bit) and updating @inode->i_blocks 994 * stored as little-endian 32-bit) and updating @inode->i_blocks
995 * appropriately. 995 * appropriately.
996 */ 996 */
@@ -1030,7 +1030,7 @@ static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
1030 * @q: pointer immediately past the end of array 1030 * @q: pointer immediately past the end of array
1031 * @depth: depth of the branches to free 1031 * @depth: depth of the branches to free
1032 * 1032 *
1033 * We are freeing all blocks refered from these branches (numbers are 1033 * We are freeing all blocks referred from these branches (numbers are
1034 * stored as little-endian 32-bit) and updating @inode->i_blocks 1034 * stored as little-endian 32-bit) and updating @inode->i_blocks
1035 * appropriately. 1035 * appropriately.
1036 */ 1036 */
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 7731695e65d9..0a78dae7e2cb 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1382,7 +1382,7 @@ static struct dentry *ext2_mount(struct file_system_type *fs_type,
1382 1382
1383/* Read data from quotafile - avoid pagecache and such because we cannot afford 1383/* Read data from quotafile - avoid pagecache and such because we cannot afford
1384 * acquiring the locks... As quota files are never truncated and quota code 1384 * acquiring the locks... As quota files are never truncated and quota code
1385 * itself serializes the operations (and noone else should touch the files) 1385 * itself serializes the operations (and no one else should touch the files)
1386 * we don't have to be afraid of races */ 1386 * we don't have to be afraid of races */
1387static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data, 1387static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
1388 size_t len, loff_t off) 1388 size_t len, loff_t off)
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index c2e4dce984d2..529970617a21 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -35,7 +35,7 @@
35 * +------------------+ 35 * +------------------+
36 * 36 *
37 * The block header is followed by multiple entry descriptors. These entry 37 * The block header is followed by multiple entry descriptors. These entry
38 * descriptors are variable in size, and alligned to EXT2_XATTR_PAD 38 * descriptors are variable in size, and aligned to EXT2_XATTR_PAD
39 * byte boundaries. The entry descriptors are sorted by attribute name, 39 * byte boundaries. The entry descriptors are sorted by attribute name,
40 * so that two extended attribute blocks can be compared efficiently. 40 * so that two extended attribute blocks can be compared efficiently.
41 * 41 *
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 153242187fce..fe52297e31ad 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -590,7 +590,7 @@ do_more:
590 BUFFER_TRACE(debug_bh, "Deleted!"); 590 BUFFER_TRACE(debug_bh, "Deleted!");
591 if (!bh2jh(bitmap_bh)->b_committed_data) 591 if (!bh2jh(bitmap_bh)->b_committed_data)
592 BUFFER_TRACE(debug_bh, 592 BUFFER_TRACE(debug_bh,
593 "No commited data in bitmap"); 593 "No committed data in bitmap");
594 BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap"); 594 BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
595 __brelse(debug_bh); 595 __brelse(debug_bh);
596 } 596 }
@@ -1063,7 +1063,7 @@ static int find_next_reservable_window(
1063 rsv_window_remove(sb, my_rsv); 1063 rsv_window_remove(sb, my_rsv);
1064 1064
1065 /* 1065 /*
1066 * Let's book the whole avaliable window for now. We will check the 1066 * Let's book the whole available window for now. We will check the
1067 * disk bitmap later and then, if there are free blocks then we adjust 1067 * disk bitmap later and then, if there are free blocks then we adjust
1068 * the window size if it's larger than requested. 1068 * the window size if it's larger than requested.
1069 * Otherwise, we will remove this node from the tree next time 1069 * Otherwise, we will remove this node from the tree next time
@@ -1456,7 +1456,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
1456 * 1456 *
1457 * ext3_should_retry_alloc() is called when ENOSPC is returned, and if 1457 * ext3_should_retry_alloc() is called when ENOSPC is returned, and if
1458 * it is profitable to retry the operation, this function will wait 1458 * it is profitable to retry the operation, this function will wait
1459 * for the current or commiting transaction to complete, and then 1459 * for the current or committing transaction to complete, and then
1460 * return TRUE. 1460 * return TRUE.
1461 * 1461 *
1462 * if the total number of retries exceed three times, return FALSE. 1462 * if the total number of retries exceed three times, return FALSE.
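
The comment fixed above states the retry contract precisely: on ENOSPC, wait for the current or committing transaction (whose commit may release reserved blocks) and retry, giving up after three attempts. A user-space model of that loop, with the transaction wait stubbed out (names hypothetical):

    #include <errno.h>

    /* Stub: ext3 would wait here for the committing transaction. */
    static void wait_for_commit(void) { }

    static int should_retry_alloc(int *retries)
    {
            if (++(*retries) > 3)
                    return 0;               /* give up after three tries */
            wait_for_commit();              /* commit may free blocks */
            return 1;
    }

    static int alloc_with_retry(int (*try_alloc)(void))
    {
            int err, retries = 0;

            do {
                    err = try_alloc();
            } while (err == -ENOSPC && should_retry_alloc(&retries));
            return err;
    }
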
@@ -1632,9 +1632,9 @@ retry_alloc:
1632 goto allocated; 1632 goto allocated;
1633 } 1633 }
1634 /* 1634 /*
1635 * We may end up a bogus ealier ENOSPC error due to 1635 * We may end up with a bogus earlier ENOSPC error because
1636 * filesystem is "full" of reservations, but 1636 * the filesystem is "full" of reservations, but
1637 * there maybe indeed free blocks avaliable on disk 1637 * there may indeed be free blocks available on disk
1638 * In this case, we just forget about the reservations 1638 * In this case, we just forget about the reservations
1639 * just do block allocation as without reservations. 1639 * just do block allocation as without reservations.
1640 */ 1640 */
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index fe2541d250e4..68b2e43d7c35 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -2055,7 +2055,7 @@ static inline int all_zeroes(__le32 *p, __le32 *q)
2055 * 2055 *
2056 * When we do truncate() we may have to clean the ends of several 2056 * When we do truncate() we may have to clean the ends of several
2057 * indirect blocks but leave the blocks themselves alive. Block is 2057 * indirect blocks but leave the blocks themselves alive. Block is
2058 * partially truncated if some data below the new i_size is refered 2058 * partially truncated if some data below the new i_size is referred
2059 * from it (and it is on the path to the first completely truncated 2059 * from it (and it is on the path to the first completely truncated
2060 * data block, indeed). We have to free the top of that path along 2060 * data block, indeed). We have to free the top of that path along
2061 * with everything to the right of the path. Since no allocation 2061 * with everything to the right of the path. Since no allocation
@@ -2184,7 +2184,7 @@ static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
2184 * @first: array of block numbers 2184 * @first: array of block numbers
2185 * @last: points immediately past the end of array 2185 * @last: points immediately past the end of array
2186 * 2186 *
2187 * We are freeing all blocks refered from that array (numbers are stored as 2187 * We are freeing all blocks referred from that array (numbers are stored as
2188 * little-endian 32-bit) and updating @inode->i_blocks appropriately. 2188 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
2189 * 2189 *
2190 * We accumulate contiguous runs of blocks to free. Conveniently, if these 2190 * We accumulate contiguous runs of blocks to free. Conveniently, if these
@@ -2272,7 +2272,7 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
2272 * @last: pointer immediately past the end of array 2272 * @last: pointer immediately past the end of array
2273 * @depth: depth of the branches to free 2273 * @depth: depth of the branches to free
2274 * 2274 *
2275 * We are freeing all blocks refered from these branches (numbers are 2275 * We are freeing all blocks referred from these branches (numbers are
2276 * stored as little-endian 32-bit) and updating @inode->i_blocks 2276 * stored as little-endian 32-bit) and updating @inode->i_blocks
2277 * appropriately. 2277 * appropriately.
2278 */ 2278 */
@@ -3291,7 +3291,7 @@ static int ext3_writepage_trans_blocks(struct inode *inode)
3291 if (ext3_should_journal_data(inode)) 3291 if (ext3_should_journal_data(inode))
3292 ret = 3 * (bpp + indirects) + 2; 3292 ret = 3 * (bpp + indirects) + 2;
3293 else 3293 else
3294 ret = 2 * (bpp + indirects) + 2; 3294 ret = 2 * (bpp + indirects) + indirects + 2;
3295 3295
3296#ifdef CONFIG_QUOTA 3296#ifdef CONFIG_QUOTA
3297 /* We know that structure was already allocated during dquot_initialize so 3297 /* We know that structure was already allocated during dquot_initialize so
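
For concreteness, plugging numbers into the formulas in the hunk above: with 4 KiB pages and 1 KiB blocks, bpp (blocks per page) is 4; assuming indirects = 3, data-journalling reserves 3*(4+3)+2 = 23 credits, while the patched ordered/writeback path reserves 2*(4+3)+3+2 = 19 instead of the old 16, the extra `indirects` term counting the indirect blocks once more. A tiny illustrative mirror of the computation:

    #include <stdio.h>

    /* Mirrors the two formulas in the hunk above (illustrative only). */
    static int writepage_credits(int bpp, int indirects, int journal_data)
    {
            if (journal_data)
                    return 3 * (bpp + indirects) + 2;
            return 2 * (bpp + indirects) + indirects + 2;
    }

    int main(void)
    {
            /* 4 KiB page, 1 KiB blocks -> bpp = 4; indirects assumed 3 */
            printf("%d\n", writepage_credits(4, 3, 0));  /* 19 (was 16) */
            printf("%d\n", writepage_credits(4, 3, 1));  /* 23 */
            return 0;
    }
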
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 108b142e11ed..7916e4ce166a 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -1009,7 +1009,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
1009 1009
1010 if (test_opt(sb, DEBUG)) 1010 if (test_opt(sb, DEBUG))
1011 printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK 1011 printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK
1012 " upto "E3FSBLK" blocks\n", 1012 " up to "E3FSBLK" blocks\n",
1013 o_blocks_count, n_blocks_count); 1013 o_blocks_count, n_blocks_count);
1014 1014
1015 if (n_blocks_count == 0 || n_blocks_count == o_blocks_count) 1015 if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 071689f86e18..3c6a9e0eadc1 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -2925,7 +2925,7 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id,
2925 2925
2926/* Read data from quotafile - avoid pagecache and such because we cannot afford 2926/* Read data from quotafile - avoid pagecache and such because we cannot afford
2927 * acquiring the locks... As quota files are never truncated and quota code 2927 * acquiring the locks... As quota files are never truncated and quota code
2928 * itself serializes the operations (and noone else should touch the files) 2928 * itself serializes the operations (and no one else should touch the files)
2929 * we don't have to be afraid of races */ 2929 * we don't have to be afraid of races */
2930static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data, 2930static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
2931 size_t len, loff_t off) 2931 size_t len, loff_t off)
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 97b970e7dd13..1c67139ad4b4 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -547,7 +547,7 @@ int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
547 * 547 *
548 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if 548 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
549 * it is profitable to retry the operation, this function will wait 549 * it is profitable to retry the operation, this function will wait
550 * for the current or commiting transaction to complete, and then 550 * for the current or committing transaction to complete, and then
551 * return TRUE. 551 * return TRUE.
552 * 552 *
553 * if the total number of retries exceed three times, return FALSE. 553 * if the total number of retries exceed three times, return FALSE.
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index e25e99bf7ee1..d0f53538a57f 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -86,8 +86,8 @@
86 86
87#ifdef CONFIG_QUOTA 87#ifdef CONFIG_QUOTA
88/* Amount of blocks needed for quota update - we know that the structure was 88/* Amount of blocks needed for quota update - we know that the structure was
89 * allocated so we need to update only inode+data */ 89 * allocated so we need to update only the data block */
90#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0) 90#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 1 : 0)
91/* Amount of blocks needed for quota insert/delete - we do some block writes 91/* Amount of blocks needed for quota insert/delete - we do some block writes
92 * but inode, sb and group updates are done only once */ 92 * but inode, sb and group updates are done only once */
93#define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\ 93#define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index dd2cb5076ff9..4890d6f3ad15 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1729,7 +1729,7 @@ repeat:
1729 BUG_ON(npath->p_depth != path->p_depth); 1729 BUG_ON(npath->p_depth != path->p_depth);
1730 eh = npath[depth].p_hdr; 1730 eh = npath[depth].p_hdr;
1731 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { 1731 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
1732 ext_debug("next leaf isnt full(%d)\n", 1732 ext_debug("next leaf isn't full(%d)\n",
1733 le16_to_cpu(eh->eh_entries)); 1733 le16_to_cpu(eh->eh_entries));
1734 path = npath; 1734 path = npath;
1735 goto repeat; 1735 goto repeat;
@@ -2533,7 +2533,7 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2533/* 2533/*
2534 * This function is called by ext4_ext_map_blocks() if someone tries to write 2534 * This function is called by ext4_ext_map_blocks() if someone tries to write
2535 * to an uninitialized extent. It may result in splitting the uninitialized 2535 * to an uninitialized extent. It may result in splitting the uninitialized
2536 * extent into multiple extents (upto three - one initialized and two 2536 * extent into multiple extents (up to three - one initialized and two
2537 * uninitialized). 2537 * uninitialized).
2538 * There are three possibilities: 2538 * There are three possibilities:
2539 * a> There is no split required: Entire extent should be initialized 2539 * a> There is no split required: Entire extent should be initialized
@@ -3174,7 +3174,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3174 path, flags); 3174 path, flags);
3175 /* 3175 /*
3176 * Flag the inode(non aio case) or end_io struct (aio case) 3176 * Flag the inode(non aio case) or end_io struct (aio case)
3177 * that this IO needs to convertion to written when IO is 3177 * that this IO needs conversion to written when IO is
3178 * completed 3178 * completed
3179 */ 3179 */
3180 if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) { 3180 if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
@@ -3460,10 +3460,10 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3460 ext4_ext_mark_uninitialized(&newex); 3460 ext4_ext_mark_uninitialized(&newex);
3461 /* 3461 /*
3462 * io_end structure was created for every IO write to an 3462 * io_end structure was created for every IO write to an
3463 * uninitialized extent. To avoid unecessary conversion, 3463 * uninitialized extent. To avoid unnecessary conversion,
3464 * here we flag the IO that really needs the conversion. 3464 * here we flag the IO that really needs the conversion.
3465 * For the non async direct IO case, flag the inode state 3465 * For the non async direct IO case, flag the inode state
3466 * that we need to perform convertion when IO is done. 3466 * that we need to perform conversion when IO is done.
3467 */ 3467 */
3468 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { 3468 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3469 if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) { 3469 if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 7f74019d6d77..e9473cbe80df 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -101,7 +101,7 @@ extern int ext4_flush_completed_IO(struct inode *inode)
101 * to the work-to-be schedule is freed. 101 * to the work-to-be schedule is freed.
102 * 102 *
103 * Thus we need to keep the io structure still valid here after 103 * Thus we need to keep the io structure still valid here after
104 * convertion finished. The io structure has a flag to 104 * conversion finished. The io structure has a flag to
105 * avoid double converting from both fsync and background work 105 * avoid double converting from both fsync and background work
106 * queue work. 106 * queue work.
107 */ 107 */
@@ -125,9 +125,11 @@ extern int ext4_flush_completed_IO(struct inode *inode)
125 * the parent directory's parent as well, and so on recursively, if 125 * the parent directory's parent as well, and so on recursively, if
126 * they are also freshly created. 126 * they are also freshly created.
127 */ 127 */
128static void ext4_sync_parent(struct inode *inode) 128static int ext4_sync_parent(struct inode *inode)
129{ 129{
130 struct writeback_control wbc;
130 struct dentry *dentry = NULL; 131 struct dentry *dentry = NULL;
132 int ret = 0;
131 133
132 while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) { 134 while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
133 ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY); 135 ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
@@ -136,8 +138,17 @@ static void ext4_sync_parent(struct inode *inode)
136 if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode) 138 if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode)
137 break; 139 break;
138 inode = dentry->d_parent->d_inode; 140 inode = dentry->d_parent->d_inode;
139 sync_mapping_buffers(inode->i_mapping); 141 ret = sync_mapping_buffers(inode->i_mapping);
142 if (ret)
143 break;
144 memset(&wbc, 0, sizeof(wbc));
145 wbc.sync_mode = WB_SYNC_ALL;
146 wbc.nr_to_write = 0; /* only write out the inode */
147 ret = sync_inode(inode, &wbc);
148 if (ret)
149 break;
140 } 150 }
151 return ret;
141} 152}
142 153
143/* 154/*
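
The rewritten ext4_sync_parent() above now propagates errors and, after flushing each directory's buffers, writes the inode itself via sync_inode() with WB_SYNC_ALL and nr_to_write = 0 (metadata only, no data pages). A stripped-down model of the parent walk (user-space, hypothetical types; the two sync calls are collapsed into one stub):

    struct node {
            struct node *parent;
            int newentry;               /* models EXT4_STATE_NEWENTRY */
    };

    /* Stub standing in for sync_mapping_buffers() + sync_inode(). */
    static int sync_one(struct node *n) { (void)n; return 0; }

    static int sync_parents(struct node *n)
    {
            int ret = 0;

            while (n && n->newentry) {
                    n->newentry = 0;    /* clear the flag first */
                    n = n->parent;
                    if (!n)
                            break;
                    ret = sync_one(n);  /* stop on the first error */
                    if (ret)
                            break;
            }
            return ret;
    }
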
@@ -176,7 +187,7 @@ int ext4_sync_file(struct file *file, int datasync)
176 if (!journal) { 187 if (!journal) {
177 ret = generic_file_fsync(file, datasync); 188 ret = generic_file_fsync(file, datasync);
178 if (!ret && !list_empty(&inode->i_dentry)) 189 if (!ret && !list_empty(&inode->i_dentry))
179 ext4_sync_parent(inode); 190 ret = ext4_sync_parent(inode);
180 goto out; 191 goto out;
181 } 192 }
182 193
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 1a86282b9024..f2fa5e8a582c 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2502,6 +2502,7 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
2502 * for partial write. 2502 * for partial write.
2503 */ 2503 */
2504 set_buffer_new(bh); 2504 set_buffer_new(bh);
2505 set_buffer_mapped(bh);
2505 } 2506 }
2506 return 0; 2507 return 0;
2507} 2508}
@@ -2588,7 +2589,7 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
2588 * because we should have holes filled from ext4_page_mkwrite(). We even don't 2589 * because we should have holes filled from ext4_page_mkwrite(). We even don't
2589 * need to file the inode to the transaction's list in ordered mode because if 2590 * need to file the inode to the transaction's list in ordered mode because if
2590 * we are writing back data added by write(), the inode is already there and if 2591 * we are writing back data added by write(), the inode is already there and if
2591 * we are writing back data modified via mmap(), noone guarantees in which 2592 * we are writing back data modified via mmap(), no one guarantees in which
2592 * transaction the data will hit the disk. In case we are journaling data, we 2593 * transaction the data will hit the disk. In case we are journaling data, we
2593 * cannot start transaction directly because transaction start ranks above page 2594 * cannot start transaction directly because transaction start ranks above page
2594 * lock so we have to do some magic. 2595 * lock so we have to do some magic.
@@ -2690,7 +2691,7 @@ static int ext4_writepage(struct page *page,
2690 2691
2691/* 2692/*
2692 * This is called via ext4_da_writepages() to 2693 * This is called via ext4_da_writepages() to
2693 * calulate the total number of credits to reserve to fit 2694 * calculate the total number of credits to reserve to fit
2694 * a single extent allocation into a single transaction, 2695 * a single extent allocation into a single transaction,
2695 * ext4_da_writepages() will loop calling this before 2696 * ext4_da_writepages() will loop calling this before
2696 * the block allocation. 2697 * the block allocation.
@@ -3304,7 +3305,7 @@ int ext4_alloc_da_blocks(struct inode *inode)
3304 * the pages by calling redirty_page_for_writepage() but that 3305 * the pages by calling redirty_page_for_writepage() but that
3305 * would be ugly in the extreme. So instead we would need to 3306 * would be ugly in the extreme. So instead we would need to
3306 * replicate parts of the code in the above functions, 3307 * replicate parts of the code in the above functions,
3307 * simplifying them becuase we wouldn't actually intend to 3308 * simplifying them because we wouldn't actually intend to
3308 * write out the pages, but rather only collect contiguous 3309 * write out the pages, but rather only collect contiguous
3309 * logical block extents, call the multi-block allocator, and 3310 * logical block extents, call the multi-block allocator, and
3310 * then update the buffer heads with the block allocations. 3311 * then update the buffer heads with the block allocations.
@@ -3694,7 +3695,7 @@ retry:
3694 * 3695 *
3695 * The unwritten extents will be converted to written when DIO is completed. 3696 * The unwritten extents will be converted to written when DIO is completed.
3696 * For async direct IO, since the IO may still be pending on return, we 3697 * For async direct IO, since the IO may still be pending on return, we
3697 * set up an end_io call back function, which will do the convertion 3698 * set up an end_io call back function, which will do the conversion
3698 * when async direct IO completed. 3699 * when async direct IO completed.
3699 * 3700 *
3700 * If the O_DIRECT write will extend the file then add this inode to the 3701 * If the O_DIRECT write will extend the file then add this inode to the
@@ -3717,7 +3718,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3717 * We could direct write to holes and fallocate. 3718 * We could direct write to holes and fallocate.
3718 * 3719 *
3719 * Allocated blocks to fill the hole are marked as uninitialized 3720 * Allocated blocks to fill the hole are marked as uninitialized
3720 * to prevent paralel buffered read to expose the stale data 3721 * to prevent a parallel buffered read from exposing the stale data
3721 * before DIO completes the data IO. 3722 * before DIO completes the data IO.
3722 * 3723 *
3723 * As to previously fallocated extents, ext4 get_block 3724 * As to previously fallocated extents, ext4 get_block
@@ -3778,7 +3779,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3778 int err; 3779 int err;
3779 /* 3780 /*
3780 * for non AIO case, since the IO is already 3781 * for non AIO case, since the IO is already
3781 * completed, we could do the convertion right here 3782 * completed, we could do the conversion right here
3782 */ 3783 */
3783 err = ext4_convert_unwritten_extents(inode, 3784 err = ext4_convert_unwritten_extents(inode,
3784 offset, ret); 3785 offset, ret);
@@ -4025,7 +4026,7 @@ static inline int all_zeroes(__le32 *p, __le32 *q)
4025 * 4026 *
4026 * When we do truncate() we may have to clean the ends of several 4027 * When we do truncate() we may have to clean the ends of several
4027 * indirect blocks but leave the blocks themselves alive. Block is 4028 * indirect blocks but leave the blocks themselves alive. Block is
4028 * partially truncated if some data below the new i_size is refered 4029 * partially truncated if some data below the new i_size is referred
4029 * from it (and it is on the path to the first completely truncated 4030 * from it (and it is on the path to the first completely truncated
4030 * data block, indeed). We have to free the top of that path along 4031 * data block, indeed). We have to free the top of that path along
4031 * with everything to the right of the path. Since no allocation 4032 * with everything to the right of the path. Since no allocation
@@ -4169,7 +4170,7 @@ out_err:
4169 * @first: array of block numbers 4170 * @first: array of block numbers
4170 * @last: points immediately past the end of array 4171 * @last: points immediately past the end of array
4171 * 4172 *
4172 * We are freeing all blocks refered from that array (numbers are stored as 4173 * We are freeing all blocks referred from that array (numbers are stored as
4173 * little-endian 32-bit) and updating @inode->i_blocks appropriately. 4174 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
4174 * 4175 *
4175 * We accumulate contiguous runs of blocks to free. Conveniently, if these 4176 * We accumulate contiguous runs of blocks to free. Conveniently, if these
@@ -4261,7 +4262,7 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
4261 * @last: pointer immediately past the end of array 4262 * @last: pointer immediately past the end of array
4262 * @depth: depth of the branches to free 4263 * @depth: depth of the branches to free
4263 * 4264 *
4264 * We are freeing all blocks refered from these branches (numbers are 4265 * We are freeing all blocks referred from these branches (numbers are
4265 * stored as little-endian 32-bit) and updating @inode->i_blocks 4266 * stored as little-endian 32-bit) and updating @inode->i_blocks
4266 * appropriately. 4267 * appropriately.
4267 */ 4268 */
@@ -4429,8 +4430,8 @@ void ext4_truncate(struct inode *inode)
4429 Indirect chain[4]; 4430 Indirect chain[4];
4430 Indirect *partial; 4431 Indirect *partial;
4431 __le32 nr = 0; 4432 __le32 nr = 0;
4432 int n; 4433 int n = 0;
4433 ext4_lblk_t last_block; 4434 ext4_lblk_t last_block, max_block;
4434 unsigned blocksize = inode->i_sb->s_blocksize; 4435 unsigned blocksize = inode->i_sb->s_blocksize;
4435 4436
4436 trace_ext4_truncate_enter(inode); 4437 trace_ext4_truncate_enter(inode);
@@ -4455,14 +4456,18 @@ void ext4_truncate(struct inode *inode)
4455 4456
4456 last_block = (inode->i_size + blocksize-1) 4457 last_block = (inode->i_size + blocksize-1)
4457 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); 4458 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
4459 max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
4460 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
4458 4461
4459 if (inode->i_size & (blocksize - 1)) 4462 if (inode->i_size & (blocksize - 1))
4460 if (ext4_block_truncate_page(handle, mapping, inode->i_size)) 4463 if (ext4_block_truncate_page(handle, mapping, inode->i_size))
4461 goto out_stop; 4464 goto out_stop;
4462 4465
4463 n = ext4_block_to_path(inode, last_block, offsets, NULL); 4466 if (last_block != max_block) {
4464 if (n == 0) 4467 n = ext4_block_to_path(inode, last_block, offsets, NULL);
4465 goto out_stop; /* error */ 4468 if (n == 0)
4469 goto out_stop; /* error */
4470 }
4466 4471
4467 /* 4472 /*
4468 * OK. This truncate is going to happen. We add the inode to the 4473 * OK. This truncate is going to happen. We add the inode to the
@@ -4493,7 +4498,13 @@ void ext4_truncate(struct inode *inode)
4493 */ 4498 */
4494 ei->i_disksize = inode->i_size; 4499 ei->i_disksize = inode->i_size;
4495 4500
4496 if (n == 1) { /* direct blocks */ 4501 if (last_block == max_block) {
4502 /*
4503 * It is unnecessary to free any data blocks if last_block is
4504 * equal to the indirect block limit.
4505 */
4506 goto out_unlock;
4507 } else if (n == 1) { /* direct blocks */
4497 ext4_free_data(handle, inode, NULL, i_data+offsets[0], 4508 ext4_free_data(handle, inode, NULL, i_data+offsets[0],
4498 i_data + EXT4_NDIR_BLOCKS); 4509 i_data + EXT4_NDIR_BLOCKS);
4499 goto do_indirects; 4510 goto do_indirects;
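
To see when the new guard fires: last_block and max_block are both byte counts rounded up to whole blocks, so they compare equal exactly when i_size already sits at the indirect-format limit s_bitmap_maxbytes, in which case no data blocks past the limit can exist and the freeing passes are skipped. Illustrative arithmetic (the maxbytes value is hypothetical):

    #include <stdio.h>

    typedef unsigned long long u64;

    /* Round a byte count up to blocks (block_bits = log2(blocksize)). */
    static u64 to_blocks(u64 bytes, unsigned block_bits)
    {
            u64 bs = 1ULL << block_bits;

            return (bytes + bs - 1) >> block_bits;
    }

    int main(void)
    {
            unsigned bits = 12;             /* 4 KiB blocks */
            u64 maxbytes = 1ULL << 42;      /* hypothetical limit */
            u64 i_size = maxbytes;          /* file at the limit */

            /* last_block == max_block -> skip the freeing passes */
            printf("%d\n", to_blocks(i_size, bits) == to_blocks(maxbytes, bits));
            return 0;
    }
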
@@ -4553,6 +4564,7 @@ do_indirects:
4553 ; 4564 ;
4554 } 4565 }
4555 4566
4567out_unlock:
4556 up_write(&ei->i_data_sem); 4568 up_write(&ei->i_data_sem);
4557 inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 4569 inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4558 ext4_mark_inode_dirty(handle, inode); 4570 ext4_mark_inode_dirty(handle, inode);
@@ -5398,13 +5410,12 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
5398 /* if nrblocks are contiguous */ 5410 /* if nrblocks are contiguous */
5399 if (chunk) { 5411 if (chunk) {
5400 /* 5412 /*
5401 * With N contiguous data blocks, it need at most 5413 * With N contiguous data blocks, we need at most
5402 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks 5414 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
5403 * 2 dindirect blocks 5415 * 2 dindirect blocks, and 1 tindirect block
5404 * 1 tindirect block
5405 */ 5416 */
5406 indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb); 5417 return DIV_ROUND_UP(nrblocks,
5407 return indirects + 3; 5418 EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
5408 } 5419 }
5409 /* 5420 /*
5410 * if nrblocks are not contiguous, worst case, each block touches 5421 * if nrblocks are not contiguous, worst case, each block touches
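
Worked numbers for the contiguous branch above: with 4 KiB blocks an indirect block holds 1024 addresses, so nrblocks = 1500 yields DIV_ROUND_UP(1500, 1024) + 4 = 6 metadata credits, where the old integer division computed 1500/1024 + 3 = 4 and could under-reserve. Mirrored in one helper:

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    /* Credits for N contiguous blocks, as in the hunk above. */
    static int indirect_credits(int nrblocks, int addr_per_block)
    {
            return DIV_ROUND_UP(nrblocks, addr_per_block) + 4;
    }
    /* indirect_credits(1500, 1024) == 6; the old code returned 4. */
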
@@ -5478,7 +5489,7 @@ static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
5478} 5489}
5479 5490
5480/* 5491/*
5481 * Calulate the total number of credits to reserve to fit 5492 * Calculate the total number of credits to reserve to fit
5482 * the modification of a single page into a single transaction, 5493 * the modification of a single page into a single transaction,
5483 * which may include multiple chunks of block allocations. 5494 * which may include multiple chunks of block allocations.
5484 * 5495 *
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index a5837a837a8b..d8a16eecf1d5 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -92,7 +92,7 @@
92 * between CPUs. It is possible to get scheduled at this point. 92 * between CPUs. It is possible to get scheduled at this point.
93 * 93 *
94 * The locality group prealloc space is used looking at whether we have 94 * The locality group prealloc space is used looking at whether we have
95 * enough free space (pa_free) withing the prealloc space. 95 * enough free space (pa_free) within the prealloc space.
96 * 96 *
97 * If we can't allocate blocks via inode prealloc or/and locality group 97 * If we can't allocate blocks via inode prealloc or/and locality group
98 * prealloc then we look at the buddy cache. The buddy cache is represented 98 * prealloc then we look at the buddy cache. The buddy cache is represented
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index d1bafa57f483..92816b4e0f16 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -517,7 +517,7 @@ int ext4_ext_migrate(struct inode *inode)
517 * start with one credit accounted for 517 * start with one credit accounted for
518 * superblock modification. 518 * superblock modification.
519 * 519 *
520 * For the tmp_inode we already have commited the 520 * For the tmp_inode we already have committed the
521 * transaction that created the inode. Later as and 521 * transaction that created the inode. Later as and
522 * when we add extents we extend the journal 522 * when we add extents we extend the journal
523 */ 523 */
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 22546ad7f0ae..8553dfb310af 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -242,27 +242,44 @@ static void ext4_put_nojournal(handle_t *handle)
242 * journal_end calls result in the superblock being marked dirty, so 242 * journal_end calls result in the superblock being marked dirty, so
243 * that sync() will call the filesystem's write_super callback if 243 * that sync() will call the filesystem's write_super callback if
244 * appropriate. 244 * appropriate.
245 *
246 * To avoid j_barrier hold in userspace when a user calls freeze(),
247 * ext4 prevents a new handle from being started by s_frozen, which
248 * is in an upper layer.
245 */ 249 */
246handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks) 250handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
247{ 251{
248 journal_t *journal; 252 journal_t *journal;
253 handle_t *handle;
249 254
250 if (sb->s_flags & MS_RDONLY) 255 if (sb->s_flags & MS_RDONLY)
251 return ERR_PTR(-EROFS); 256 return ERR_PTR(-EROFS);
252 257
253 vfs_check_frozen(sb, SB_FREEZE_TRANS);
254 /* Special case here: if the journal has aborted behind our
255 * backs (eg. EIO in the commit thread), then we still need to
256 * take the FS itself readonly cleanly. */
257 journal = EXT4_SB(sb)->s_journal; 258 journal = EXT4_SB(sb)->s_journal;
258 if (journal) { 259 handle = ext4_journal_current_handle();
259 if (is_journal_aborted(journal)) { 260
260 ext4_abort(sb, "Detected aborted journal"); 261 /*
261 return ERR_PTR(-EROFS); 262 * If a handle has been started, it should be allowed to
262 } 263 * finish, otherwise deadlock could happen between freeze
263 return jbd2_journal_start(journal, nblocks); 264 * and others (e.g. truncate) due to the restart of the
265 * journal handle if the filesystem is frozen and active
266 * handles are not stopped.
267 */
268 if (!handle)
269 vfs_check_frozen(sb, SB_FREEZE_TRANS);
270
271 if (!journal)
272 return ext4_get_nojournal();
273 /*
274 * Special case here: if the journal has aborted behind our
275 * backs (eg. EIO in the commit thread), then we still need to
276 * take the FS itself readonly cleanly.
277 */
278 if (is_journal_aborted(journal)) {
279 ext4_abort(sb, "Detected aborted journal");
280 return ERR_PTR(-EROFS);
264 } 281 }
265 return ext4_get_nojournal(); 282 return jbd2_journal_start(journal, nblocks);
266} 283}
267 284
268/* 285/*
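
The reordering above makes the freeze check conditional: only a brand-new handle may block on a frozen filesystem, while an already-running (nested or restarted) handle must be allowed to proceed, or freeze, which waits for running handles to drain, would deadlock against it. A minimal model of the rule (user-space, hypothetical names):

    struct handle { int nblocks; };

    static _Thread_local struct handle *current_handle;
    static volatile int fs_frozen;

    /* Stub: block until the filesystem is thawed. */
    static void wait_unfrozen(void) { while (fs_frozen) ; }

    static struct handle *start_handle(struct handle *h, int nblocks)
    {
            /*
             * Only a brand-new handle may wait on the frozen state; a
             * nested or restarted handle must proceed, because freeze
             * waits for running handles and would otherwise deadlock.
             */
            if (!current_handle)
                    wait_unfrozen();
            h->nblocks = nblocks;
            current_handle = h;
            return h;
    }
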
@@ -617,7 +634,7 @@ __acquires(bitlock)
617 * filesystem will have already been marked read/only and the 634 * filesystem will have already been marked read/only and the
618 * journal has been aborted. We return 1 as a hint to callers 635 * journal has been aborted. We return 1 as a hint to callers
619 * who might want to use the return value from 636 * who might want to use the return value from
620 * ext4_grp_locked_error() to distinguish beween the 637 * ext4_grp_locked_error() to distinguish between the
621 * ERRORS_CONT and ERRORS_RO case, and perhaps return more 638 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
622 * aggressively from the ext4 function in question, with a 639 * aggressively from the ext4 function in question, with a
623 * more appropriate error code. 640 * more appropriate error code.
@@ -2975,6 +2992,12 @@ static int ext4_register_li_request(struct super_block *sb,
2975 mutex_unlock(&ext4_li_info->li_list_mtx); 2992 mutex_unlock(&ext4_li_info->li_list_mtx);
2976 2993
2977 sbi->s_li_request = elr; 2994 sbi->s_li_request = elr;
2995 /*
2996 * set elr to NULL here since it has been inserted into
2997 * the request_list; its removal and freeing are handled
2998 * by ext4_clear_request_list from now on.
2999 */
3000 elr = NULL;
2978 3001
2979 if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) { 3002 if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
2980 ret = ext4_run_lazyinit_thread(); 3003 ret = ext4_run_lazyinit_thread();
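
The `elr = NULL` added above is the usual ownership-transfer idiom: once the object sits on a list whose teardown path frees it, the local pointer is cleared so the function's shared cleanup cannot free it a second time. The pattern in isolation (hypothetical names):

    #include <stdlib.h>

    struct req { struct req *next; };
    static struct req *request_list;

    static int start_thread(void) { return 0; }   /* stub that may fail */

    static int register_request(void)
    {
            int ret;
            struct req *r = calloc(1, sizeof(*r));

            if (!r)
                    return -1;
            r->next = request_list;
            request_list = r;
            r = NULL;                   /* the list owns it from here on */

            ret = start_thread();       /* a failure after the handoff... */
            if (ret)
                    goto out;
            ret = 0;
    out:
            free(r);                    /* ...must not free it: r is NULL */
            return ret;
    }
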
@@ -3385,6 +3408,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3385 get_random_bytes(&sbi->s_next_generation, sizeof(u32)); 3408 get_random_bytes(&sbi->s_next_generation, sizeof(u32));
3386 spin_lock_init(&sbi->s_next_gen_lock); 3409 spin_lock_init(&sbi->s_next_gen_lock);
3387 3410
3411 init_timer(&sbi->s_err_report);
3412 sbi->s_err_report.function = print_daily_error_info;
3413 sbi->s_err_report.data = (unsigned long) sb;
3414
3388 err = percpu_counter_init(&sbi->s_freeblocks_counter, 3415 err = percpu_counter_init(&sbi->s_freeblocks_counter,
3389 ext4_count_free_blocks(sb)); 3416 ext4_count_free_blocks(sb));
3390 if (!err) { 3417 if (!err) {
@@ -3646,9 +3673,6 @@ no_journal:
3646 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts, 3673 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
3647 *sbi->s_es->s_mount_opts ? "; " : "", orig_data); 3674 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
3648 3675
3649 init_timer(&sbi->s_err_report);
3650 sbi->s_err_report.function = print_daily_error_info;
3651 sbi->s_err_report.data = (unsigned long) sb;
3652 if (es->s_error_count) 3676 if (es->s_error_count)
3653 mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */ 3677 mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
3654 3678
@@ -3672,6 +3696,7 @@ failed_mount_wq:
3672 sbi->s_journal = NULL; 3696 sbi->s_journal = NULL;
3673 } 3697 }
3674failed_mount3: 3698failed_mount3:
3699 del_timer(&sbi->s_err_report);
3675 if (sbi->s_flex_groups) { 3700 if (sbi->s_flex_groups) {
3676 if (is_vmalloc_addr(sbi->s_flex_groups)) 3701 if (is_vmalloc_addr(sbi->s_flex_groups))
3677 vfree(sbi->s_flex_groups); 3702 vfree(sbi->s_flex_groups);
@@ -4138,6 +4163,11 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
4138/* 4163/*
4139 * LVM calls this function before a (read-only) snapshot is created. This 4164 * LVM calls this function before a (read-only) snapshot is created. This
4140 * gives us a chance to flush the journal completely and mark the fs clean. 4165 * gives us a chance to flush the journal completely and mark the fs clean.
4166 *
4167 * Note that this function alone cannot bring the filesystem into a
4168 * clean state, because ext4 prevents a new handle from being started
4169 * via @sb->s_frozen, which lives in an upper layer. It thus needs
4170 * help from that upper layer.
4141 */ 4171 */
4142static int ext4_freeze(struct super_block *sb) 4172static int ext4_freeze(struct super_block *sb)
4143{ 4173{
@@ -4614,17 +4644,30 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
4614 4644
4615static int ext4_quota_off(struct super_block *sb, int type) 4645static int ext4_quota_off(struct super_block *sb, int type)
4616{ 4646{
4647 struct inode *inode = sb_dqopt(sb)->files[type];
4648 handle_t *handle;
4649
4617 /* Force all delayed allocation blocks to be allocated. 4650 /* Force all delayed allocation blocks to be allocated.
4618 * Caller already holds s_umount sem */ 4651 * Caller already holds s_umount sem */
4619 if (test_opt(sb, DELALLOC)) 4652 if (test_opt(sb, DELALLOC))
4620 sync_filesystem(sb); 4653 sync_filesystem(sb);
4621 4654
4655 /* Update modification times of quota files when userspace can
4656 * start looking at them */
4657 handle = ext4_journal_start(inode, 1);
4658 if (IS_ERR(handle))
4659 goto out;
4660 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
4661 ext4_mark_inode_dirty(handle, inode);
4662 ext4_journal_stop(handle);
4663
4664out:
4622 return dquot_quota_off(sb, type); 4665 return dquot_quota_off(sb, type);
4623} 4666}
4624 4667
4625/* Read data from quotafile - avoid pagecache and such because we cannot afford 4668/* Read data from quotafile - avoid pagecache and such because we cannot afford
4626 * acquiring the locks... As quota files are never truncated and quota code 4669 * acquiring the locks... As quota files are never truncated and quota code
4627 * itself serializes the operations (and noone else should touch the files) 4670 * itself serializes the operations (and no one else should touch the files)
4628 * we don't have to be afraid of races */ 4671 * we don't have to be afraid of races */
4629static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, 4672static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
4630 size_t len, loff_t off) 4673 size_t len, loff_t off)
@@ -4714,9 +4757,8 @@ out:
4714 if (inode->i_size < off + len) { 4757 if (inode->i_size < off + len) {
4715 i_size_write(inode, off + len); 4758 i_size_write(inode, off + len);
4716 EXT4_I(inode)->i_disksize = inode->i_size; 4759 EXT4_I(inode)->i_disksize = inode->i_size;
4760 ext4_mark_inode_dirty(handle, inode);
4717 } 4761 }
4718 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
4719 ext4_mark_inode_dirty(handle, inode);
4720 mutex_unlock(&inode->i_mutex); 4762 mutex_unlock(&inode->i_mutex);
4721 return len; 4763 return len;
4722} 4764}
diff --git a/fs/freevxfs/vxfs_fshead.c b/fs/freevxfs/vxfs_fshead.c
index 78948b4b1894..c9a6a94e58e9 100644
--- a/fs/freevxfs/vxfs_fshead.c
+++ b/fs/freevxfs/vxfs_fshead.c
@@ -164,7 +164,7 @@ vxfs_read_fshead(struct super_block *sbp)
164 goto out_free_pfp; 164 goto out_free_pfp;
165 } 165 }
166 if (!VXFS_ISILT(VXFS_INO(infp->vsi_stilist))) { 166 if (!VXFS_ISILT(VXFS_INO(infp->vsi_stilist))) {
167 printk(KERN_ERR "vxfs: structual list inode is of wrong type (%x)\n", 167 printk(KERN_ERR "vxfs: structural list inode is of wrong type (%x)\n",
168 VXFS_INO(infp->vsi_stilist)->vii_mode & VXFS_TYPE_MASK); 168 VXFS_INO(infp->vsi_stilist)->vii_mode & VXFS_TYPE_MASK);
169 goto out_iput_stilist; 169 goto out_iput_stilist;
170 } 170 }
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index 6c5131d592f0..3360f1e678ad 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -162,7 +162,7 @@ vxfs_find_entry(struct inode *ip, struct dentry *dp, struct page **ppp)
162/** 162/**
163 * vxfs_inode_by_name - find inode number for dentry 163 * vxfs_inode_by_name - find inode number for dentry
164 * @dip: directory to search in 164 * @dip: directory to search in
165 * @dp: dentry we seach for 165 * @dp: dentry we search for
166 * 166 *
167 * Description: 167 * Description:
168 * vxfs_inode_by_name finds out the inode number of 168 * vxfs_inode_by_name finds out the inode number of
diff --git a/fs/freevxfs/vxfs_olt.h b/fs/freevxfs/vxfs_olt.h
index d8324296486f..b7b3af502615 100644
--- a/fs/freevxfs/vxfs_olt.h
+++ b/fs/freevxfs/vxfs_olt.h
@@ -60,7 +60,7 @@ enum {
60 * 60 *
61 * The Object Location Table header is placed at the beginning of each 61 * The Object Location Table header is placed at the beginning of each
62 * OLT extent. It is used to find certain filesystem-wide metadata, e.g. 62 * OLT extent. It is used to find certain filesystem-wide metadata, e.g.
63 * the inital inode list, the fileset header or the device configuration. 63 * the initial inode list, the fileset header or the device configuration.
64 */ 64 */
65struct vxfs_olt { 65struct vxfs_olt {
66 u_int32_t olt_magic; /* magic number */ 66 u_int32_t olt_magic; /* magic number */
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index b5ed541fb137..34591ee804b5 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -144,7 +144,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
144 * 144 *
145 * Description: 145 * Description:
146 * This does WB_SYNC_NONE opportunistic writeback. The IO is only 146 * This does WB_SYNC_NONE opportunistic writeback. The IO is only
147 * started when this function returns, we make no guarentees on 147 * started when this function returns, we make no guarantees on
148 * completion. Caller need not hold sb s_umount semaphore. 148 * completion. Caller need not hold sb s_umount semaphore.
149 * 149 *
150 */ 150 */
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 6ea00734984e..82a66466a24c 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -523,7 +523,7 @@ static int fuse_readpage(struct file *file, struct page *page)
523 goto out; 523 goto out;
524 524
525 /* 525 /*
526 * Page writeback can extend beyond the liftime of the 526 * Page writeback can extend beyond the lifetime of the
527 * page-cache page, so make sure we read a properly synced 527 * page-cache page, so make sure we read a properly synced
528 * page. 528 * page.
529 */ 529 */
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index ef3dc4b9fae2..74add2ddcc3f 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1136,7 +1136,7 @@ void gfs2_trim_blocks(struct inode *inode)
1136 * earlier versions of GFS2 have a bug in the stuffed file reading 1136 * earlier versions of GFS2 have a bug in the stuffed file reading
1137 * code which will result in a buffer overrun if the size is larger 1137 * code which will result in a buffer overrun if the size is larger
1138 * than the max stuffed file size. In order to prevent this from 1138 * than the max stuffed file size. In order to prevent this from
1139 * occuring, such files are unstuffed, but in other cases we can 1139 * occurring, such files are unstuffed, but in other cases we can
1140 * just update the inode size directly. 1140 * just update the inode size directly.
1141 * 1141 *
1142 * Returns: 0 on success, or -ve on error 1142 * Returns: 0 on success, or -ve on error
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index e2431313491f..f07643e21bfa 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1123,7 +1123,7 @@ void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1123 * @number: the lock number 1123 * @number: the lock number
1124 * @glops: the glock operations for the type of glock 1124 * @glops: the glock operations for the type of glock
1125 * @state: the state to acquire the glock in 1125 * @state: the state to acquire the glock in
1126 * @flags: modifier flags for the aquisition 1126 * @flags: modifier flags for the acquisition
1127 * @gh: the struct gfs2_holder 1127 * @gh: the struct gfs2_holder
1128 * 1128 *
1129 * Returns: errno 1129 * Returns: errno
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index ec73ed70bae1..a4e23d68a398 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -657,7 +657,7 @@ out:
657 * @sdp: the file system 657 * @sdp: the file system
658 * 658 *
659 * This function flushes data and meta data for all machines by 659 * This function flushes data and meta data for all machines by
660 * aquiring the transaction log exclusively. All journals are 660 * acquiring the transaction log exclusively. All journals are
661 * ensured to be in a clean state as well. 661 * ensured to be in a clean state as well.
662 * 662 *
663 * Returns: errno 663 * Returns: errno
diff --git a/fs/inode.c b/fs/inode.c
index 5f4e11aaeb5c..33c963d08ab4 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -125,6 +125,14 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
125static DECLARE_RWSEM(iprune_sem); 125static DECLARE_RWSEM(iprune_sem);
126 126
127/* 127/*
128 * Empty aops. Can be used for the cases where the user does not
129 * define any of the address_space operations.
130 */
131const struct address_space_operations empty_aops = {
132};
133EXPORT_SYMBOL(empty_aops);
134
135/*
128 * Statistics gathering.. 136 * Statistics gathering..
129 */ 137 */
130struct inodes_stat_t inodes_stat; 138struct inodes_stat_t inodes_stat;
@@ -176,7 +184,6 @@ int proc_nr_inodes(ctl_table *table, int write,
176 */ 184 */
177int inode_init_always(struct super_block *sb, struct inode *inode) 185int inode_init_always(struct super_block *sb, struct inode *inode)
178{ 186{
179 static const struct address_space_operations empty_aops;
180 static const struct inode_operations empty_iops; 187 static const struct inode_operations empty_iops;
181 static const struct file_operations empty_fops; 188 static const struct file_operations empty_fops;
182 struct address_space *const mapping = &inode->i_data; 189 struct address_space *const mapping = &inode->i_data;
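
Hoisting empty_aops out of inode_init_always() and exporting it lets every caller share one zeroed ops table instead of defining a private static copy. The same idiom in plain C (hypothetical ops struct, not the kernel one):

    struct ops {
            int (*read)(void);
            int (*write)(void);
    };

    /* One shared all-NULL ops table instead of a static copy per user. */
    static const struct ops empty_ops;

    struct object { const struct ops *a_ops; };

    static void object_init(struct object *o)
    {
            o->a_ops = &empty_ops;      /* users override only as needed */
    }
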
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index da871ee084d3..69b180459463 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -362,7 +362,7 @@ void journal_commit_transaction(journal_t *journal)
362 * we do not require it to remember exactly which old buffers it 362 * we do not require it to remember exactly which old buffers it
363 * has reserved. This is consistent with the existing behaviour 363 * has reserved. This is consistent with the existing behaviour
364 * that multiple journal_get_write_access() calls to the same 364 * that multiple journal_get_write_access() calls to the same
365 * buffer are perfectly permissable. 365 * buffer are perfectly permissible.
366 */ 366 */
367 while (commit_transaction->t_reserved_list) { 367 while (commit_transaction->t_reserved_list) {
368 jh = commit_transaction->t_reserved_list; 368 jh = commit_transaction->t_reserved_list;
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index eb11601f2e00..b3713afaaa9e 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -770,7 +770,7 @@ journal_t * journal_init_dev(struct block_device *bdev,
770 journal->j_wbufsize = n; 770 journal->j_wbufsize = n;
771 journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL); 771 journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
772 if (!journal->j_wbuf) { 772 if (!journal->j_wbuf) {
773 printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n", 773 printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
774 __func__); 774 __func__);
775 goto out_err; 775 goto out_err;
776 } 776 }
@@ -831,7 +831,7 @@ journal_t * journal_init_inode (struct inode *inode)
831 journal->j_wbufsize = n; 831 journal->j_wbufsize = n;
832 journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL); 832 journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
833 if (!journal->j_wbuf) { 833 if (!journal->j_wbuf) {
834 printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n", 834 printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
835 __func__); 835 __func__);
836 goto out_err; 836 goto out_err;
837 } 837 }
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index d29018307e2e..305a90763154 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -71,7 +71,7 @@
71 * switching hash tables under them. For operations on the lists of entries in 71 * switching hash tables under them. For operations on the lists of entries in
72 * the hash table j_revoke_lock is used. 72 * the hash table j_revoke_lock is used.
73 * 73 *
74 * Finally, also replay code uses the hash tables but at this moment noone else 74 * Finally, also replay code uses the hash tables but at this moment no one else
75 * can touch them (filesystem isn't mounted yet) and hence no locking is 75 * can touch them (filesystem isn't mounted yet) and hence no locking is
76 * needed. 76 * needed.
77 */ 77 */
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 5b2e4c30a2a1..60d2319651b2 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1392,7 +1392,7 @@ int journal_stop(handle_t *handle)
1392 * by 30x or more... 1392 * by 30x or more...
1393 * 1393 *
1394 * We try and optimize the sleep time against what the underlying disk 1394 * We try and optimize the sleep time against what the underlying disk
1395 * can do, instead of having a static sleep time. This is usefull for 1395 * can do, instead of having a static sleep time. This is useful for
1396 * the case where our storage is so fast that it is more optimal to go 1396 * the case where our storage is so fast that it is more optimal to go
1397 * ahead and force a flush and wait for the transaction to be committed 1397 * ahead and force a flush and wait for the transaction to be committed
1398 * than it is to wait for an arbitrary amount of time for new writers to 1398 * than it is to wait for an arbitrary amount of time for new writers to
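The comment above captures the batching heuristic behind journal_stop(): sleep roughly as long as the underlying device needs to commit, rather than a fixed interval, so fast storage stops batching early and slow storage batches longer. A minimal userspace sketch of that idea, tracking an exponentially weighted average of observed commit times (the 1/8 weight and all names here are illustrative, not jbd's actual fields):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Exponentially weighted moving average of commit latency, in ns.
 * New samples get 1/8 weight, echoing the usual kernel idiom. */
static uint64_t avg_commit_ns;

static void record_commit(uint64_t sample_ns)
{
        if (!avg_commit_ns)
                avg_commit_ns = sample_ns;
        else
                avg_commit_ns = (7 * avg_commit_ns + sample_ns) / 8;
}

/* Sleep for about one average commit instead of a static interval. */
static void batch_sleep(void)
{
        struct timespec ts = {
                .tv_sec  = avg_commit_ns / 1000000000ULL,
                .tv_nsec = avg_commit_ns % 1000000000ULL,
        };
        nanosleep(&ts, NULL);
}

int main(void)
{
        record_commit(2000000);   /* 2 ms commit observed */
        record_commit(4000000);   /* 4 ms commit observed */
        printf("would sleep %llu ns\n",
               (unsigned long long)avg_commit_ns);
        batch_sleep();
        return 0;
}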
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index fa36d7662b21..6e28000a4b21 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -105,6 +105,8 @@ static int journal_submit_commit_record(journal_t *journal,
105 int ret; 105 int ret;
106 struct timespec now = current_kernel_time(); 106 struct timespec now = current_kernel_time();
107 107
108 *cbh = NULL;
109
108 if (is_journal_aborted(journal)) 110 if (is_journal_aborted(journal))
109 return 0; 111 return 0;
110 112
@@ -403,7 +405,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
403 * we do not require it to remember exactly which old buffers it 405 * we do not require it to remember exactly which old buffers it
404 * has reserved. This is consistent with the existing behaviour 406 * has reserved. This is consistent with the existing behaviour
405 * that multiple jbd2_journal_get_write_access() calls to the same 407 * that multiple jbd2_journal_get_write_access() calls to the same
406 * buffer are perfectly permissable. 408 * buffer are perfectly permissible.
407 */ 409 */
408 while (commit_transaction->t_reserved_list) { 410 while (commit_transaction->t_reserved_list) {
409 jh = commit_transaction->t_reserved_list; 411 jh = commit_transaction->t_reserved_list;
@@ -806,7 +808,7 @@ wait_for_iobuf:
806 if (err) 808 if (err)
807 __jbd2_journal_abort_hard(journal); 809 __jbd2_journal_abort_hard(journal);
808 } 810 }
809 if (!err && !is_journal_aborted(journal)) 811 if (cbh)
810 err = journal_wait_on_commit_record(journal, cbh); 812 err = journal_wait_on_commit_record(journal, cbh);
811 if (JBD2_HAS_INCOMPAT_FEATURE(journal, 813 if (JBD2_HAS_INCOMPAT_FEATURE(journal,
812 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) && 814 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
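The two jbd2/commit.c hunks above are one fix: journal_submit_commit_record() could return without ever touching *cbh (aborted journal, early error), yet the commit path later used cbh. Initializing the out-parameter up front and having the caller test the pointer closes that window. A minimal userspace sketch of the pattern (names are illustrative, not jbd2 APIs):

#include <stdio.h>
#include <stdlib.h>

struct buf { int data; };

/* The out-parameter is defined on every path: set it to NULL first,
 * so early returns leave the caller with a testable value. */
static int submit_record(int aborted, struct buf **out)
{
        *out = NULL;
        if (aborted)
                return 0;            /* no record submitted */
        *out = malloc(sizeof(**out));
        if (!*out)
                return -12;          /* -ENOMEM-style failure */
        (*out)->data = 1;
        return 0;
}

int main(void)
{
        struct buf *b;
        int err = submit_record(1, &b);

        if (b)                       /* mirrors "if (cbh)" in the fix */
                free(b);
        return err ? 1 : 0;
}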
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 90407b8fece7..e0ec3db1c395 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -917,7 +917,7 @@ journal_t * jbd2_journal_init_dev(struct block_device *bdev,
917 journal->j_wbufsize = n; 917 journal->j_wbufsize = n;
918 journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL); 918 journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
919 if (!journal->j_wbuf) { 919 if (!journal->j_wbuf) {
920 printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n", 920 printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
921 __func__); 921 __func__);
922 goto out_err; 922 goto out_err;
923 } 923 }
@@ -983,7 +983,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
983 journal->j_wbufsize = n; 983 journal->j_wbufsize = n;
984 journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL); 984 journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
985 if (!journal->j_wbuf) { 985 if (!journal->j_wbuf) {
986 printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n", 986 printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
987 __func__); 987 __func__);
988 goto out_err; 988 goto out_err;
989 } 989 }
@@ -2413,10 +2413,12 @@ const char *jbd2_dev_to_name(dev_t device)
2413 new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL); 2413 new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL);
2414 if (!new_dev) 2414 if (!new_dev)
2415 return "NODEV-ALLOCFAILURE"; /* Something non-NULL */ 2415 return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
2416 bd = bdget(device);
2416 spin_lock(&devname_cache_lock); 2417 spin_lock(&devname_cache_lock);
2417 if (devcache[i]) { 2418 if (devcache[i]) {
2418 if (devcache[i]->device == device) { 2419 if (devcache[i]->device == device) {
2419 kfree(new_dev); 2420 kfree(new_dev);
2421 bdput(bd);
2420 ret = devcache[i]->devname; 2422 ret = devcache[i]->devname;
2421 spin_unlock(&devname_cache_lock); 2423 spin_unlock(&devname_cache_lock);
2422 return ret; 2424 return ret;
@@ -2425,7 +2427,6 @@ const char *jbd2_dev_to_name(dev_t device)
2425 } 2427 }
2426 devcache[i] = new_dev; 2428 devcache[i] = new_dev;
2427 devcache[i]->device = device; 2429 devcache[i]->device = device;
2428 bd = bdget(device);
2429 if (bd) { 2430 if (bd) {
2430 bdevname(bd, devcache[i]->devname); 2431 bdevname(bd, devcache[i]->devname);
2431 bdput(bd); 2432 bdput(bd);
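The jbd2_dev_to_name() change above moves bdget() out from under devname_cache_lock: bdget() can sleep, which is not allowed while holding a spinlock. The fix takes the block-device reference before locking and drops it with bdput() on the cache-hit return. The same shape in a userspace sketch, with a mutex standing in for the spinlock and strdup() for the sleeping acquisition (illustrative names only):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static char *cached_name;

static const char *dev_to_name(const char *looked_up)
{
        /* Do the potentially sleeping work before taking the lock. */
        char *fresh = strdup(looked_up);

        if (!fresh)
                return NULL;

        pthread_mutex_lock(&cache_lock);
        if (cached_name) {
                pthread_mutex_unlock(&cache_lock);
                free(fresh);    /* cache hit: drop the early acquisition */
                return cached_name;
        }
        cached_name = fresh;
        pthread_mutex_unlock(&cache_lock);
        return cached_name;
}

int main(void)
{
        dev_to_name("sda");
        dev_to_name("sdb");     /* hit: the cached "sda" wins */
        return 0;
}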
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 9ad321fd63fd..69fd93588118 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -71,7 +71,7 @@
71 * switching hash tables under them. For operations on the lists of entries in 71 * switching hash tables under them. For operations on the lists of entries in
72 * the hash table j_revoke_lock is used. 72 * the hash table j_revoke_lock is used.
73 * 73 *
74 * Finally, also replay code uses the hash tables but at this moment noone else 74 * Finally, also replay code uses the hash tables but at this moment no one else
75 * can touch them (filesystem isn't mounted yet) and hence no locking is 75 * can touch them (filesystem isn't mounted yet) and hence no locking is
76 * needed. 76 * needed.
77 */ 77 */
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 1d1191050f99..05fa77a23711 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1403,7 +1403,7 @@ int jbd2_journal_stop(handle_t *handle)
1403 1403
1404 /* 1404 /*
1405 * Once we drop t_updates, if it goes to zero the transaction 1405 * Once we drop t_updates, if it goes to zero the transaction
1406 * could start commiting on us and eventually disappear. So 1406 * could start committing on us and eventually disappear. So
1407 * once we do this, we must not dereference transaction 1407 * once we do this, we must not dereference transaction
1408 * pointer again. 1408 * pointer again.
1409 */ 1409 */
diff --git a/fs/jffs2/TODO b/fs/jffs2/TODO
index 5d3ea4070f01..ca28964abd4b 100644
--- a/fs/jffs2/TODO
+++ b/fs/jffs2/TODO
@@ -11,7 +11,7 @@
11 - checkpointing (do we need this? scan is quite fast) 11 - checkpointing (do we need this? scan is quite fast)
12 - make the scan code populate real inodes so read_inode just after 12 - make the scan code populate real inodes so read_inode just after
13 mount doesn't have to read the flash twice for large files. 13 mount doesn't have to read the flash twice for large files.
14 Make this a per-inode option, changable with chattr, so you can 14 Make this a per-inode option, changeable with chattr, so you can
15 decide which inodes should be in-core immediately after mount. 15 decide which inodes should be in-core immediately after mount.
16 - test, test, test 16 - test, test, test
17 17
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index d32ee9412cb9..2ab1a0d91210 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -24,7 +24,7 @@
24 * 24 *
25 * Returns: 0 if the data CRC is correct; 25 * Returns: 0 if the data CRC is correct;
26 * 1 - if incorrect; 26 * 1 - if incorrect;
27 * error code if an error occured. 27 * error code if an error occurred.
28 */ 28 */
29static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn) 29static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
30{ 30{
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index 800171dca53b..e537fb0e0184 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -121,7 +121,7 @@ int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri,
121 temp->nodetype = ri->nodetype; 121 temp->nodetype = ri->nodetype;
122 temp->inode = ri->ino; 122 temp->inode = ri->ino;
123 temp->version = ri->version; 123 temp->version = ri->version;
124 temp->offset = cpu_to_je32(ofs); /* relative offset from the begining of the jeb */ 124 temp->offset = cpu_to_je32(ofs); /* relative offset from the beginning of the jeb */
125 temp->totlen = ri->totlen; 125 temp->totlen = ri->totlen;
126 temp->next = NULL; 126 temp->next = NULL;
127 127
@@ -139,7 +139,7 @@ int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *r
139 139
140 temp->nodetype = rd->nodetype; 140 temp->nodetype = rd->nodetype;
141 temp->totlen = rd->totlen; 141 temp->totlen = rd->totlen;
142 temp->offset = cpu_to_je32(ofs); /* relative from the begining of the jeb */ 142 temp->offset = cpu_to_je32(ofs); /* relative from the beginning of the jeb */
143 temp->pino = rd->pino; 143 temp->pino = rd->pino;
144 temp->version = rd->version; 144 temp->version = rd->version;
145 temp->ino = rd->ino; 145 temp->ino = rd->ino;
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 07ee1546b2fa..4515bea0268f 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -1116,7 +1116,7 @@ int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
1116 1116
1117/* 1117/*
1118 * On NAND we try to mark this block bad. If the block was erased more 1118 * On NAND we try to mark this block bad. If the block was erased more
1119 * than MAX_ERASE_FAILURES we mark it finaly bad. 1119 * than MAX_ERASE_FAILURES we mark it finally bad.
1120 * Don't care about failures. This block remains on the erase-pending 1120 * Don't care about failures. This block remains on the erase-pending
1121 * or badblock list as long as nobody manipulates the flash with 1121 * or badblock list as long as nobody manipulates the flash with
1122 * a bootloader or something like that. 1122 * a bootloader or something like that.
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index c92ea3b3ea5e..4496872cf4e7 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -1649,7 +1649,7 @@ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
1649 } 1649 }
1650 1650
1651 /* search the tree within the dmap control page for 1651 /* search the tree within the dmap control page for
1652 * sufficent free space. if sufficient free space is found, 1652 * sufficient free space. if sufficient free space is found,
1653 * dbFindLeaf() returns the index of the leaf at which 1653 * dbFindLeaf() returns the index of the leaf at which
1654 * free space was found. 1654 * free space was found.
1655 */ 1655 */
@@ -2744,7 +2744,7 @@ static int dbJoin(dmtree_t * tp, int leafno, int newval)
2744 /* check which (leafno or buddy) is the left buddy. 2744 /* check which (leafno or buddy) is the left buddy.
2745 * the left buddy gets to claim the blocks resulting 2745 * the left buddy gets to claim the blocks resulting
2746 * from the join while the right gets to claim none. 2746 * from the join while the right gets to claim none.
2747 * the left buddy is also eligable to participate in 2747 * the left buddy is also eligible to participate in
2748 * a join at the next higher level while the right 2748 * a join at the next higher level while the right
2749 * is not. 2749 * is not.
2750 * 2750 *
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index 5d3bbd10f8db..e5fe8506ed16 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -126,7 +126,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
126 126
127 /* allocate the disk blocks for the extent. initially, extBalloc() 127 /* allocate the disk blocks for the extent. initially, extBalloc()
128 * will try to allocate disk blocks for the requested size (xlen). 128 * will try to allocate disk blocks for the requested size (xlen).
129 * if this fails (xlen contiguous free blocks not avaliable), it'll 129 * if this fails (xlen contiguous free blocks not available), it'll
130 * try to allocate a smaller number of blocks (producing a smaller 130 * try to allocate a smaller number of blocks (producing a smaller
131 * extent), with this smaller number of blocks consisting of the 131 * extent), with this smaller number of blocks consisting of the
132 * requested number of blocks rounded down to the next smaller 132 * requested number of blocks rounded down to the next smaller
@@ -481,7 +481,7 @@ int extFill(struct inode *ip, xad_t * xp)
481 * 481 *
482 * initially, we will try to allocate disk blocks for the 482 * initially, we will try to allocate disk blocks for the
483 * requested size (nblocks). if this fails (nblocks 483 * requested size (nblocks). if this fails (nblocks
484 * contiguous free blocks not avaliable), we'll try to allocate 484 * contiguous free blocks not available), we'll try to allocate
485 * a smaller number of blocks (producing a smaller extent), with 485 * a smaller number of blocks (producing a smaller extent), with
486 * this smaller number of blocks consisting of the requested 486 * this smaller number of blocks consisting of the requested
487 * number of blocks rounded down to the next smaller power of 2 487 * number of blocks rounded down to the next smaller power of 2
@@ -575,7 +575,7 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
575 * to a new set of blocks. If moving the extent, we initially 575 * to a new set of blocks. If moving the extent, we initially
576 * will try to allocate disk blocks for the requested size 576 * will try to allocate disk blocks for the requested size
577 * (newnblks). if this fails (new contiguous free blocks not 577 * (newnblks). if this fails (new contiguous free blocks not
578 * avaliable), we'll try to allocate a smaller number of 578 * available), we'll try to allocate a smaller number of
579 * blocks (producing a smaller extent), with this smaller 579 * blocks (producing a smaller extent), with this smaller
580 * number of blocks consisting of the requested number of 580 * number of blocks consisting of the requested number of
581 * blocks rounded down to the next smaller power of 2 581 * blocks rounded down to the next smaller power of 2
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 3a09423b6c22..ed53a4740168 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -1069,7 +1069,7 @@ int diFree(struct inode *ip)
1069 */ 1069 */
1070 if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG - 1)) { 1070 if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG - 1)) {
1071 /* in preparation for removing the iag from the 1071 /* in preparation for removing the iag from the
1072 * ag extent free list, read the iags preceeding 1072 * ag extent free list, read the iags preceding
1073 * and following the iag on the ag extent free 1073 * and following the iag on the ag extent free
1074 * list. 1074 * list.
1075 */ 1075 */
@@ -1095,7 +1095,7 @@ int diFree(struct inode *ip)
1095 int inofreefwd = le32_to_cpu(iagp->inofreefwd); 1095 int inofreefwd = le32_to_cpu(iagp->inofreefwd);
1096 1096
1097 /* in preparation for removing the iag from the 1097 /* in preparation for removing the iag from the
1098 * ag inode free list, read the iags preceeding 1098 * ag inode free list, read the iags preceding
1099 * and following the iag on the ag inode free 1099 * and following the iag on the ag inode free
1100 * list. before reading these iags, we must make 1100 * list. before reading these iags, we must make
1101 * sure that we already don't have them in hand 1101 * sure that we already don't have them in hand
@@ -1681,7 +1681,7 @@ diAllocAG(struct inomap * imap, int agno, bool dir, struct inode *ip)
1681 * try to allocate a new extent of free inodes. 1681 * try to allocate a new extent of free inodes.
1682 */ 1682 */
1683 if (addext) { 1683 if (addext) {
1684 /* if free space is not avaliable for this new extent, try 1684 /* if free space is not available for this new extent, try
1685 * below to allocate a free and existing (already backed) 1685 * below to allocate a free and existing (already backed)
1686 * inode from the ag. 1686 * inode from the ag.
1687 */ 1687 */
@@ -2036,7 +2036,7 @@ static int diAllocBit(struct inomap * imap, struct iag * iagp, int ino)
2036 2036
2037 /* check if this is the last free inode within the iag. 2037 /* check if this is the last free inode within the iag.
2038 * if so, it will have to be removed from the ag free 2038 * if so, it will have to be removed from the ag free
2039 * inode list, so get the iags preceeding and following 2039 * inode list, so get the iags preceding and following
2040 * it on the list. 2040 * it on the list.
2041 */ 2041 */
2042 if (iagp->nfreeinos == cpu_to_le32(1)) { 2042 if (iagp->nfreeinos == cpu_to_le32(1)) {
@@ -2208,7 +2208,7 @@ static int diNewExt(struct inomap * imap, struct iag * iagp, int extno)
2208 2208
2209 /* check if this is the last free extent within the 2209 /* check if this is the last free extent within the
2210 * iag. if so, the iag must be removed from the ag 2210 * iag. if so, the iag must be removed from the ag
2211 * free extent list, so get the iags preceeding and 2211 * free extent list, so get the iags preceding and
2212 * following the iag on this list. 2212 * following the iag on this list.
2213 */ 2213 */
2214 if (iagp->nfreeexts == cpu_to_le32(1)) { 2214 if (iagp->nfreeexts == cpu_to_le32(1)) {
@@ -2504,7 +2504,7 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
2504 } 2504 }
2505 2505
2506 2506
2507 /* get the next avaliable iag number */ 2507 /* get the next available iag number */
2508 iagno = imap->im_nextiag; 2508 iagno = imap->im_nextiag;
2509 2509
2510 /* make sure that we have not exceeded the maximum inode 2510 /* make sure that we have not exceeded the maximum inode
@@ -2615,7 +2615,7 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
2615 2615
2616 duplicateIXtree(sb, blkno, xlen, &xaddr); 2616 duplicateIXtree(sb, blkno, xlen, &xaddr);
2617 2617
2618 /* update the next avaliable iag number */ 2618 /* update the next available iag number */
2619 imap->im_nextiag += 1; 2619 imap->im_nextiag += 1;
2620 2620
2621 /* Add the iag to the iag free list so we don't lose the iag 2621 /* Add the iag to the iag free list so we don't lose the iag
diff --git a/fs/jfs/jfs_logmgr.h b/fs/jfs/jfs_logmgr.h
index 9236bc49ae7f..e38c21598850 100644
--- a/fs/jfs/jfs_logmgr.h
+++ b/fs/jfs/jfs_logmgr.h
@@ -288,7 +288,7 @@ struct lrd {
288 /* 288 /*
289 * SYNCPT: log sync point 289 * SYNCPT: log sync point
290 * 290 *
291 * replay log upto syncpt address specified; 291 * replay log up to syncpt address specified;
292 */ 292 */
293 struct { 293 struct {
294 __le32 sync; /* 4: syncpt address (0 = here) */ 294 __le32 sync; /* 4: syncpt address (0 = here) */
diff --git a/fs/jfs/jfs_metapage.h b/fs/jfs/jfs_metapage.h
index d94f8d9e87d7..a78beda85f68 100644
--- a/fs/jfs/jfs_metapage.h
+++ b/fs/jfs/jfs_metapage.h
@@ -75,7 +75,7 @@ extern void grab_metapage(struct metapage *);
75extern void force_metapage(struct metapage *); 75extern void force_metapage(struct metapage *);
76 76
77/* 77/*
78 * hold_metapage and put_metapage are used in conjuction. The page lock 78 * hold_metapage and put_metapage are used in conjunction. The page lock
79 * is not dropped between the two, so no other threads can get or release 79 * is not dropped between the two, so no other threads can get or release
80 * the metapage 80 * the metapage
81 */ 81 */
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index 9466957ec841..f6cc0c09ec63 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -636,7 +636,7 @@ struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
636 * the inode of the page and available to all anonymous 636 * the inode of the page and available to all anonymous
637 * transactions until txCommit() time at which point 637 * transactions until txCommit() time at which point
638 * they are transferred to the transaction tlock list of 638 * they are transferred to the transaction tlock list of
639 * the commiting transaction of the inode) 639 * the committing transaction of the inode)
640 */ 640 */
641 if (xtid == 0) { 641 if (xtid == 0) {
642 tlck->tid = tid; 642 tlck->tid = tid;
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
index 1aba0039f1c9..8ea5efb5a34e 100644
--- a/fs/jfs/resize.c
+++ b/fs/jfs/resize.c
@@ -57,7 +57,7 @@
57 * 2. compute new FSCKSize from new LVSize; 57 * 2. compute new FSCKSize from new LVSize;
58 * 3. set new FSSize as MIN(FSSize, LVSize-(LogSize+FSCKSize)) where 58 * 3. set new FSSize as MIN(FSSize, LVSize-(LogSize+FSCKSize)) where
59 * assert(new FSSize >= old FSSize), 59 * assert(new FSSize >= old FSSize),
60 * i.e., file system must not be shrinked; 60 * i.e., file system must not be shrunk;
61 */ 61 */
62int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize) 62int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
63{ 63{
@@ -182,7 +182,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
182 */ 182 */
183 newFSSize = newLVSize - newLogSize - newFSCKSize; 183 newFSSize = newLVSize - newLogSize - newFSCKSize;
184 184
185 /* file system cannot be shrinked */ 185 /* file system cannot be shrunk */
186 if (newFSSize < bmp->db_mapsize) { 186 if (newFSSize < bmp->db_mapsize) {
187 rc = -EINVAL; 187 rc = -EINVAL;
188 goto out; 188 goto out;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index eeca48a031ab..06c8a67cbe76 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -644,7 +644,7 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
644 644
645/* Read data from quotafile - avoid pagecache and such because we cannot afford 645/* Read data from quotafile - avoid pagecache and such because we cannot afford
646 * acquiring the locks... As quota files are never truncated and quota code 646 * acquiring the locks... As quota files are never truncated and quota code
647 * itself serializes the operations (and noone else should touch the files) 647 * itself serializes the operations (and no one else should touch the files)
648 * we don't have to be afraid of races */ 648 * we don't have to be afraid of races */
649static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data, 649static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
650 size_t len, loff_t off) 650 size_t len, loff_t off)
diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c
index 7466e9dcc8c5..339e17e9133d 100644
--- a/fs/logfs/dev_mtd.c
+++ b/fs/logfs/dev_mtd.c
@@ -60,7 +60,7 @@ static int mtd_write(struct super_block *sb, loff_t ofs, size_t len, void *buf)
60 * asynchronous properties. So just to prevent the first implementor of such 60 * asynchronous properties. So just to prevent the first implementor of such
61 * a thing from breaking logfs in 2350, we do the usual pointless dance to 61 * a thing from breaking logfs in 2350, we do the usual pointless dance to
62 * declare a completion variable and wait for completion before returning 62 * declare a completion variable and wait for completion before returning
63 * from mtd_erase(). What an excercise in futility! 63 * from mtd_erase(). What an exercise in futility!
64 */ 64 */
65static void logfs_erase_callback(struct erase_info *ei) 65static void logfs_erase_callback(struct erase_info *ei)
66{ 66{
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index f9ddf0c388c8..9ed89d1663f8 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -92,7 +92,7 @@ static int beyond_eof(struct inode *inode, loff_t bix)
92 * so short names (len <= 9) don't even occupy the complete 32bit name 92 * so short names (len <= 9) don't even occupy the complete 32bit name
93 * space. A prime >256 ensures short names quickly spread the 32bit 93 * space. A prime >256 ensures short names quickly spread the 32bit
94 * name space. Add about 26 for the estimated amount of information 94 * name space. Add about 26 for the estimated amount of information
95 * of each character and pick a prime nearby, preferrably a bit-sparse 95 * of each character and pick a prime nearby, preferably a bit-sparse
96 * one. 96 * one.
97 */ 97 */
98static u32 hash_32(const char *s, int len, u32 seed) 98static u32 hash_32(const char *s, int len, u32 seed)
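The comment above sketches the design of logfs's directory hash: a multiplicative string hash whose per-character multiplier is a prime somewhat above 256, so even short names spread across the full 32-bit space. A generic multiplicative hash in that spirit (the multiplier 293 below is an arbitrary prime chosen for illustration; logfs's actual constant is not shown in this hunk):

#include <stdint.h>
#include <stdio.h>

/* Multiplicative string hash: h = h * PRIME + c. A prime a bit
 * above 256 pushes the previous state past the byte boundary on
 * each step, so "a" and "ab" land far apart in 32-bit space. */
static uint32_t hash_32_sketch(const char *s, int len, uint32_t seed)
{
        uint32_t h = seed;
        int i;

        for (i = 0; i < len; i++)
                h = h * 293u + (unsigned char)s[i];
        return h;
}

int main(void)
{
        printf("%08x\n", hash_32_sketch("a",  1, 0));
        printf("%08x\n", hash_32_sketch("ab", 2, 0));
        return 0;
}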
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index ee99a9f5dfd3..9e22085231b3 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -1616,7 +1616,7 @@ int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
1616 err = logfs_write_buf(inode, page, flags); 1616 err = logfs_write_buf(inode, page, flags);
1617 if (!err && shrink_level(gc_level) == 0) { 1617 if (!err && shrink_level(gc_level) == 0) {
1618 /* Rewrite cannot mark the inode dirty but has to 1618 /* Rewrite cannot mark the inode dirty but has to
1619 * write it immediatly. 1619 * write it immediately.
1620 * Q: Can't we just create an alias for the inode 1620 * Q: Can't we just create an alias for the inode
1621 * instead? And if not, why not? 1621 * instead? And if not, why not?
1622 */ 1622 */
diff --git a/fs/mbcache.c b/fs/mbcache.c
index a25444ab2baf..2f174be06555 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -542,7 +542,7 @@ __mb_cache_entry_find(struct list_head *l, struct list_head *head,
542 * mb_cache_entry_find_first() 542 * mb_cache_entry_find_first()
543 * 543 *
544 * Find the first cache entry on a given device with a certain key in 544 * Find the first cache entry on a given device with a certain key in
545 * an additional index. Additonal matches can be found with 545 * an additional index. Additional matches can be found with
546 * mb_cache_entry_find_next(). Returns NULL if no match was found. The 546 * mb_cache_entry_find_next(). Returns NULL if no match was found. The
547 * returned cache entry is locked for shared access ("multiple readers"). 547 * returned cache entry is locked for shared access ("multiple readers").
548 * 548 *
diff --git a/fs/namei.c b/fs/namei.c
index 3cb616d38d9c..e6cd6113872c 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -70,7 +70,7 @@
70 * name indicated by the symlink. The old code always complained that the 70 * name indicated by the symlink. The old code always complained that the
71 * name already exists, due to not following the symlink even if its target 71 * name already exists, due to not following the symlink even if its target
72 * is nonexistent. The new semantics affects also mknod() and link() when 72 * is nonexistent. The new semantics affects also mknod() and link() when
73 * the name is a symlink pointing to a non-existant name. 73 * the name is a symlink pointing to a non-existent name.
74 * 74 *
75 * I don't know which semantics is the right one, since I have no access 75 * I don't know which semantics is the right one, since I have no access
76 * to standards. But I found by trial that HP-UX 9.0 has the full "new" 76 * to standards. But I found by trial that HP-UX 9.0 has the full "new"
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 00a1d1c3d3a4..0250e4ce4893 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -596,7 +596,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
596/* server->priv.data = NULL; */ 596/* server->priv.data = NULL; */
597 597
598 server->m = data; 598 server->m = data;
599 /* Althought anything producing this is buggy, it happens 599 /* Although anything producing this is buggy, it happens
600 now because of PATH_MAX changes.. */ 600 now because of PATH_MAX changes.. */
601 if (server->m.time_out < 1) { 601 if (server->m.time_out < 1) {
602 server->m.time_out = 10; 602 server->m.time_out = 10;
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 14e0f9371d14..00ecf62ce7c1 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -241,7 +241,7 @@ static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
241 241
242 args->cbl_layout_type = ntohl(*p++); 242 args->cbl_layout_type = ntohl(*p++);
 243 /* Despite the spec's xdr, iomode really belongs in the FILE switch, 243 /* Despite the spec's xdr, iomode really belongs in the FILE switch,

244 * as it is unuseable and ignored with the other types. 244 * as it is unusable and ignored with the other types.
245 */ 245 */
246 iomode = ntohl(*p++); 246 iomode = ntohl(*p++);
247 args->cbl_layoutchanged = ntohl(*p++); 247 args->cbl_layoutchanged = ntohl(*p++);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 3ac5bd695e5e..2f093ed16980 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -301,7 +301,7 @@ nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
301 * disk, but it retrieves and clears ctx->error after synching, despite 301 * disk, but it retrieves and clears ctx->error after synching, despite
302 * the two being set at the same time in nfs_context_set_write_error(). 302 * the two being set at the same time in nfs_context_set_write_error().
303 * This is because the former is used to notify the _next_ call to 303 * This is because the former is used to notify the _next_ call to
304 * nfs_file_write() that a write error occured, and hence cause it to 304 * nfs_file_write() that a write error occurred, and hence cause it to
305 * fall back to doing a synchronous write. 305 * fall back to doing a synchronous write.
306 */ 306 */
307static int 307static int
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 9166fcb66da2..89fc160fd5b0 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -148,67 +148,64 @@ static rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors,
148 return pseudoflavor; 148 return pseudoflavor;
149} 149}
150 150
151static rpc_authflavor_t nfs_negotiate_security(const struct dentry *parent, const struct dentry *dentry) 151static int nfs_negotiate_security(const struct dentry *parent,
152 const struct dentry *dentry,
153 rpc_authflavor_t *flavor)
152{ 154{
153 int status = 0;
154 struct page *page; 155 struct page *page;
155 struct nfs4_secinfo_flavors *flavors; 156 struct nfs4_secinfo_flavors *flavors;
156 int (*secinfo)(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *); 157 int (*secinfo)(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
157 rpc_authflavor_t flavor = RPC_AUTH_UNIX; 158 int ret = -EPERM;
158 159
159 secinfo = NFS_PROTO(parent->d_inode)->secinfo; 160 secinfo = NFS_PROTO(parent->d_inode)->secinfo;
160 if (secinfo != NULL) { 161 if (secinfo != NULL) {
161 page = alloc_page(GFP_KERNEL); 162 page = alloc_page(GFP_KERNEL);
162 if (!page) { 163 if (!page) {
163 status = -ENOMEM; 164 ret = -ENOMEM;
164 goto out; 165 goto out;
165 } 166 }
166 flavors = page_address(page); 167 flavors = page_address(page);
167 status = secinfo(parent->d_inode, &dentry->d_name, flavors); 168 ret = secinfo(parent->d_inode, &dentry->d_name, flavors);
168 flavor = nfs_find_best_sec(flavors, dentry->d_inode); 169 *flavor = nfs_find_best_sec(flavors, dentry->d_inode);
169 put_page(page); 170 put_page(page);
170 } 171 }
171 172
172 return flavor;
173
174out: 173out:
175 status = -ENOMEM; 174 return ret;
176 return status;
177} 175}
178 176
179static rpc_authflavor_t nfs_lookup_with_sec(struct nfs_server *server, struct dentry *parent, 177static int nfs_lookup_with_sec(struct nfs_server *server, struct dentry *parent,
180 struct dentry *dentry, struct path *path, 178 struct dentry *dentry, struct path *path,
181 struct nfs_fh *fh, struct nfs_fattr *fattr) 179 struct nfs_fh *fh, struct nfs_fattr *fattr,
180 rpc_authflavor_t *flavor)
182{ 181{
183 rpc_authflavor_t flavor;
184 struct rpc_clnt *clone; 182 struct rpc_clnt *clone;
185 struct rpc_auth *auth; 183 struct rpc_auth *auth;
186 int err; 184 int err;
187 185
188 flavor = nfs_negotiate_security(parent, path->dentry); 186 err = nfs_negotiate_security(parent, path->dentry, flavor);
189 if (flavor < 0) 187 if (err < 0)
190 goto out; 188 goto out;
191 clone = rpc_clone_client(server->client); 189 clone = rpc_clone_client(server->client);
192 auth = rpcauth_create(flavor, clone); 190 auth = rpcauth_create(*flavor, clone);
193 if (!auth) { 191 if (!auth) {
194 flavor = -EIO; 192 err = -EIO;
195 goto out_shutdown; 193 goto out_shutdown;
196 } 194 }
197 err = server->nfs_client->rpc_ops->lookup(clone, parent->d_inode, 195 err = server->nfs_client->rpc_ops->lookup(clone, parent->d_inode,
198 &path->dentry->d_name, 196 &path->dentry->d_name,
199 fh, fattr); 197 fh, fattr);
200 if (err < 0)
201 flavor = err;
202out_shutdown: 198out_shutdown:
203 rpc_shutdown_client(clone); 199 rpc_shutdown_client(clone);
204out: 200out:
205 return flavor; 201 return err;
206} 202}
207#else /* CONFIG_NFS_V4 */ 203#else /* CONFIG_NFS_V4 */
208static inline rpc_authflavor_t nfs_lookup_with_sec(struct nfs_server *server, 204static inline int nfs_lookup_with_sec(struct nfs_server *server,
209 struct dentry *parent, struct dentry *dentry, 205 struct dentry *parent, struct dentry *dentry,
210 struct path *path, struct nfs_fh *fh, 206 struct path *path, struct nfs_fh *fh,
211 struct nfs_fattr *fattr) 207 struct nfs_fattr *fattr,
208 rpc_authflavor_t *flavor)
212{ 209{
213 return -EPERM; 210 return -EPERM;
214} 211}
@@ -234,7 +231,7 @@ struct vfsmount *nfs_d_automount(struct path *path)
234 struct nfs_fh *fh = NULL; 231 struct nfs_fh *fh = NULL;
235 struct nfs_fattr *fattr = NULL; 232 struct nfs_fattr *fattr = NULL;
236 int err; 233 int err;
237 rpc_authflavor_t flavor = 1; 234 rpc_authflavor_t flavor = RPC_AUTH_UNIX;
238 235
239 dprintk("--> nfs_d_automount()\n"); 236 dprintk("--> nfs_d_automount()\n");
240 237
@@ -255,13 +252,8 @@ struct vfsmount *nfs_d_automount(struct path *path)
255 err = server->nfs_client->rpc_ops->lookup(server->client, parent->d_inode, 252 err = server->nfs_client->rpc_ops->lookup(server->client, parent->d_inode,
256 &path->dentry->d_name, 253 &path->dentry->d_name,
257 fh, fattr); 254 fh, fattr);
258 if (err == -EPERM) { 255 if (err == -EPERM && NFS_PROTO(parent->d_inode)->secinfo != NULL)
259 flavor = nfs_lookup_with_sec(server, parent, path->dentry, path, fh, fattr); 256 err = nfs_lookup_with_sec(server, parent, path->dentry, path, fh, fattr, &flavor);
260 if (flavor < 0)
261 err = flavor;
262 else
263 err = 0;
264 }
265 dput(parent); 257 dput(parent);
266 if (err != 0) { 258 if (err != 0) {
267 mnt = ERR_PTR(err); 259 mnt = ERR_PTR(err);
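The nfs/namespace.c rewrite above fixes a sign bug: rpc_authflavor_t is an unsigned 32-bit type, so the old code's checks like "if (flavor < 0)" could never be true and negative errno values were silently mangled. Splitting the two roles apart, an int return for status and a pointer out-parameter for the flavor, makes the error paths checkable again. A compact sketch of the broken and fixed shapes (types and names are illustrative):

#include <stdio.h>

typedef unsigned int authflavor_t;   /* like rpc_authflavor_t */

/* Broken shape: an unsigned return cannot carry -EPERM, so the
 * caller's "< 0" test is dead code and errors leak through. */
static authflavor_t negotiate_bad(int fail)
{
        if (fail)
                return (authflavor_t)-1;    /* becomes a huge value */
        return 1;                           /* AUTH_UNIX-ish */
}

/* Fixed shape: status and value travel separately. */
static int negotiate_good(int fail, authflavor_t *flavor)
{
        if (fail)
                return -1;                  /* -EPERM-style error */
        *flavor = 1;
        return 0;
}

int main(void)
{
        authflavor_t f;

        if (negotiate_bad(1) < 0)           /* never true: unsigned */
                puts("unreachable");
        if (negotiate_good(1, &f) < 0)
                puts("error reported correctly");
        return 0;
}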
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
index 085a354e0f08..7c44579f5832 100644
--- a/fs/nfs/nfs4filelayout.h
+++ b/fs/nfs/nfs4filelayout.h
@@ -33,7 +33,7 @@
33#include "pnfs.h" 33#include "pnfs.h"
34 34
35/* 35/*
36 * Field testing shows we need to support upto 4096 stripe indices. 36 * Field testing shows we need to support up to 4096 stripe indices.
37 * We store each index as a u8 (u32 on the wire) to keep the memory footprint 37 * We store each index as a u8 (u32 on the wire) to keep the memory footprint
38 * reasonable. This in turn means we support a maximum of 256 38 * reasonable. This in turn means we support a maximum of 256
39 * RFC 5661 multipath_list4 structures. 39 * RFC 5661 multipath_list4 structures.
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index dfd1e6d7e6c3..9bf41eab3e46 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2204,8 +2204,6 @@ static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandl
2204 goto out; 2204 goto out;
2205 } 2205 }
2206 ret = nfs4_lookup_root(server, fhandle, info); 2206 ret = nfs4_lookup_root(server, fhandle, info);
2207 if (ret < 0)
2208 ret = -EAGAIN;
2209out: 2207out:
2210 return ret; 2208 return ret;
2211} 2209}
@@ -2226,7 +2224,7 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
2226 2224
2227 for (i = 0; i < len; i++) { 2225 for (i = 0; i < len; i++) {
2228 status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]); 2226 status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
2229 if (status == 0) 2227 if (status != -EPERM)
2230 break; 2228 break;
2231 } 2229 }
2232 if (status == 0) 2230 if (status == 0)
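The one-line nfs4proc.c change above alters the stopping rule of the root-lookup flavor probe: instead of moving on to the next security flavor after any failure, the loop now stops on anything other than -EPERM, since a flavor that fails for some other reason (server fault, I/O error) will not be rescued by trying a different one. The loop shape, sketched generically:

#include <stdio.h>

#define EPERM 1

/* Pretend probe: flavor 2 succeeds, the others are refused. */
static int try_flavor(int flavor)
{
        return flavor == 2 ? 0 : -EPERM;
}

int main(void)
{
        int flavors[] = { 1, 2, 3 };
        int i, status = -EPERM;

        for (i = 0; i < 3; i++) {
                status = try_flavor(flavors[i]);
                /* Only "permission denied" means "try the next
                 * flavor"; success or any other error is final. */
                if (status != -EPERM)
                        break;
        }
        printf("final status %d\n", status);
        return 0;
}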
diff --git a/fs/nfs_common/nfsacl.c b/fs/nfs_common/nfsacl.c
index ec0f277be7f5..6940439bd609 100644
--- a/fs/nfs_common/nfsacl.c
+++ b/fs/nfs_common/nfsacl.c
@@ -173,7 +173,7 @@ xdr_nfsace_decode(struct xdr_array2_desc *desc, void *elem)
173 return -EINVAL; 173 return -EINVAL;
174 break; 174 break;
175 case ACL_MASK: 175 case ACL_MASK:
176 /* Solaris sometimes sets additonal bits in the mask */ 176 /* Solaris sometimes sets additional bits in the mask */
177 entry->e_perm &= S_IRWXO; 177 entry->e_perm &= S_IRWXO;
178 break; 178 break;
179 default: 179 default:
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
index 0c6d81670137..7c831a2731fa 100644
--- a/fs/nfsd/lockd.c
+++ b/fs/nfsd/lockd.c
@@ -38,7 +38,6 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp)
38 exp_readlock(); 38 exp_readlock();
39 nfserr = nfsd_open(rqstp, &fh, S_IFREG, NFSD_MAY_LOCK, filp); 39 nfserr = nfsd_open(rqstp, &fh, S_IFREG, NFSD_MAY_LOCK, filp);
40 fh_put(&fh); 40 fh_put(&fh);
41 rqstp->rq_client = NULL;
42 exp_readunlock(); 41 exp_readunlock();
43 /* We return nlm error codes as nlm doesn't know 42 /* We return nlm error codes as nlm doesn't know
44 * about nfsd, but nfsd does know about nlm.. 43 * about nfsd, but nfsd does know about nlm..
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 7e84a852cdae..ad48faca20fc 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -702,7 +702,7 @@ nfs3svc_encode_readres(struct svc_rqst *rqstp, __be32 *p,
702 *p++ = htonl(resp->eof); 702 *p++ = htonl(resp->eof);
703 *p++ = htonl(resp->count); /* xdr opaque count */ 703 *p++ = htonl(resp->count); /* xdr opaque count */
704 xdr_ressize_check(rqstp, p); 704 xdr_ressize_check(rqstp, p);
705 /* now update rqstp->rq_res to reflect data aswell */ 705 /* now update rqstp->rq_res to reflect data as well */
706 rqstp->rq_res.page_len = resp->count; 706 rqstp->rq_res.page_len = resp->count;
707 if (resp->count & 3) { 707 if (resp->count & 3) {
708 /* need to pad the tail */ 708 /* need to pad the tail */
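The check above implements XDR's alignment rule (RFC 4506): opaque data is padded with zero bytes to a multiple of four, so a reply whose payload length has either of its low two bits set needs a short zeroed tail. The padding arithmetic, shown standalone:

#include <stdio.h>
#include <string.h>

/* XDR pads opaque data to 4-byte alignment with zero bytes. */
static unsigned int xdr_pad(unsigned int count)
{
        return (4 - (count & 3)) & 3;   /* 0..3 bytes */
}

int main(void)
{
        unsigned char tail[4];
        unsigned int count = 1234;      /* payload bytes */
        unsigned int pad = xdr_pad(count);

        memset(tail, 0, pad);           /* the zeroed tail */
        printf("count=%u pad=%u total=%u\n",
               count, pad, count + pad);
        return 0;
}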
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index fbde6f79922e..aa309aa93fe8 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -397,10 +397,13 @@ static void unhash_generic_stateid(struct nfs4_stateid *stp)
397 397
398static void free_generic_stateid(struct nfs4_stateid *stp) 398static void free_generic_stateid(struct nfs4_stateid *stp)
399{ 399{
400 int oflag = nfs4_access_bmap_to_omode(stp); 400 int oflag;
401 401
402 nfs4_file_put_access(stp->st_file, oflag); 402 if (stp->st_access_bmap) {
403 put_nfs4_file(stp->st_file); 403 oflag = nfs4_access_bmap_to_omode(stp);
404 nfs4_file_put_access(stp->st_file, oflag);
405 put_nfs4_file(stp->st_file);
406 }
404 kmem_cache_free(stateid_slab, stp); 407 kmem_cache_free(stateid_slab, stp);
405} 408}
406 409
@@ -3055,7 +3058,7 @@ check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags)
3055 if (ONE_STATEID(stateid) && (flags & RD_STATE)) 3058 if (ONE_STATEID(stateid) && (flags & RD_STATE))
3056 return nfs_ok; 3059 return nfs_ok;
3057 else if (locks_in_grace()) { 3060 else if (locks_in_grace()) {
3058 /* Answer in remaining cases depends on existance of 3061 /* Answer in remaining cases depends on existence of
3059 * conflicting state; so we must wait out the grace period. */ 3062 * conflicting state; so we must wait out the grace period. */
3060 return nfserr_grace; 3063 return nfserr_grace;
3061 } else if (flags & WR_STATE) 3064 } else if (flags & WR_STATE)
@@ -3675,7 +3678,7 @@ find_lockstateowner_str(struct inode *inode, clientid_t *clid,
3675/* 3678/*
3676 * Alloc a lock owner structure. 3679 * Alloc a lock owner structure.
3677 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 3680 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
3678 * occured. 3681 * occurred.
3679 * 3682 *
3680 * strhashval = lock_ownerstr_hashval 3683 * strhashval = lock_ownerstr_hashval
3681 */ 3684 */
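The free_generic_stateid() hunk above guards the teardown on st_access_bmap: a stateid that never had an open access mode recorded has no file access to put, and unconditionally converting an empty bitmap to an open mode would release access that was never taken. The general shape is a destructor that only undoes the sub-resources that were actually acquired (sketch, illustrative names):

#include <stdlib.h>

struct file_ref { int users; };

struct stateid {
        unsigned int access_bmap;   /* 0 => no access ever granted */
        struct file_ref *file;
};

static void put_access(struct file_ref *f) { f->users--; }

static void free_stateid(struct stateid *stp)
{
        /* Only undo the access grant if one was recorded. */
        if (stp->access_bmap) {
                put_access(stp->file);
                stp->access_bmap = 0;
        }
        free(stp);
}

int main(void)
{
        struct file_ref f = { .users = 1 };
        struct stateid *s = malloc(sizeof(*s));

        if (!s)
                return 1;
        s->access_bmap = 0;          /* never granted access */
        s->file = &f;
        free_stateid(s);             /* must not touch f.users */
        return f.users == 1 ? 0 : 1;
}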
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 4ce005dbf3e6..65ec595e2226 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -451,7 +451,7 @@ nfssvc_encode_readres(struct svc_rqst *rqstp, __be32 *p,
451 *p++ = htonl(resp->count); 451 *p++ = htonl(resp->count);
452 xdr_ressize_check(rqstp, p); 452 xdr_ressize_check(rqstp, p);
453 453
454 /* now update rqstp->rq_res to reflect data aswell */ 454 /* now update rqstp->rq_res to reflect data as well */
455 rqstp->rq_res.page_len = resp->count; 455 rqstp->rq_res.page_len = resp->count;
456 if (resp->count & 3) { 456 if (resp->count & 3) {
457 /* need to pad the tail */ 457 /* need to pad the tail */
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 93589fccdd97..397e73258631 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -72,10 +72,9 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
72 /* 72 /*
73 * check to see if the page is mapped already (no holes) 73 * check to see if the page is mapped already (no holes)
74 */ 74 */
75 if (PageMappedToDisk(page)) { 75 if (PageMappedToDisk(page))
76 unlock_page(page);
77 goto mapped; 76 goto mapped;
78 } 77
79 if (page_has_buffers(page)) { 78 if (page_has_buffers(page)) {
80 struct buffer_head *bh, *head; 79 struct buffer_head *bh, *head;
81 int fully_mapped = 1; 80 int fully_mapped = 1;
@@ -90,7 +89,6 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
90 89
91 if (fully_mapped) { 90 if (fully_mapped) {
92 SetPageMappedToDisk(page); 91 SetPageMappedToDisk(page);
93 unlock_page(page);
94 goto mapped; 92 goto mapped;
95 } 93 }
96 } 94 }
@@ -105,16 +103,17 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
105 return VM_FAULT_SIGBUS; 103 return VM_FAULT_SIGBUS;
106 104
107 ret = block_page_mkwrite(vma, vmf, nilfs_get_block); 105 ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
108 if (unlikely(ret)) { 106 if (ret != VM_FAULT_LOCKED) {
109 nilfs_transaction_abort(inode->i_sb); 107 nilfs_transaction_abort(inode->i_sb);
110 return ret; 108 return ret;
111 } 109 }
110 nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));
112 nilfs_transaction_commit(inode->i_sb); 111 nilfs_transaction_commit(inode->i_sb);
113 112
114 mapped: 113 mapped:
115 SetPageChecked(page); 114 SetPageChecked(page);
116 wait_on_page_writeback(page); 115 wait_on_page_writeback(page);
117 return 0; 116 return VM_FAULT_LOCKED;
118} 117}
119 118
120static const struct vm_operations_struct nilfs_file_vm_ops = { 119static const struct vm_operations_struct nilfs_file_vm_ops = {
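The nilfs2 change above converts nilfs_page_mkwrite() to the VM_FAULT_LOCKED convention: on success it returns with the page still locked and signals that through the return value, which lets the scattered unlock_page() calls go away and makes the block_page_mkwrite() result check explicit. A userspace sketch of a "returns holding the lock" contract, with a mutex standing in for the page lock:

#include <pthread.h>
#include <stdio.h>

#define RET_LOCKED 1   /* like VM_FAULT_LOCKED */
#define RET_ERROR  2   /* like VM_FAULT_SIGBUS */

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

static int make_writable(int fail)
{
        pthread_mutex_lock(&page_lock);
        if (fail) {
                pthread_mutex_unlock(&page_lock);
                return RET_ERROR;       /* error paths unlock */
        }
        /* ... mark the page dirty, etc. ... */
        return RET_LOCKED;              /* success: still locked */
}

int main(void)
{
        if (make_writable(0) == RET_LOCKED) {
                puts("caller unlocks");
                pthread_mutex_unlock(&page_lock);
        }
        return 0;
}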
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 856e8e4e0b74..a8dd344303cb 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -114,19 +114,19 @@ enum {
114 * Macros to check inode numbers 114 * Macros to check inode numbers
115 */ 115 */
116#define NILFS_MDT_INO_BITS \ 116#define NILFS_MDT_INO_BITS \
117 ((unsigned int)(1 << NILFS_DAT_INO | 1 << NILFS_CPFILE_INO | \ 117 ((unsigned int)(1 << NILFS_DAT_INO | 1 << NILFS_CPFILE_INO | \
118 1 << NILFS_SUFILE_INO | 1 << NILFS_IFILE_INO | \ 118 1 << NILFS_SUFILE_INO | 1 << NILFS_IFILE_INO | \
119 1 << NILFS_ATIME_INO | 1 << NILFS_SKETCH_INO)) 119 1 << NILFS_ATIME_INO | 1 << NILFS_SKETCH_INO))
120 120
121#define NILFS_SYS_INO_BITS \ 121#define NILFS_SYS_INO_BITS \
122 ((unsigned int)(1 << NILFS_ROOT_INO) | NILFS_MDT_INO_BITS) 122 ((unsigned int)(1 << NILFS_ROOT_INO) | NILFS_MDT_INO_BITS)
123 123
124#define NILFS_FIRST_INO(sb) (((struct the_nilfs *)sb->s_fs_info)->ns_first_ino) 124#define NILFS_FIRST_INO(sb) (((struct the_nilfs *)sb->s_fs_info)->ns_first_ino)
125 125
126#define NILFS_MDT_INODE(sb, ino) \ 126#define NILFS_MDT_INODE(sb, ino) \
127 ((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & (1 << (ino)))) 127 ((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & (1 << (ino))))
128#define NILFS_VALID_INODE(sb, ino) \ 128#define NILFS_VALID_INODE(sb, ino) \
129 ((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & (1 << (ino)))) 129 ((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & (1 << (ino))))
130 130
131/** 131/**
132 * struct nilfs_transaction_info: context information for synchronization 132 * struct nilfs_transaction_info: context information for synchronization
@@ -285,7 +285,7 @@ extern void nilfs_destroy_inode(struct inode *);
285extern void nilfs_error(struct super_block *, const char *, const char *, ...) 285extern void nilfs_error(struct super_block *, const char *, const char *, ...)
286 __attribute__ ((format (printf, 3, 4))); 286 __attribute__ ((format (printf, 3, 4)));
287extern void nilfs_warning(struct super_block *, const char *, const char *, ...) 287extern void nilfs_warning(struct super_block *, const char *, const char *, ...)
288 __attribute__ ((format (printf, 3, 4))); 288 __attribute__ ((format (printf, 3, 4)));
289extern struct nilfs_super_block * 289extern struct nilfs_super_block *
290nilfs_read_super_block(struct super_block *, u64, int, struct buffer_head **); 290nilfs_read_super_block(struct super_block *, u64, int, struct buffer_head **);
291extern int nilfs_store_magic_and_option(struct super_block *, 291extern int nilfs_store_magic_and_option(struct super_block *,
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 4d2a1ee0eb47..1168059c7efd 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -500,7 +500,7 @@ void nilfs_mapping_init(struct address_space *mapping,
500 mapping_set_gfp_mask(mapping, GFP_NOFS); 500 mapping_set_gfp_mask(mapping, GFP_NOFS);
501 mapping->assoc_mapping = NULL; 501 mapping->assoc_mapping = NULL;
502 mapping->backing_dev_info = bdi; 502 mapping->backing_dev_info = bdi;
503 mapping->a_ops = NULL; 503 mapping->a_ops = &empty_aops;
504} 504}
505 505
506/* 506/*
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 6b1305dc26c0..9fde1c00a296 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -164,7 +164,7 @@ static int process_access_response(struct fsnotify_group *group,
164 fd, response); 164 fd, response);
165 /* 165 /*
166 * make sure the response is valid, if invalid we do nothing and either 166 * make sure the response is valid, if invalid we do nothing and either
167 * userspace can send a valid responce or we will clean it up after the 167 * userspace can send a valid response or we will clean it up after the
168 * timeout 168 * timeout
169 */ 169 */
170 switch (response) { 170 switch (response) {
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index a91b69a6a291..e3cbd746f64a 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -194,10 +194,11 @@ static int idr_callback(int id, void *p, void *data)
194 194
195static void inotify_free_group_priv(struct fsnotify_group *group) 195static void inotify_free_group_priv(struct fsnotify_group *group)
196{ 196{
197 /* ideally the idr is empty and we won't hit the BUG in teh callback */ 197 /* ideally the idr is empty and we won't hit the BUG in the callback */
198 idr_for_each(&group->inotify_data.idr, idr_callback, group); 198 idr_for_each(&group->inotify_data.idr, idr_callback, group);
199 idr_remove_all(&group->inotify_data.idr); 199 idr_remove_all(&group->inotify_data.idr);
200 idr_destroy(&group->inotify_data.idr); 200 idr_destroy(&group->inotify_data.idr);
201 atomic_dec(&group->inotify_data.user->inotify_devs);
201 free_uid(group->inotify_data.user); 202 free_uid(group->inotify_data.user);
202} 203}
203 204
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index bd46e7c8a0ef..8445fbc8985c 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -290,7 +290,6 @@ static int inotify_fasync(int fd, struct file *file, int on)
290static int inotify_release(struct inode *ignored, struct file *file) 290static int inotify_release(struct inode *ignored, struct file *file)
291{ 291{
292 struct fsnotify_group *group = file->private_data; 292 struct fsnotify_group *group = file->private_data;
293 struct user_struct *user = group->inotify_data.user;
294 293
295 pr_debug("%s: group=%p\n", __func__, group); 294 pr_debug("%s: group=%p\n", __func__, group);
296 295
@@ -299,8 +298,6 @@ static int inotify_release(struct inode *ignored, struct file *file)
299 /* free this group, matching get was inotify_init->fsnotify_obtain_group */ 298 /* free this group, matching get was inotify_init->fsnotify_obtain_group */
300 fsnotify_put_group(group); 299 fsnotify_put_group(group);
301 300
302 atomic_dec(&user->inotify_devs);
303
304 return 0; 301 return 0;
305} 302}
306 303
@@ -697,7 +694,7 @@ retry:
697 return ret; 694 return ret;
698} 695}
699 696
700static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events) 697static struct fsnotify_group *inotify_new_group(unsigned int max_events)
701{ 698{
702 struct fsnotify_group *group; 699 struct fsnotify_group *group;
703 700
@@ -710,8 +707,14 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign
710 spin_lock_init(&group->inotify_data.idr_lock); 707 spin_lock_init(&group->inotify_data.idr_lock);
711 idr_init(&group->inotify_data.idr); 708 idr_init(&group->inotify_data.idr);
712 group->inotify_data.last_wd = 0; 709 group->inotify_data.last_wd = 0;
713 group->inotify_data.user = user;
714 group->inotify_data.fa = NULL; 710 group->inotify_data.fa = NULL;
711 group->inotify_data.user = get_current_user();
712
713 if (atomic_inc_return(&group->inotify_data.user->inotify_devs) >
714 inotify_max_user_instances) {
715 fsnotify_put_group(group);
716 return ERR_PTR(-EMFILE);
717 }
715 718
716 return group; 719 return group;
717} 720}
@@ -721,7 +724,6 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign
721SYSCALL_DEFINE1(inotify_init1, int, flags) 724SYSCALL_DEFINE1(inotify_init1, int, flags)
722{ 725{
723 struct fsnotify_group *group; 726 struct fsnotify_group *group;
724 struct user_struct *user;
725 int ret; 727 int ret;
726 728
727 /* Check the IN_* constants for consistency. */ 729 /* Check the IN_* constants for consistency. */
@@ -731,31 +733,16 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
731 if (flags & ~(IN_CLOEXEC | IN_NONBLOCK)) 733 if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
732 return -EINVAL; 734 return -EINVAL;
733 735
734 user = get_current_user();
735 if (unlikely(atomic_read(&user->inotify_devs) >=
736 inotify_max_user_instances)) {
737 ret = -EMFILE;
738 goto out_free_uid;
739 }
740
741 /* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */ 736 /* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
742 group = inotify_new_group(user, inotify_max_queued_events); 737 group = inotify_new_group(inotify_max_queued_events);
743 if (IS_ERR(group)) { 738 if (IS_ERR(group))
744 ret = PTR_ERR(group); 739 return PTR_ERR(group);
745 goto out_free_uid;
746 }
747
748 atomic_inc(&user->inotify_devs);
749 740
750 ret = anon_inode_getfd("inotify", &inotify_fops, group, 741 ret = anon_inode_getfd("inotify", &inotify_fops, group,
751 O_RDONLY | flags); 742 O_RDONLY | flags);
752 if (ret >= 0) 743 if (ret < 0)
753 return ret; 744 fsnotify_put_group(group);
754 745
755 fsnotify_put_group(group);
756 atomic_dec(&user->inotify_devs);
757out_free_uid:
758 free_uid(user);
759 return ret; 746 return ret;
760} 747}
761 748
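The inotify rework above moves the per-user instance accounting into inotify_new_group(): the counter is bumped where the group is created and dropped in inotify_free_group_priv(), so every failure path after creation balances simply by dropping the group reference, instead of each call site replaying the decrement by hand. The constructor/destructor pairing, sketched in userspace (illustrative names):

#include <stdio.h>
#include <stdlib.h>

static int user_instances;          /* like user->inotify_devs */
#define MAX_INSTANCES 128

struct group { int dummy; };

static void group_destroy(struct group *g)
{
        user_instances--;            /* always paired with create */
        free(g);
}

static struct group *group_create(void)
{
        struct group *g = calloc(1, sizeof(*g));

        if (!g)
                return NULL;
        if (++user_instances > MAX_INSTANCES) {
                group_destroy(g);    /* rolls the count back */
                return NULL;         /* -EMFILE-style failure */
        }
        return g;
}

int main(void)
{
        struct group *g = group_create();

        if (!g)
                return 1;
        /* ... any later failure just destroys the group ... */
        group_destroy(g);
        printf("instances now %d\n", user_instances);
        return 0;
}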
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 50c00856f730..252ab1f6452b 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -24,7 +24,7 @@
24 * referencing this object. The object typically will live inside the kernel 24 * referencing this object. The object typically will live inside the kernel
25 * with a refcnt of 2, one for each list it is on (i_list, g_list). Any task 25 * with a refcnt of 2, one for each list it is on (i_list, g_list). Any task
 26 * which can find this object holding the appropriate locks, can take a reference 26 * which can find this object holding the appropriate locks, can take a reference
27 * and the object itself is guarenteed to survive until the reference is dropped. 27 * and the object itself is guaranteed to survive until the reference is dropped.
28 * 28 *
29 * LOCKING: 29 * LOCKING:
30 * There are 3 spinlocks involved with fsnotify inode marks and they MUST 30 * There are 3 spinlocks involved with fsnotify inode marks and they MUST
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index f5094ee224c1..f14fde2b03d6 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -197,7 +197,7 @@ err_out:
197 } else if (ctx_needs_reset) { 197 } else if (ctx_needs_reset) {
198 /* 198 /*
199 * If there is no attribute list, restoring the search context 199 * If there is no attribute list, restoring the search context
200 * is acomplished simply by copying the saved context back over 200 * is accomplished simply by copying the saved context back over
201 * the caller supplied context. If there is an attribute list, 201 * the caller supplied context. If there is an attribute list,
202 * things are more complicated as we need to deal with mapping 202 * things are more complicated as we need to deal with mapping
203 * of mft records and resulting potential changes in pointers. 203 * of mft records and resulting potential changes in pointers.
@@ -1181,7 +1181,7 @@ not_found:
1181 * for, i.e. if one wants to add the attribute to the mft record this is the 1181 * for, i.e. if one wants to add the attribute to the mft record this is the
1182 * correct place to insert its attribute list entry into. 1182 * correct place to insert its attribute list entry into.
1183 * 1183 *
1184 * When -errno != -ENOENT, an error occured during the lookup. @ctx->attr is 1184 * When -errno != -ENOENT, an error occurred during the lookup. @ctx->attr is
1185 * then undefined and in particular you should not rely on it not changing. 1185 * then undefined and in particular you should not rely on it not changing.
1186 */ 1186 */
1187int ntfs_attr_lookup(const ATTR_TYPE type, const ntfschar *name, 1187int ntfs_attr_lookup(const ATTR_TYPE type, const ntfschar *name,
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index ef9ed854255c..ee4144ce5d7c 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -501,7 +501,7 @@ int ntfs_read_compressed_block(struct page *page)
501 VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >> 501 VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >>
502 vol->cluster_size_bits; 502 vol->cluster_size_bits;
503 /* 503 /*
504 * The first vcn after the last wanted vcn (minumum alignment is again 504 * The first vcn after the last wanted vcn (minimum alignment is again
 505 * PAGE_CACHE_SIZE). 505 * PAGE_CACHE_SIZE).
506 */ 506 */
507 VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1) 507 VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1)
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 0b56c6b7ec01..c05d6dcf77a4 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -622,7 +622,7 @@ static int ntfs_read_locked_inode(struct inode *vi)
622 */ 622 */
623 /* Everyone gets all permissions. */ 623 /* Everyone gets all permissions. */
624 vi->i_mode |= S_IRWXUGO; 624 vi->i_mode |= S_IRWXUGO;
625 /* If read-only, noone gets write permissions. */ 625 /* If read-only, no one gets write permissions. */
626 if (IS_RDONLY(vi)) 626 if (IS_RDONLY(vi))
627 vi->i_mode &= ~S_IWUGO; 627 vi->i_mode &= ~S_IWUGO;
628 if (m->flags & MFT_RECORD_IS_DIRECTORY) { 628 if (m->flags & MFT_RECORD_IS_DIRECTORY) {
@@ -2529,7 +2529,7 @@ retry_truncate:
2529 * specifies that the behaviour is unspecified thus we do not 2529 * specifies that the behaviour is unspecified thus we do not
2530 * have to do anything. This means that in our implementation 2530 * have to do anything. This means that in our implementation
2531 * in the rare case that the file is mmap()ped and a write 2531 * in the rare case that the file is mmap()ped and a write
2532 * occured into the mmap()ped region just beyond the file size 2532 * occurred into the mmap()ped region just beyond the file size
2533 * and writepage has not yet been called to write out the page 2533 * and writepage has not yet been called to write out the page
2534 * (which would clear the area beyond the file size) and we now 2534 * (which would clear the area beyond the file size) and we now
2535 * extend the file size to incorporate this dirty region 2535 * extend the file size to incorporate this dirty region
diff --git a/fs/ntfs/layout.h b/fs/ntfs/layout.h
index 8b2549f672bf..faece7190866 100644
--- a/fs/ntfs/layout.h
+++ b/fs/ntfs/layout.h
@@ -286,7 +286,7 @@ typedef le16 MFT_RECORD_FLAGS;
  * fragmented. Volume free space includes the empty part of the mft zone and
  * when the volume's free 88% are used up, the mft zone is shrunk by a factor
  * of 2, thus making more space available for more files/data. This process is
- * repeated everytime there is no more free space except for the mft zone until
+ * repeated every time there is no more free space except for the mft zone until
  * there really is no more free space.
  */
 
@@ -1657,13 +1657,13 @@ typedef enum {
  *	pointed to by the Owner field was provided by a defaulting mechanism
  *	rather than explicitly provided by the original provider of the
  *	security descriptor. This may affect the treatment of the SID with
- *	respect to inheritence of an owner.
+ *	respect to inheritance of an owner.
  *
  * SE_GROUP_DEFAULTED - This boolean flag, when set, indicates that the SID in
  *	the Group field was provided by a defaulting mechanism rather than
  *	explicitly provided by the original provider of the security
  *	descriptor. This may affect the treatment of the SID with respect to
- *	inheritence of a primary group.
+ *	inheritance of a primary group.
  *
  * SE_DACL_PRESENT - This boolean flag, when set, indicates that the security
  *	descriptor contains a discretionary ACL. If this flag is set and the
@@ -1674,7 +1674,7 @@ typedef enum {
  *	pointed to by the Dacl field was provided by a defaulting mechanism
  *	rather than explicitly provided by the original provider of the
  *	security descriptor. This may affect the treatment of the ACL with
- *	respect to inheritence of an ACL. This flag is ignored if the
+ *	respect to inheritance of an ACL. This flag is ignored if the
  *	DaclPresent flag is not set.
  *
  * SE_SACL_PRESENT - This boolean flag, when set, indicates that the security
@@ -1686,7 +1686,7 @@ typedef enum {
  *	pointed to by the Sacl field was provided by a defaulting mechanism
  *	rather than explicitly provided by the original provider of the
  *	security descriptor. This may affect the treatment of the ACL with
- *	respect to inheritence of an ACL. This flag is ignored if the
+ *	respect to inheritance of an ACL. This flag is ignored if the
  *	SaclPresent flag is not set.
  *
  * SE_SELF_RELATIVE - This boolean flag, when set, indicates that the security
@@ -2283,7 +2283,7 @@ typedef struct {
 			// the key_length is zero, then the vcn immediately
 			// follows the INDEX_ENTRY_HEADER. Regardless of
 			// key_length, the address of the 8-byte boundary
-			// alligned vcn of INDEX_ENTRY{_HEADER} *ie is given by
+			// aligned vcn of INDEX_ENTRY{_HEADER} *ie is given by
 			// (char*)ie + le16_to_cpu(ie*)->length) - sizeof(VCN),
 			// where sizeof(VCN) can be hardcoded as 8 if wanted. */
 } __attribute__ ((__packed__)) INDEX_ENTRY;
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index 4dadcdf3d451..c71de292c5ad 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -669,7 +669,7 @@ err_out:
  * of cases where we think that a volume is dirty when in fact it is clean.
  * This should only affect volumes that have not been shutdown cleanly but did
  * not have any pending, non-check-pointed i/o, i.e. they were completely idle
- * at least for the five seconds preceeding the unclean shutdown.
+ * at least for the five seconds preceding the unclean shutdown.
  *
  * This function assumes that the $LogFile journal has already been consistency
  * checked by a call to ntfs_check_logfile() and in particular if the $LogFile
diff --git a/fs/ntfs/logfile.h b/fs/ntfs/logfile.h
index b5a6f08bd35c..aa2b6ac3f0a4 100644
--- a/fs/ntfs/logfile.h
+++ b/fs/ntfs/logfile.h
@@ -222,7 +222,7 @@ typedef struct {
 /* 24*/	sle64 file_size;	/* Usable byte size of the log file. If the
 				   restart_area_offset + the offset of the
 				   file_size are > 510 then corruption has
-				   occured. This is the very first check when
+				   occurred. This is the very first check when
 				   starting with the restart_area as if it
 				   fails it means that some of the above values
 				   will be corrupted by the multi sector
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 326e7475a22a..382857f9c7db 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -73,7 +73,7 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
 	if (index > end_index || (i_size & ~PAGE_CACHE_MASK) < ofs +
 			vol->mft_record_size) {
 		page = ERR_PTR(-ENOENT);
-		ntfs_error(vol->sb, "Attemt to read mft record 0x%lx, "
+		ntfs_error(vol->sb, "Attempt to read mft record 0x%lx, "
 				"which is beyond the end of the mft. "
 				"This is probably a bug in the ntfs "
 				"driver.", ni->mft_no);
@@ -1442,7 +1442,7 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
 		// Note: It will need to be a special mft record and if none of
 		// those are available it gets rather complicated...
 		ntfs_error(vol->sb, "Not enough space in this mft record to "
-				"accomodate extended mft bitmap attribute "
+				"accommodate extended mft bitmap attribute "
 				"extent. Cannot handle this yet.");
 		ret = -EOPNOTSUPP;
 		goto undo_alloc;
@@ -1879,7 +1879,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
 		// and we would then need to update all references to this mft
 		// record appropriately. This is rather complicated...
 		ntfs_error(vol->sb, "Not enough space in this mft record to "
-				"accomodate extended mft data attribute "
+				"accommodate extended mft data attribute "
 				"extent. Cannot handle this yet.");
 		ret = -EOPNOTSUPP;
 		goto undo_alloc;
@@ -2357,7 +2357,7 @@ ntfs_inode *ntfs_mft_record_alloc(ntfs_volume *vol, const int mode,
 	}
 #ifdef DEBUG
 	read_lock_irqsave(&mftbmp_ni->size_lock, flags);
-	ntfs_debug("Status of mftbmp after initialized extention: "
+	ntfs_debug("Status of mftbmp after initialized extension: "
 			"allocated_size 0x%llx, data_size 0x%llx, "
 			"initialized_size 0x%llx.",
 			(long long)mftbmp_ni->allocated_size,
diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c
index 56a9a6d25a2a..eac7d6788a10 100644
--- a/fs/ntfs/runlist.c
+++ b/fs/ntfs/runlist.c
@@ -1243,7 +1243,7 @@ err_out:
  * write.
  *
  * This is used when building the mapping pairs array of a runlist to compress
- * a given logical cluster number (lcn) or a specific run length to the minumum
+ * a given logical cluster number (lcn) or a specific run length to the minimum
  * size possible.
  *
  * Return the number of bytes written on success. On error, i.e. the
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 29099a07b9fe..b52706da4645 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -458,7 +458,7 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
 	 * the volume on boot and updates them.
 	 *
 	 * When remounting read-only, mark the volume clean if no volume errors
-	 * have occured.
+	 * have occurred.
 	 */
 	if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
 		static const char *es = ".  Cannot remount read-write.";
@@ -1269,7 +1269,7 @@ static int check_windows_hibernation_status(ntfs_volume *vol)
1269 "hibernated on the volume."); 1269 "hibernated on the volume.");
1270 return 0; 1270 return 0;
1271 } 1271 }
1272 /* A real error occured. */ 1272 /* A real error occurred. */
1273 ntfs_error(vol->sb, "Failed to find inode number for " 1273 ntfs_error(vol->sb, "Failed to find inode number for "
1274 "hiberfil.sys."); 1274 "hiberfil.sys.");
1275 return ret; 1275 return ret;
@@ -1370,7 +1370,7 @@ static bool load_and_init_quota(ntfs_volume *vol)
 			NVolSetQuotaOutOfDate(vol);
 			return true;
 		}
-		/* A real error occured. */
+		/* A real error occurred. */
 		ntfs_error(vol->sb, "Failed to find inode number for $Quota.");
 		return false;
 	}
@@ -1454,7 +1454,7 @@ not_enabled:
 			NVolSetUsnJrnlStamped(vol);
 			return true;
 		}
-		/* A real error occured. */
+		/* A real error occurred. */
 		ntfs_error(vol->sb, "Failed to find inode number for "
 				"$UsnJrnl.");
 		return false;
@@ -2292,7 +2292,7 @@ static void ntfs_put_super(struct super_block *sb)
 	ntfs_commit_inode(vol->mft_ino);
 
 	/*
-	 * If a read-write mount and no volume errors have occured, mark the
+	 * If a read-write mount and no volume errors have occurred, mark the
 	 * volume clean. Also, re-commit all affected inodes.
 	 */
 	if (!(sb->s_flags & MS_RDONLY)) {
@@ -2496,7 +2496,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 	if (vol->nr_clusters & 63)
 		nr_free += 64 - (vol->nr_clusters & 63);
 	up_read(&vol->lcnbmp_lock);
-	/* If errors occured we may well have gone below zero, fix this. */
+	/* If errors occurred we may well have gone below zero, fix this. */
 	if (nr_free < 0)
 		nr_free = 0;
 	ntfs_debug("Exiting.");
@@ -2561,7 +2561,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 	}
 	ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
 			index - 1);
-	/* If errors occured we may well have gone below zero, fix this. */
+	/* If errors occurred we may well have gone below zero, fix this. */
 	if (nr_free < 0)
 		nr_free = 0;
 	ntfs_debug("Exiting.");
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index b27a0d86f8c5..48aa9c7401c7 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -4519,7 +4519,7 @@ set_tail_append:
 }
 
 /*
- * Helper function called at the begining of an insert.
+ * Helper function called at the beginning of an insert.
  *
  * This computes a few things that are commonly used in the process of
  * inserting into the btree:
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index eceb456037c1..75cf3ad987a6 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -71,7 +71,7 @@ static inline void ocfs2_iocb_set_rw_locked(struct kiocb *iocb, int level)
 
 /*
  * Using a named enum representing lock types in terms of #N bit stored in
- * iocb->private, which is going to be used for communication bewteen
+ * iocb->private, which is going to be used for communication between
  * ocfs2_dio_end_io() and ocfs2_file_aio_write/read().
  */
 enum ocfs2_iocb_lock_bits {
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 2461eb3272ed..643720209a98 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -2275,7 +2275,7 @@ void o2hb_free_hb_set(struct config_group *group)
 	kfree(hs);
 }
 
-/* hb callback registration and issueing */
+/* hb callback registration and issuing */
 
 static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type)
 {
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index a87366750f23..8f9cea1597af 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -89,7 +89,7 @@ static void o2quo_fence_self(void)
 	};
 }
 
-/* Indicate that a timeout occured on a hearbeat region write. The
+/* Indicate that a timeout occurred on a hearbeat region write. The
  * other nodes in the cluster may consider us dead at that time so we
  * want to "fence" ourselves so that we don't scribble on the disk
  * after they think they've recovered us. This can't solve all
@@ -261,7 +261,7 @@ void o2quo_hb_still_up(u8 node)
 	spin_unlock(&qs->qs_lock);
 }
 
-/* This is analagous to hb_up. as a node's connection comes up we delay the
+/* This is analogous to hb_up. as a node's connection comes up we delay the
  * quorum decision until we see it heartbeating. the hold will be droped in
  * hb_up or hb_down. it might be perpetuated by con_err until hb_down. if
  * it's already heartbeating we we might be dropping a hold that conn_up got.
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index ee04ff5ee603..db5ee4b4f47a 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -565,7 +565,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
 	 * the work queue actually being up. */
 	if (!valid && o2net_wq) {
 		unsigned long delay;
-		/* delay if we're withing a RECONNECT_DELAY of the
+		/* delay if we're within a RECONNECT_DELAY of the
 		 * last attempt */
 		delay = (nn->nn_last_connect_attempt +
 			 msecs_to_jiffies(o2net_reconnect_delay()))
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 9d67610dfc74..fede57ed005f 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -808,7 +808,7 @@ lookup:
 			dlm_mle_detach_hb_events(dlm, mle);
 			dlm_put_mle(mle);
 			mle = NULL;
-			/* this is lame, but we cant wait on either
+			/* this is lame, but we can't wait on either
 			 * the mle or lockres waitqueue here */
 			if (mig)
 				msleep(100);
@@ -843,7 +843,7 @@ lookup:
 
 	/* finally add the lockres to its hash bucket */
 	__dlm_insert_lockres(dlm, res);
-	/* since this lockres is new it doesnt not require the spinlock */
+	/* since this lockres is new it doesn't not require the spinlock */
 	dlm_lockres_grab_inflight_ref_new(dlm, res);
 
 	/* if this node does not become the master make sure to drop
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 177d3a6c2a5f..b4c8bb6b8d28 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -835,7 +835,7 @@ static int ocfs2_inode_is_valid_to_delete(struct inode *inode)
 
 	/* If we have allowd wipe of this inode for another node, it
 	 * will be marked here so we can safely skip it. Recovery will
-	 * cleanup any inodes we might inadvertantly skip here. */
+	 * cleanup any inodes we might inadvertently skip here. */
 	if (oi->ip_flags & OCFS2_INODE_SKIP_DELETE)
 		goto bail_unlock;
 
@@ -917,7 +917,7 @@ static int ocfs2_query_inode_wipe(struct inode *inode,
 	 * the inode open lock in ocfs2_read_locked_inode(). When we
 	 * get to ->delete_inode(), each node tries to convert it's
 	 * lock to an exclusive. Trylocks are serialized by the inode
-	 * meta data lock. If the upconvert suceeds, we know the inode
+	 * meta data lock. If the upconvert succeeds, we know the inode
 	 * is no longer live and can be deleted.
 	 *
 	 * Though we call this with the meta data lock held, the
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index dcc2d9327150..b141a44605ca 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1368,7 +1368,7 @@ skip_recovery:
 		mlog_errno(status);
 
 	/* Now it is right time to recover quotas... We have to do this under
-	 * superblock lock so that noone can start using the slot (and crash)
+	 * superblock lock so that no one can start using the slot (and crash)
 	 * before we recover it */
 	for (i = 0; i < rm_quota_used; i++) {
 		qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 6180da1e37e6..68cf2f6d3c6a 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -215,7 +215,7 @@ static inline void ocfs2_checkpoint_inode(struct inode *inode)
 		/* WARNING: This only kicks off a single
 		 * checkpoint. If someone races you and adds more
 		 * metadata to the journal, you won't know, and will
-		 * wind up waiting *alot* longer than necessary. Right
+		 * wind up waiting *a lot* longer than necessary. Right
 		 * now we only use this in clear_inode so that's
 		 * OK. */
 		ocfs2_start_checkpoint(osb);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 28f2cc1080d8..e5d738cd9cc0 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -2128,7 +2128,7 @@ leave:
 }
 
 /**
- * ocfs2_prep_new_orphaned_file() - Prepare the orphan dir to recieve a newly
+ * ocfs2_prep_new_orphaned_file() - Prepare the orphan dir to receive a newly
  * allocated file. This is different from the typical 'add to orphan dir'
  * operation in that the inode does not yet exist. This is a problem because
  * the orphan dir stringifies the inode block number to come up with it's
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index bf2e7764920e..b68f87a83924 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -441,7 +441,7 @@ static unsigned char ocfs2_type_by_mode[S_IFMT >> S_SHIFT] = {
 struct ocfs2_block_check {
 /*00*/	__le32 bc_crc32e;	/* 802.3 Ethernet II CRC32 */
 	__le16 bc_ecc;		/* Single-error-correction parity vector.
-				   This is a simple Hamming code dependant
+				   This is a simple Hamming code dependent
 				   on the blocksize. OCFS2's maximum
 				   blocksize, 4K, requires 16 parity bits,
 				   so we fit in __le16. */
@@ -750,7 +750,7 @@ struct ocfs2_dinode {
 							   after an unclean
 							   shutdown */
 		} journal1;
-	} id1;				/* Inode type dependant 1 */
+	} id1;				/* Inode type dependent 1 */
 /*C0*/	union {
 		struct ocfs2_super_block	i_super;
 		struct ocfs2_local_alloc	i_lab;
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 279aef68025b..92fcd575775a 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -556,7 +556,7 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
 	spin_unlock(&dq_data_lock);
 	err = ocfs2_qinfo_lock(info, freeing);
 	if (err < 0) {
-		mlog(ML_ERROR, "Failed to lock quota info, loosing quota write"
+		mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
 		     " (type=%d, id=%u)\n", dquot->dq_type,
 		     (unsigned)dquot->dq_id);
 		goto out;
diff --git a/fs/ocfs2/reservations.h b/fs/ocfs2/reservations.h
index 1e49cc29d06c..42c2b804f3fd 100644
--- a/fs/ocfs2/reservations.h
+++ b/fs/ocfs2/reservations.h
@@ -29,7 +29,7 @@
 struct ocfs2_alloc_reservation {
 	struct rb_node	r_node;
 
-	unsigned int	r_start;	/* Begining of current window */
+	unsigned int	r_start;	/* Beginning of current window */
 	unsigned int	r_len;		/* Length of the window */
 
 	unsigned int	r_last_len;	/* Length of most recent alloc */
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
index 8ce7398ae1d2..1ec56fdb8d0d 100644
--- a/fs/ocfs2/stackglue.h
+++ b/fs/ocfs2/stackglue.h
@@ -126,7 +126,7 @@ struct ocfs2_stack_operations {
 	 *
 	 * ->connect() must not return until it is guaranteed that
 	 *
-	 *  - Node down notifications for the filesystem will be recieved
+	 *  - Node down notifications for the filesystem will be received
 	 *    and passed to conn->cc_recovery_handler().
 	 *  - Locking requests for the filesystem will be processed.
 	 */
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index ab6e2061074f..ba5d97e4a73e 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -1511,7 +1511,7 @@ static int ocfs2_cluster_group_search(struct inode *inode,
 		max_bits = le16_to_cpu(gd->bg_bits);
 
 		/* Tail groups in cluster bitmaps which aren't cpg
-		 * aligned are prone to partial extention by a failed
+		 * aligned are prone to partial extension by a failed
 		 * fs resize. If the file system resize never got to
 		 * update the dinode cluster count, then we don't want
 		 * to trust any clusters past it, regardless of what
@@ -2459,7 +2459,7 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle,
 	/* The alloc_bh comes from ocfs2_free_dinode() or
 	 * ocfs2_free_clusters(). The callers have all locked the
 	 * allocator and gotten alloc_bh from the lock call. This
-	 * validates the dinode buffer. Any corruption that has happended
+	 * validates the dinode buffer. Any corruption that has happened
 	 * is a code bug. */
 	BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
 	BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl));
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 69fa11b35aa4..5a521c748859 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -78,7 +78,7 @@ static struct kmem_cache *ocfs2_inode_cachep = NULL;
 struct kmem_cache *ocfs2_dquot_cachep;
 struct kmem_cache *ocfs2_qf_chunk_cachep;
 
-/* OCFS2 needs to schedule several differnt types of work which
+/* OCFS2 needs to schedule several different types of work which
  * require cluster locking, disk I/O, recovery waits, etc. Since these
  * types of work tend to be heavy we avoid using the kernel events
  * workqueue and schedule on our own. */
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 57a215dc2d9b..81ecf9c0bf0a 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -3554,7 +3554,7 @@ int ocfs2_xattr_set(struct inode *inode,
 	down_write(&OCFS2_I(inode)->ip_xattr_sem);
 	/*
 	 * Scan inode and external block to find the same name
-	 * extended attribute and collect search infomation.
+	 * extended attribute and collect search information.
 	 */
 	ret = ocfs2_xattr_ibody_find(inode, name_index, name, &xis);
 	if (ret)
@@ -3578,7 +3578,7 @@ int ocfs2_xattr_set(struct inode *inode,
 		goto cleanup;
 	}
 
-	/* Check whether the value is refcounted and do some prepartion. */
+	/* Check whether the value is refcounted and do some preparation. */
 	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL &&
 	    (!xis.not_found || !xbs.not_found)) {
 		ret = ocfs2_prepare_refcount_xattr(inode, di, &xi,
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index ac546975031f..d545e97d99c3 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -500,7 +500,7 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
 	/* everything is up and running, commence */
 	rcu_assign_pointer(ptbl->part[partno], p);
 
-	/* suppress uevent if the disk supresses it */
+	/* suppress uevent if the disk suppresses it */
 	if (!dev_get_uevent_suppress(ddev))
 		kobject_uevent(&pdev->kobj, KOBJ_ADD);
 
@@ -585,7 +585,7 @@ rescan:
 	/*
 	 * If any partition code tried to read beyond EOD, try
 	 * unlocking native capacity even if partition table is
-	 * sucessfully read as we could be missing some partitions.
+	 * successfully read as we could be missing some partitions.
 	 */
 	if (state->access_beyond_eod) {
 		printk(KERN_WARNING
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 5a670c11aeac..dd6628d3ba42 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -220,7 +220,7 @@ static struct mm_struct *__check_mem_permission(struct task_struct *task)
 	}
 
 	/*
-	 * Noone else is allowed.
+	 * No one else is allowed.
 	 */
 	mmput(mm);
 	return ERR_PTR(-EPERM);
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index 867d0ac026ce..8007ae7c0d8c 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -1,5 +1,5 @@
 config PSTORE
-	bool "Persistant store support"
+	bool "Persistent store support"
 	default n
 	help
 	   This option enables generic access to platform level
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index fcc8ae75d874..d3c032f5fa0a 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -442,7 +442,7 @@ EXPORT_SYMBOL(dquot_acquire);
  */
 int dquot_commit(struct dquot *dquot)
 {
-	int ret = 0, ret2 = 0;
+	int ret = 0;
 	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
 	mutex_lock(&dqopt->dqio_mutex);
@@ -454,15 +454,10 @@ int dquot_commit(struct dquot *dquot)
 	spin_unlock(&dq_list_lock);
 	/* Inactive dquot can be only if there was error during read/init
 	 * => we have better not writing it */
-	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
 		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
-		if (info_dirty(&dqopt->info[dquot->dq_type])) {
-			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
-				dquot->dq_sb, dquot->dq_type);
-		}
-		if (ret >= 0)
-			ret = ret2;
-	}
+	else
+		ret = -EIO;
 out_sem:
 	mutex_unlock(&dqopt->dqio_mutex);
 	return ret;
@@ -956,7 +951,7 @@ static inline int dqput_blocks(struct dquot *dquot)
 
 /*
  * Remove references to dquots from inode and add dquot to list for freeing
- * if we have the last referece to dquot
+ * if we have the last reference to dquot
  * We can't race with anybody because we hold dqptr_sem for writing...
  */
 static int remove_inode_dquot_ref(struct inode *inode, int type,
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index c77514bd5776..c5e82ece7c6c 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1,7 +1,7 @@
 /*
 ** Write ahead logging implementation copyright Chris Mason 2000
 **
-** The background commits make this code very interelated, and
+** The background commits make this code very interrelated, and
 ** overly complex. I need to rethink things a bit....The major players:
 **
 ** journal_begin -- call with the number of blocks you expect to log.
@@ -2725,7 +2725,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
 						 REISERFS_DISK_OFFSET_IN_BYTES /
 						 sb->s_blocksize + 2);
 
-	/* Sanity check to see is the standard journal fitting withing first bitmap
+	/* Sanity check to see is the standard journal fitting within first bitmap
 	   (actual for small blocksizes) */
 	if (!SB_ONDISK_JOURNAL_DEVICE(sb) &&
 	    (SB_JOURNAL_1st_RESERVED_BLOCK(sb) +
diff --git a/fs/reiserfs/lock.c b/fs/reiserfs/lock.c
index b87aa2c1afc1..7df1ce48203a 100644
--- a/fs/reiserfs/lock.c
+++ b/fs/reiserfs/lock.c
@@ -15,7 +15,7 @@
  * for this mutex, no need for a system wide mutex facility.
  *
  * Also this lock is often released before a call that could block because
- * reiserfs performances were partialy based on the release while schedule()
+ * reiserfs performances were partially based on the release while schedule()
  * property of the Bkl.
  */
 void reiserfs_write_lock(struct super_block *s)
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 0aab04f46827..b216ff6be1c9 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -393,7 +393,7 @@ void add_save_link(struct reiserfs_transaction_handle *th,
 	/* body of "save" link */
 	link = INODE_PKEY(inode)->k_dir_id;
 
-	/* put "save" link inot tree, don't charge quota to anyone */
+	/* put "save" link into tree, don't charge quota to anyone */
 	retval =
 	    reiserfs_insert_item(th, &path, &key, &ih, NULL, (char *)&link);
 	if (retval) {
@@ -2104,7 +2104,7 @@ out:
 
 /* Read data from quotafile - avoid pagecache and such because we cannot afford
  * acquiring the locks... As quota files are never truncated and quota code
- * itself serializes the operations (and noone else should touch the files)
+ * itself serializes the operations (and no one else should touch the files)
  * we don't have to be afraid of races */
 static ssize_t reiserfs_quota_read(struct super_block *sb, int type, char *data,
 				   size_t len, loff_t off)
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 5c11ca82b782..47d2a4498b03 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -396,7 +396,7 @@ static struct page *reiserfs_get_page(struct inode *dir, size_t n)
 	struct address_space *mapping = dir->i_mapping;
 	struct page *page;
 	/* We can deadlock if we try to free dentries,
-	   and an unlink/rmdir has just occured - GFP_NOFS avoids this */
+	   and an unlink/rmdir has just occurred - GFP_NOFS avoids this */
 	mapping_set_gfp_mask(mapping, GFP_NOFS);
 	page = read_mapping_page(mapping, n >> PAGE_CACHE_SHIFT, NULL);
 	if (!IS_ERR(page)) {
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 26b15ae34d6f..c37b520132ff 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -104,7 +104,7 @@ struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
 			entry = &cache->entry[i];
 
 			/*
-			 * Initialise choosen cache entry, and fill it in from
+			 * Initialise chosen cache entry, and fill it in from
 			 * disk.
 			 */
 			cache->unused--;
@@ -286,7 +286,7 @@ cleanup:
 
 
 /*
- * Copy upto length bytes from cache entry to buffer starting at offset bytes
+ * Copy up to length bytes from cache entry to buffer starting at offset bytes
  * into the cache entry. If there's not length bytes then copy the number of
  * bytes available. In all cases return the number of bytes copied.
  */
diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig
index d7440904be17..f8b0160da2da 100644
--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig
@@ -47,7 +47,7 @@ config UBIFS_FS_DEBUG
47 bool "Enable debugging support" 47 bool "Enable debugging support"
48 depends on UBIFS_FS 48 depends on UBIFS_FS
49 select DEBUG_FS 49 select DEBUG_FS
50 select KALLSYMS_ALL 50 select KALLSYMS
51 help 51 help
52 This option enables UBIFS debugging support. It makes sure various 52 This option enables UBIFS debugging support. It makes sure various
53 assertions, self-checks, debugging messages and test modes are compiled 53 assertions, self-checks, debugging messages and test modes are compiled
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index c8ff0d1ae5d3..8b3a7da531eb 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -147,7 +147,7 @@ static int make_free_space(struct ubifs_info *c)
 	if (liab2 < liab1)
 		return -EAGAIN;
 
-	dbg_budg("new liability %lld (not shrinked)", liab2);
+	dbg_budg("new liability %lld (not shrunk)", liab2);
 
 	/* Liability did not shrink again, try GC */
 	dbg_budg("Run GC");
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
index b148fbc80f8d..1bd01ded7123 100644
--- a/fs/ubifs/commit.c
+++ b/fs/ubifs/commit.c
@@ -577,7 +577,7 @@ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot)
 	size_t sz;
 
 	if (!(ubifs_chk_flags & UBIFS_CHK_OLD_IDX))
-		goto out;
+		return 0;
 
 	INIT_LIST_HEAD(&list);
 
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index f25a7339f800..004d3745dc45 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -972,11 +972,39 @@ void dbg_dump_index(struct ubifs_info *c)
 void dbg_save_space_info(struct ubifs_info *c)
 {
 	struct ubifs_debug_info *d = c->dbg;
-
-	ubifs_get_lp_stats(c, &d->saved_lst);
+	int freeable_cnt;
 
 	spin_lock(&c->space_lock);
+	memcpy(&d->saved_lst, &c->lst, sizeof(struct ubifs_lp_stats));
+
+	/*
+	 * We use a dirty hack here and zero out @c->freeable_cnt, because it
+	 * affects the free space calculations, and UBIFS might not know about
+	 * all freeable eraseblocks. Indeed, we know about freeable eraseblocks
+	 * only when we read their lprops, and we do this only lazily, upon the
+	 * need. So at any given point of time @c->freeable_cnt might be not
+	 * exactly accurate.
+	 *
+	 * Just one example about the issue we hit when we did not zero
+	 * @c->freeable_cnt.
+	 * 1. The file-system is mounted R/O, c->freeable_cnt is %0. We save the
+	 *    amount of free space in @d->saved_free
+	 * 2. We re-mount R/W, which makes UBIFS to read the "lsave"
+	 *    information from flash, where we cache LEBs from various
+	 *    categories ('ubifs_remount_fs()' -> 'ubifs_lpt_init()'
+	 *    -> 'lpt_init_wr()' -> 'read_lsave()' -> 'ubifs_lpt_lookup()'
+	 *    -> 'ubifs_get_pnode()' -> 'update_cats()'
+	 *    -> 'ubifs_add_to_cat()').
+	 * 3. Lsave contains a freeable eraseblock, and @c->freeable_cnt
+	 *    becomes %1.
+	 * 4. We calculate the amount of free space when the re-mount is
+	 *    finished in 'dbg_check_space_info()' and it does not match
+	 *    @d->saved_free.
+	 */
+	freeable_cnt = c->freeable_cnt;
+	c->freeable_cnt = 0;
 	d->saved_free = ubifs_get_free_space_nolock(c);
+	c->freeable_cnt = freeable_cnt;
 	spin_unlock(&c->space_lock);
 }
 
@@ -993,12 +1021,15 @@ int dbg_check_space_info(struct ubifs_info *c)
 {
 	struct ubifs_debug_info *d = c->dbg;
 	struct ubifs_lp_stats lst;
-	long long avail, free;
+	long long free;
+	int freeable_cnt;
 
 	spin_lock(&c->space_lock);
-	avail = ubifs_calc_available(c, c->min_idx_lebs);
+	freeable_cnt = c->freeable_cnt;
+	c->freeable_cnt = 0;
+	free = ubifs_get_free_space_nolock(c);
+	c->freeable_cnt = freeable_cnt;
 	spin_unlock(&c->space_lock);
-	free = ubifs_get_free_space(c);
 
 	if (free != d->saved_free) {
 		ubifs_err("free space changed from %lld to %lld",
@@ -2806,40 +2837,38 @@ int dbg_debugfs_init_fs(struct ubifs_info *c)
 	struct ubifs_debug_info *d = c->dbg;
 
 	sprintf(d->dfs_dir_name, "ubi%d_%d", c->vi.ubi_num, c->vi.vol_id);
-	d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir);
-	if (IS_ERR(d->dfs_dir)) {
-		err = PTR_ERR(d->dfs_dir);
-		ubifs_err("cannot create \"%s\" debugfs directory, error %d\n",
-			  d->dfs_dir_name, err);
+	fname = d->dfs_dir_name;
+	dent = debugfs_create_dir(fname, dfs_rootdir);
+	if (IS_ERR_OR_NULL(dent))
 		goto out;
-	}
+	d->dfs_dir = dent;
 
 	fname = "dump_lprops";
 	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
-	if (IS_ERR(dent))
+	if (IS_ERR_OR_NULL(dent))
 		goto out_remove;
 	d->dfs_dump_lprops = dent;
 
 	fname = "dump_budg";
 	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
-	if (IS_ERR(dent))
+	if (IS_ERR_OR_NULL(dent))
 		goto out_remove;
 	d->dfs_dump_budg = dent;
 
 	fname = "dump_tnc";
 	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
-	if (IS_ERR(dent))
+	if (IS_ERR_OR_NULL(dent))
 		goto out_remove;
 	d->dfs_dump_tnc = dent;
 
 	return 0;
 
 out_remove:
-	err = PTR_ERR(dent);
-	ubifs_err("cannot create \"%s\" debugfs directory, error %d\n",
-		  fname, err);
 	debugfs_remove_recursive(d->dfs_dir);
 out:
+	err = dent ? PTR_ERR(dent) : -ENODEV;
+	ubifs_err("cannot create \"%s\" debugfs directory, error %d\n",
+		  fname, err);
 	return err;
 }
 
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c
index 72775d35b99e..ef5155e109a2 100644
--- a/fs/ubifs/lpt.c
+++ b/fs/ubifs/lpt.c
@@ -1270,10 +1270,9 @@ static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip)
 	lnum = branch->lnum;
 	offs = branch->offs;
 	pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_NOFS);
-	if (!pnode) {
-		err = -ENOMEM;
-		goto out;
-	}
+	if (!pnode)
+		return -ENOMEM;
+
 	if (lnum == 0) {
 		/*
 		 * This pnode was not written which just means that the LEB
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 6ddd9973e681..c75f6133206c 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1568,6 +1568,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
 	mutex_lock(&c->umount_mutex);
 	dbg_save_space_info(c);
 	c->remounting_rw = 1;
+	c->ro_mount = 0;
 
 	err = check_free_space(c);
 	if (err)
@@ -1676,13 +1677,13 @@ static int ubifs_remount_rw(struct ubifs_info *c)
 	}
 
 	dbg_gen("re-mounted read-write");
-	c->ro_mount = 0;
 	c->remounting_rw = 0;
 	err = dbg_check_space_info(c);
 	mutex_unlock(&c->umount_mutex);
 	return err;
 
 out:
+	c->ro_mount = 1;
 	vfree(c->orph_buf);
 	c->orph_buf = NULL;
 	if (c->bgt) {
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index c74400f88fe0..3299f469e712 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -56,6 +56,7 @@
  */
 
 #include "ubifs.h"
+#include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/xattr.h>
 #include <linux/posix_acl_xattr.h>
@@ -80,7 +81,6 @@ enum {
 };
 
 static const struct inode_operations none_inode_operations;
-static const struct address_space_operations none_address_operations;
 static const struct file_operations none_file_operations;
 
 /**
@@ -130,7 +130,7 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
 	}
 
 	/* Re-define all operations to be "nothing" */
-	inode->i_mapping->a_ops = &none_address_operations;
+	inode->i_mapping->a_ops = &empty_aops;
 	inode->i_op = &none_inode_operations;
 	inode->i_fop = &none_file_operations;
 
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 27a4babe7df0..e765743cf9f3 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -78,7 +78,7 @@ static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t off
 
 /*
  * Returns the location of the fragment from
- * the begining of the filesystem.
+ * the beginning of the filesystem.
  */
 
 static u64 ufs_frag_map(struct inode *inode, sector_t frag, bool needs_lock)
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 7693d6293404..3915ade6f9a8 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -483,9 +483,9 @@ static int ufs_parse_options (char * options, unsigned * mount_options)
 }
 
 /*
- * Diffrent types of UFS hold fs_cstotal in different
- * places, and use diffrent data structure for it.
- * To make things simplier we just copy fs_cstotal to ufs_sb_private_info
+ * Different types of UFS hold fs_cstotal in different
+ * places, and use different data structure for it.
+ * To make things simpler we just copy fs_cstotal to ufs_sb_private_info
  */
 static void ufs_setup_cstotal(struct super_block *sb)
 {
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index 11014302c9ca..5f821dbc0579 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -479,7 +479,6 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size)
 			break;
 		if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
 			ufs_sync_inode (inode);
-		blk_flush_plug(current);
 		yield();
 	}
 
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 52dbd14260ba..79ce38be15a1 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1295,7 +1295,7 @@ xfs_get_blocks_direct(
  * If the private argument is non-NULL __xfs_get_blocks signals us that we
  * need to issue a transaction to convert the range from unwritten to written
  * extents. In case this is regular synchronous I/O we just call xfs_end_io
- * to do this and we are done. But in case this was a successfull AIO
+ * to do this and we are done. But in case this was a successful AIO
  * request this handler is called from interrupt context, from which we
  * can't start transactions. In that case offload the I/O completion to
  * the workqueues we also use for buffered I/O completion.
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 596bb2c9de42..9ef9ed2cfe2e 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -120,7 +120,7 @@ xfs_buf_lru_add(
  * The unlocked check is safe here because it only occurs when there are not
  * b_lru_ref counts left on the inode under the pag->pag_buf_lock. it is there
  * to optimise the shrinker removing the buffer from the LRU and calling
- * xfs_buf_free(). i.e. it removes an unneccessary round trip on the
+ * xfs_buf_free(). i.e. it removes an unnecessary round trip on the
  * bt_lru_lock.
  */
 STATIC void
@@ -293,7 +293,6 @@ xfs_buf_allocate_memory(
 	size_t			nbytes, offset;
 	gfp_t			gfp_mask = xb_to_gfp(flags);
 	unsigned short		page_count, i;
-	pgoff_t			first;
 	xfs_off_t		end;
 	int			error;
 
@@ -333,7 +332,6 @@ use_alloc_page:
 		return error;
 
 	offset = bp->b_offset;
-	first = bp->b_file_offset >> PAGE_SHIFT;
 	bp->b_flags |= _XBF_PAGES;
 
 	for (i = 0; i < bp->b_page_count; i++) {
@@ -380,7 +378,7 @@ out_free_pages:
 }
 
 /*
- *	Map buffer into kernel address-space if nessecary.
+ *	Map buffer into kernel address-space if necessary.
  */
 STATIC int
 _xfs_buf_map_pages(
@@ -657,8 +655,6 @@ xfs_buf_readahead(
 	xfs_off_t		ioff,
 	size_t			isize)
 {
-	struct backing_dev_info *bdi;
-
 	if (bdi_read_congested(target->bt_bdi))
 		return;
 
@@ -919,8 +915,6 @@ xfs_buf_lock(
 
 	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
 		xfs_log_force(bp->b_target->bt_mount, 0);
-	if (atomic_read(&bp->b_io_remaining))
-		blk_flush_plug(current);
 	down(&bp->b_sema);
 	XB_SET_OWNER(bp);
 
@@ -1309,8 +1303,6 @@ xfs_buf_iowait(
 {
 	trace_xfs_buf_iowait(bp, _RET_IP_);
 
-	if (atomic_read(&bp->b_io_remaining))
-		blk_flush_plug(current);
 	wait_for_completion(&bp->b_iowait);
 
 	trace_xfs_buf_iowait_done(bp, _RET_IP_);
@@ -1747,8 +1739,8 @@ xfsbufd(
 	do {
 		long	age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
 		long	tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
-		int	count = 0;
 		struct list_head tmp;
+		struct blk_plug plug;
 
 		if (unlikely(freezing(current))) {
 			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
@@ -1764,16 +1756,15 @@ xfsbufd(
1764 1756
1765 xfs_buf_delwri_split(target, &tmp, age); 1757 xfs_buf_delwri_split(target, &tmp, age);
1766 list_sort(NULL, &tmp, xfs_buf_cmp); 1758 list_sort(NULL, &tmp, xfs_buf_cmp);
1759
1760 blk_start_plug(&plug);
1767 while (!list_empty(&tmp)) { 1761 while (!list_empty(&tmp)) {
1768 struct xfs_buf *bp; 1762 struct xfs_buf *bp;
1769 bp = list_first_entry(&tmp, struct xfs_buf, b_list); 1763 bp = list_first_entry(&tmp, struct xfs_buf, b_list);
1770 list_del_init(&bp->b_list); 1764 list_del_init(&bp->b_list);
1771 xfs_bdstrat_cb(bp); 1765 xfs_bdstrat_cb(bp);
1772 count++;
1773 } 1766 }
1774 if (count) 1767 blk_finish_plug(&plug);
1775 blk_flush_plug(current);
1776
1777 } while (!kthread_should_stop()); 1768 } while (!kthread_should_stop());
1778 1769
1779 return 0; 1770 return 0;
@@ -1793,6 +1784,7 @@ xfs_flush_buftarg(
1793 int pincount = 0; 1784 int pincount = 0;
1794 LIST_HEAD(tmp_list); 1785 LIST_HEAD(tmp_list);
1795 LIST_HEAD(wait_list); 1786 LIST_HEAD(wait_list);
1787 struct blk_plug plug;
1796 1788
1797 xfs_buf_runall_queues(xfsconvertd_workqueue); 1789 xfs_buf_runall_queues(xfsconvertd_workqueue);
1798 xfs_buf_runall_queues(xfsdatad_workqueue); 1790 xfs_buf_runall_queues(xfsdatad_workqueue);
@@ -1807,6 +1799,8 @@ xfs_flush_buftarg(
1807 * we do that after issuing all the IO. 1799 * we do that after issuing all the IO.
1808 */ 1800 */
1809 list_sort(NULL, &tmp_list, xfs_buf_cmp); 1801 list_sort(NULL, &tmp_list, xfs_buf_cmp);
1802
1803 blk_start_plug(&plug);
1810 while (!list_empty(&tmp_list)) { 1804 while (!list_empty(&tmp_list)) {
1811 bp = list_first_entry(&tmp_list, struct xfs_buf, b_list); 1805 bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
1812 ASSERT(target == bp->b_target); 1806 ASSERT(target == bp->b_target);
@@ -1817,10 +1811,10 @@ xfs_flush_buftarg(
1817 } 1811 }
1818 xfs_bdstrat_cb(bp); 1812 xfs_bdstrat_cb(bp);
1819 } 1813 }
1814 blk_finish_plug(&plug);
1820 1815
1821 if (wait) { 1816 if (wait) {
1822 /* Expedite and wait for IO to complete. */ 1817 /* Wait for IO to complete. */
1823 blk_flush_plug(current);
1824 while (!list_empty(&wait_list)) { 1818 while (!list_empty(&wait_list)) {
1825 bp = list_first_entry(&wait_list, struct xfs_buf, b_list); 1819 bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
1826 1820
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index 52aadfbed132..f4213ba1ff85 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -381,7 +381,7 @@ xfs_aio_write_isize_update(
381 381
382/* 382/*
383 * If this was a direct or synchronous I/O that failed (such as ENOSPC) then 383 * If this was a direct or synchronous I/O that failed (such as ENOSPC) then
384 * part of the I/O may have been written to disk before the error occured. In 384 * part of the I/O may have been written to disk before the error occurred. In
385 * this case the on-disk file size may have been adjusted beyond the in-memory 385 * this case the on-disk file size may have been adjusted beyond the in-memory
386 * file size and now needs to be truncated back. 386 * file size and now needs to be truncated back.
387 */ 387 */
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 9ff7fc603d2f..dd21784525a8 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -70,7 +70,7 @@ xfs_synchronize_times(
70 70
71/* 71/*
72 * If the linux inode is valid, mark it dirty. 72 * If the linux inode is valid, mark it dirty.
73 * Used when commiting a dirty inode into a transaction so that 73 * Used when committing a dirty inode into a transaction so that
74 * the inode will get written back by the linux code 74 * the inode will get written back by the linux code
75 */ 75 */
76void 76void
diff --git a/fs/xfs/linux-2.6/xfs_message.c b/fs/xfs/linux-2.6/xfs_message.c
index 508e06fd7d1e..3ca795609113 100644
--- a/fs/xfs/linux-2.6/xfs_message.c
+++ b/fs/xfs/linux-2.6/xfs_message.c
@@ -28,53 +28,47 @@
28/* 28/*
29 * XFS logging functions 29 * XFS logging functions
30 */ 30 */
31static int 31static void
32__xfs_printk( 32__xfs_printk(
33 const char *level, 33 const char *level,
34 const struct xfs_mount *mp, 34 const struct xfs_mount *mp,
35 struct va_format *vaf) 35 struct va_format *vaf)
36{ 36{
37 if (mp && mp->m_fsname) 37 if (mp && mp->m_fsname)
38 return printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf); 38 printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf);
39 return printk("%sXFS: %pV\n", level, vaf); 39 printk("%sXFS: %pV\n", level, vaf);
40} 40}
41 41
42int xfs_printk( 42void xfs_printk(
43 const char *level, 43 const char *level,
44 const struct xfs_mount *mp, 44 const struct xfs_mount *mp,
45 const char *fmt, ...) 45 const char *fmt, ...)
46{ 46{
47 struct va_format vaf; 47 struct va_format vaf;
48 va_list args; 48 va_list args;
49 int r;
50 49
51 va_start(args, fmt); 50 va_start(args, fmt);
52 51
53 vaf.fmt = fmt; 52 vaf.fmt = fmt;
54 vaf.va = &args; 53 vaf.va = &args;
55 54
56 r = __xfs_printk(level, mp, &vaf); 55 __xfs_printk(level, mp, &vaf);
57 va_end(args); 56 va_end(args);
58
59 return r;
60} 57}
61 58
62#define define_xfs_printk_level(func, kern_level) \ 59#define define_xfs_printk_level(func, kern_level) \
63int func(const struct xfs_mount *mp, const char *fmt, ...) \ 60void func(const struct xfs_mount *mp, const char *fmt, ...) \
64{ \ 61{ \
65 struct va_format vaf; \ 62 struct va_format vaf; \
66 va_list args; \ 63 va_list args; \
67 int r; \
68 \ 64 \
69 va_start(args, fmt); \ 65 va_start(args, fmt); \
70 \ 66 \
71 vaf.fmt = fmt; \ 67 vaf.fmt = fmt; \
72 vaf.va = &args; \ 68 vaf.va = &args; \
73 \ 69 \
74 r = __xfs_printk(kern_level, mp, &vaf); \ 70 __xfs_printk(kern_level, mp, &vaf); \
75 va_end(args); \ 71 va_end(args); \
76 \
77 return r; \
78} \ 72} \
79 73
80define_xfs_printk_level(xfs_emerg, KERN_EMERG); 74define_xfs_printk_level(xfs_emerg, KERN_EMERG);
@@ -88,7 +82,7 @@ define_xfs_printk_level(xfs_info, KERN_INFO);
88define_xfs_printk_level(xfs_debug, KERN_DEBUG); 82define_xfs_printk_level(xfs_debug, KERN_DEBUG);
89#endif 83#endif
90 84
91int 85void
92xfs_alert_tag( 86xfs_alert_tag(
93 const struct xfs_mount *mp, 87 const struct xfs_mount *mp,
94 int panic_tag, 88 int panic_tag,
@@ -97,7 +91,6 @@ xfs_alert_tag(
97 struct va_format vaf; 91 struct va_format vaf;
98 va_list args; 92 va_list args;
99 int do_panic = 0; 93 int do_panic = 0;
100 int r;
101 94
102 if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) { 95 if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) {
103 xfs_printk(KERN_ALERT, mp, 96 xfs_printk(KERN_ALERT, mp,
@@ -110,12 +103,10 @@ xfs_alert_tag(
110 vaf.fmt = fmt; 103 vaf.fmt = fmt;
111 vaf.va = &args; 104 vaf.va = &args;
112 105
113 r = __xfs_printk(KERN_ALERT, mp, &vaf); 106 __xfs_printk(KERN_ALERT, mp, &vaf);
114 va_end(args); 107 va_end(args);
115 108
116 BUG_ON(do_panic); 109 BUG_ON(do_panic);
117
118 return r;
119} 110}
120 111
121void 112void
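
The xfs_message.c conversion drops the never-used int returns but keeps the %pV forwarding technique intact. A minimal sketch of that pattern, assuming only the kernel's struct va_format support; the prefix argument is illustrative:

#include <stdarg.h>
#include <linux/kernel.h>

/*
 * Forward varargs through one central helper without re-formatting:
 * wrap the format string and va_list in a va_format and let printk
 * expand it via %pV.
 */
static void my_printk(const char *level, const char *prefix,
		      const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%s%s: %pV\n", level, prefix, &vaf);
	va_end(args);
}

%pV expands the wrapped format exactly once inside printk, so the wrapper can add a prefix without an intermediate buffer.
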
diff --git a/fs/xfs/linux-2.6/xfs_message.h b/fs/xfs/linux-2.6/xfs_message.h
index e77ffa16745b..f1b3fc1b6c4e 100644
--- a/fs/xfs/linux-2.6/xfs_message.h
+++ b/fs/xfs/linux-2.6/xfs_message.h
@@ -3,32 +3,34 @@
3 3
4struct xfs_mount; 4struct xfs_mount;
5 5
6extern int xfs_printk(const char *level, const struct xfs_mount *mp, 6extern void xfs_printk(const char *level, const struct xfs_mount *mp,
7 const char *fmt, ...) 7 const char *fmt, ...)
8 __attribute__ ((format (printf, 3, 4))); 8 __attribute__ ((format (printf, 3, 4)));
9extern int xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...) 9extern void xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...)
10 __attribute__ ((format (printf, 2, 3))); 10 __attribute__ ((format (printf, 2, 3)));
11extern int xfs_alert(const struct xfs_mount *mp, const char *fmt, ...) 11extern void xfs_alert(const struct xfs_mount *mp, const char *fmt, ...)
12 __attribute__ ((format (printf, 2, 3))); 12 __attribute__ ((format (printf, 2, 3)));
13extern int xfs_alert_tag(const struct xfs_mount *mp, int tag, 13extern void xfs_alert_tag(const struct xfs_mount *mp, int tag,
14 const char *fmt, ...) 14 const char *fmt, ...)
15 __attribute__ ((format (printf, 3, 4))); 15 __attribute__ ((format (printf, 3, 4)));
16extern int xfs_crit(const struct xfs_mount *mp, const char *fmt, ...) 16extern void xfs_crit(const struct xfs_mount *mp, const char *fmt, ...)
17 __attribute__ ((format (printf, 2, 3))); 17 __attribute__ ((format (printf, 2, 3)));
18extern int xfs_err(const struct xfs_mount *mp, const char *fmt, ...) 18extern void xfs_err(const struct xfs_mount *mp, const char *fmt, ...)
19 __attribute__ ((format (printf, 2, 3))); 19 __attribute__ ((format (printf, 2, 3)));
20extern int xfs_warn(const struct xfs_mount *mp, const char *fmt, ...) 20extern void xfs_warn(const struct xfs_mount *mp, const char *fmt, ...)
21 __attribute__ ((format (printf, 2, 3))); 21 __attribute__ ((format (printf, 2, 3)));
22extern int xfs_notice(const struct xfs_mount *mp, const char *fmt, ...) 22extern void xfs_notice(const struct xfs_mount *mp, const char *fmt, ...)
23 __attribute__ ((format (printf, 2, 3))); 23 __attribute__ ((format (printf, 2, 3)));
24extern int xfs_info(const struct xfs_mount *mp, const char *fmt, ...) 24extern void xfs_info(const struct xfs_mount *mp, const char *fmt, ...)
25 __attribute__ ((format (printf, 2, 3))); 25 __attribute__ ((format (printf, 2, 3)));
26 26
27#ifdef DEBUG 27#ifdef DEBUG
28extern int xfs_debug(const struct xfs_mount *mp, const char *fmt, ...) 28extern void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
29 __attribute__ ((format (printf, 2, 3))); 29 __attribute__ ((format (printf, 2, 3)));
30#else 30#else
31#define xfs_debug(mp, fmt, ...) (0) 31static inline void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
32{
33}
32#endif 34#endif
33 35
34extern void assfail(char *expr, char *f, int l); 36extern void assfail(char *expr, char *f, int l);
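
The header change swaps a bare macro stub for an empty static inline in the !DEBUG case. A short sketch of the idiom, with illustrative names:

/*
 * A macro stub like "#define dbg_log(...) (0)" leaves its arguments
 * unevaluated and invisible to the compiler; an empty static inline
 * keeps them visible and matches the void return of the DEBUG variant.
 */
#ifdef DEBUG
extern void dbg_log(const char *fmt, ...)
	__attribute__ ((format (printf, 1, 2)));
#else
static inline void dbg_log(const char *fmt, ...)
{
}
#endif
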
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 1ba5c451da36..b38e58d02299 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -816,75 +816,6 @@ xfs_setup_devices(
816 return 0; 816 return 0;
817} 817}
818 818
819/*
820 * XFS AIL push thread support
821 */
822void
823xfsaild_wakeup(
824 struct xfs_ail *ailp,
825 xfs_lsn_t threshold_lsn)
826{
827 /* only ever move the target forwards */
828 if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) {
829 ailp->xa_target = threshold_lsn;
830 wake_up_process(ailp->xa_task);
831 }
832}
833
834STATIC int
835xfsaild(
836 void *data)
837{
838 struct xfs_ail *ailp = data;
839 xfs_lsn_t last_pushed_lsn = 0;
840 long tout = 0; /* milliseconds */
841
842 while (!kthread_should_stop()) {
843 /*
844 * for short sleeps indicating congestion, don't allow us to
845 * get woken early. Otherwise all we do is bang on the AIL lock
846 * without making progress.
847 */
848 if (tout && tout <= 20)
849 __set_current_state(TASK_KILLABLE);
850 else
851 __set_current_state(TASK_INTERRUPTIBLE);
852 schedule_timeout(tout ?
853 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
854
855 /* swsusp */
856 try_to_freeze();
857
858 ASSERT(ailp->xa_mount->m_log);
859 if (XFS_FORCED_SHUTDOWN(ailp->xa_mount))
860 continue;
861
862 tout = xfsaild_push(ailp, &last_pushed_lsn);
863 }
864
865 return 0;
866} /* xfsaild */
867
868int
869xfsaild_start(
870 struct xfs_ail *ailp)
871{
872 ailp->xa_target = 0;
873 ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
874 ailp->xa_mount->m_fsname);
875 if (IS_ERR(ailp->xa_task))
876 return -PTR_ERR(ailp->xa_task);
877 return 0;
878}
879
880void
881xfsaild_stop(
882 struct xfs_ail *ailp)
883{
884 kthread_stop(ailp->xa_task);
885}
886
887
888/* Catch misguided souls that try to use this interface on XFS */ 819/* Catch misguided souls that try to use this interface on XFS */
889STATIC struct inode * 820STATIC struct inode *
890xfs_fs_alloc_inode( 821xfs_fs_alloc_inode(
@@ -1191,22 +1122,12 @@ xfs_fs_sync_fs(
1191 return -error; 1122 return -error;
1192 1123
1193 if (laptop_mode) { 1124 if (laptop_mode) {
1194 int prev_sync_seq = mp->m_sync_seq;
1195
1196 /* 1125 /*
1197 * The disk must be active because we're syncing. 1126 * The disk must be active because we're syncing.
1198 * We schedule xfssyncd now (now that the disk is 1127 * We schedule xfssyncd now (now that the disk is
1199 * active) instead of later (when it might not be). 1128 * active) instead of later (when it might not be).
1200 */ 1129 */
1201 wake_up_process(mp->m_sync_task); 1130 flush_delayed_work_sync(&mp->m_sync_work);
1202 /*
1203 * We have to wait for the sync iteration to complete.
1204 * If we don't, the disk activity caused by the sync
1205 * will come after the sync is completed, and that
1206 * triggers another sync from laptop mode.
1207 */
1208 wait_event(mp->m_wait_single_sync_task,
1209 mp->m_sync_seq != prev_sync_seq);
1210 } 1131 }
1211 1132
1212 return 0; 1133 return 0;
@@ -1490,9 +1411,6 @@ xfs_fs_fill_super(
1490 spin_lock_init(&mp->m_sb_lock); 1411 spin_lock_init(&mp->m_sb_lock);
1491 mutex_init(&mp->m_growlock); 1412 mutex_init(&mp->m_growlock);
1492 atomic_set(&mp->m_active_trans, 0); 1413 atomic_set(&mp->m_active_trans, 0);
1493 INIT_LIST_HEAD(&mp->m_sync_list);
1494 spin_lock_init(&mp->m_sync_lock);
1495 init_waitqueue_head(&mp->m_wait_single_sync_task);
1496 1414
1497 mp->m_super = sb; 1415 mp->m_super = sb;
1498 sb->s_fs_info = mp; 1416 sb->s_fs_info = mp;
@@ -1799,6 +1717,38 @@ xfs_destroy_zones(void)
1799} 1717}
1800 1718
1801STATIC int __init 1719STATIC int __init
1720xfs_init_workqueues(void)
1721{
1722 /*
 1723 * max_active is set to 8 to give enough concurrency to allow

1724 * multiple work operations on each CPU to run. This allows multiple
1725 * filesystems to be running sync work concurrently, and scales with
1726 * the number of CPUs in the system.
1727 */
1728 xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
1729 if (!xfs_syncd_wq)
1730 goto out;
1731
1732 xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
1733 if (!xfs_ail_wq)
1734 goto out_destroy_syncd;
1735
1736 return 0;
1737
1738out_destroy_syncd:
1739 destroy_workqueue(xfs_syncd_wq);
1740out:
1741 return -ENOMEM;
1742}
1743
1744STATIC void
1745xfs_destroy_workqueues(void)
1746{
1747 destroy_workqueue(xfs_ail_wq);
1748 destroy_workqueue(xfs_syncd_wq);
1749}
1750
1751STATIC int __init
1802init_xfs_fs(void) 1752init_xfs_fs(void)
1803{ 1753{
1804 int error; 1754 int error;
@@ -1813,10 +1763,14 @@ init_xfs_fs(void)
1813 if (error) 1763 if (error)
1814 goto out; 1764 goto out;
1815 1765
1816 error = xfs_mru_cache_init(); 1766 error = xfs_init_workqueues();
1817 if (error) 1767 if (error)
1818 goto out_destroy_zones; 1768 goto out_destroy_zones;
1819 1769
1770 error = xfs_mru_cache_init();
1771 if (error)
1772 goto out_destroy_wq;
1773
1820 error = xfs_filestream_init(); 1774 error = xfs_filestream_init();
1821 if (error) 1775 if (error)
1822 goto out_mru_cache_uninit; 1776 goto out_mru_cache_uninit;
@@ -1833,6 +1787,10 @@ init_xfs_fs(void)
1833 if (error) 1787 if (error)
1834 goto out_cleanup_procfs; 1788 goto out_cleanup_procfs;
1835 1789
1790 error = xfs_init_workqueues();
1791 if (error)
1792 goto out_sysctl_unregister;
1793
1836 vfs_initquota(); 1794 vfs_initquota();
1837 1795
1838 error = register_filesystem(&xfs_fs_type); 1796 error = register_filesystem(&xfs_fs_type);
@@ -1850,6 +1808,8 @@ init_xfs_fs(void)
1850 xfs_filestream_uninit(); 1808 xfs_filestream_uninit();
1851 out_mru_cache_uninit: 1809 out_mru_cache_uninit:
1852 xfs_mru_cache_uninit(); 1810 xfs_mru_cache_uninit();
1811 out_destroy_wq:
1812 xfs_destroy_workqueues();
1853 out_destroy_zones: 1813 out_destroy_zones:
1854 xfs_destroy_zones(); 1814 xfs_destroy_zones();
1855 out: 1815 out:
@@ -1866,6 +1826,7 @@ exit_xfs_fs(void)
1866 xfs_buf_terminate(); 1826 xfs_buf_terminate();
1867 xfs_filestream_uninit(); 1827 xfs_filestream_uninit();
1868 xfs_mru_cache_uninit(); 1828 xfs_mru_cache_uninit();
1829 xfs_destroy_workqueues();
1869 xfs_destroy_zones(); 1830 xfs_destroy_zones();
1870} 1831}
1871 1832
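
In the laptop_mode hunk above, a hand-rolled wake-and-wait handshake (m_sync_seq plus a waitqueue) collapses into a single flush_delayed_work_sync() call. A minimal sketch of that idiom against the 2.6.39-era workqueue API, with illustrative names:

#include <linux/workqueue.h>

static void sync_fn(struct work_struct *work)
{
	/* ... push dirty state to disk ... */
}

static DECLARE_DELAYED_WORK(sync_work, sync_fn);

static void kick_and_wait(void)
{
	/*
	 * Pulls a pending delayed item forward, runs it, and returns
	 * only once sync_fn() has completed - no private sequence
	 * counter or waitqueue needed.
	 */
	flush_delayed_work_sync(&sync_work);
}
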
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 594cd822d84d..e4f9c1b0836c 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -22,6 +22,7 @@
22#include "xfs_log.h" 22#include "xfs_log.h"
23#include "xfs_inum.h" 23#include "xfs_inum.h"
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_trans_priv.h"
25#include "xfs_sb.h" 26#include "xfs_sb.h"
26#include "xfs_ag.h" 27#include "xfs_ag.h"
27#include "xfs_mount.h" 28#include "xfs_mount.h"
@@ -39,6 +40,8 @@
39#include <linux/kthread.h> 40#include <linux/kthread.h>
40#include <linux/freezer.h> 41#include <linux/freezer.h>
41 42
43struct workqueue_struct *xfs_syncd_wq; /* sync workqueue */
44
42/* 45/*
43 * The inode lookup is done in batches to keep the amount of lock traffic and 46 * The inode lookup is done in batches to keep the amount of lock traffic and
44 * radix tree lookups to a minimum. The batch size is a trade off between 47 * radix tree lookups to a minimum. The batch size is a trade off between
@@ -401,7 +404,7 @@ xfs_quiesce_fs(
401/* 404/*
402 * Second stage of a quiesce. The data is already synced, now we have to take 405 * Second stage of a quiesce. The data is already synced, now we have to take
403 * care of the metadata. New transactions are already blocked, so we need to 406 * care of the metadata. New transactions are already blocked, so we need to
404 * wait for any remaining transactions to drain out before proceding. 407 * wait for any remaining transactions to drain out before proceeding.
405 */ 408 */
406void 409void
407xfs_quiesce_attr( 410xfs_quiesce_attr(
@@ -431,62 +434,12 @@ xfs_quiesce_attr(
431 xfs_unmountfs_writesb(mp); 434 xfs_unmountfs_writesb(mp);
432} 435}
433 436
434/* 437static void
435 * Enqueue a work item to be picked up by the vfs xfssyncd thread. 438xfs_syncd_queue_sync(
436 * Doing this has two advantages: 439 struct xfs_mount *mp)
437 * - It saves on stack space, which is tight in certain situations
438 * - It can be used (with care) as a mechanism to avoid deadlocks.
439 * Flushing while allocating in a full filesystem requires both.
440 */
441STATIC void
442xfs_syncd_queue_work(
443 struct xfs_mount *mp,
444 void *data,
445 void (*syncer)(struct xfs_mount *, void *),
446 struct completion *completion)
447{
448 struct xfs_sync_work *work;
449
450 work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
451 INIT_LIST_HEAD(&work->w_list);
452 work->w_syncer = syncer;
453 work->w_data = data;
454 work->w_mount = mp;
455 work->w_completion = completion;
456 spin_lock(&mp->m_sync_lock);
457 list_add_tail(&work->w_list, &mp->m_sync_list);
458 spin_unlock(&mp->m_sync_lock);
459 wake_up_process(mp->m_sync_task);
460}
461
462/*
463 * Flush delayed allocate data, attempting to free up reserved space
464 * from existing allocations. At this point a new allocation attempt
465 * has failed with ENOSPC and we are in the process of scratching our
466 * heads, looking about for more room...
467 */
468STATIC void
469xfs_flush_inodes_work(
470 struct xfs_mount *mp,
471 void *arg)
472{
473 struct inode *inode = arg;
474 xfs_sync_data(mp, SYNC_TRYLOCK);
475 xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
476 iput(inode);
477}
478
479void
480xfs_flush_inodes(
481 xfs_inode_t *ip)
482{ 440{
483 struct inode *inode = VFS_I(ip); 441 queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
484 DECLARE_COMPLETION_ONSTACK(completion); 442 msecs_to_jiffies(xfs_syncd_centisecs * 10));
485
486 igrab(inode);
487 xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
488 wait_for_completion(&completion);
489 xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
490} 443}
491 444
492/* 445/*
@@ -496,9 +449,10 @@ xfs_flush_inodes(
496 */ 449 */
497STATIC void 450STATIC void
498xfs_sync_worker( 451xfs_sync_worker(
499 struct xfs_mount *mp, 452 struct work_struct *work)
500 void *unused)
501{ 453{
454 struct xfs_mount *mp = container_of(to_delayed_work(work),
455 struct xfs_mount, m_sync_work);
502 int error; 456 int error;
503 457
504 if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { 458 if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
@@ -508,73 +462,106 @@ xfs_sync_worker(
508 error = xfs_fs_log_dummy(mp); 462 error = xfs_fs_log_dummy(mp);
509 else 463 else
510 xfs_log_force(mp, 0); 464 xfs_log_force(mp, 0);
511 xfs_reclaim_inodes(mp, 0);
512 error = xfs_qm_sync(mp, SYNC_TRYLOCK); 465 error = xfs_qm_sync(mp, SYNC_TRYLOCK);
466
467 /* start pushing all the metadata that is currently dirty */
468 xfs_ail_push_all(mp->m_ail);
513 } 469 }
514 mp->m_sync_seq++; 470
515 wake_up(&mp->m_wait_single_sync_task); 471 /* queue us up again */
472 xfs_syncd_queue_sync(mp);
516} 473}
517 474
518STATIC int 475/*
519xfssyncd( 476 * Queue a new inode reclaim pass if there are reclaimable inodes and there
520 void *arg) 477 * isn't a reclaim pass already in progress. By default it runs every 5s based
 478 * on the xfs syncd work default of 30s. Perhaps this should have its own
479 * tunable, but that can be done if this method proves to be ineffective or too
480 * aggressive.
481 */
482static void
483xfs_syncd_queue_reclaim(
484 struct xfs_mount *mp)
521{ 485{
522 struct xfs_mount *mp = arg;
523 long timeleft;
524 xfs_sync_work_t *work, *n;
525 LIST_HEAD (tmp);
526
527 set_freezable();
528 timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
529 for (;;) {
530 if (list_empty(&mp->m_sync_list))
531 timeleft = schedule_timeout_interruptible(timeleft);
532 /* swsusp */
533 try_to_freeze();
534 if (kthread_should_stop() && list_empty(&mp->m_sync_list))
535 break;
536 486
537 spin_lock(&mp->m_sync_lock); 487 /*
538 /* 488 * We can have inodes enter reclaim after we've shut down the syncd
539 * We can get woken by laptop mode, to do a sync - 489 * workqueue during unmount, so don't allow reclaim work to be queued
540 * that's the (only!) case where the list would be 490 * during unmount.
541 * empty with time remaining. 491 */
542 */ 492 if (!(mp->m_super->s_flags & MS_ACTIVE))
543 if (!timeleft || list_empty(&mp->m_sync_list)) { 493 return;
544 if (!timeleft)
545 timeleft = xfs_syncd_centisecs *
546 msecs_to_jiffies(10);
547 INIT_LIST_HEAD(&mp->m_sync_work.w_list);
548 list_add_tail(&mp->m_sync_work.w_list,
549 &mp->m_sync_list);
550 }
551 list_splice_init(&mp->m_sync_list, &tmp);
552 spin_unlock(&mp->m_sync_lock);
553 494
554 list_for_each_entry_safe(work, n, &tmp, w_list) { 495 rcu_read_lock();
555 (*work->w_syncer)(mp, work->w_data); 496 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
556 list_del(&work->w_list); 497 queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work,
557 if (work == &mp->m_sync_work) 498 msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
558 continue;
559 if (work->w_completion)
560 complete(work->w_completion);
561 kmem_free(work);
562 }
563 } 499 }
500 rcu_read_unlock();
501}
564 502
565 return 0; 503/*
504 * This is a fast pass over the inode cache to try to get reclaim moving on as
505 * many inodes as possible in a short period of time. It kicks itself every few
506 * seconds, as well as being kicked by the inode cache shrinker when memory
507 * goes low. It scans as quickly as possible avoiding locked inodes or those
508 * already being flushed, and once done schedules a future pass.
509 */
510STATIC void
511xfs_reclaim_worker(
512 struct work_struct *work)
513{
514 struct xfs_mount *mp = container_of(to_delayed_work(work),
515 struct xfs_mount, m_reclaim_work);
516
517 xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
518 xfs_syncd_queue_reclaim(mp);
519}
520
521/*
522 * Flush delayed allocate data, attempting to free up reserved space
523 * from existing allocations. At this point a new allocation attempt
524 * has failed with ENOSPC and we are in the process of scratching our
525 * heads, looking about for more room.
526 *
527 * Queue a new data flush if there isn't one already in progress and
528 * wait for completion of the flush. This means that we only ever have one
529 * inode flush in progress no matter how many ENOSPC events are occurring and
530 * so will prevent the system from bogging down due to every concurrent
531 * ENOSPC event scanning all the active inodes in the system for writeback.
532 */
533void
534xfs_flush_inodes(
535 struct xfs_inode *ip)
536{
537 struct xfs_mount *mp = ip->i_mount;
538
539 queue_work(xfs_syncd_wq, &mp->m_flush_work);
540 flush_work_sync(&mp->m_flush_work);
541}
542
543STATIC void
544xfs_flush_worker(
545 struct work_struct *work)
546{
547 struct xfs_mount *mp = container_of(work,
548 struct xfs_mount, m_flush_work);
549
550 xfs_sync_data(mp, SYNC_TRYLOCK);
551 xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
566} 552}
567 553
568int 554int
569xfs_syncd_init( 555xfs_syncd_init(
570 struct xfs_mount *mp) 556 struct xfs_mount *mp)
571{ 557{
572 mp->m_sync_work.w_syncer = xfs_sync_worker; 558 INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
573 mp->m_sync_work.w_mount = mp; 559 INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
574 mp->m_sync_work.w_completion = NULL; 560 INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
575 mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s", mp->m_fsname); 561
576 if (IS_ERR(mp->m_sync_task)) 562 xfs_syncd_queue_sync(mp);
577 return -PTR_ERR(mp->m_sync_task); 563 xfs_syncd_queue_reclaim(mp);
564
578 return 0; 565 return 0;
579} 566}
580 567
@@ -582,7 +569,9 @@ void
582xfs_syncd_stop( 569xfs_syncd_stop(
583 struct xfs_mount *mp) 570 struct xfs_mount *mp)
584{ 571{
585 kthread_stop(mp->m_sync_task); 572 cancel_delayed_work_sync(&mp->m_sync_work);
573 cancel_delayed_work_sync(&mp->m_reclaim_work);
574 cancel_work_sync(&mp->m_flush_work);
586} 575}
587 576
588void 577void
@@ -601,6 +590,10 @@ __xfs_inode_set_reclaim_tag(
601 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), 590 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
602 XFS_ICI_RECLAIM_TAG); 591 XFS_ICI_RECLAIM_TAG);
603 spin_unlock(&ip->i_mount->m_perag_lock); 592 spin_unlock(&ip->i_mount->m_perag_lock);
593
594 /* schedule periodic background inode reclaim */
595 xfs_syncd_queue_reclaim(ip->i_mount);
596
604 trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno, 597 trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
605 -1, _RET_IP_); 598 -1, _RET_IP_);
606 } 599 }
@@ -1017,7 +1010,13 @@ xfs_reclaim_inodes(
1017} 1010}
1018 1011
1019/* 1012/*
1020 * Shrinker infrastructure. 1013 * Inode cache shrinker.
1014 *
1015 * When called we make sure that there is a background (fast) inode reclaim in
 1016 * progress, while we throttle the speed of reclaim by doing synchronous
1017 * reclaim of inodes. That means if we come across dirty inodes, we wait for
1018 * them to be cleaned, which we hope will not be very long due to the
1019 * background walker having already kicked the IO off on those dirty inodes.
1021 */ 1020 */
1022static int 1021static int
1023xfs_reclaim_inode_shrink( 1022xfs_reclaim_inode_shrink(
@@ -1032,10 +1031,15 @@ xfs_reclaim_inode_shrink(
1032 1031
1033 mp = container_of(shrink, struct xfs_mount, m_inode_shrink); 1032 mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
1034 if (nr_to_scan) { 1033 if (nr_to_scan) {
1034 /* kick background reclaimer and push the AIL */
1035 xfs_syncd_queue_reclaim(mp);
1036 xfs_ail_push_all(mp->m_ail);
1037
1035 if (!(gfp_mask & __GFP_FS)) 1038 if (!(gfp_mask & __GFP_FS))
1036 return -1; 1039 return -1;
1037 1040
1038 xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK, &nr_to_scan); 1041 xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT,
1042 &nr_to_scan);
1039 /* terminate if we don't exhaust the scan */ 1043 /* terminate if we don't exhaust the scan */
1040 if (nr_to_scan > 0) 1044 if (nr_to_scan > 0)
1041 return -1; 1045 return -1;
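
xfs_sync_worker() and xfs_reclaim_worker() above are both self-rearming: each does one bounded pass and queues itself again on the syncd workqueue. A minimal sketch of that pattern; the workqueue, period, and names are illustrative:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wq;	/* allocated once at module init */
static struct delayed_work periodic;

static void periodic_fn(struct work_struct *work)
{
	/* ... one bounded pass of background work ... */

	/* requeue ourselves for the next period */
	queue_delayed_work(wq, &periodic, msecs_to_jiffies(30000));
}

static void start_periodic(void)
{
	INIT_DELAYED_WORK(&periodic, periodic_fn);
	queue_delayed_work(wq, &periodic, msecs_to_jiffies(30000));
}

static void stop_periodic(void)
{
	cancel_delayed_work_sync(&periodic);
}

Note how the patch additionally gates requeueing of the reclaim work on MS_ACTIVE, so a pass that races with unmount cannot queue new work after teardown has begun.
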
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index 32ba6628290c..e3a6ad27415f 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -32,6 +32,8 @@ typedef struct xfs_sync_work {
32#define SYNC_WAIT 0x0001 /* wait for i/o to complete */ 32#define SYNC_WAIT 0x0001 /* wait for i/o to complete */
33#define SYNC_TRYLOCK 0x0002 /* only try to lock inodes */ 33#define SYNC_TRYLOCK 0x0002 /* only try to lock inodes */
34 34
35extern struct workqueue_struct *xfs_syncd_wq; /* sync workqueue */
36
35int xfs_syncd_init(struct xfs_mount *mp); 37int xfs_syncd_init(struct xfs_mount *mp);
36void xfs_syncd_stop(struct xfs_mount *mp); 38void xfs_syncd_stop(struct xfs_mount *mp);
37 39
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 7e2416478503..6fa214603819 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -600,7 +600,7 @@ xfs_qm_dqread(
600 600
601 /* 601 /*
602 * Reservation counters are defined as reservation plus current usage 602 * Reservation counters are defined as reservation plus current usage
603 * to avoid having to add everytime. 603 * to avoid having to add every time.
604 */ 604 */
605 dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount); 605 dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
606 dqp->q_res_icount = be64_to_cpu(ddqp->d_icount); 606 dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 254ee062bd7d..69228aa8605a 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -461,12 +461,10 @@ xfs_qm_dqflush_all(
461 struct xfs_quotainfo *q = mp->m_quotainfo; 461 struct xfs_quotainfo *q = mp->m_quotainfo;
462 int recl; 462 int recl;
463 struct xfs_dquot *dqp; 463 struct xfs_dquot *dqp;
464 int niters;
465 int error; 464 int error;
466 465
467 if (!q) 466 if (!q)
468 return 0; 467 return 0;
469 niters = 0;
470again: 468again:
471 mutex_lock(&q->qi_dqlist_lock); 469 mutex_lock(&q->qi_dqlist_lock);
472 list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) { 470 list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
@@ -1314,14 +1312,9 @@ xfs_qm_dqiter_bufs(
1314{ 1312{
1315 xfs_buf_t *bp; 1313 xfs_buf_t *bp;
1316 int error; 1314 int error;
1317 int notcommitted;
1318 int incr;
1319 int type; 1315 int type;
1320 1316
1321 ASSERT(blkcnt > 0); 1317 ASSERT(blkcnt > 0);
1322 notcommitted = 0;
1323 incr = (blkcnt > XFS_QM_MAX_DQCLUSTER_LOGSZ) ?
1324 XFS_QM_MAX_DQCLUSTER_LOGSZ : blkcnt;
1325 type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER : 1318 type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
1326 (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP); 1319 (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
1327 error = 0; 1320 error = 0;
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
index c9446f1c726d..567b29b9f1b3 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/quota/xfs_qm.h
@@ -65,11 +65,6 @@ extern kmem_zone_t *qm_dqtrxzone;
65 * block in the dquot/xqm code. 65 * block in the dquot/xqm code.
66 */ 66 */
67#define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1 67#define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1
68/*
69 * When doing a quotacheck, we log dquot clusters of this many FSBs at most
70 * in a single transaction. We don't want to ask for too huge a log reservation.
71 */
72#define XFS_QM_MAX_DQCLUSTER_LOGSZ 3
73 68
74typedef xfs_dqhash_t xfs_dqlist_t; 69typedef xfs_dqhash_t xfs_dqlist_t;
75 70
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index 774d7ec6df8e..a0a829addca9 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -134,7 +134,7 @@ xfs_qm_newmount(
134 */ 134 */
135 if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) { 135 if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) {
136 /* 136 /*
137 * If an error occured, qm_mount_quotas code 137 * If an error occurred, qm_mount_quotas code
138 * has already disabled quotas. So, just finish 138 * has already disabled quotas. So, just finish
139 * mounting, and get on with the boring life 139 * mounting, and get on with the boring life
140 * without disk quotas. 140 * without disk quotas.
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index c82f06778a27..2dadb15d5ca9 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -172,7 +172,7 @@ xfs_qm_scall_quotaoff(
172 /* 172 /*
173 * Next we make the changes in the quota flag in the mount struct. 173 * Next we make the changes in the quota flag in the mount struct.
174 * This isn't protected by a particular lock directly, because we 174 * This isn't protected by a particular lock directly, because we
175 * don't want to take a mrlock everytime we depend on quotas being on. 175 * don't want to take a mrlock every time we depend on quotas being on.
176 */ 176 */
177 mp->m_qflags &= ~(flags); 177 mp->m_qflags &= ~(flags);
178 178
@@ -313,14 +313,12 @@ xfs_qm_scall_quotaon(
313{ 313{
314 int error; 314 int error;
315 uint qf; 315 uint qf;
316 uint accflags;
317 __int64_t sbflags; 316 __int64_t sbflags;
318 317
319 flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); 318 flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
320 /* 319 /*
321 * Switching on quota accounting must be done at mount time. 320 * Switching on quota accounting must be done at mount time.
322 */ 321 */
323 accflags = flags & XFS_ALL_QUOTA_ACCT;
324 flags &= ~(XFS_ALL_QUOTA_ACCT); 322 flags &= ~(XFS_ALL_QUOTA_ACCT);
325 323
326 sbflags = 0; 324 sbflags = 0;
@@ -354,7 +352,7 @@ xfs_qm_scall_quotaon(
354 return XFS_ERROR(EINVAL); 352 return XFS_ERROR(EINVAL);
355 } 353 }
356 /* 354 /*
 357 * If everything's upto-date incore, then don't waste time. 355 * If everything's up-to-date incore, then don't waste time.
358 */ 356 */
359 if ((mp->m_qflags & flags) == flags) 357 if ((mp->m_qflags & flags) == flags)
360 return XFS_ERROR(EEXIST); 358 return XFS_ERROR(EEXIST);
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 4bc3c649aee4..27d64d752eab 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -2395,17 +2395,33 @@ xfs_free_extent(
2395 memset(&args, 0, sizeof(xfs_alloc_arg_t)); 2395 memset(&args, 0, sizeof(xfs_alloc_arg_t));
2396 args.tp = tp; 2396 args.tp = tp;
2397 args.mp = tp->t_mountp; 2397 args.mp = tp->t_mountp;
2398
2399 /*
 2400 * validate that the block number is legal - this enables us to detect
2401 * and handle a silent filesystem corruption rather than crashing.
2402 */
2398 args.agno = XFS_FSB_TO_AGNO(args.mp, bno); 2403 args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
2399 ASSERT(args.agno < args.mp->m_sb.sb_agcount); 2404 if (args.agno >= args.mp->m_sb.sb_agcount)
2405 return EFSCORRUPTED;
2406
2400 args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno); 2407 args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
2408 if (args.agbno >= args.mp->m_sb.sb_agblocks)
2409 return EFSCORRUPTED;
2410
2401 args.pag = xfs_perag_get(args.mp, args.agno); 2411 args.pag = xfs_perag_get(args.mp, args.agno);
2402 if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING))) 2412 ASSERT(args.pag);
2413
2414 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
2415 if (error)
2403 goto error0; 2416 goto error0;
2404#ifdef DEBUG 2417
 2405 ASSERT(args.agbp != NULL); 2418 /* validate the extent size is legal now that we have the agf locked */
2406 ASSERT((args.agbno + len) <= 2419 if (args.agbno + len >
2407 be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)); 2420 be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) {
2408#endif 2421 error = EFSCORRUPTED;
2422 goto error0;
2423 }
2424
2409 error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0); 2425 error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
2410error0: 2426error0:
2411 xfs_perag_put(args.pag); 2427 xfs_perag_put(args.pag);
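
The xfs_free_extent() changes demote debug-only ASSERTs to runtime checks that fail the operation with EFSCORRUPTED, so a corrupt block number read from disk is reported instead of crashing production kernels. A compressed sketch of that guard style; the geometry struct, limits, and error value are illustrative (XFS supplies its own definition and, at this point in its history, returns positive errnos internally):

#include <linux/types.h>

#define EFSCORRUPTED	117	/* stand-in; XFS defines its own value */

struct fs_geom {
	u64	ag_count;	/* number of allocation groups */
	u64	ag_blocks;	/* blocks per allocation group */
};

static int free_blocks(struct fs_geom *geom, u64 agno, u64 agbno, u64 len)
{
	/* never trust coordinates derived from on-disk metadata */
	if (agno >= geom->ag_count)
		return EFSCORRUPTED;	/* bad allocation group */
	if (agbno >= geom->ag_blocks)
		return EFSCORRUPTED;	/* start block outside the AG */
	if (agbno + len > geom->ag_blocks)
		return EFSCORRUPTED;	/* extent overruns the AG */

	/* ... lock the AG header and re-check against its on-disk
	 * length, as the patch does once the AGF buffer is held ... */
	return 0;
}
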
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index e5413d96f1af..7b7e005e3dcc 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -992,7 +992,7 @@ xfs_buf_iodone_callbacks(
992 lasttarg = XFS_BUF_TARGET(bp); 992 lasttarg = XFS_BUF_TARGET(bp);
993 993
994 /* 994 /*
995 * If the write was asynchronous then noone will be looking for the 995 * If the write was asynchronous then no one will be looking for the
996 * error. Clear the error state and write the buffer out again. 996 * error. Clear the error state and write the buffer out again.
997 * 997 *
998 * During sync or umount we'll write all pending buffers again 998 * During sync or umount we'll write all pending buffers again
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 742c8330994a..a37480a6e023 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2789,7 +2789,7 @@ xfs_iflush(
2789 2789
2790 /* 2790 /*
2791 * We can't flush the inode until it is unpinned, so wait for it if we 2791 * We can't flush the inode until it is unpinned, so wait for it if we
2792 * are allowed to block. We know noone new can pin it, because we are 2792 * are allowed to block. We know no one new can pin it, because we are
2793 * holding the inode lock shared and you need to hold it exclusively to 2793 * holding the inode lock shared and you need to hold it exclusively to
2794 * pin the inode. 2794 * pin the inode.
2795 * 2795 *
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index f753200cef8d..ff4e2a30227d 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -111,7 +111,7 @@ struct xfs_imap {
111 * Generally, we do not want to hold the i_rlock while holding the 111 * Generally, we do not want to hold the i_rlock while holding the
112 * i_ilock. Hierarchy is i_iolock followed by i_rlock. 112 * i_ilock. Hierarchy is i_iolock followed by i_rlock.
113 * 113 *
114 * xfs_iptr_t contains all the inode fields upto and including the 114 * xfs_iptr_t contains all the inode fields up to and including the
115 * i_mnext and i_mprev fields, it is used as a marker in the inode 115 * i_mnext and i_mprev fields, it is used as a marker in the inode
116 * chain off the mount structure by xfs_sync calls. 116 * chain off the mount structure by xfs_sync calls.
117 */ 117 */
@@ -336,7 +336,7 @@ xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags)
336 336
337/* 337/*
338 * Project quota id helpers (previously projid was 16bit only 338 * Project quota id helpers (previously projid was 16bit only
339 * and using two 16bit values to hold new 32bit projid was choosen 339 * and using two 16bit values to hold new 32bit projid was chosen
340 * to retain compatibility with "old" filesystems). 340 * to retain compatibility with "old" filesystems).
341 */ 341 */
342static inline prid_t 342static inline prid_t
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 46cc40131d4a..576fdfe81d60 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -198,6 +198,41 @@ xfs_inode_item_size(
198} 198}
199 199
200/* 200/*
201 * xfs_inode_item_format_extents - convert in-core extents to on-disk form
202 *
203 * For either the data or attr fork in extent format, we need to endian convert
204 * the in-core extent as we place them into the on-disk inode. In this case, we
205 * need to do this conversion before we write the extents into the log. Because
206 * we don't have the disk inode to write into here, we allocate a buffer and
207 * format the extents into it via xfs_iextents_copy(). We free the buffer in
208 * the unlock routine after the copy for the log has been made.
209 *
210 * In the case of the data fork, the in-core and on-disk fork sizes can be
211 * different due to delayed allocation extents. We only log on-disk extents
212 * here, so always use the physical fork size to determine the size of the
213 * buffer we need to allocate.
214 */
215STATIC void
216xfs_inode_item_format_extents(
217 struct xfs_inode *ip,
218 struct xfs_log_iovec *vecp,
219 int whichfork,
220 int type)
221{
222 xfs_bmbt_rec_t *ext_buffer;
223
224 ext_buffer = kmem_alloc(XFS_IFORK_SIZE(ip, whichfork), KM_SLEEP);
225 if (whichfork == XFS_DATA_FORK)
226 ip->i_itemp->ili_extents_buf = ext_buffer;
227 else
228 ip->i_itemp->ili_aextents_buf = ext_buffer;
229
230 vecp->i_addr = ext_buffer;
231 vecp->i_len = xfs_iextents_copy(ip, ext_buffer, whichfork);
232 vecp->i_type = type;
233}
234
235/*
201 * This is called to fill in the vector of log iovecs for the 236 * This is called to fill in the vector of log iovecs for the
202 * given inode log item. It fills the first item with an inode 237 * given inode log item. It fills the first item with an inode
203 * log format structure, the second with the on-disk inode structure, 238 * log format structure, the second with the on-disk inode structure,
@@ -213,7 +248,6 @@ xfs_inode_item_format(
213 struct xfs_inode *ip = iip->ili_inode; 248 struct xfs_inode *ip = iip->ili_inode;
214 uint nvecs; 249 uint nvecs;
215 size_t data_bytes; 250 size_t data_bytes;
216 xfs_bmbt_rec_t *ext_buffer;
217 xfs_mount_t *mp; 251 xfs_mount_t *mp;
218 252
219 vecp->i_addr = &iip->ili_format; 253 vecp->i_addr = &iip->ili_format;
@@ -320,22 +354,8 @@ xfs_inode_item_format(
320 } else 354 } else
321#endif 355#endif
322 { 356 {
323 /* 357 xfs_inode_item_format_extents(ip, vecp,
324 * There are delayed allocation extents 358 XFS_DATA_FORK, XLOG_REG_TYPE_IEXT);
325 * in the inode, or we need to convert
326 * the extents to on disk format.
327 * Use xfs_iextents_copy()
328 * to copy only the real extents into
329 * a separate buffer. We'll free the
330 * buffer in the unlock routine.
331 */
332 ext_buffer = kmem_alloc(ip->i_df.if_bytes,
333 KM_SLEEP);
334 iip->ili_extents_buf = ext_buffer;
335 vecp->i_addr = ext_buffer;
336 vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
337 XFS_DATA_FORK);
338 vecp->i_type = XLOG_REG_TYPE_IEXT;
339 } 359 }
340 ASSERT(vecp->i_len <= ip->i_df.if_bytes); 360 ASSERT(vecp->i_len <= ip->i_df.if_bytes);
341 iip->ili_format.ilf_dsize = vecp->i_len; 361 iip->ili_format.ilf_dsize = vecp->i_len;
@@ -445,19 +465,12 @@ xfs_inode_item_format(
445 */ 465 */
446 vecp->i_addr = ip->i_afp->if_u1.if_extents; 466 vecp->i_addr = ip->i_afp->if_u1.if_extents;
447 vecp->i_len = ip->i_afp->if_bytes; 467 vecp->i_len = ip->i_afp->if_bytes;
468 vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
448#else 469#else
449 ASSERT(iip->ili_aextents_buf == NULL); 470 ASSERT(iip->ili_aextents_buf == NULL);
450 /* 471 xfs_inode_item_format_extents(ip, vecp,
451 * Need to endian flip before logging 472 XFS_ATTR_FORK, XLOG_REG_TYPE_IATTR_EXT);
452 */
453 ext_buffer = kmem_alloc(ip->i_afp->if_bytes,
454 KM_SLEEP);
455 iip->ili_aextents_buf = ext_buffer;
456 vecp->i_addr = ext_buffer;
457 vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
458 XFS_ATTR_FORK);
459#endif 473#endif
460 vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
461 iip->ili_format.ilf_asize = vecp->i_len; 474 iip->ili_format.ilf_asize = vecp->i_len;
462 vecp++; 475 vecp++;
463 nvecs++; 476 nvecs++;
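
The new helper centralises the copy-and-byteswap step its comment block describes: in-core extents are formatted into a scratch buffer in on-disk (big-endian) layout before being handed to the log. A toy sketch of that idea, using an illustrative 128-bit record rather than the real xfs_bmbt_rec_t layout:

#include <linux/slab.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct rec_incore { u64 l0, l1; };	/* CPU-endian, in-core */
struct rec_disk   { __be64 l0, l1; };	/* big-endian, as logged */

/* Allocate and fill an on-disk-format copy of @n records; the caller
 * points a log iovec at the result and frees it once the copy into
 * the log has been made, mirroring the free in the unlock routine. */
static struct rec_disk *format_records(const struct rec_incore *src, int n)
{
	struct rec_disk *dst;
	int i;

	dst = kmalloc(n * sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;
	for (i = 0; i < n; i++) {
		dst[i].l0 = cpu_to_be64(src[i].l0);
		dst[i].l1 = cpu_to_be64(src[i].l1);
	}
	return dst;
}
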
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index dc1882adaf54..751e94fe1f77 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -204,7 +204,6 @@ xfs_bulkstat(
204 xfs_agi_t *agi; /* agi header data */ 204 xfs_agi_t *agi; /* agi header data */
205 xfs_agino_t agino; /* inode # in allocation group */ 205 xfs_agino_t agino; /* inode # in allocation group */
206 xfs_agnumber_t agno; /* allocation group number */ 206 xfs_agnumber_t agno; /* allocation group number */
207 xfs_daddr_t bno; /* inode cluster start daddr */
208 int chunkidx; /* current index into inode chunk */ 207 int chunkidx; /* current index into inode chunk */
209 int clustidx; /* current index into inode cluster */ 208 int clustidx; /* current index into inode cluster */
210 xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */ 209 xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */
@@ -463,7 +462,6 @@ xfs_bulkstat(
463 mp->m_sb.sb_inopblog); 462 mp->m_sb.sb_inopblog);
464 } 463 }
465 ino = XFS_AGINO_TO_INO(mp, agno, agino); 464 ino = XFS_AGINO_TO_INO(mp, agno, agino);
466 bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
467 /* 465 /*
468 * Skip if this inode is free. 466 * Skip if this inode is free.
469 */ 467 */
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 25efa9b8a602..b612ce4520ae 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -761,7 +761,7 @@ xfs_log_need_covered(xfs_mount_t *mp)
761 break; 761 break;
762 case XLOG_STATE_COVER_NEED: 762 case XLOG_STATE_COVER_NEED:
763 case XLOG_STATE_COVER_NEED2: 763 case XLOG_STATE_COVER_NEED2:
764 if (!xfs_trans_ail_tail(log->l_ailp) && 764 if (!xfs_ail_min_lsn(log->l_ailp) &&
765 xlog_iclogs_empty(log)) { 765 xlog_iclogs_empty(log)) {
766 if (log->l_covered_state == XLOG_STATE_COVER_NEED) 766 if (log->l_covered_state == XLOG_STATE_COVER_NEED)
767 log->l_covered_state = XLOG_STATE_COVER_DONE; 767 log->l_covered_state = XLOG_STATE_COVER_DONE;
@@ -801,7 +801,7 @@ xlog_assign_tail_lsn(
801 xfs_lsn_t tail_lsn; 801 xfs_lsn_t tail_lsn;
802 struct log *log = mp->m_log; 802 struct log *log = mp->m_log;
803 803
804 tail_lsn = xfs_trans_ail_tail(mp->m_ail); 804 tail_lsn = xfs_ail_min_lsn(mp->m_ail);
805 if (!tail_lsn) 805 if (!tail_lsn)
806 tail_lsn = atomic64_read(&log->l_last_sync_lsn); 806 tail_lsn = atomic64_read(&log->l_last_sync_lsn);
807 807
@@ -1239,7 +1239,7 @@ xlog_grant_push_ail(
1239 * the filesystem is shutting down. 1239 * the filesystem is shutting down.
1240 */ 1240 */
1241 if (!XLOG_FORCED_SHUTDOWN(log)) 1241 if (!XLOG_FORCED_SHUTDOWN(log))
1242 xfs_trans_ail_push(log->l_ailp, threshold_lsn); 1242 xfs_ail_push(log->l_ailp, threshold_lsn);
1243} 1243}
1244 1244
1245/* 1245/*
@@ -3407,6 +3407,17 @@ xlog_verify_dest_ptr(
3407 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); 3407 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
3408} 3408}
3409 3409
3410/*
 3411 * Check to make sure the grant write head didn't just overlap the tail. If
 3412 * the cycles are the same, we can't be overlapping. Otherwise, make sure that
 3413 * the cycles differ by exactly one and check the byte count.
 3414 *
 3415 * This check is run unlocked, so it can give false positives. Rather than
 3416 * assert on failures, use a warn-once flag and a panic tag to allow the admin
 3417 * to determine if they want to panic the machine when such an error occurs.
 3418 * For debug kernels this will have the same effect as using an assert but,
 3419 * unlike an assert, it can be turned off at runtime.
3419 * an assert, it can be turned off at runtime.
3420 */
3410STATIC void 3421STATIC void
3411xlog_verify_grant_tail( 3422xlog_verify_grant_tail(
3412 struct log *log) 3423 struct log *log)
@@ -3414,17 +3425,22 @@ xlog_verify_grant_tail(
3414 int tail_cycle, tail_blocks; 3425 int tail_cycle, tail_blocks;
3415 int cycle, space; 3426 int cycle, space;
3416 3427
3417 /*
3418 * Check to make sure the grant write head didn't just over lap the
3419 * tail. If the cycles are the same, we can't be overlapping.
3420 * Otherwise, make sure that the cycles differ by exactly one and
3421 * check the byte count.
3422 */
3423 xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space); 3428 xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space);
3424 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); 3429 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
3425 if (tail_cycle != cycle) { 3430 if (tail_cycle != cycle) {
3426 ASSERT(cycle - 1 == tail_cycle); 3431 if (cycle - 1 != tail_cycle &&
3427 ASSERT(space <= BBTOB(tail_blocks)); 3432 !(log->l_flags & XLOG_TAIL_WARN)) {
3433 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3434 "%s: cycle - 1 != tail_cycle", __func__);
3435 log->l_flags |= XLOG_TAIL_WARN;
3436 }
3437
3438 if (space > BBTOB(tail_blocks) &&
3439 !(log->l_flags & XLOG_TAIL_WARN)) {
3440 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3441 "%s: space > BBTOB(tail_blocks)", __func__);
3442 log->l_flags |= XLOG_TAIL_WARN;
3443 }
3428 } 3444 }
3429} 3445}
3430 3446
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 15dbf1f9c2be..5864850e9e34 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -144,6 +144,7 @@ static inline uint xlog_get_client_id(__be32 i)
144#define XLOG_RECOVERY_NEEDED 0x4 /* log was recovered */ 144#define XLOG_RECOVERY_NEEDED 0x4 /* log was recovered */
145#define XLOG_IO_ERROR 0x8 /* log hit an I/O error, and being 145#define XLOG_IO_ERROR 0x8 /* log hit an I/O error, and being
146 shutdown */ 146 shutdown */
147#define XLOG_TAIL_WARN 0x10 /* log tail verify warning issued */
147 148
148#ifdef __KERNEL__ 149#ifdef __KERNEL__
149/* 150/*
@@ -570,7 +571,7 @@ int xlog_write(struct log *log, struct xfs_log_vec *log_vector,
570 * When we crack an atomic LSN, we sample it first so that the value will not 571 * When we crack an atomic LSN, we sample it first so that the value will not
571 * change while we are cracking it into the component values. This means we 572 * change while we are cracking it into the component values. This means we
572 * will always get consistent component values to work from. This should always 573 * will always get consistent component values to work from. This should always
573 * be used to smaple and crack LSNs taht are stored and updated in atomic 574 * be used to sample and crack LSNs that are stored and updated in atomic
574 * variables. 575 * variables.
575 */ 576 */
576static inline void 577static inline void
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 0c4a5618e7af..5cc464a17c93 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -101,7 +101,7 @@ xlog_get_bp(
101 /* 101 /*
102 * We do log I/O in units of log sectors (a power-of-2 102 * We do log I/O in units of log sectors (a power-of-2
103 * multiple of the basic block size), so we round up the 103 * multiple of the basic block size), so we round up the
104 * requested size to acommodate the basic blocks required 104 * requested size to accommodate the basic blocks required
105 * for complete log sectors. 105 * for complete log sectors.
106 * 106 *
107 * In addition, the buffer may be used for a non-sector- 107 * In addition, the buffer may be used for a non-sector-
@@ -112,7 +112,7 @@ xlog_get_bp(
112 * an issue. Nor will this be a problem if the log I/O is 112 * an issue. Nor will this be a problem if the log I/O is
113 * done in basic blocks (sector size 1). But otherwise we 113 * done in basic blocks (sector size 1). But otherwise we
114 * extend the buffer by one extra log sector to ensure 114 * extend the buffer by one extra log sector to ensure
115 * there's space to accomodate this possiblility. 115 * there's space to accommodate this possibility.
116 */ 116 */
117 if (nbblks > 1 && log->l_sectBBsize > 1) 117 if (nbblks > 1 && log->l_sectBBsize > 1)
118 nbblks += log->l_sectBBsize; 118 nbblks += log->l_sectBBsize;
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index a62e8971539d..19af0ab0d0c6 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -203,12 +203,9 @@ typedef struct xfs_mount {
203 struct mutex m_icsb_mutex; /* balancer sync lock */ 203 struct mutex m_icsb_mutex; /* balancer sync lock */
204#endif 204#endif
205 struct xfs_mru_cache *m_filestream; /* per-mount filestream data */ 205 struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
206 struct task_struct *m_sync_task; /* generalised sync thread */ 206 struct delayed_work m_sync_work; /* background sync work */
207 xfs_sync_work_t m_sync_work; /* work item for VFS_SYNC */ 207 struct delayed_work m_reclaim_work; /* background inode reclaim */
208 struct list_head m_sync_list; /* sync thread work item list */ 208 struct work_struct m_flush_work; /* background inode flush */
209 spinlock_t m_sync_lock; /* work item list lock */
210 int m_sync_seq; /* sync thread generation no. */
211 wait_queue_head_t m_wait_single_sync_task;
212 __int64_t m_update_flags; /* sb flags we need to update 209 __int64_t m_update_flags; /* sb flags we need to update
213 on the next remount,rw */ 210 on the next remount,rw */
214 struct shrinker m_inode_shrink; /* inode reclaim shrinker */ 211 struct shrinker m_inode_shrink; /* inode reclaim shrinker */
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 12aff9584e29..acdb92f14d51 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,74 +28,138 @@
28#include "xfs_trans_priv.h" 28#include "xfs_trans_priv.h"
29#include "xfs_error.h" 29#include "xfs_error.h"
30 30
31STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t); 31struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
32STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
33STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
34STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *);
35 32
36#ifdef DEBUG 33#ifdef DEBUG
37STATIC void xfs_ail_check(struct xfs_ail *, xfs_log_item_t *); 34/*
38#else 35 * Check that the list is sorted as it should be.
36 */
37STATIC void
38xfs_ail_check(
39 struct xfs_ail *ailp,
40 xfs_log_item_t *lip)
41{
42 xfs_log_item_t *prev_lip;
43
44 if (list_empty(&ailp->xa_ail))
45 return;
46
47 /*
48 * Check the next and previous entries are valid.
49 */
50 ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
51 prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
52 if (&prev_lip->li_ail != &ailp->xa_ail)
53 ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
54
55 prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
56 if (&prev_lip->li_ail != &ailp->xa_ail)
57 ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
58
59
60#ifdef XFS_TRANS_DEBUG
61 /*
62 * Walk the list checking lsn ordering, and that every entry has the
63 * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
64 * when specifically debugging the transaction subsystem.
65 */
66 prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
67 list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
68 if (&prev_lip->li_ail != &ailp->xa_ail)
69 ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
70 ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
71 prev_lip = lip;
72 }
73#endif /* XFS_TRANS_DEBUG */
74}
75#else /* !DEBUG */
39#define xfs_ail_check(a,l) 76#define xfs_ail_check(a,l)
40#endif /* DEBUG */ 77#endif /* DEBUG */
41 78
79/*
80 * Return a pointer to the first item in the AIL. If the AIL is empty, then
81 * return NULL.
82 */
83static xfs_log_item_t *
84xfs_ail_min(
85 struct xfs_ail *ailp)
86{
87 if (list_empty(&ailp->xa_ail))
88 return NULL;
89
90 return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
91}
92
93 /*
94 * Return a pointer to the last item in the AIL. If the AIL is empty, then
95 * return NULL.
96 */
97static xfs_log_item_t *
98xfs_ail_max(
99 struct xfs_ail *ailp)
100{
101 if (list_empty(&ailp->xa_ail))
102 return NULL;
103
104 return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
105}
106
107/*
108 * Return a pointer to the item which follows the given item in the AIL. If
109 * the given item is the last item in the list, then return NULL.
110 */
111static xfs_log_item_t *
112xfs_ail_next(
113 struct xfs_ail *ailp,
114 xfs_log_item_t *lip)
115{
116 if (lip->li_ail.next == &ailp->xa_ail)
117 return NULL;
118
119 return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
120}
42 121
 /*
- * This is called by the log manager code to determine the LSN
- * of the tail of the log.  This is exactly the LSN of the first
- * item in the AIL.  If the AIL is empty, then this function
- * returns 0.
+ * This is called by the log manager code to determine the LSN of the tail of
+ * the log.  This is exactly the LSN of the first item in the AIL.  If the AIL
+ * is empty, then this function returns 0.
  *
- * We need the AIL lock in order to get a coherent read of the
- * lsn of the last item in the AIL.
+ * We need the AIL lock in order to get a coherent read of the lsn of the last
+ * item in the AIL.
  */
 xfs_lsn_t
-xfs_trans_ail_tail(
+xfs_ail_min_lsn(
 	struct xfs_ail	*ailp)
 {
-	xfs_lsn_t	lsn;
+	xfs_lsn_t	lsn = 0;
 	xfs_log_item_t	*lip;
 
 	spin_lock(&ailp->xa_lock);
 	lip = xfs_ail_min(ailp);
-	if (lip == NULL) {
-		lsn = (xfs_lsn_t)0;
-	} else {
+	if (lip)
 		lsn = lip->li_lsn;
-	}
 	spin_unlock(&ailp->xa_lock);
 
 	return lsn;
 }
 
 /*
- * xfs_trans_push_ail
- *
- * This routine is called to move the tail of the AIL forward. It does this by
- * trying to flush items in the AIL whose lsns are below the given
- * threshold_lsn.
- *
- * the push is run asynchronously in a separate thread, so we return the tail
- * of the log right now instead of the tail after the push. This means we will
- * either continue right away, or we will sleep waiting on the async thread to
- * do its work.
- *
- * We do this unlocked - we only need to know whether there is anything in the
- * AIL at the time we are called. We don't need to access the contents of
- * any of the objects, so the lock is not needed.
+ * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
  */
-void
-xfs_trans_ail_push(
-	struct xfs_ail	*ailp,
-	xfs_lsn_t	threshold_lsn)
+static xfs_lsn_t
+xfs_ail_max_lsn(
+	struct xfs_ail	*ailp)
 {
-	xfs_log_item_t	*lip;
+	xfs_lsn_t	lsn = 0;
+	xfs_log_item_t	*lip;
 
-	lip = xfs_ail_min(ailp);
-	if (lip && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
-		if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0)
-			xfsaild_wakeup(ailp, threshold_lsn);
-	}
+	spin_lock(&ailp->xa_lock);
+	lip = xfs_ail_max(ailp);
+	if (lip)
+		lsn = lip->li_lsn;
+	spin_unlock(&ailp->xa_lock);
+
+	return lsn;
 }
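The "coherent read" the comment asks for is about tearing: xfs_lsn_t is 64 bits wide, so on a 32-bit host an unlocked read could interleave with a concurrent update and observe half of each value. A minimal user-space analogue of the locked-read pattern, with a pthread mutex standing in for xa_lock:

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t ail_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t        min_lsn;	/* stand-in for the first item's li_lsn */

/* Writer side: update the value under the lock. */
void set_min_lsn(uint64_t lsn)
{
	pthread_mutex_lock(&ail_lock);
	min_lsn = lsn;
	pthread_mutex_unlock(&ail_lock);
}

/* Reader side: on a 32-bit machine the 64-bit load is two loads, so the
 * lock is what guarantees we never see half of an old value spliced with
 * half of a new one. */
uint64_t get_min_lsn(void)
{
	uint64_t lsn;

	pthread_mutex_lock(&ail_lock);
	lsn = min_lsn;
	pthread_mutex_unlock(&ail_lock);
	return lsn;
}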
 
 /*
@@ -236,16 +300,57 @@ out:
 }
 
 /*
- * xfsaild_push does the work of pushing on the AIL. Returning a timeout of
- * zero indicates that the caller should sleep until woken.
+ * splice the log item list into the AIL at the given LSN.
  */
-long
-xfsaild_push(
+static void
+xfs_ail_splice(
 	struct xfs_ail	*ailp,
-	xfs_lsn_t	*last_lsn)
+	struct list_head *list,
+	xfs_lsn_t	lsn)
 {
-	long		tout = 0;
-	xfs_lsn_t	last_pushed_lsn = *last_lsn;
+	xfs_log_item_t	*next_lip;
+
+	/* If the list is empty, just insert the item. */
+	if (list_empty(&ailp->xa_ail)) {
+		list_splice(list, &ailp->xa_ail);
+		return;
+	}
+
+	list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
+		if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
+			break;
+	}
+
+	ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
+	       XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0);
+
+	list_splice_init(list, &next_lip->li_ail);
+}
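xfs_ail_splice() keeps the AIL sorted by walking it in reverse: bulk insertions usually carry recent LSNs, so the insertion point is normally only a step or two from the tail. A compact sketch of the same reverse-walk splice on a toy list (splice_at() is a hypothetical name, and, like the real caller, it assumes the incoming sublist is non-empty and already sorted):

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct item { long lsn; struct list_head li; };

/* Splice @list (whose items all carry @lsn) into the sorted @ail: walk
 * backwards until an entry with lsn <= the new one is found, then chain
 * the sublist in right after it.  If nothing qualifies, @pos ends up at
 * the head and the sublist lands at the front of the AIL. */
static void splice_at(struct list_head *ail, struct list_head *list, long lsn)
{
	struct list_head *pos;

	for (pos = ail->prev; pos != ail; pos = pos->prev) {
		if (container_of(pos, struct item, li)->lsn <= lsn)
			break;
	}

	struct list_head *first = list->next, *last = list->prev;

	first->prev = pos;
	last->next = pos->next;
	pos->next->prev = last;
	pos->next = first;
	list->next = list->prev = list;	/* like list_splice_init(): empty it */
}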
+
+/*
+ * Delete the given item from the AIL.
+ */
+static void
+xfs_ail_delete(
+	struct xfs_ail	*ailp,
+	xfs_log_item_t	*lip)
+{
+	xfs_ail_check(ailp, lip);
+	list_del(&lip->li_ail);
+	xfs_trans_ail_cursor_clear(ailp, lip);
+}
+
+/*
+ * xfs_ail_worker does the work of pushing on the AIL.  It will requeue itself
+ * to run at a later time if there is more work to do to complete the push.
+ */
+STATIC void
+xfs_ail_worker(
+	struct work_struct *work)
+{
+	struct xfs_ail	*ailp = container_of(to_delayed_work(work),
+					struct xfs_ail, xa_work);
+	long		tout;
 	xfs_lsn_t	target = ailp->xa_target;
 	xfs_lsn_t	lsn;
 	xfs_log_item_t	*lip;
@@ -256,15 +361,15 @@ xfsaild_push(
 
 	spin_lock(&ailp->xa_lock);
 	xfs_trans_ail_cursor_init(ailp, cur);
-	lip = xfs_trans_ail_cursor_first(ailp, cur, *last_lsn);
+	lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
 	if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
 		/*
 		 * AIL is empty or our push has reached the end.
 		 */
 		xfs_trans_ail_cursor_done(ailp, cur);
 		spin_unlock(&ailp->xa_lock);
-		*last_lsn = 0;
-		return tout;
+		ailp->xa_last_pushed_lsn = 0;
+		return;
 	}
 
 	XFS_STATS_INC(xs_push_ail);
@@ -301,13 +406,13 @@ xfsaild_push(
 		case XFS_ITEM_SUCCESS:
 			XFS_STATS_INC(xs_push_ail_success);
 			IOP_PUSH(lip);
-			last_pushed_lsn = lsn;
+			ailp->xa_last_pushed_lsn = lsn;
 			break;
 
 		case XFS_ITEM_PUSHBUF:
 			XFS_STATS_INC(xs_push_ail_pushbuf);
 			IOP_PUSHBUF(lip);
-			last_pushed_lsn = lsn;
+			ailp->xa_last_pushed_lsn = lsn;
 			push_xfsbufd = 1;
 			break;
 
@@ -319,7 +424,7 @@ xfsaild_push(
 
 		case XFS_ITEM_LOCKED:
 			XFS_STATS_INC(xs_push_ail_locked);
-			last_pushed_lsn = lsn;
+			ailp->xa_last_pushed_lsn = lsn;
 			stuck++;
 			break;
 
@@ -374,9 +479,23 @@ xfsaild_push(
 		wake_up_process(mp->m_ddev_targp->bt_task);
 	}
 
+	/* assume we have more work to do in a short while */
+	tout = 10;
 	if (!count) {
 		/* We're past our target or empty, so idle */
-		last_pushed_lsn = 0;
+		ailp->xa_last_pushed_lsn = 0;
+
+		/*
+		 * Check for an updated push target before clearing the
+		 * XFS_AIL_PUSHING_BIT.  If the target changed, we've got more
+		 * work to do.  Wait a bit longer before starting that work.
+		 */
+		smp_rmb();
+		if (ailp->xa_target == target) {
+			clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
+			return;
+		}
+		tout = 50;
 	} else if (XFS_LSN_CMP(lsn, target) >= 0) {
 		/*
 		 * We reached the target so wait a bit longer for I/O to
@@ -384,7 +503,7 @@ xfsaild_push(
 		 * start the next scan from the start of the AIL.
 		 */
 		tout = 50;
-		last_pushed_lsn = 0;
+		ailp->xa_last_pushed_lsn = 0;
 	} else if ((stuck * 100) / count > 90) {
 		/*
 		 * Either there is a lot of contention on the AIL or we
@@ -396,14 +515,61 @@ xfsaild_push(
 		 * continuing from where we were.
 		 */
 		tout = 20;
-	} else {
-		/* more to do, but wait a short while before continuing */
-		tout = 10;
 	}
-	*last_lsn = last_pushed_lsn;
-	return tout;
+
+	/* There is more to do, requeue us.  */
+	queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
+					msecs_to_jiffies(tout));
+}
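The requeue policy above is worth spelling out: each pass assumes another pass will be needed in 10ms, stretches that to 20ms under heavy contention or 50ms once the target is reached, and only stops (clearing XFS_AIL_PUSHING_BIT) when a pass finds nothing left to push. A toy user-space model of a self-rearming worker of this shape, with pthreads and usleep() standing in for the workqueue and a hypothetical pending counter standing in for the AIL:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_flag pushing = ATOMIC_FLAG_INIT;
static atomic_int  pending = 4;		/* hypothetical: items left to push */

static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		long tout_ms = 10;	/* assume more work in a short while */

		if (atomic_load(&pending) == 0) {
			/* Nothing left: release the flag so the next push
			 * can start a fresh worker, then stop rearming. */
			atomic_flag_clear(&pushing);
			return NULL;
		}
		atomic_fetch_sub(&pending, 1);
		printf("pushed one item, rearming in %ld ms\n", tout_ms);
		usleep(tout_ms * 1000);	/* models queue_delayed_work(tout) */
	}
}

int main(void)
{
	pthread_t t;

	/* Start a worker only if we won the flag, cf. test_and_set_bit()
	 * in xfs_ail_push() below.  The flag starts clear, so this branch
	 * is always taken in this toy program. */
	if (!atomic_flag_test_and_set(&pushing))
		pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);
	return 0;
}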
+
+/*
+ * This routine is called to move the tail of the AIL forward.  It does this by
+ * trying to flush items in the AIL whose lsns are below the given
+ * threshold_lsn.
+ *
+ * The push is run asynchronously in a workqueue, which means the caller needs
+ * to handle waiting on the async flush for space to become available.
+ * We don't want to interrupt any push that is in progress, hence we only queue
+ * work if we set the pushing bit appropriately.
+ *
+ * We do this unlocked - we only need to know whether there is anything in the
+ * AIL at the time we are called.  We don't need to access the contents of
+ * any of the objects, so the lock is not needed.
+ */
+void
+xfs_ail_push(
+	struct xfs_ail	*ailp,
+	xfs_lsn_t	threshold_lsn)
+{
+	xfs_log_item_t	*lip;
+
+	lip = xfs_ail_min(ailp);
+	if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
+	    XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
+		return;
+
+	/*
+	 * Ensure that the new target is noticed in push code before it clears
+	 * the XFS_AIL_PUSHING_BIT.
+	 */
+	smp_wmb();
+	ailp->xa_target = threshold_lsn;
+	if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
+		queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
 }
 
+/*
+ * Push out all items in the AIL immediately
+ */
+void
+xfs_ail_push_all(
+	struct xfs_ail	*ailp)
+{
+	xfs_lsn_t	threshold_lsn = xfs_ail_max_lsn(ailp);
+
+	if (threshold_lsn)
+		xfs_ail_push(ailp, threshold_lsn);
+}
 
 /*
  * This is to be called when an item is unlocked that may have
@@ -615,7 +781,6 @@ xfs_trans_ail_init(
 	xfs_mount_t	*mp)
 {
 	struct xfs_ail	*ailp;
-	int		error;
 
 	ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
 	if (!ailp)
@@ -624,15 +789,9 @@ xfs_trans_ail_init(
 	ailp->xa_mount = mp;
 	INIT_LIST_HEAD(&ailp->xa_ail);
 	spin_lock_init(&ailp->xa_lock);
-	error = xfsaild_start(ailp);
-	if (error)
-		goto out_free_ailp;
+	INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
 	mp->m_ail = ailp;
 	return 0;
-
-out_free_ailp:
-	kmem_free(ailp);
-	return error;
 }
 
 void
@@ -641,124 +800,6 @@ xfs_trans_ail_destroy(
 {
 	struct xfs_ail	*ailp = mp->m_ail;
 
-	xfsaild_stop(ailp);
+	cancel_delayed_work_sync(&ailp->xa_work);
 	kmem_free(ailp);
 }
-
-/*
- * splice the log item list into the AIL at the given LSN.
- */
-STATIC void
-xfs_ail_splice(
-	struct xfs_ail	*ailp,
-	struct list_head *list,
-	xfs_lsn_t	lsn)
-{
-	xfs_log_item_t	*next_lip;
-
-	/*
-	 * If the list is empty, just insert the item.
-	 */
-	if (list_empty(&ailp->xa_ail)) {
-		list_splice(list, &ailp->xa_ail);
-		return;
-	}
-
-	list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
-		if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
-			break;
-	}
-
-	ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
-	       (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0));
-
-	list_splice_init(list, &next_lip->li_ail);
-	return;
-}
-
-/*
- * Delete the given item from the AIL.  Return a pointer to the item.
- */
-STATIC void
-xfs_ail_delete(
-	struct xfs_ail	*ailp,
-	xfs_log_item_t	*lip)
-{
-	xfs_ail_check(ailp, lip);
-	list_del(&lip->li_ail);
-	xfs_trans_ail_cursor_clear(ailp, lip);
-}
-
-/*
- * Return a pointer to the first item in the AIL.
- * If the AIL is empty, then return NULL.
- */
-STATIC xfs_log_item_t *
-xfs_ail_min(
-	struct xfs_ail	*ailp)
-{
-	if (list_empty(&ailp->xa_ail))
-		return NULL;
-
-	return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
-}
-
-/*
- * Return a pointer to the item which follows
- * the given item in the AIL.  If the given item
- * is the last item in the list, then return NULL.
- */
-STATIC xfs_log_item_t *
-xfs_ail_next(
-	struct xfs_ail	*ailp,
-	xfs_log_item_t	*lip)
-{
-	if (lip->li_ail.next == &ailp->xa_ail)
-		return NULL;
-
-	return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
-}
-
-#ifdef DEBUG
-/*
- * Check that the list is sorted as it should be.
- */
-STATIC void
-xfs_ail_check(
-	struct xfs_ail	*ailp,
-	xfs_log_item_t	*lip)
-{
-	xfs_log_item_t	*prev_lip;
-
-	if (list_empty(&ailp->xa_ail))
-		return;
-
-	/*
-	 * Check the next and previous entries are valid.
-	 */
-	ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
-	prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
-	if (&prev_lip->li_ail != &ailp->xa_ail)
-		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
-
-	prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
-	if (&prev_lip->li_ail != &ailp->xa_ail)
-		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
-
-
-#ifdef XFS_TRANS_DEBUG
-	/*
-	 * Walk the list checking lsn ordering, and that every entry has the
-	 * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
-	 * when specifically debugging the transaction subsystem.
-	 */
-	prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
-	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
-		if (&prev_lip->li_ail != &ailp->xa_ail)
-			ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
-		ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
-		prev_lip = lip;
-	}
-#endif /* XFS_TRANS_DEBUG */
-}
-#endif /* DEBUG */
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index 16084d8ea231..048b0c689d3e 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -81,7 +81,7 @@ xfs_trans_ijoin(
  *
  *
  * Grabs a reference to the inode which will be dropped when the transaction
- * is commited.  The inode will also be unlocked at that point.  The inode
+ * is committed.  The inode will also be unlocked at that point.  The inode
  * must be locked, and it cannot be associated with any transaction.
  */
 void
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 35162c238fa3..6b164e9e9a1f 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -65,16 +65,22 @@ struct xfs_ail_cursor {
 struct xfs_ail {
 	struct xfs_mount	*xa_mount;
 	struct list_head	xa_ail;
-	uint			xa_gen;
-	struct task_struct	*xa_task;
 	xfs_lsn_t		xa_target;
 	struct xfs_ail_cursor	xa_cursors;
 	spinlock_t		xa_lock;
+	struct delayed_work	xa_work;
+	xfs_lsn_t		xa_last_pushed_lsn;
+	unsigned long		xa_flags;
 };
 
+#define XFS_AIL_PUSHING_BIT	0
+
 /*
  * From xfs_trans_ail.c
  */
+
+extern struct workqueue_struct	*xfs_ail_wq;	/* AIL workqueue */
+
 void	xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
 				struct xfs_log_item **log_items, int nr_items,
 				xfs_lsn_t lsn) __releases(ailp->xa_lock);
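The new xa_flags word exists only to host XFS_AIL_PUSHING_BIT, the "is a worker already queued" latch that xfs_ail_push() claims with test_and_set_bit() and xfs_ail_worker() releases after re-checking xa_target. A hedged C11 analogue of that handshake, with release/acquire atomics standing in for the kernel's smp_wmb()/smp_rmb() and a hypothetical queue_push_work() stub in place of queue_delayed_work():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t push_target;
static atomic_flag      pushing = ATOMIC_FLAG_INIT;

/* Hypothetical stub: in the kernel this would arm the delayed worker. */
static void queue_push_work(void) { }

/* Push side: publish the new target, then claim the worker if it is idle. */
void ail_push(uint64_t threshold_lsn)
{
	atomic_store_explicit(&push_target, threshold_lsn,
			      memory_order_release);
	if (!atomic_flag_test_and_set(&pushing))
		queue_push_work();
}

/* Worker side, on running out of work: re-read the target before letting
 * go of the flag.  If it moved while we were finishing, keep pushing
 * rather than clearing the flag, so the new target is not dropped. */
bool worker_may_stop(uint64_t target_seen)
{
	if (atomic_load_explicit(&push_target,
				 memory_order_acquire) == target_seen) {
		atomic_flag_clear(&pushing);
		return true;
	}
	return false;	/* target changed: requeue for another pass */
}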
@@ -98,12 +104,13 @@ xfs_trans_ail_delete(
 	xfs_trans_ail_delete_bulk(ailp, &lip, 1);
 }
 
-void			xfs_trans_ail_push(struct xfs_ail *, xfs_lsn_t);
+void			xfs_ail_push(struct xfs_ail *, xfs_lsn_t);
+void			xfs_ail_push_all(struct xfs_ail *);
+xfs_lsn_t		xfs_ail_min_lsn(struct xfs_ail *ailp);
+
 void			xfs_trans_unlocked_item(struct xfs_ail *,
 					xfs_log_item_t *);
 
-xfs_lsn_t		xfs_trans_ail_tail(struct xfs_ail *ailp);
-
 struct xfs_log_item	*xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
 					struct xfs_ail_cursor *cur,
 					xfs_lsn_t lsn);
@@ -112,11 +119,6 @@ struct xfs_log_item *xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
 void			xfs_trans_ail_cursor_done(struct xfs_ail *ailp,
 					struct xfs_ail_cursor *cur);
 
-long	xfsaild_push(struct xfs_ail *, xfs_lsn_t *);
-void	xfsaild_wakeup(struct xfs_ail *, xfs_lsn_t);
-int	xfsaild_start(struct xfs_ail *);
-void	xfsaild_stop(struct xfs_ail *);
-
 #if BITS_PER_LONG != 64
 static inline void
 xfs_trans_ail_copy_lsn(
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index c48b4217ec47..b7a5fe7c52c8 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -953,7 +953,7 @@ xfs_release(
 		 * If we previously truncated this file and removed old data
 		 * in the process, we want to initiate "early" writeout on
 		 * the last close.  This is an attempt to combat the notorious
-		 * NULL files problem which is particularly noticable from a
+		 * NULL files problem which is particularly noticeable from a
 		 * truncate down, buffered (re-)write (delalloc), followed by
 		 * a crash.  What we are effectively doing here is
 		 * significantly reducing the time window where we'd otherwise
@@ -982,7 +982,7 @@ xfs_release(
 		 *
 		 * Further, check if the inode is being opened, written and
 		 * closed frequently and we have delayed allocation blocks
-		 * oustanding (e.g. streaming writes from the NFS server),
+		 * outstanding (e.g. streaming writes from the NFS server),
 		 * truncating the blocks past EOF will cause fragmentation to
 		 * occur.
 		 *