path: root/fs
Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/vfs_inode.c | 4
-rw-r--r--  fs/Kconfig | 19
-rw-r--r--  fs/adfs/inode.c | 1
-rw-r--r--  fs/adfs/super.c | 6
-rw-r--r--  fs/affs/super.c | 6
-rw-r--r--  fs/afs/inode.c | 1
-rw-r--r--  fs/afs/vlocation.c | 3
-rw-r--r--  fs/afs/volume.c | 3
-rw-r--r--  fs/autofs/inode.c | 6
-rw-r--r--  fs/autofs/symlink.c | 2
-rw-r--r--  fs/autofs4/expire.c | 6
-rw-r--r--  fs/autofs4/inode.c | 1
-rw-r--r--  fs/autofs4/root.c | 38
-rw-r--r--  fs/befs/linuxvfs.c | 5
-rw-r--r--  fs/bfs/dir.c | 2
-rw-r--r--  fs/bfs/inode.c | 10
-rw-r--r--  fs/binfmt_elf.c | 13
-rw-r--r--  fs/binfmt_misc.c | 9
-rw-r--r--  fs/buffer.c | 2
-rw-r--r--  fs/char_dev.c | 20
-rw-r--r--  fs/cifs/cifsfs.c | 21
-rw-r--r--  fs/cifs/readdir.c | 5
-rw-r--r--  fs/coda/coda_linux.c | 2
-rw-r--r--  fs/coda/dir.c | 2
-rw-r--r--  fs/coda/inode.c | 3
-rw-r--r--  fs/compat.c | 5
-rw-r--r--  fs/configfs/file.c | 3
-rw-r--r--  fs/configfs/inode.c | 4
-rw-r--r--  fs/cramfs/inode.c | 4
-rw-r--r--  fs/debugfs/file.c | 60
-rw-r--r--  fs/debugfs/inode.c | 20
-rw-r--r--  fs/devpts/inode.c | 6
-rw-r--r--  fs/efs/super.c | 6
-rw-r--r--  fs/eventpoll.c | 1
-rw-r--r--  fs/exec.c | 53
-rw-r--r--  fs/ext2/acl.c | 4
-rw-r--r--  fs/ext2/ialloc.c | 1
-rw-r--r--  fs/ext2/inode.c | 1
-rw-r--r--  fs/ext2/super.c | 38
-rw-r--r--  fs/ext2/xattr.c | 3
-rw-r--r--  fs/ext3/acl.c | 6
-rw-r--r--  fs/ext3/balloc.c | 350
-rw-r--r--  fs/ext3/bitmap.c | 2
-rw-r--r--  fs/ext3/dir.c | 19
-rw-r--r--  fs/ext3/file.c | 2
-rw-r--r--  fs/ext3/fsync.c | 6
-rw-r--r--  fs/ext3/hash.c | 8
-rw-r--r--  fs/ext3/ialloc.c | 55
-rw-r--r--  fs/ext3/inode.c | 77
-rw-r--r--  fs/ext3/namei.c | 50
-rw-r--r--  fs/ext3/resize.c | 42
-rw-r--r--  fs/ext3/super.c | 110
-rw-r--r--  fs/ext3/xattr.c | 16
-rw-r--r--  fs/fat/cache.c | 3
-rw-r--r--  fs/fat/inode.c | 38
-rw-r--r--  fs/file.c | 6
-rw-r--r--  fs/file_table.c | 2
-rw-r--r--  fs/freevxfs/vxfs.h | 2
-rw-r--r--  fs/freevxfs/vxfs_inode.c | 5
-rw-r--r--  fs/fuse/control.c | 6
-rw-r--r--  fs/fuse/inode.c | 1
-rw-r--r--  fs/hfs/bnode.c | 3
-rw-r--r--  fs/hfs/btree.c | 3
-rw-r--r--  fs/hfs/inode.c | 2
-rw-r--r--  fs/hfs/super.c | 6
-rw-r--r--  fs/hfsplus/bnode.c | 3
-rw-r--r--  fs/hfsplus/btree.c | 3
-rw-r--r--  fs/hfsplus/inode.c | 2
-rw-r--r--  fs/hfsplus/super.c | 3
-rw-r--r--  fs/hostfs/hostfs_kern.c | 1
-rw-r--r--  fs/hpfs/buffer.c | 2
-rw-r--r--  fs/hpfs/inode.c | 1
-rw-r--r--  fs/hpfs/super.c | 6
-rw-r--r--  fs/hppfs/hppfs_kern.c | 1
-rw-r--r--  fs/hugetlbfs/inode.c | 1
-rw-r--r--  fs/inode.c | 6
-rw-r--r--  fs/isofs/inode.c | 10
-rw-r--r--  fs/jbd/checkpoint.c | 33
-rw-r--r--  fs/jbd/commit.c | 182
-rw-r--r--  fs/jbd/journal.c | 74
-rw-r--r--  fs/jbd/recovery.c | 56
-rw-r--r--  fs/jbd/revoke.c | 70
-rw-r--r--  fs/jbd/transaction.c | 134
-rw-r--r--  fs/jffs/inode-v23.c | 44
-rw-r--r--  fs/jffs/intrep.c | 11
-rw-r--r--  fs/jffs/jffs_fm.c | 6
-rw-r--r--  fs/jffs2/fs.c | 2
-rw-r--r--  fs/jffs2/super.c | 3
-rw-r--r--  fs/jfs/jfs_extent.c | 2
-rw-r--r--  fs/jfs/jfs_imap.c | 1
-rw-r--r--  fs/jfs/jfs_inode.c | 1
-rw-r--r--  fs/jfs/jfs_metapage.c | 2
-rw-r--r--  fs/jfs/jfs_txnmgr.c | 4
-rw-r--r--  fs/libfs.c | 4
-rw-r--r--  fs/lockd/clntlock.c | 2
-rw-r--r--  fs/lockd/clntproc.c | 2
-rw-r--r--  fs/lockd/host.c | 4
-rw-r--r--  fs/lockd/svcsubs.c | 3
-rw-r--r--  fs/minix/bitmap.c | 2
-rw-r--r--  fs/minix/inode.c | 13
-rw-r--r--  fs/namei.c | 50
-rw-r--r--  fs/namespace.c | 10
-rw-r--r--  fs/ncpfs/inode.c | 7
-rw-r--r--  fs/ncpfs/symlink.c | 4
-rw-r--r--  fs/nfs/delegation.c | 7
-rw-r--r--  fs/nfs/direct.c | 3
-rw-r--r--  fs/nfs/inode.c | 9
-rw-r--r--  fs/nfs/namespace.c | 12
-rw-r--r--  fs/nfs/nfs3proc.c | 2
-rw-r--r--  fs/nfs/pagelist.c | 3
-rw-r--r--  fs/nfs/proc.c | 2
-rw-r--r--  fs/nfs/read.c | 3
-rw-r--r--  fs/nfs/write.c | 3
-rw-r--r--  fs/nfsd/nfs4idmap.c | 3
-rw-r--r--  fs/nfsd/nfs4state.c | 8
-rw-r--r--  fs/ntfs/dir.c | 5
-rw-r--r--  fs/ntfs/inode.c | 6
-rw-r--r--  fs/ntfs/mft.c | 9
-rw-r--r--  fs/ntfs/super.c | 28
-rw-r--r--  fs/ntfs/unistr.c | 4
-rw-r--r--  fs/ocfs2/dlm/dlmfs.c | 6
-rw-r--r--  fs/ocfs2/dlmglue.c | 2
-rw-r--r--  fs/ocfs2/inode.c | 4
-rw-r--r--  fs/partitions/efi.c | 9
-rw-r--r--  fs/pipe.c | 1
-rw-r--r--  fs/proc/internal.h | 1
-rw-r--r--  fs/proc/kcore.c | 6
-rw-r--r--  fs/proc/nommu.c | 20
-rw-r--r--  fs/proc/proc_misc.c | 11
-rw-r--r--  fs/proc/task_mmu.c | 5
-rw-r--r--  fs/proc/task_nommu.c | 74
-rw-r--r--  fs/qnx4/inode.c | 8
-rw-r--r--  fs/ramfs/inode.c | 1
-rw-r--r--  fs/reiserfs/inode.c | 4
-rw-r--r--  fs/reiserfs/super.c | 25
-rw-r--r--  fs/romfs/inode.c | 3
-rw-r--r--  fs/smbfs/inode.c | 5
-rw-r--r--  fs/smbfs/proc.c | 1
-rw-r--r--  fs/smbfs/request.c | 3
-rw-r--r--  fs/stat.c | 3
-rw-r--r--  fs/sysfs/bin.c | 13
-rw-r--r--  fs/sysfs/dir.c | 2
-rw-r--r--  fs/sysfs/inode.c | 12
-rw-r--r--  fs/sysfs/symlink.c | 14
-rw-r--r--  fs/sysfs/sysfs.h | 2
-rw-r--r--  fs/sysv/ialloc.c | 2
-rw-r--r--  fs/sysv/inode.c | 2
-rw-r--r--  fs/sysv/super.c | 6
-rw-r--r--  fs/udf/ialloc.c | 7
-rw-r--r--  fs/udf/inode.c | 2
-rw-r--r--  fs/udf/super.c | 3
-rw-r--r--  fs/ufs/ialloc.c | 1
-rw-r--r--  fs/ufs/inode.c | 1
-rw-r--r--  fs/ufs/super.c | 6
-rw-r--r--  fs/xfs/linux-2.6/kmem.h | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_file.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 1
-rw-r--r--  fs/xfs/linux-2.6/xfs_vnode.c | 1
159 files changed, 1281 insertions, 1087 deletions
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index eae50c9d6dc4..7a7ec2d1d2f4 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -204,7 +204,6 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
 inode->i_mode = mode;
 inode->i_uid = current->fsuid;
 inode->i_gid = current->fsgid;
-inode->i_blksize = sb->s_blocksize;
 inode->i_blocks = 0;
 inode->i_rdev = 0;
 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -950,9 +949,8 @@ v9fs_stat2inode(struct v9fs_stat *stat, struct inode *inode,
 
 inode->i_size = stat->length;
 
-inode->i_blksize = sb->s_blocksize;
 inode->i_blocks =
-(inode->i_size + inode->i_blksize - 1) >> sb->s_blocksize_bits;
+(inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
 }
 
 /**
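This is the pattern most of the hunks below repeat: the per-inode i_blksize field is being removed, and code that previously read it now derives the same figure from the superblock's block size. A hedged sketch of the idea, using an illustrative helper name that is not part of the patch:

```c
#include <linux/fs.h>

/*
 * Illustrative only: with i_blksize gone, the block count reported for an
 * inode is computed from sb->s_blocksize, exactly as the v9fs hunk above
 * now does inline.
 */
static inline unsigned long example_size_to_blocks(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	return (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
}
```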
diff --git a/fs/Kconfig b/fs/Kconfig
index ca9affd676ae..c968b9c7e581 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -827,6 +827,25 @@ config PROC_VMCORE
 help
 Exports the dump image of crashed kernel in ELF format.
 
+config PROC_SYSCTL
+bool "Sysctl support (/proc/sys)" if EMBEDDED
+depends on PROC_FS
+select SYSCTL
+default y
+---help---
+The sysctl interface provides a means of dynamically changing
+certain kernel parameters and variables on the fly without requiring
+a recompile of the kernel or reboot of the system. The primary
+interface is through /proc/sys. If you say Y here a tree of
+modifiable sysctl entries will be generated beneath the
+/proc/sys directory. They are explained in the files
+in <file:Documentation/sysctl/>. Note that enabling this
+option will enlarge the kernel by at least 8 KB.
+
+As it is generally a good thing, you should say Y here unless
+building a kernel for install/rescue disks or your system is very
+limited in memory.
+
 config SYSFS
 bool "sysfs file system support" if EMBEDDED
 default y
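As the help text above says, entries under /proc/sys behave like ordinary files. A minimal userspace sketch of reading one (the particular sysctl path is just an example):

```c
#include <stdio.h>

/* Read one sysctl value through the /proc/sys tree described above. */
int main(void)
{
	char buf[128];
	FILE *f = fopen("/proc/sys/kernel/osrelease", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("kernel.osrelease = %s", buf);
	fclose(f);
	return 0;
}
```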
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 534f3eecc985..7e7a04be1278 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -269,7 +269,6 @@ adfs_iget(struct super_block *sb, struct object_info *obj)
 inode->i_ino = obj->file_id;
 inode->i_size = obj->size;
 inode->i_nlink = 2;
-inode->i_blksize = PAGE_SIZE;
 inode->i_blocks = (inode->i_size + sb->s_blocksize - 1) >>
 sb->s_blocksize_bits;
 
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 82011019494c..9ade139086fc 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -251,8 +251,7 @@ static int init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
-if (kmem_cache_destroy(adfs_inode_cachep))
-printk(KERN_INFO "adfs_inode_cache: not all structures were freed\n");
+kmem_cache_destroy(adfs_inode_cachep);
 }
 
 static struct super_operations adfs_sops = {
@@ -339,11 +338,10 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
 
 sb->s_flags |= MS_NODIRATIME;
 
-asb = kmalloc(sizeof(*asb), GFP_KERNEL);
+asb = kzalloc(sizeof(*asb), GFP_KERNEL);
 if (!asb)
 return -ENOMEM;
 sb->s_fs_info = asb;
-memset(asb, 0, sizeof(*asb));
 
 /* set default options */
 asb->s_uid = 0;
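Two cleanups recur in this and most of the filesystem hunks that follow: kmem_cache_destroy() no longer returns a status (so the warning printk disappears), and kmalloc() followed by memset(..., 0, ...) collapses into kzalloc(). A condensed sketch of the resulting shape, with made-up names:

```c
#include <linux/fs.h>
#include <linux/slab.h>

struct example_sb_info { unsigned long s_flags; };	/* stand-in private info */

static struct kmem_cache *example_inode_cachep;

static void example_destroy_inodecache(void)
{
	/* kmem_cache_destroy() now returns void: nothing to check or print. */
	kmem_cache_destroy(example_inode_cachep);
}

static int example_alloc_sb_info(struct super_block *sb)
{
	struct example_sb_info *sbi;

	/* kzalloc() replaces the old kmalloc() + memset() pair. */
	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;
	return 0;
}
```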
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 17352011ab67..5ea72c3a16c3 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -109,8 +109,7 @@ static int init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
-if (kmem_cache_destroy(affs_inode_cachep))
-printk(KERN_INFO "affs_inode_cache: not all structures were freed\n");
+kmem_cache_destroy(affs_inode_cachep);
 }
 
 static struct super_operations affs_sops = {
@@ -280,11 +279,10 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
 sb->s_op = &affs_sops;
 sb->s_flags |= MS_NODIRATIME;
 
-sbi = kmalloc(sizeof(struct affs_sb_info), GFP_KERNEL);
+sbi = kzalloc(sizeof(struct affs_sb_info), GFP_KERNEL);
 if (!sbi)
 return -ENOMEM;
 sb->s_fs_info = sbi;
-memset(sbi, 0, sizeof(*sbi));
 init_MUTEX(&sbi->s_bmlock);
 
 if (!parse_options(data,&uid,&gid,&i,&reserved,&root_block,
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 4ebb30a50ed5..6f37754906c2 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -72,7 +72,6 @@ static int afs_inode_map_status(struct afs_vnode *vnode)
 inode->i_ctime.tv_sec = vnode->status.mtime_server;
 inode->i_ctime.tv_nsec = 0;
 inode->i_atime = inode->i_mtime = inode->i_ctime;
-inode->i_blksize = PAGE_CACHE_SIZE;
 inode->i_blocks = 0;
 inode->i_version = vnode->fid.unique;
 inode->i_mapping->a_ops = &afs_fs_aops;
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index 331f730a1fb3..782ee7c600ca 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -281,11 +281,10 @@ int afs_vlocation_lookup(struct afs_cell *cell,
 spin_unlock(&cell->vl_gylock);
 
 /* not in the cell's in-memory lists - create a new record */
-vlocation = kmalloc(sizeof(struct afs_vlocation), GFP_KERNEL);
+vlocation = kzalloc(sizeof(struct afs_vlocation), GFP_KERNEL);
 if (!vlocation)
 return -ENOMEM;
 
-memset(vlocation, 0, sizeof(struct afs_vlocation));
 atomic_set(&vlocation->usage, 1);
 INIT_LIST_HEAD(&vlocation->link);
 rwlock_init(&vlocation->lock);
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 0ff4b86476e3..768c6dbd323a 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -186,11 +186,10 @@ int afs_volume_lookup(const char *name, struct afs_cell *cell, int rwpath,
 _debug("creating new volume record");
 
 ret = -ENOMEM;
-volume = kmalloc(sizeof(struct afs_volume), GFP_KERNEL);
+volume = kzalloc(sizeof(struct afs_volume), GFP_KERNEL);
 if (!volume)
 goto error_up;
 
-memset(volume, 0, sizeof(struct afs_volume));
 atomic_set(&volume->usage, 1);
 volume->type = type;
 volume->type_force = force;
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index af2efbbb5d76..2c9759baad61 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -129,10 +129,9 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
 struct autofs_sb_info *sbi;
 int minproto, maxproto;
 
-sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);
+sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
 if ( !sbi )
 goto fail_unlock;
-memset(sbi, 0, sizeof(*sbi));
 DPRINTK(("autofs: starting up, sbi = %p\n",sbi));
 
 s->s_fs_info = sbi;
@@ -217,7 +216,6 @@ static void autofs_read_inode(struct inode *inode)
 inode->i_nlink = 2;
 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
 inode->i_blocks = 0;
-inode->i_blksize = 1024;
 
 if ( ino == AUTOFS_ROOT_INO ) {
 inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
@@ -242,7 +240,7 @@ static void autofs_read_inode(struct inode *inode)
 
 inode->i_op = &autofs_symlink_inode_operations;
 sl = &sbi->symlink[n];
-inode->u.generic_ip = sl;
+inode->i_private = sl;
 inode->i_mode = S_IFLNK | S_IRWXUGO;
 inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = sl->mtime;
 inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
diff --git a/fs/autofs/symlink.c b/fs/autofs/symlink.c
index 52e8772b066e..c74f2eb65775 100644
--- a/fs/autofs/symlink.c
+++ b/fs/autofs/symlink.c
@@ -15,7 +15,7 @@
 /* Nothing to release.. */
 static void *autofs_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
-char *s=((struct autofs_symlink *)dentry->d_inode->u.generic_ip)->data;
+char *s=((struct autofs_symlink *)dentry->d_inode->i_private)->data;
 nd_set_link(nd, s);
 return NULL;
 }
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 8dbd44f10e9d..d96e5c14a9ca 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -32,7 +32,7 @@ static inline int autofs4_can_expire(struct dentry *dentry,
 
 if (!do_now) {
 /* Too young to die */
-if (time_after(ino->last_used + timeout, now))
+if (!timeout || time_after(ino->last_used + timeout, now))
 return 0;
 
 /* update last_used here :-
@@ -253,7 +253,7 @@ static struct dentry *autofs4_expire_direct(struct super_block *sb,
 struct dentry *root = dget(sb->s_root);
 int do_now = how & AUTOFS_EXP_IMMEDIATE;
 
-if (!sbi->exp_timeout || !root)
+if (!root)
 return NULL;
 
 now = jiffies;
@@ -293,7 +293,7 @@ static struct dentry *autofs4_expire_indirect(struct super_block *sb,
 int do_now = how & AUTOFS_EXP_IMMEDIATE;
 int exp_leaves = how & AUTOFS_EXP_LEAVES;
 
-if ( !sbi->exp_timeout || !root )
+if (!root)
 return NULL;
 
 now = jiffies;
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 11a6a9ae51b7..800ce876caec 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -447,7 +447,6 @@ struct inode *autofs4_get_inode(struct super_block *sb,
 inode->i_uid = 0;
 inode->i_gid = 0;
 }
-inode->i_blksize = PAGE_CACHE_SIZE;
 inode->i_blocks = 0;
 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 5100f984783f..27e17f96cada 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -137,7 +137,9 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
 nd.flags = LOOKUP_DIRECTORY;
 ret = (dentry->d_op->d_revalidate)(dentry, &nd);
 
-if (!ret) {
+if (ret <= 0) {
+if (ret < 0)
+status = ret;
 dcache_dir_close(inode, file);
 goto out;
 }
@@ -400,13 +402,23 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
 int oz_mode = autofs4_oz_mode(sbi);
 int flags = nd ? nd->flags : 0;
-int status = 0;
+int status = 1;
 
 /* Pending dentry */
 if (autofs4_ispending(dentry)) {
-if (!oz_mode)
-status = try_to_fill_dentry(dentry, flags);
-return !status;
+/* The daemon never causes a mount to trigger */
+if (oz_mode)
+return 1;
+
+/*
+ * A zero status is success otherwise we have a
+ * negative error code.
+ */
+status = try_to_fill_dentry(dentry, flags);
+if (status == 0)
+return 1;
+
+return status;
 }
 
 /* Negative dentry.. invalidate if "old" */
@@ -421,9 +433,19 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
 DPRINTK("dentry=%p %.*s, emptydir",
 dentry, dentry->d_name.len, dentry->d_name.name);
 spin_unlock(&dcache_lock);
-if (!oz_mode)
-status = try_to_fill_dentry(dentry, flags);
-return !status;
+/* The daemon never causes a mount to trigger */
+if (oz_mode)
+return 1;
+
+/*
+ * A zero status is success otherwise we have a
+ * negative error code.
+ */
+status = try_to_fill_dentry(dentry, flags);
+if (status == 0)
+return 1;
+
+return status;
 }
 spin_unlock(&dcache_lock);
 
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 50cfca5c7efd..57020c7a7e65 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -365,7 +365,6 @@ befs_read_inode(struct inode *inode)
 inode->i_mtime.tv_nsec = 0; /* lower 16 bits are not a time */
 inode->i_ctime = inode->i_mtime;
 inode->i_atime = inode->i_mtime;
-inode->i_blksize = befs_sb->block_size;
 
 befs_ino->i_inode_num = fsrun_to_cpu(sb, raw_inode->inode_num);
 befs_ino->i_parent = fsrun_to_cpu(sb, raw_inode->parent);
@@ -446,9 +445,7 @@ befs_init_inodecache(void)
 static void
 befs_destroy_inodecache(void)
 {
-if (kmem_cache_destroy(befs_inode_cachep))
-printk(KERN_ERR "befs_destroy_inodecache: "
-"not all structures were freed\n");
+kmem_cache_destroy(befs_inode_cachep);
 }
 
 /*
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 26fad9621738..dcf04cb13283 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -102,7 +102,7 @@ static int bfs_create(struct inode * dir, struct dentry * dentry, int mode,
 inode->i_uid = current->fsuid;
 inode->i_gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid;
 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
-inode->i_blocks = inode->i_blksize = 0;
+inode->i_blocks = 0;
 inode->i_op = &bfs_file_inops;
 inode->i_fop = &bfs_file_operations;
 inode->i_mapping->a_ops = &bfs_aops;
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index cf74f3d4d966..ed27ffb3459e 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -76,7 +76,6 @@ static void bfs_read_inode(struct inode * inode)
 inode->i_size = BFS_FILESIZE(di);
 inode->i_blocks = BFS_FILEBLOCKS(di);
 if (inode->i_size || inode->i_blocks) dprintf("Registered inode with %lld size, %ld blocks\n", inode->i_size, inode->i_blocks);
-inode->i_blksize = PAGE_SIZE;
 inode->i_atime.tv_sec = le32_to_cpu(di->i_atime);
 inode->i_mtime.tv_sec = le32_to_cpu(di->i_mtime);
 inode->i_ctime.tv_sec = le32_to_cpu(di->i_ctime);
@@ -268,8 +267,7 @@ static int init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
-if (kmem_cache_destroy(bfs_inode_cachep))
-printk(KERN_INFO "bfs_inode_cache: not all structures were freed\n");
+kmem_cache_destroy(bfs_inode_cachep);
 }
 
 static struct super_operations bfs_sops = {
@@ -311,11 +309,10 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
 unsigned i, imap_len;
 struct bfs_sb_info * info;
 
-info = kmalloc(sizeof(*info), GFP_KERNEL);
+info = kzalloc(sizeof(*info), GFP_KERNEL);
 if (!info)
 return -ENOMEM;
 s->s_fs_info = info;
-memset(info, 0, sizeof(*info));
 
 sb_set_blocksize(s, BFS_BSIZE);
 
@@ -338,10 +335,9 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
 + BFS_ROOT_INO - 1;
 
 imap_len = info->si_lasti/8 + 1;
-info->si_imap = kmalloc(imap_len, GFP_KERNEL);
+info->si_imap = kzalloc(imap_len, GFP_KERNEL);
 if (!info->si_imap)
 goto out;
-memset(info->si_imap, 0, imap_len);
 for (i=0; i<BFS_ROOT_INO; i++)
 set_bit(i, info->si_imap);
 
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 672a3b90bc55..dfd8cfb7fb5d 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -515,7 +515,8 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
 {
 unsigned int random_variable = 0;
 
-if (current->flags & PF_RANDOMIZE) {
+if ((current->flags & PF_RANDOMIZE) &&
+!(current->personality & ADDR_NO_RANDOMIZE)) {
 random_variable = get_random_int() & STACK_RND_MASK;
 random_variable <<= PAGE_SHIFT;
 }
@@ -1262,7 +1263,7 @@ static void fill_elf_header(struct elfhdr *elf, int segs)
 return;
 }
 
-static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
+static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
 {
 phdr->p_type = PT_NOTE;
 phdr->p_offset = offset;
@@ -1428,7 +1429,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
 int i;
 struct vm_area_struct *vma;
 struct elfhdr *elf = NULL;
-off_t offset = 0, dataoff;
+loff_t offset = 0, dataoff;
 unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
 int numnote;
 struct memelfnote *notes = NULL;
@@ -1661,11 +1662,11 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
 ELF_CORE_WRITE_EXTRA_DATA;
 #endif
 
-if ((off_t)file->f_pos != offset) {
+if (file->f_pos != offset) {
 /* Sanity check */
 printk(KERN_WARNING
-"elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
-(off_t)file->f_pos, offset);
+"elf_core_dump: file->f_pos (%Ld) != offset (%Ld)\n",
+file->f_pos, offset);
 }
 
 end_coredump:
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 34ebbc191e46..66ba137f8661 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -507,7 +507,6 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
 inode->i_mode = mode;
 inode->i_uid = 0;
 inode->i_gid = 0;
-inode->i_blksize = PAGE_CACHE_SIZE;
 inode->i_blocks = 0;
 inode->i_atime = inode->i_mtime = inode->i_ctime =
 current_fs_time(inode->i_sb);
@@ -517,7 +516,7 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
 
 static void bm_clear_inode(struct inode *inode)
 {
-kfree(inode->u.generic_ip);
+kfree(inode->i_private);
 }
 
 static void kill_node(Node *e)
@@ -545,7 +544,7 @@ static void kill_node(Node *e)
 static ssize_t
 bm_entry_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos)
 {
-Node *e = file->f_dentry->d_inode->u.generic_ip;
+Node *e = file->f_dentry->d_inode->i_private;
 loff_t pos = *ppos;
 ssize_t res;
 char *page;
@@ -579,7 +578,7 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
 size_t count, loff_t *ppos)
 {
 struct dentry *root;
-Node *e = file->f_dentry->d_inode->u.generic_ip;
+Node *e = file->f_dentry->d_inode->i_private;
 int res = parse_command(buffer, count);
 
 switch (res) {
@@ -646,7 +645,7 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
 }
 
 e->dentry = dget(dentry);
-inode->u.generic_ip = e;
+inode->i_private = e;
 inode->i_fop = &bm_entry_operations;
 
 d_instantiate(dentry, inode);
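The u.generic_ip to i_private rename seen here (and in the autofs and debugfs hunks) does not change the usage pattern: the inode still carries one opaque pointer that the file operations pick up later. A minimal sketch of the convention, with invented names:

```c
#include <linux/fs.h>

struct example_node {
	int value;
};

/* Stash per-file state on the inode when it is created... */
static void example_attach(struct inode *inode, struct example_node *e)
{
	inode->i_private = e;	/* was inode->u.generic_ip = e; */
}

/* ...and pick it up again from open()/read()/write(). */
static int example_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
```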
diff --git a/fs/buffer.c b/fs/buffer.c
index 71649ef9b658..3b6d701073e7 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2987,6 +2987,7 @@ int try_to_free_buffers(struct page *page)
 
 spin_lock(&mapping->private_lock);
 ret = drop_buffers(page, &buffers_to_free);
+spin_unlock(&mapping->private_lock);
 if (ret) {
 /*
 * If the filesystem writes its buffers by hand (eg ext3)
@@ -2998,7 +2999,6 @@ int try_to_free_buffers(struct page *page)
 */
 clear_page_dirty(page);
 }
-spin_unlock(&mapping->private_lock);
 out:
 if (buffers_to_free) {
 struct buffer_head *bh = buffers_to_free;
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 3483d3cf8087..0009346d827f 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -19,11 +19,30 @@
 #include <linux/kobj_map.h>
 #include <linux/cdev.h>
 #include <linux/mutex.h>
+#include <linux/backing-dev.h>
 
 #ifdef CONFIG_KMOD
 #include <linux/kmod.h>
 #endif
 
+/*
+ * capabilities for /dev/mem, /dev/kmem and similar directly mappable character
+ * devices
+ * - permits shared-mmap for read, write and/or exec
+ * - does not permit private mmap in NOMMU mode (can't do COW)
+ * - no readahead or I/O queue unplugging required
+ */
+struct backing_dev_info directly_mappable_cdev_bdi = {
+.capabilities = (
+#ifdef CONFIG_MMU
+/* permit private copies of the data to be taken */
+BDI_CAP_MAP_COPY |
+#endif
+/* permit direct mmap, for read, write or exec */
+BDI_CAP_MAP_DIRECT |
+BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
+};
+
 static struct kobj_map *cdev_map;
 
 static DEFINE_MUTEX(chrdevs_lock);
@@ -461,3 +480,4 @@ EXPORT_SYMBOL(cdev_del);
 EXPORT_SYMBOL(cdev_add);
 EXPORT_SYMBOL(register_chrdev);
 EXPORT_SYMBOL(unregister_chrdev);
+EXPORT_SYMBOL(directly_mappable_cdev_bdi);
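directly_mappable_cdev_bdi is exported so that drivers for directly mappable character devices can advertise these capabilities on their mapping. A hedged sketch of how a driver's open routine might wire it up (the function below is hypothetical, not part of this patch):

```c
#include <linux/fs.h>
#include <linux/backing-dev.h>

extern struct backing_dev_info directly_mappable_cdev_bdi;

/* Hypothetical open() for a /dev/mem-style directly mappable device. */
static int example_mappable_open(struct inode *inode, struct file *filp)
{
	/* Assumed usage: point the file's mapping at the shared BDI so the
	 * VM sees the MAP_DIRECT/READ/WRITE/EXEC capabilities declared above. */
	filp->f_mapping->backing_dev_info = &directly_mappable_cdev_bdi;
	return 0;
}
```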
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index c3ef1c0d0e68..22bcf4d7e7ae 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -253,7 +253,6 @@ cifs_alloc_inode(struct super_block *sb)
 file data or metadata */
 cifs_inode->clientCanCacheRead = FALSE;
 cifs_inode->clientCanCacheAll = FALSE;
-cifs_inode->vfs_inode.i_blksize = CIFS_MAX_MSGSIZE;
 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
 cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;
 INIT_LIST_HEAD(&cifs_inode->openFileList);
@@ -699,8 +698,7 @@ cifs_init_inodecache(void)
 static void
 cifs_destroy_inodecache(void)
 {
-if (kmem_cache_destroy(cifs_inode_cachep))
-printk(KERN_WARNING "cifs_inode_cache: error freeing\n");
+kmem_cache_destroy(cifs_inode_cachep);
 }
 
 static int
@@ -778,13 +776,9 @@ static void
 cifs_destroy_request_bufs(void)
 {
 mempool_destroy(cifs_req_poolp);
-if (kmem_cache_destroy(cifs_req_cachep))
-printk(KERN_WARNING
-"cifs_destroy_request_cache: error not all structures were freed\n");
+kmem_cache_destroy(cifs_req_cachep);
 mempool_destroy(cifs_sm_req_poolp);
-if (kmem_cache_destroy(cifs_sm_req_cachep))
-printk(KERN_WARNING
-"cifs_destroy_request_cache: cifs_small_rq free error\n");
+kmem_cache_destroy(cifs_sm_req_cachep);
 }
 
 static int
@@ -819,13 +813,8 @@ static void
 cifs_destroy_mids(void)
 {
 mempool_destroy(cifs_mid_poolp);
-if (kmem_cache_destroy(cifs_mid_cachep))
-printk(KERN_WARNING
-"cifs_destroy_mids: error not all structures were freed\n");
-
-if (kmem_cache_destroy(cifs_oplock_cachep))
-printk(KERN_WARNING
-"error not all oplock structures were freed\n");
+kmem_cache_destroy(cifs_mid_cachep);
+kmem_cache_destroy(cifs_oplock_cachep);
 }
 
 static int cifs_oplock_thread(void * dummyarg)
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 9aeb58a7d369..b27b34537bf2 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -216,10 +216,9 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
 
 if (allocation_size < end_of_file)
 cFYI(1, ("May be sparse file, allocation less than file size"));
-cFYI(1, ("File Size %ld and blocks %llu and blocksize %ld",
+cFYI(1, ("File Size %ld and blocks %llu",
 (unsigned long)tmp_inode->i_size,
-(unsigned long long)tmp_inode->i_blocks,
-tmp_inode->i_blksize));
+(unsigned long long)tmp_inode->i_blocks));
 if (S_ISREG(tmp_inode->i_mode)) {
 cFYI(1, ("File inode"));
 tmp_inode->i_op = &cifs_file_inode_ops;
diff --git a/fs/coda/coda_linux.c b/fs/coda/coda_linux.c
index 5597080cb811..95a54253c047 100644
--- a/fs/coda/coda_linux.c
+++ b/fs/coda/coda_linux.c
@@ -110,8 +110,6 @@ void coda_vattr_to_iattr(struct inode *inode, struct coda_vattr *attr)
 inode->i_nlink = attr->va_nlink;
 if (attr->va_size != -1)
 inode->i_size = attr->va_size;
-if (attr->va_blocksize != -1)
-inode->i_blksize = attr->va_blocksize;
 if (attr->va_size != -1)
 inode->i_blocks = (attr->va_size + 511) >> 9;
 if (attr->va_atime.tv_sec != -1)
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 71f2ea632e53..8651ea6a23b7 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -513,7 +513,7 @@ static int coda_venus_readdir(struct file *filp, filldir_t filldir,
 ino_t ino;
 int ret, i;
 
-vdir = (struct venus_dirent *)kmalloc(sizeof(*vdir), GFP_KERNEL);
+vdir = kmalloc(sizeof(*vdir), GFP_KERNEL);
 if (!vdir) return -ENOMEM;
 
 i = filp->f_pos;
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 87f1dc8aa24b..88d123321164 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -80,8 +80,7 @@ int coda_init_inodecache(void)
 
 void coda_destroy_inodecache(void)
 {
-if (kmem_cache_destroy(coda_inode_cachep))
-printk(KERN_INFO "coda_inode_cache: not all structures were freed\n");
+kmem_cache_destroy(coda_inode_cachep);
 }
 
 static int coda_remount(struct super_block *sb, int *flags, char *data)
diff --git a/fs/compat.c b/fs/compat.c
index e31e9cf96647..ce982f6e8c80 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1855,7 +1855,7 @@ asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp,
 
 } while (!ret && !timeout && tsp && (ts.tv_sec || ts.tv_nsec));
 
-if (tsp && !(current->personality & STICKY_TIMEOUTS)) {
+if (ret == 0 && tsp && !(current->personality & STICKY_TIMEOUTS)) {
 struct compat_timespec rts;
 
 rts.tv_sec = timeout / HZ;
@@ -1866,7 +1866,8 @@ asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp,
 }
 if (compat_timespec_compare(&rts, &ts) >= 0)
 rts = ts;
-copy_to_user(tsp, &rts, sizeof(rts));
+if (copy_to_user(tsp, &rts, sizeof(rts)))
+ret = -EFAULT;
 }
 
 if (ret == -ERESTARTNOHAND) {
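The compat_sys_pselect7() change illustrates a general rule for writing results back to userspace: update the caller's timespec only when the call itself succeeded, and turn a failed copy_to_user() into -EFAULT instead of silently ignoring it. Restated as an isolated sketch (the helper name is invented):

```c
#include <linux/compat.h>
#include <asm/uaccess.h>

/* Illustrative only: the error-handling shape of the fix above. */
static long example_report_remaining(struct compat_timespec __user *tsp,
				     const struct compat_timespec *rts,
				     long ret)
{
	if (ret == 0 && tsp) {
		if (copy_to_user(tsp, rts, sizeof(*rts)))
			ret = -EFAULT;
	}
	return ret;
}
```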
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index f499803743e0..85105e50f7db 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -274,9 +274,8 @@ static int check_perm(struct inode * inode, struct file * file)
 /* No error? Great, allocate a buffer for the file, and store it
 * it in file->private_data for easy access.
 */
-buffer = kmalloc(sizeof(struct configfs_buffer),GFP_KERNEL);
+buffer = kzalloc(sizeof(struct configfs_buffer),GFP_KERNEL);
 if (buffer) {
-memset(buffer,0,sizeof(struct configfs_buffer));
 init_MUTEX(&buffer->sem);
 buffer->needs_read_fill = 1;
 buffer->ops = ops;
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index e14488ca6411..fb18917954a9 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -76,11 +76,10 @@ int configfs_setattr(struct dentry * dentry, struct iattr * iattr)
 
 if (!sd_iattr) {
 /* setting attributes for the first time, allocate now */
-sd_iattr = kmalloc(sizeof(struct iattr), GFP_KERNEL);
+sd_iattr = kzalloc(sizeof(struct iattr), GFP_KERNEL);
 if (!sd_iattr)
 return -ENOMEM;
 /* assign default attributes */
-memset(sd_iattr, 0, sizeof(struct iattr));
 sd_iattr->ia_mode = sd->s_mode;
 sd_iattr->ia_uid = 0;
 sd_iattr->ia_gid = 0;
@@ -136,7 +135,6 @@ struct inode * configfs_new_inode(mode_t mode, struct configfs_dirent * sd)
 {
 struct inode * inode = new_inode(configfs_sb);
 if (inode) {
-inode->i_blksize = PAGE_CACHE_SIZE;
 inode->i_blocks = 0;
 inode->i_mapping->a_ops = &configfs_aops;
 inode->i_mapping->backing_dev_info = &configfs_backing_dev_info;
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 223c0431042d..ad96b6990715 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -73,7 +73,6 @@ static int cramfs_iget5_set(struct inode *inode, void *opaque)
 inode->i_uid = cramfs_inode->uid;
 inode->i_size = cramfs_inode->size;
 inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
-inode->i_blksize = PAGE_CACHE_SIZE;
 inode->i_gid = cramfs_inode->gid;
 /* Struct copy intentional */
 inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
@@ -242,11 +241,10 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
 
 sb->s_flags |= MS_RDONLY;
 
-sbi = kmalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
+sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
 if (!sbi)
 return -ENOMEM;
 sb->s_fs_info = sbi;
-memset(sbi, 0, sizeof(struct cramfs_sb_info));
 
 /* Invalidate the read buffers on mount: think disk change.. */
 mutex_lock(&read_mutex);
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 39640fd03458..bf3901ab1744 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -32,8 +32,8 @@ static ssize_t default_write_file(struct file *file, const char __user *buf,
 
 static int default_open(struct inode *inode, struct file *file)
 {
-if (inode->u.generic_ip)
-file->private_data = inode->u.generic_ip;
+if (inode->i_private)
+file->private_data = inode->i_private;
 
 return 0;
 }
@@ -55,12 +55,11 @@ static u64 debugfs_u8_get(void *data)
 DEFINE_SIMPLE_ATTRIBUTE(fops_u8, debugfs_u8_get, debugfs_u8_set, "%llu\n");
 
 /**
-* debugfs_create_u8 - create a file in the debugfs filesystem that is used to read and write an unsigned 8 bit value.
-*
+* debugfs_create_u8 - create a debugfs file that is used to read and write an unsigned 8-bit value
 * @name: a pointer to a string containing the name of the file to create.
 * @mode: the permission that the file should have
 * @parent: a pointer to the parent dentry for this file. This should be a
-* directory dentry if set. If this paramater is NULL, then the
+* directory dentry if set. If this parameter is %NULL, then the
 * file will be created in the root of the debugfs filesystem.
 * @value: a pointer to the variable that the file should read to and write
 * from.
@@ -72,11 +71,11 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u8, debugfs_u8_get, debugfs_u8_set, "%llu\n");
 * This function will return a pointer to a dentry if it succeeds. This
 * pointer must be passed to the debugfs_remove() function when the file is
 * to be removed (no automatic cleanup happens if your module is unloaded,
-* you are responsible here.) If an error occurs, NULL will be returned.
+* you are responsible here.) If an error occurs, %NULL will be returned.
 *
-* If debugfs is not enabled in the kernel, the value -ENODEV will be
+* If debugfs is not enabled in the kernel, the value -%ENODEV will be
 * returned. It is not wise to check for this value, but rather, check for
-* NULL or !NULL instead as to eliminate the need for #ifdef in the calling
+* %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
 * code.
 */
 struct dentry *debugfs_create_u8(const char *name, mode_t mode,
@@ -97,12 +96,11 @@ static u64 debugfs_u16_get(void *data)
 DEFINE_SIMPLE_ATTRIBUTE(fops_u16, debugfs_u16_get, debugfs_u16_set, "%llu\n");
 
 /**
-* debugfs_create_u16 - create a file in the debugfs filesystem that is used to read and write an unsigned 16 bit value.
-*
+* debugfs_create_u16 - create a debugfs file that is used to read and write an unsigned 16-bit value
 * @name: a pointer to a string containing the name of the file to create.
 * @mode: the permission that the file should have
 * @parent: a pointer to the parent dentry for this file. This should be a
-* directory dentry if set. If this paramater is NULL, then the
+* directory dentry if set. If this parameter is %NULL, then the
 * file will be created in the root of the debugfs filesystem.
 * @value: a pointer to the variable that the file should read to and write
 * from.
@@ -114,11 +112,11 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u16, debugfs_u16_get, debugfs_u16_set, "%llu\n");
 * This function will return a pointer to a dentry if it succeeds. This
 * pointer must be passed to the debugfs_remove() function when the file is
 * to be removed (no automatic cleanup happens if your module is unloaded,
-* you are responsible here.) If an error occurs, NULL will be returned.
+* you are responsible here.) If an error occurs, %NULL will be returned.
 *
-* If debugfs is not enabled in the kernel, the value -ENODEV will be
+* If debugfs is not enabled in the kernel, the value -%ENODEV will be
 * returned. It is not wise to check for this value, but rather, check for
-* NULL or !NULL instead as to eliminate the need for #ifdef in the calling
+* %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
 * code.
 */
 struct dentry *debugfs_create_u16(const char *name, mode_t mode,
@@ -139,12 +137,11 @@ static u64 debugfs_u32_get(void *data)
 DEFINE_SIMPLE_ATTRIBUTE(fops_u32, debugfs_u32_get, debugfs_u32_set, "%llu\n");
 
 /**
-* debugfs_create_u32 - create a file in the debugfs filesystem that is used to read and write an unsigned 32 bit value.
-*
+* debugfs_create_u32 - create a debugfs file that is used to read and write an unsigned 32-bit value
 * @name: a pointer to a string containing the name of the file to create.
 * @mode: the permission that the file should have
 * @parent: a pointer to the parent dentry for this file. This should be a
-* directory dentry if set. If this paramater is NULL, then the
+* directory dentry if set. If this parameter is %NULL, then the
 * file will be created in the root of the debugfs filesystem.
 * @value: a pointer to the variable that the file should read to and write
 * from.
@@ -156,11 +153,11 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u32, debugfs_u32_get, debugfs_u32_set, "%llu\n");
 * This function will return a pointer to a dentry if it succeeds. This
 * pointer must be passed to the debugfs_remove() function when the file is
 * to be removed (no automatic cleanup happens if your module is unloaded,
-* you are responsible here.) If an error occurs, NULL will be returned.
+* you are responsible here.) If an error occurs, %NULL will be returned.
 *
-* If debugfs is not enabled in the kernel, the value -ENODEV will be
+* If debugfs is not enabled in the kernel, the value -%ENODEV will be
 * returned. It is not wise to check for this value, but rather, check for
-* NULL or !NULL instead as to eliminate the need for #ifdef in the calling
+* %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
 * code.
 */
 struct dentry *debugfs_create_u32(const char *name, mode_t mode,
@@ -219,12 +216,11 @@ static const struct file_operations fops_bool = {
 };
 
 /**
-* debugfs_create_bool - create a file in the debugfs filesystem that is used to read and write a boolean value.
-*
+* debugfs_create_bool - create a debugfs file that is used to read and write a boolean value
 * @name: a pointer to a string containing the name of the file to create.
 * @mode: the permission that the file should have
 * @parent: a pointer to the parent dentry for this file. This should be a
-* directory dentry if set. If this paramater is NULL, then the
+* directory dentry if set. If this parameter is %NULL, then the
 * file will be created in the root of the debugfs filesystem.
 * @value: a pointer to the variable that the file should read to and write
 * from.
@@ -236,11 +232,11 @@ static const struct file_operations fops_bool = {
 * This function will return a pointer to a dentry if it succeeds. This
 * pointer must be passed to the debugfs_remove() function when the file is
 * to be removed (no automatic cleanup happens if your module is unloaded,
-* you are responsible here.) If an error occurs, NULL will be returned.
+* you are responsible here.) If an error occurs, %NULL will be returned.
 *
-* If debugfs is not enabled in the kernel, the value -ENODEV will be
+* If debugfs is not enabled in the kernel, the value -%ENODEV will be
 * returned. It is not wise to check for this value, but rather, check for
-* NULL or !NULL instead as to eliminate the need for #ifdef in the calling
+* %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
 * code.
 */
 struct dentry *debugfs_create_bool(const char *name, mode_t mode,
@@ -264,13 +260,11 @@ static struct file_operations fops_blob = {
 };
 
 /**
-* debugfs_create_blob - create a file in the debugfs filesystem that is
-* used to read and write a binary blob.
-*
+* debugfs_create_blob - create a debugfs file that is used to read and write a binary blob
 * @name: a pointer to a string containing the name of the file to create.
 * @mode: the permission that the file should have
 * @parent: a pointer to the parent dentry for this file. This should be a
-* directory dentry if set. If this paramater is NULL, then the
+* directory dentry if set. If this parameter is %NULL, then the
 * file will be created in the root of the debugfs filesystem.
 * @blob: a pointer to a struct debugfs_blob_wrapper which contains a pointer
 * to the blob data and the size of the data.
@@ -282,11 +276,11 @@ static struct file_operations fops_blob = {
 * This function will return a pointer to a dentry if it succeeds. This
 * pointer must be passed to the debugfs_remove() function when the file is
 * to be removed (no automatic cleanup happens if your module is unloaded,
-* you are responsible here.) If an error occurs, NULL will be returned.
+* you are responsible here.) If an error occurs, %NULL will be returned.
 *
-* If debugfs is not enabled in the kernel, the value -ENODEV will be
+* If debugfs is not enabled in the kernel, the value -%ENODEV will be
 * returned. It is not wise to check for this value, but rather, check for
-* NULL or !NULL instead as to eliminate the need for #ifdef in the calling
+* %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
 * code.
 */
 struct dentry *debugfs_create_blob(const char *name, mode_t mode,
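The kerneldoc being cleaned up above describes one calling convention shared by all of these helpers. A hedged usage sketch under those documented rules (module and variable names are made up):

```c
#include <linux/debugfs.h>
#include <linux/module.h>

static u32 example_counter;
static u32 example_enabled;	/* debugfs_create_bool() takes a u32 here */

static struct dentry *example_u32_dentry, *example_bool_dentry;

static int __init example_debugfs_init(void)
{
	/* Per the comments above: check for NULL, do not test for -ENODEV. */
	example_u32_dentry = debugfs_create_u32("example_counter", 0644,
						NULL, &example_counter);
	example_bool_dentry = debugfs_create_bool("example_enabled", 0644,
						  NULL, &example_enabled);
	return 0;
}

static void __exit example_debugfs_exit(void)
{
	/* No automatic cleanup happens; the caller removes its own dentries. */
	debugfs_remove(example_bool_dentry);
	debugfs_remove(example_u32_dentry);
}

module_init(example_debugfs_init);
module_exit(example_debugfs_exit);
MODULE_LICENSE("GPL");
```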
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index e8ae3042b806..269e649e6dc6 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -40,7 +40,6 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d
40 inode->i_mode = mode; 40 inode->i_mode = mode;
41 inode->i_uid = 0; 41 inode->i_uid = 0;
42 inode->i_gid = 0; 42 inode->i_gid = 0;
43 inode->i_blksize = PAGE_CACHE_SIZE;
44 inode->i_blocks = 0; 43 inode->i_blocks = 0;
45 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 44 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
46 switch (mode & S_IFMT) { 45 switch (mode & S_IFMT) {
@@ -162,14 +161,13 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
162 161
163/** 162/**
164 * debugfs_create_file - create a file in the debugfs filesystem 163 * debugfs_create_file - create a file in the debugfs filesystem
165 *
166 * @name: a pointer to a string containing the name of the file to create. 164 * @name: a pointer to a string containing the name of the file to create.
167 * @mode: the permission that the file should have 165 * @mode: the permission that the file should have
168 * @parent: a pointer to the parent dentry for this file. This should be a 166 * @parent: a pointer to the parent dentry for this file. This should be a
169 * directory dentry if set. If this paramater is NULL, then the 167 * directory dentry if set. If this paramater is NULL, then the
170 * file will be created in the root of the debugfs filesystem. 168 * file will be created in the root of the debugfs filesystem.
171 * @data: a pointer to something that the caller will want to get to later 169 * @data: a pointer to something that the caller will want to get to later
172 * on. The inode.u.generic_ip pointer will point to this value on 170 * on. The inode.i_private pointer will point to this value on
173 * the open() call. 171 * the open() call.
174 * @fops: a pointer to a struct file_operations that should be used for 172 * @fops: a pointer to a struct file_operations that should be used for
175 * this file. 173 * this file.
@@ -182,11 +180,11 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
182 * This function will return a pointer to a dentry if it succeeds. This 180 * This function will return a pointer to a dentry if it succeeds. This
183 * pointer must be passed to the debugfs_remove() function when the file is 181 * pointer must be passed to the debugfs_remove() function when the file is
184 * to be removed (no automatic cleanup happens if your module is unloaded, 182 * to be removed (no automatic cleanup happens if your module is unloaded,
185 * you are responsible here.) If an error occurs, NULL will be returned. 183 * you are responsible here.) If an error occurs, %NULL will be returned.
186 * 184 *
187 * If debugfs is not enabled in the kernel, the value -ENODEV will be 185 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
188 * returned. It is not wise to check for this value, but rather, check for 186 * returned. It is not wise to check for this value, but rather, check for
189 * NULL or !NULL instead so as to eliminate the need for #ifdef in the calling 187 * %NULL or !%NULL instead so as to eliminate the need for #ifdef in the calling
190 * code. 188 * code.
191 */ 189 */
192struct dentry *debugfs_create_file(const char *name, mode_t mode, 190struct dentry *debugfs_create_file(const char *name, mode_t mode,
@@ -210,7 +208,7 @@ struct dentry *debugfs_create_file(const char *name, mode_t mode,
210 208
211 if (dentry->d_inode) { 209 if (dentry->d_inode) {
212 if (data) 210 if (data)
213 dentry->d_inode->u.generic_ip = data; 211 dentry->d_inode->i_private = data;
214 if (fops) 212 if (fops)
215 dentry->d_inode->i_fop = fops; 213 dentry->d_inode->i_fop = fops;
216 } 214 }
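For reference, a minimal caller-side sketch of the i_private contract documented above, assuming the 2.6.18-era debugfs API that this hunk modifies; the file name, payload and fops here are illustrative, not part of the patch:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>

static u32 example_value = 42;			/* hypothetical payload passed as @data */
static struct dentry *example_dentry;

static int example_open(struct inode *inode, struct file *file)
{
	/* after this patch the @data pointer comes back as i_private,
	 * not u.generic_ip */
	file->private_data = inode->i_private;
	return 0;
}

static struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.open	= example_open,
};

static int __init example_init(void)
{
	example_dentry = debugfs_create_file("example", 0444, NULL,
					     &example_value, &example_fops);
	/* per the comment block above, test for NULL rather than -ENODEV */
	return example_dentry ? 0 : -ENODEV;
}

static void __exit example_exit(void)
{
	debugfs_remove(example_dentry);		/* no automatic cleanup on unload */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");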
@@ -221,7 +219,6 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
221 219
222/** 220/**
223 * debugfs_create_dir - create a directory in the debugfs filesystem 221 * debugfs_create_dir - create a directory in the debugfs filesystem
224 *
225 * @name: a pointer to a string containing the name of the directory to 222 * @name: a pointer to a string containing the name of the directory to
226 * create. 223 * create.
227 * @parent: a pointer to the parent dentry for this file. This should be a 224 * @parent: a pointer to the parent dentry for this file. This should be a
@@ -233,11 +230,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
233 * This function will return a pointer to a dentry if it succeeds. This 230 * This function will return a pointer to a dentry if it succeeds. This
234 * pointer must be passed to the debugfs_remove() function when the file is 231 * pointer must be passed to the debugfs_remove() function when the file is
235 * to be removed (no automatic cleanup happens if your module is unloaded, 232 * to be removed (no automatic cleanup happens if your module is unloaded,
236 * you are responsible here.) If an error occurs, NULL will be returned. 233 * you are responsible here.) If an error occurs, %NULL will be returned.
237 * 234 *
238 * If debugfs is not enabled in the kernel, the value -ENODEV will be 235 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
239 * returned. It is not wise to check for this value, but rather, check for 236 * returned. It is not wise to check for this value, but rather, check for
240 * NULL or !NULL instead so as to eliminate the need for #ifdef in the calling 237 * %NULL or !%NULL instead so as to eliminate the need for #ifdef in the calling
241 * code. 238 * code.
242 */ 239 */
243struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) 240struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
@@ -250,7 +247,6 @@ EXPORT_SYMBOL_GPL(debugfs_create_dir);
250 247
251/** 248/**
252 * debugfs_remove - removes a file or directory from the debugfs filesystem 249 * debugfs_remove - removes a file or directory from the debugfs filesystem
253 *
254 * @dentry: a pointer to a the dentry of the file or directory to be 250 * @dentry: a pointer to a the dentry of the file or directory to be
255 * removed. 251 * removed.
256 * 252 *
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index f7aef5bb584a..5f7b5a6025bf 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -113,7 +113,6 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
113 inode->i_ino = 1; 113 inode->i_ino = 1;
114 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 114 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
115 inode->i_blocks = 0; 115 inode->i_blocks = 0;
116 inode->i_blksize = 1024;
117 inode->i_uid = inode->i_gid = 0; 116 inode->i_uid = inode->i_gid = 0;
118 inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR; 117 inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
119 inode->i_op = &simple_dir_inode_operations; 118 inode->i_op = &simple_dir_inode_operations;
@@ -172,12 +171,11 @@ int devpts_pty_new(struct tty_struct *tty)
172 return -ENOMEM; 171 return -ENOMEM;
173 172
174 inode->i_ino = number+2; 173 inode->i_ino = number+2;
175 inode->i_blksize = 1024;
176 inode->i_uid = config.setuid ? config.uid : current->fsuid; 174 inode->i_uid = config.setuid ? config.uid : current->fsuid;
177 inode->i_gid = config.setgid ? config.gid : current->fsgid; 175 inode->i_gid = config.setgid ? config.gid : current->fsgid;
178 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 176 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
179 init_special_inode(inode, S_IFCHR|config.mode, device); 177 init_special_inode(inode, S_IFCHR|config.mode, device);
180 inode->u.generic_ip = tty; 178 inode->i_private = tty;
181 179
182 dentry = get_node(number); 180 dentry = get_node(number);
183 if (!IS_ERR(dentry) && !dentry->d_inode) 181 if (!IS_ERR(dentry) && !dentry->d_inode)
@@ -196,7 +194,7 @@ struct tty_struct *devpts_get_tty(int number)
196 tty = NULL; 194 tty = NULL;
197 if (!IS_ERR(dentry)) { 195 if (!IS_ERR(dentry)) {
198 if (dentry->d_inode) 196 if (dentry->d_inode)
199 tty = dentry->d_inode->u.generic_ip; 197 tty = dentry->d_inode->i_private;
200 dput(dentry); 198 dput(dentry);
201 } 199 }
202 200
diff --git a/fs/efs/super.c b/fs/efs/super.c
index 8ac2462ae5dd..b3f50651eb6b 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -90,8 +90,7 @@ static int init_inodecache(void)
90 90
91static void destroy_inodecache(void) 91static void destroy_inodecache(void)
92{ 92{
93 if (kmem_cache_destroy(efs_inode_cachep)) 93 kmem_cache_destroy(efs_inode_cachep);
94 printk(KERN_INFO "efs_inode_cache: not all structures were freed\n");
95} 94}
96 95
97static void efs_put_super(struct super_block *s) 96static void efs_put_super(struct super_block *s)
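The hunk above can drop its printk because kmem_cache_destroy() now returns void and the slab core reports leaked objects itself. A hedged sketch of the resulting create/destroy pairing, assuming the six-argument kmem_cache_create() of this kernel generation; the cache name and object size are illustrative:

#include <linux/slab.h>

static kmem_cache_t *example_cachep;		/* hypothetical object cache */

static int init_examplecache(void)
{
	example_cachep = kmem_cache_create("example_cache",
					   sizeof(unsigned long), 0,
					   SLAB_HWCACHE_ALIGN, NULL, NULL);
	return example_cachep ? 0 : -ENOMEM;
}

static void destroy_examplecache(void)
{
	/* returns void now; no caller-side "not all structures were freed"
	 * message is needed */
	kmem_cache_destroy(example_cachep);
}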
@@ -248,11 +247,10 @@ static int efs_fill_super(struct super_block *s, void *d, int silent)
248 struct buffer_head *bh; 247 struct buffer_head *bh;
249 struct inode *root; 248 struct inode *root;
250 249
251 sb = kmalloc(sizeof(struct efs_sb_info), GFP_KERNEL); 250 sb = kzalloc(sizeof(struct efs_sb_info), GFP_KERNEL);
252 if (!sb) 251 if (!sb)
253 return -ENOMEM; 252 return -ENOMEM;
254 s->s_fs_info = sb; 253 s->s_fs_info = sb;
255 memset(sb, 0, sizeof(struct efs_sb_info));
256 254
257 s->s_magic = EFS_SUPER_MAGIC; 255 s->s_magic = EFS_SUPER_MAGIC;
258 if (!sb_set_blocksize(s, EFS_BLOCKSIZE)) { 256 if (!sb_set_blocksize(s, EFS_BLOCKSIZE)) {
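The kzalloc() conversions in this series all follow the same shape; a minimal sketch, with a made-up per-superblock structure standing in for efs_sb_info:

#include <linux/slab.h>

struct example_sb_info {			/* hypothetical private data */
	unsigned long total_blocks;
	unsigned long first_block;
};

static struct example_sb_info *alloc_example_sb_info(void)
{
	/* kzalloc() == kmalloc() + memset(..., 0, ...), so the separate
	 * memset() removed above is no longer needed */
	return kzalloc(sizeof(struct example_sb_info), GFP_KERNEL);
}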
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 3a3567433b92..8d544334bcd2 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1590,7 +1590,6 @@ static struct inode *ep_eventpoll_inode(void)
1590 inode->i_uid = current->fsuid; 1590 inode->i_uid = current->fsuid;
1591 inode->i_gid = current->fsgid; 1591 inode->i_gid = current->fsgid;
1592 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 1592 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1593 inode->i_blksize = PAGE_SIZE;
1594 return inode; 1593 return inode;
1595 1594
1596eexit_1: 1595eexit_1:
diff --git a/fs/exec.c b/fs/exec.c
index 54135df2a966..97df6e0aeaee 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -595,7 +595,7 @@ static int de_thread(struct task_struct *tsk)
595 if (!newsighand) 595 if (!newsighand)
596 return -ENOMEM; 596 return -ENOMEM;
597 597
598 if (thread_group_empty(current)) 598 if (thread_group_empty(tsk))
599 goto no_thread_group; 599 goto no_thread_group;
600 600
601 /* 601 /*
@@ -620,17 +620,17 @@ static int de_thread(struct task_struct *tsk)
620 * Reparenting needs write_lock on tasklist_lock, 620 * Reparenting needs write_lock on tasklist_lock,
621 * so it is safe to do it under read_lock. 621 * so it is safe to do it under read_lock.
622 */ 622 */
623 if (unlikely(current->group_leader == child_reaper)) 623 if (unlikely(tsk->group_leader == child_reaper))
624 child_reaper = current; 624 child_reaper = tsk;
625 625
626 zap_other_threads(current); 626 zap_other_threads(tsk);
627 read_unlock(&tasklist_lock); 627 read_unlock(&tasklist_lock);
628 628
629 /* 629 /*
630 * Account for the thread group leader hanging around: 630 * Account for the thread group leader hanging around:
631 */ 631 */
632 count = 1; 632 count = 1;
633 if (!thread_group_leader(current)) { 633 if (!thread_group_leader(tsk)) {
634 count = 2; 634 count = 2;
635 /* 635 /*
636 * The SIGALRM timer survives the exec, but needs to point 636 * The SIGALRM timer survives the exec, but needs to point
@@ -639,14 +639,14 @@ static int de_thread(struct task_struct *tsk)
639 * synchronize with any firing (by calling del_timer_sync) 639 * synchronize with any firing (by calling del_timer_sync)
640 * before we can safely let the old group leader die. 640 * before we can safely let the old group leader die.
641 */ 641 */
642 sig->tsk = current; 642 sig->tsk = tsk;
643 spin_unlock_irq(lock); 643 spin_unlock_irq(lock);
644 if (hrtimer_cancel(&sig->real_timer)) 644 if (hrtimer_cancel(&sig->real_timer))
645 hrtimer_restart(&sig->real_timer); 645 hrtimer_restart(&sig->real_timer);
646 spin_lock_irq(lock); 646 spin_lock_irq(lock);
647 } 647 }
648 while (atomic_read(&sig->count) > count) { 648 while (atomic_read(&sig->count) > count) {
649 sig->group_exit_task = current; 649 sig->group_exit_task = tsk;
650 sig->notify_count = count; 650 sig->notify_count = count;
651 __set_current_state(TASK_UNINTERRUPTIBLE); 651 __set_current_state(TASK_UNINTERRUPTIBLE);
652 spin_unlock_irq(lock); 652 spin_unlock_irq(lock);
@@ -662,13 +662,13 @@ static int de_thread(struct task_struct *tsk)
662 * do is to wait for the thread group leader to become inactive, 662 * do is to wait for the thread group leader to become inactive,
663 * and to assume its PID: 663 * and to assume its PID:
664 */ 664 */
665 if (!thread_group_leader(current)) { 665 if (!thread_group_leader(tsk)) {
666 /* 666 /*
667 * Wait for the thread group leader to be a zombie. 667 * Wait for the thread group leader to be a zombie.
668 * It should already be zombie at this point, most 668 * It should already be zombie at this point, most
669 * of the time. 669 * of the time.
670 */ 670 */
671 leader = current->group_leader; 671 leader = tsk->group_leader;
672 while (leader->exit_state != EXIT_ZOMBIE) 672 while (leader->exit_state != EXIT_ZOMBIE)
673 yield(); 673 yield();
674 674
@@ -682,12 +682,12 @@ static int de_thread(struct task_struct *tsk)
682 * When we take on its identity by switching to its PID, we 682 * When we take on its identity by switching to its PID, we
683 * also take its birthdate (always earlier than our own). 683 * also take its birthdate (always earlier than our own).
684 */ 684 */
685 current->start_time = leader->start_time; 685 tsk->start_time = leader->start_time;
686 686
687 write_lock_irq(&tasklist_lock); 687 write_lock_irq(&tasklist_lock);
688 688
689 BUG_ON(leader->tgid != current->tgid); 689 BUG_ON(leader->tgid != tsk->tgid);
690 BUG_ON(current->pid == current->tgid); 690 BUG_ON(tsk->pid == tsk->tgid);
691 /* 691 /*
692 * An exec() starts a new thread group with the 692 * An exec() starts a new thread group with the
693 * TGID of the previous thread group. Rehash the 693 * TGID of the previous thread group. Rehash the
@@ -696,24 +696,21 @@ static int de_thread(struct task_struct *tsk)
696 */ 696 */
697 697
698 /* Become a process group leader with the old leader's pid. 698 /* Become a process group leader with the old leader's pid.
699 * Note: The old leader also uses thispid until release_task 699 * The old leader becomes a thread of this thread group.
700 * Note: The old leader also uses this pid until release_task
700 * is called. Odd but simple and correct. 701 * is called. Odd but simple and correct.
701 */ 702 */
702 detach_pid(current, PIDTYPE_PID); 703 detach_pid(tsk, PIDTYPE_PID);
703 current->pid = leader->pid; 704 tsk->pid = leader->pid;
704 attach_pid(current, PIDTYPE_PID, current->pid); 705 attach_pid(tsk, PIDTYPE_PID, tsk->pid);
705 attach_pid(current, PIDTYPE_PGID, current->signal->pgrp); 706 transfer_pid(leader, tsk, PIDTYPE_PGID);
706 attach_pid(current, PIDTYPE_SID, current->signal->session); 707 transfer_pid(leader, tsk, PIDTYPE_SID);
707 list_replace_rcu(&leader->tasks, &current->tasks); 708 list_replace_rcu(&leader->tasks, &tsk->tasks);
708 709
709 current->group_leader = current; 710 tsk->group_leader = tsk;
710 leader->group_leader = current; 711 leader->group_leader = tsk;
711 712
712 /* Reduce leader to a thread */ 713 tsk->exit_signal = SIGCHLD;
713 detach_pid(leader, PIDTYPE_PGID);
714 detach_pid(leader, PIDTYPE_SID);
715
716 current->exit_signal = SIGCHLD;
717 714
718 BUG_ON(leader->exit_state != EXIT_ZOMBIE); 715 BUG_ON(leader->exit_state != EXIT_ZOMBIE);
719 leader->exit_state = EXIT_DEAD; 716 leader->exit_state = EXIT_DEAD;
@@ -753,7 +750,7 @@ no_thread_group:
753 spin_lock(&oldsighand->siglock); 750 spin_lock(&oldsighand->siglock);
754 spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING); 751 spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);
755 752
756 rcu_assign_pointer(current->sighand, newsighand); 753 rcu_assign_pointer(tsk->sighand, newsighand);
757 recalc_sigpending(); 754 recalc_sigpending();
758 755
759 spin_unlock(&newsighand->siglock); 756 spin_unlock(&newsighand->siglock);
@@ -764,7 +761,7 @@ no_thread_group:
764 kmem_cache_free(sighand_cachep, oldsighand); 761 kmem_cache_free(sighand_cachep, oldsighand);
765 } 762 }
766 763
767 BUG_ON(!thread_group_leader(current)); 764 BUG_ON(!thread_group_leader(tsk));
768 return 0; 765 return 0;
769} 766}
770 767
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index da52b4a5db64..7c420b800c34 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -89,8 +89,8 @@ ext2_acl_to_disk(const struct posix_acl *acl, size_t *size)
89 size_t n; 89 size_t n;
90 90
91 *size = ext2_acl_size(acl->a_count); 91 *size = ext2_acl_size(acl->a_count);
92 ext_acl = (ext2_acl_header *)kmalloc(sizeof(ext2_acl_header) + 92 ext_acl = kmalloc(sizeof(ext2_acl_header) + acl->a_count *
93 acl->a_count * sizeof(ext2_acl_entry), GFP_KERNEL); 93 sizeof(ext2_acl_entry), GFP_KERNEL);
94 if (!ext_acl) 94 if (!ext_acl)
95 return ERR_PTR(-ENOMEM); 95 return ERR_PTR(-ENOMEM);
96 ext_acl->a_version = cpu_to_le32(EXT2_ACL_VERSION); 96 ext_acl->a_version = cpu_to_le32(EXT2_ACL_VERSION);
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 695f69ccf908..2cb545bf0f3c 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -574,7 +574,6 @@ got:
574 inode->i_mode = mode; 574 inode->i_mode = mode;
575 575
576 inode->i_ino = ino; 576 inode->i_ino = ino;
577 inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat), not the fs block size */
578 inode->i_blocks = 0; 577 inode->i_blocks = 0;
579 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; 578 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
580 memset(ei->i_data, 0, sizeof(ei->i_data)); 579 memset(ei->i_data, 0, sizeof(ei->i_data));
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index fb4d3220eb8d..dd4e14c221e0 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1094,7 +1094,6 @@ void ext2_read_inode (struct inode * inode)
1094 brelse (bh); 1094 brelse (bh);
1095 goto bad_inode; 1095 goto bad_inode;
1096 } 1096 }
1097 inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat), not the fs block size */
1098 inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); 1097 inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
1099 ei->i_flags = le32_to_cpu(raw_inode->i_flags); 1098 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
1100 ei->i_faddr = le32_to_cpu(raw_inode->i_faddr); 1099 ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 4286ff6330b6..513cd421ac0b 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -184,8 +184,7 @@ static int init_inodecache(void)
184 184
185static void destroy_inodecache(void) 185static void destroy_inodecache(void)
186{ 186{
187 if (kmem_cache_destroy(ext2_inode_cachep)) 187 kmem_cache_destroy(ext2_inode_cachep);
188 printk(KERN_INFO "ext2_inode_cache: not all structures were freed\n");
189} 188}
190 189
191static void ext2_clear_inode(struct inode *inode) 190static void ext2_clear_inode(struct inode *inode)
@@ -544,17 +543,24 @@ static int ext2_check_descriptors (struct super_block * sb)
544 int i; 543 int i;
545 int desc_block = 0; 544 int desc_block = 0;
546 struct ext2_sb_info *sbi = EXT2_SB(sb); 545 struct ext2_sb_info *sbi = EXT2_SB(sb);
547 unsigned long block = le32_to_cpu(sbi->s_es->s_first_data_block); 546 unsigned long first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
547 unsigned long last_block;
548 struct ext2_group_desc * gdp = NULL; 548 struct ext2_group_desc * gdp = NULL;
549 549
550 ext2_debug ("Checking group descriptors"); 550 ext2_debug ("Checking group descriptors");
551 551
552 for (i = 0; i < sbi->s_groups_count; i++) 552 for (i = 0; i < sbi->s_groups_count; i++)
553 { 553 {
554 if (i == sbi->s_groups_count - 1)
555 last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
556 else
557 last_block = first_block +
558 (EXT2_BLOCKS_PER_GROUP(sb) - 1);
559
554 if ((i % EXT2_DESC_PER_BLOCK(sb)) == 0) 560 if ((i % EXT2_DESC_PER_BLOCK(sb)) == 0)
555 gdp = (struct ext2_group_desc *) sbi->s_group_desc[desc_block++]->b_data; 561 gdp = (struct ext2_group_desc *) sbi->s_group_desc[desc_block++]->b_data;
556 if (le32_to_cpu(gdp->bg_block_bitmap) < block || 562 if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
557 le32_to_cpu(gdp->bg_block_bitmap) >= block + EXT2_BLOCKS_PER_GROUP(sb)) 563 le32_to_cpu(gdp->bg_block_bitmap) > last_block)
558 { 564 {
559 ext2_error (sb, "ext2_check_descriptors", 565 ext2_error (sb, "ext2_check_descriptors",
560 "Block bitmap for group %d" 566 "Block bitmap for group %d"
@@ -562,8 +568,8 @@ static int ext2_check_descriptors (struct super_block * sb)
562 i, (unsigned long) le32_to_cpu(gdp->bg_block_bitmap)); 568 i, (unsigned long) le32_to_cpu(gdp->bg_block_bitmap));
563 return 0; 569 return 0;
564 } 570 }
565 if (le32_to_cpu(gdp->bg_inode_bitmap) < block || 571 if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block ||
566 le32_to_cpu(gdp->bg_inode_bitmap) >= block + EXT2_BLOCKS_PER_GROUP(sb)) 572 le32_to_cpu(gdp->bg_inode_bitmap) > last_block)
567 { 573 {
568 ext2_error (sb, "ext2_check_descriptors", 574 ext2_error (sb, "ext2_check_descriptors",
569 "Inode bitmap for group %d" 575 "Inode bitmap for group %d"
@@ -571,9 +577,9 @@ static int ext2_check_descriptors (struct super_block * sb)
571 i, (unsigned long) le32_to_cpu(gdp->bg_inode_bitmap)); 577 i, (unsigned long) le32_to_cpu(gdp->bg_inode_bitmap));
572 return 0; 578 return 0;
573 } 579 }
574 if (le32_to_cpu(gdp->bg_inode_table) < block || 580 if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
575 le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group >= 581 le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group >
576 block + EXT2_BLOCKS_PER_GROUP(sb)) 582 last_block)
577 { 583 {
578 ext2_error (sb, "ext2_check_descriptors", 584 ext2_error (sb, "ext2_check_descriptors",
579 "Inode table for group %d" 585 "Inode table for group %d"
@@ -581,7 +587,7 @@ static int ext2_check_descriptors (struct super_block * sb)
581 i, (unsigned long) le32_to_cpu(gdp->bg_inode_table)); 587 i, (unsigned long) le32_to_cpu(gdp->bg_inode_table));
582 return 0; 588 return 0;
583 } 589 }
584 block += EXT2_BLOCKS_PER_GROUP(sb); 590 first_block += EXT2_BLOCKS_PER_GROUP(sb);
585 gdp++; 591 gdp++;
586 } 592 }
587 return 1; 593 return 1;
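The reworked descriptor check boils down to an inclusive per-group range test, with the final group clamped to the end of the filesystem. A standalone sketch of that arithmetic (plain C; names are illustrative, not the kernel's):

/* Valid block range for group i; the last group may be shorter than
 * blocks_per_group, so its upper bound is the filesystem's last block. */
static void group_range(unsigned long first_data_block,
			unsigned long blocks_count,
			unsigned long blocks_per_group,
			unsigned long i, unsigned long groups_count,
			unsigned long *first_block, unsigned long *last_block)
{
	*first_block = first_data_block + i * blocks_per_group;
	if (i == groups_count - 1)
		*last_block = blocks_count - 1;
	else
		*last_block = *first_block + (blocks_per_group - 1);
}

/* A bitmap or inode-table block is acceptable only inside that range. */
static int block_in_group(unsigned long block,
			  unsigned long first_block, unsigned long last_block)
{
	return block >= first_block && block <= last_block;
}

The point of the change is the last group: its range now ends at s_blocks_count - 1 instead of a full EXT2_BLOCKS_PER_GROUP(sb) past its start, and the inode-table end test became strictly greater-than rather than greater-or-equal.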
@@ -648,11 +654,10 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
648 int i, j; 654 int i, j;
649 __le32 features; 655 __le32 features;
650 656
651 sbi = kmalloc(sizeof(*sbi), GFP_KERNEL); 657 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
652 if (!sbi) 658 if (!sbi)
653 return -ENOMEM; 659 return -ENOMEM;
654 sb->s_fs_info = sbi; 660 sb->s_fs_info = sbi;
655 memset(sbi, 0, sizeof(*sbi));
656 661
657 /* 662 /*
658 * See what the current blocksize for the device is, and 663 * See what the current blocksize for the device is, and
@@ -861,10 +866,9 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
861 866
862 if (EXT2_BLOCKS_PER_GROUP(sb) == 0) 867 if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
863 goto cantfind_ext2; 868 goto cantfind_ext2;
864 sbi->s_groups_count = (le32_to_cpu(es->s_blocks_count) - 869 sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
865 le32_to_cpu(es->s_first_data_block) + 870 le32_to_cpu(es->s_first_data_block) - 1)
866 EXT2_BLOCKS_PER_GROUP(sb) - 1) / 871 / EXT2_BLOCKS_PER_GROUP(sb)) + 1;
867 EXT2_BLOCKS_PER_GROUP(sb);
868 db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) / 872 db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
869 EXT2_DESC_PER_BLOCK(sb); 873 EXT2_DESC_PER_BLOCK(sb);
870 sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL); 874 sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
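The two expressions for s_groups_count above are the same ceiling division, ceil((s_blocks_count - s_first_data_block) / EXT2_BLOCKS_PER_GROUP); the rewritten form subtracts before dividing, so it never has to add to the block count and cannot overflow an intermediate sum. A small self-contained check (the figures are hypothetical: 2097152 blocks, 32768 blocks per group, first data block 1):

#include <stdio.h>

static unsigned long groups_old(unsigned long blocks, unsigned long first,
				unsigned long per_group)
{
	/* old form: add (per_group - 1) before dividing */
	return (blocks - first + per_group - 1) / per_group;
}

static unsigned long groups_new(unsigned long blocks, unsigned long first,
				unsigned long per_group)
{
	/* new form: divide (count - 1) and add one */
	return ((blocks - first - 1) / per_group) + 1;
}

int main(void)
{
	printf("%lu %lu\n",
	       groups_old(2097152, 1, 32768),
	       groups_new(2097152, 1, 32768));	/* both print 64 */
	return 0;
}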
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 86ae8e93adb9..af52a7f8b291 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -521,11 +521,10 @@ bad_block: ext2_error(sb, "ext2_xattr_set",
521 } 521 }
522 } else { 522 } else {
523 /* Allocate a buffer where we construct the new block. */ 523 /* Allocate a buffer where we construct the new block. */
524 header = kmalloc(sb->s_blocksize, GFP_KERNEL); 524 header = kzalloc(sb->s_blocksize, GFP_KERNEL);
525 error = -ENOMEM; 525 error = -ENOMEM;
526 if (header == NULL) 526 if (header == NULL)
527 goto cleanup; 527 goto cleanup;
528 memset(header, 0, sb->s_blocksize);
529 end = (char *)header + sb->s_blocksize; 528 end = (char *)header + sb->s_blocksize;
530 header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC); 529 header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
531 header->h_blocks = header->h_refcount = cpu_to_le32(1); 530 header->h_blocks = header->h_refcount = cpu_to_le32(1);
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index 0d21d558b87a..1e5038d9a01b 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -90,8 +90,8 @@ ext3_acl_to_disk(const struct posix_acl *acl, size_t *size)
90 size_t n; 90 size_t n;
91 91
92 *size = ext3_acl_size(acl->a_count); 92 *size = ext3_acl_size(acl->a_count);
93 ext_acl = (ext3_acl_header *)kmalloc(sizeof(ext3_acl_header) + 93 ext_acl = kmalloc(sizeof(ext3_acl_header) + acl->a_count *
94 acl->a_count * sizeof(ext3_acl_entry), GFP_KERNEL); 94 sizeof(ext3_acl_entry), GFP_KERNEL);
95 if (!ext_acl) 95 if (!ext_acl)
96 return ERR_PTR(-ENOMEM); 96 return ERR_PTR(-ENOMEM);
97 ext_acl->a_version = cpu_to_le32(EXT3_ACL_VERSION); 97 ext_acl->a_version = cpu_to_le32(EXT3_ACL_VERSION);
@@ -258,7 +258,7 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type,
258 default: 258 default:
259 return -EINVAL; 259 return -EINVAL;
260 } 260 }
261 if (acl) { 261 if (acl) {
262 value = ext3_acl_to_disk(acl, &size); 262 value = ext3_acl_to_disk(acl, &size);
263 if (IS_ERR(value)) 263 if (IS_ERR(value))
264 return (int)PTR_ERR(value); 264 return (int)PTR_ERR(value);
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 063d994bda0b..b41a7d7e20f0 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -38,6 +38,13 @@
38 38
39#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) 39#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
40 40
41/**
42 * ext3_get_group_desc() -- load group descriptor from disk
43 * @sb: super block
44 * @block_group: given block group
45 * @bh: pointer to the buffer head to store the block
46 * group descriptor
47 */
41struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb, 48struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
42 unsigned int block_group, 49 unsigned int block_group,
43 struct buffer_head ** bh) 50 struct buffer_head ** bh)
@@ -73,8 +80,12 @@ struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
73 return desc + offset; 80 return desc + offset;
74} 81}
75 82
76/* 83/**
77 * Read the bitmap for a given block_group, reading into the specified 84 * read_block_bitmap()
85 * @sb: super block
86 * @block_group: given block group
87 *
88 * Read the bitmap for a given block_group, reading into the specified
78 * slot in the superblock's bitmap cache. 89 * slot in the superblock's bitmap cache.
79 * 90 *
80 * Return buffer_head on success or NULL in case of failure. 91 * Return buffer_head on success or NULL in case of failure.
@@ -103,15 +114,22 @@ error_out:
103 * Operations include: 114 * Operations include:
104 * dump, find, add, remove, is_empty, find_next_reservable_window, etc. 115 * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
105 * 116 *
106 * We use sorted double linked list for the per-filesystem reservation 117 * We use a red-black tree to represent per-filesystem reservation
107 * window list. (like in vm_region). 118 * windows.
119 *
120 */
121
122/**
123 * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
124 * @rb_root: root of per-filesystem reservation rb tree
125 * @verbose: verbose mode
126 * @fn: function which wishes to dump the reservation map
108 * 127 *
109 * Initially, we keep those small operations in the abstract functions, 128 * If verbose is turned on, it will print the whole block reservation
110 * so later if we need a better searching tree than double linked-list, 129 * windows(start, end). Otherwise, it will only print out the "bad" windows,
111 * we could easily switch to that without changing too much 130 * those windows that overlap with their immediate neighbors.
112 * code.
113 */ 131 */
114#if 0 132#if 1
115static void __rsv_window_dump(struct rb_root *root, int verbose, 133static void __rsv_window_dump(struct rb_root *root, int verbose,
116 const char *fn) 134 const char *fn)
117{ 135{
@@ -129,7 +147,7 @@ restart:
129 rsv = list_entry(n, struct ext3_reserve_window_node, rsv_node); 147 rsv = list_entry(n, struct ext3_reserve_window_node, rsv_node);
130 if (verbose) 148 if (verbose)
131 printk("reservation window 0x%p " 149 printk("reservation window 0x%p "
132 "start: %d, end: %d\n", 150 "start: %lu, end: %lu\n",
133 rsv, rsv->rsv_start, rsv->rsv_end); 151 rsv, rsv->rsv_start, rsv->rsv_end);
134 if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) { 152 if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
135 printk("Bad reservation %p (start >= end)\n", 153 printk("Bad reservation %p (start >= end)\n",
@@ -161,6 +179,22 @@ restart:
161#define rsv_window_dump(root, verbose) do {} while (0) 179#define rsv_window_dump(root, verbose) do {} while (0)
162#endif 180#endif
163 181
182/**
183 * goal_in_my_reservation()
184 * @rsv: inode's reservation window
185 * @grp_goal: given goal block relative to the allocation block group
186 * @group: the current allocation block group
187 * @sb: filesystem super block
188 *
189 * Test if the given goal block (group relative) is within the file's
190 * own block reservation window range.
191 *
192 * If the reservation window is outside the goal allocation group, return 0;
193 * grp_goal (given goal block) could be -1, which means no specific
194 * goal block. In this case, always return 1.
195 * If the goal block is within the reservation window, return 1;
196 * otherwise, return 0;
197 */
164static int 198static int
165goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal, 199goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal,
166 unsigned int group, struct super_block * sb) 200 unsigned int group, struct super_block * sb)
@@ -168,7 +202,7 @@ goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal,
168 ext3_fsblk_t group_first_block, group_last_block; 202 ext3_fsblk_t group_first_block, group_last_block;
169 203
170 group_first_block = ext3_group_first_block_no(sb, group); 204 group_first_block = ext3_group_first_block_no(sb, group);
171 group_last_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1; 205 group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
172 206
173 if ((rsv->_rsv_start > group_last_block) || 207 if ((rsv->_rsv_start > group_last_block) ||
174 (rsv->_rsv_end < group_first_block)) 208 (rsv->_rsv_end < group_first_block))
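Spelled out as a standalone predicate, the test documented above looks roughly like this (plain integers instead of ext3_fsblk_t/ext3_grpblk_t; a sketch, not the kernel code):

static int goal_in_window(long long rsv_start, long long rsv_end,
			  long grp_goal,
			  long long group_first_block,
			  long blocks_per_group)
{
	long long group_last_block = group_first_block + (blocks_per_group - 1);

	/* window entirely outside the allocation group: useless here */
	if (rsv_start > group_last_block || rsv_end < group_first_block)
		return 0;
	/* a concrete goal (grp_goal >= 0) must also fall inside the window */
	if (grp_goal >= 0 &&
	    (group_first_block + grp_goal < rsv_start ||
	     group_first_block + grp_goal > rsv_end))
		return 0;
	return 1;
}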
@@ -179,7 +213,11 @@ goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal,
179 return 1; 213 return 1;
180} 214}
181 215
182/* 216/**
217 * search_reserve_window()
218 * @rb_root: root of reservation tree
219 * @goal: target allocation block
220 *
183 * Find the reserved window which includes the goal, or the previous one 221 * Find the reserved window which includes the goal, or the previous one
184 * if the goal is not in any window. 222 * if the goal is not in any window.
185 * Returns NULL if there are no windows or if all windows start after the goal. 223 * Returns NULL if there are no windows or if all windows start after the goal.
@@ -216,6 +254,13 @@ search_reserve_window(struct rb_root *root, ext3_fsblk_t goal)
216 return rsv; 254 return rsv;
217} 255}
218 256
257/**
258 * ext3_rsv_window_add() -- Insert a window to the block reservation rb tree.
259 * @sb: super block
260 * @rsv: reservation window to add
261 *
262 * Must be called with rsv_lock held.
263 */
219void ext3_rsv_window_add(struct super_block *sb, 264void ext3_rsv_window_add(struct super_block *sb,
220 struct ext3_reserve_window_node *rsv) 265 struct ext3_reserve_window_node *rsv)
221{ 266{
@@ -236,14 +281,25 @@ void ext3_rsv_window_add(struct super_block *sb,
236 p = &(*p)->rb_left; 281 p = &(*p)->rb_left;
237 else if (start > this->rsv_end) 282 else if (start > this->rsv_end)
238 p = &(*p)->rb_right; 283 p = &(*p)->rb_right;
239 else 284 else {
285 rsv_window_dump(root, 1);
240 BUG(); 286 BUG();
287 }
241 } 288 }
242 289
243 rb_link_node(node, parent, p); 290 rb_link_node(node, parent, p);
244 rb_insert_color(node, root); 291 rb_insert_color(node, root);
245} 292}
246 293
294/**
295 * ext3_rsv_window_remove() -- unlink a window from the reservation rb tree
296 * @sb: super block
297 * @rsv: reservation window to remove
298 *
299 * Mark the block reservation window as not allocated, and unlink it
300 * from the filesystem reservation window rb tree. Must be called with
301 * rsv_lock held.
302 */
247static void rsv_window_remove(struct super_block *sb, 303static void rsv_window_remove(struct super_block *sb,
248 struct ext3_reserve_window_node *rsv) 304 struct ext3_reserve_window_node *rsv)
249{ 305{
@@ -253,11 +309,39 @@ static void rsv_window_remove(struct super_block *sb,
253 rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root); 309 rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root);
254} 310}
255 311
312/*
313 * rsv_is_empty() -- Check if the reservation window is allocated.
314 * @rsv: given reservation window to check
315 *
316 * returns 1 if the end block is EXT3_RESERVE_WINDOW_NOT_ALLOCATED.
317 */
256static inline int rsv_is_empty(struct ext3_reserve_window *rsv) 318static inline int rsv_is_empty(struct ext3_reserve_window *rsv)
257{ 319{
258 /* a valid reservation end block could not be 0 */ 320 /* a valid reservation end block could not be 0 */
259 return (rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED); 321 return rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
260} 322}
323
324/**
325 * ext3_init_block_alloc_info()
326 * @inode: file inode structure
327 *
328 * Allocate and initialize the reservation window structure, and
329 * finally link the window to the ext3 inode structure
330 *
331 * The reservation window structure is only dynamically allocated
332 * and linked to ext3 inode the first time the open file
333 * needs a new block. So, before every ext3_new_block(s) call, for
334 * regular files, we should check whether the reservation window
335 * structure exists or not. If it does not, this function is called.
336 * Failing to do so will result in block reservation being turned off for that
337 * open file.
338 *
339 * This function is called from ext3_get_blocks_handle(), also called
340 * when setting the reservation window size through ioctl before the file
341 * is open for write (needs block allocation).
342 *
343 * Needs truncate_mutex protection prior to calling this function.
344 */
261void ext3_init_block_alloc_info(struct inode *inode) 345void ext3_init_block_alloc_info(struct inode *inode)
262{ 346{
263 struct ext3_inode_info *ei = EXT3_I(inode); 347 struct ext3_inode_info *ei = EXT3_I(inode);
@@ -271,7 +355,7 @@ void ext3_init_block_alloc_info(struct inode *inode)
271 rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; 355 rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
272 rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; 356 rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
273 357
274 /* 358 /*
275 * if filesystem is mounted with NORESERVATION, the goal 359 * if filesystem is mounted with NORESERVATION, the goal
276 * reservation window size is set to zero to indicate 360 * reservation window size is set to zero to indicate
277 * block reservation is off 361 * block reservation is off
@@ -287,6 +371,19 @@ void ext3_init_block_alloc_info(struct inode *inode)
287 ei->i_block_alloc_info = block_i; 371 ei->i_block_alloc_info = block_i;
288} 372}
289 373
374/**
375 * ext3_discard_reservation()
376 * @inode: inode
377 *
378 * Discard (free) the block reservation window on last file close, on truncate,
379 * or at the last iput().
380 *
381 * It is being called in three cases:
382 * ext3_release_file(): the last writer closes the file
383 * ext3_clear_inode(): last iput(), when nobody links to this file.
384 * ext3_truncate(): when the block indirect map is about to change.
385 *
386 */
290void ext3_discard_reservation(struct inode *inode) 387void ext3_discard_reservation(struct inode *inode)
291{ 388{
292 struct ext3_inode_info *ei = EXT3_I(inode); 389 struct ext3_inode_info *ei = EXT3_I(inode);
@@ -306,7 +403,14 @@ void ext3_discard_reservation(struct inode *inode)
306 } 403 }
307} 404}
308 405
309/* Free given blocks, update quota and i_blocks field */ 406/**
407 * ext3_free_blocks_sb() -- Free given blocks and update quota
408 * @handle: handle to this transaction
409 * @sb: super block
410 * @block: start physical block to free
411 * @count: number of blocks to free
412 * @pdquot_freed_blocks: pointer to quota
413 */
310void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb, 414void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
311 ext3_fsblk_t block, unsigned long count, 415 ext3_fsblk_t block, unsigned long count,
312 unsigned long *pdquot_freed_blocks) 416 unsigned long *pdquot_freed_blocks)
@@ -419,8 +523,8 @@ do_more:
419 } 523 }
420 /* @@@ This prevents newly-allocated data from being 524 /* @@@ This prevents newly-allocated data from being
421 * freed and then reallocated within the same 525 * freed and then reallocated within the same
422 * transaction. 526 * transaction.
423 * 527 *
424 * Ideally we would want to allow that to happen, but to 528 * Ideally we would want to allow that to happen, but to
425 * do so requires making journal_forget() capable of 529 * do so requires making journal_forget() capable of
426 * revoking the queued write of a data block, which 530 * revoking the queued write of a data block, which
@@ -433,7 +537,7 @@ do_more:
433 * safe not to set the allocation bit in the committed 537 * safe not to set the allocation bit in the committed
434 * bitmap, because we know that there is no outstanding 538 * bitmap, because we know that there is no outstanding
435 * activity on the buffer any more and so it is safe to 539 * activity on the buffer any more and so it is safe to
436 * reallocate it. 540 * reallocate it.
437 */ 541 */
438 BUFFER_TRACE(bitmap_bh, "set in b_committed_data"); 542 BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
439 J_ASSERT_BH(bitmap_bh, 543 J_ASSERT_BH(bitmap_bh,
@@ -490,7 +594,13 @@ error_return:
490 return; 594 return;
491} 595}
492 596
493/* Free given blocks, update quota and i_blocks field */ 597/**
598 * ext3_free_blocks() -- Free given blocks and update quota
599 * @handle: handle for this transaction
600 * @inode: inode
601 * @block: start physical block to free
602 * @count: number of blocks to free
603 */
494void ext3_free_blocks(handle_t *handle, struct inode *inode, 604void ext3_free_blocks(handle_t *handle, struct inode *inode,
495 ext3_fsblk_t block, unsigned long count) 605 ext3_fsblk_t block, unsigned long count)
496{ 606{
@@ -508,7 +618,11 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode,
508 return; 618 return;
509} 619}
510 620
511/* 621/**
622 * ext3_test_allocatable()
623 * @nr: given allocation block group
624 * @bh: bufferhead contains the bitmap of the given block group
625 *
512 * For ext3 allocations, we must not reuse any blocks which are 626 * For ext3 allocations, we must not reuse any blocks which are
513 * allocated in the bitmap buffer's "last committed data" copy. This 627 * allocated in the bitmap buffer's "last committed data" copy. This
514 * prevents deletes from freeing up the page for reuse until we have 628 * prevents deletes from freeing up the page for reuse until we have
@@ -518,7 +632,7 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode,
518 * data would allow the old block to be overwritten before the 632 * data would allow the old block to be overwritten before the
519 * transaction committed (because we force data to disk before commit). 633 * transaction committed (because we force data to disk before commit).
520 * This would lead to corruption if we crashed between overwriting the 634 * This would lead to corruption if we crashed between overwriting the
521 * data and committing the delete. 635 * data and committing the delete.
522 * 636 *
523 * @@@ We may want to make this allocation behaviour conditional on 637 * @@@ We may want to make this allocation behaviour conditional on
524 * data-writes at some point, and disable it for metadata allocations or 638 * data-writes at some point, and disable it for metadata allocations or
@@ -541,6 +655,16 @@ static int ext3_test_allocatable(ext3_grpblk_t nr, struct buffer_head *bh)
541 return ret; 655 return ret;
542} 656}
543 657
658/**
659 * bitmap_search_next_usable_block()
660 * @start: the starting block (group relative) of the search
661 * @bh: bufferhead contains the block group bitmap
662 * @maxblocks: the ending block (group relative) of the reservation
663 *
664 * The bitmap search --- search forward alternately through the actual
665 * bitmap on disk and the last-committed copy in journal, until we find a
666 * bit free in both bitmaps.
667 */
544static ext3_grpblk_t 668static ext3_grpblk_t
545bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh, 669bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
546 ext3_grpblk_t maxblocks) 670 ext3_grpblk_t maxblocks)
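The alternating search that the new kernel-doc describes can be sketched on its own: keep advancing until the same bit is free in both the working bitmap and the last-committed copy. A kernel-flavoured sketch using the generic bitop helpers (names and types are illustrative, not the ext3 code itself):

#include <linux/bitops.h>

static long find_free_in_both(const unsigned long *bitmap,
			      const unsigned long *committed,
			      unsigned long start, unsigned long maxblocks)
{
	unsigned long next;

	while (start < maxblocks) {
		/* first free bit in the working (on-disk) bitmap */
		next = find_next_zero_bit(bitmap, maxblocks, start);
		if (next >= maxblocks)
			break;
		/* also free in the last-committed copy (or no copy)? done */
		if (!committed || !test_bit(next, committed))
			return next;
		/* otherwise skip ahead through the committed copy and retry */
		start = find_next_zero_bit(committed, maxblocks, next);
	}
	return -1;
}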
@@ -548,11 +672,6 @@ bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
548 ext3_grpblk_t next; 672 ext3_grpblk_t next;
549 struct journal_head *jh = bh2jh(bh); 673 struct journal_head *jh = bh2jh(bh);
550 674
551 /*
552 * The bitmap search --- search forward alternately through the actual
553 * bitmap and the last-committed copy until we find a bit free in
554 * both
555 */
556 while (start < maxblocks) { 675 while (start < maxblocks) {
557 next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start); 676 next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start);
558 if (next >= maxblocks) 677 if (next >= maxblocks)
@@ -562,14 +681,20 @@ bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
562 jbd_lock_bh_state(bh); 681 jbd_lock_bh_state(bh);
563 if (jh->b_committed_data) 682 if (jh->b_committed_data)
564 start = ext3_find_next_zero_bit(jh->b_committed_data, 683 start = ext3_find_next_zero_bit(jh->b_committed_data,
565 maxblocks, next); 684 maxblocks, next);
566 jbd_unlock_bh_state(bh); 685 jbd_unlock_bh_state(bh);
567 } 686 }
568 return -1; 687 return -1;
569} 688}
570 689
571/* 690/**
572 * Find an allocatable block in a bitmap. We honour both the bitmap and 691 * find_next_usable_block()
692 * @start: the starting block (group relative) to find next
693 * allocatable block in bitmap.
694 * @bh: bufferhead contains the block group bitmap
695 * @maxblocks: the ending block (group relative) for the search
696 *
697 * Find an allocatable block in a bitmap. We honor both the bitmap and
573 * its last-committed copy (if that exists), and perform the "most 698 * its last-committed copy (if that exists), and perform the "most
574 * appropriate allocation" algorithm of looking for a free block near 699 * appropriate allocation" algorithm of looking for a free block near
575 * the initial goal; then for a free byte somewhere in the bitmap; then 700 * the initial goal; then for a free byte somewhere in the bitmap; then
@@ -584,7 +709,7 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
584 709
585 if (start > 0) { 710 if (start > 0) {
586 /* 711 /*
587 * The goal was occupied; search forward for a free 712 * The goal was occupied; search forward for a free
588 * block within the next XX blocks. 713 * block within the next XX blocks.
589 * 714 *
590 * end_goal is more or less random, but it has to be 715 * end_goal is more or less random, but it has to be
@@ -620,7 +745,11 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
620 return here; 745 return here;
621} 746}
622 747
623/* 748/**
749 * claim_block()
750 * @block: the free block (group relative) to allocate
751 * @bh: the bufferhead contains the block group bitmap
752 *
624 * We think we can allocate this block in this bitmap. Try to set the bit. 753 * We think we can allocate this block in this bitmap. Try to set the bit.
625 * If that succeeds then check that nobody has allocated and then freed the 754 * If that succeeds then check that nobody has allocated and then freed the
626 * block since we saw that it was not marked in b_committed_data. If it _was_ 755 * block since we saw that it was not marked in b_committed_data. If it _was_
@@ -646,7 +775,26 @@ claim_block(spinlock_t *lock, ext3_grpblk_t block, struct buffer_head *bh)
646 return ret; 775 return ret;
647} 776}
648 777
649/* 778/**
779 * ext3_try_to_allocate()
780 * @sb: superblock
781 * @handle: handle to this transaction
782 * @group: given allocation block group
783 * @bitmap_bh: bufferhead holds the block bitmap
784 * @grp_goal: given target block within the group
785 * @count: target number of blocks to allocate
786 * @my_rsv: reservation window
787 *
788 * Attempt to allocate blocks within a given range. Set the range of allocation
789 * first, then find the first free bit(s) from the bitmap (within the range),
790 * and finally, allocate the blocks by claiming the found free bit as allocated.
791 *
792 * To set the range of this allocation:
793 * if there is a reservation window, only try to allocate block(s) from the
794 * file's own reservation window;
795 * Otherwise, the allocation range starts from the given goal block and ends at
796 * the block group's last block.
797 *
650 * If we failed to allocate the desired block then we may end up crossing to a 798 * If we failed to allocate the desired block then we may end up crossing to a
651 * new bitmap. In that case we must release write access to the old one via 799 * new bitmap. In that case we must release write access to the old one via
652 * ext3_journal_release_buffer(), else we'll run out of credits. 800 * ext3_journal_release_buffer(), else we'll run out of credits.
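The "set the range of this allocation" step in the comment above amounts to clipping the window to the group, or falling back to the span from the goal to the end of the group. A hedged sketch in group-relative block numbers (illustrative names; the kernel itself works on absolute window bounds):

static void allocation_range(int have_window,
			     long win_start, long win_end,	/* group relative, may overhang */
			     long grp_goal, long blocks_per_group,
			     long *start, long *end)
{
	if (have_window) {
		/* stay inside the file's own window, clipped to this group */
		*start = win_start > 0 ? win_start : 0;
		*end = win_end + 1 < blocks_per_group ? win_end + 1
						      : blocks_per_group;
	} else {
		*start = grp_goal > 0 ? grp_goal : 0;
		*end = blocks_per_group;
	}
	/* honour a concrete goal only if it falls inside the chosen range */
	if (grp_goal >= *start && grp_goal < *end)
		*start = grp_goal;
}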
@@ -703,7 +851,8 @@ repeat:
703 } 851 }
704 start = grp_goal; 852 start = grp_goal;
705 853
706 if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group), grp_goal, bitmap_bh)) { 854 if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group),
855 grp_goal, bitmap_bh)) {
707 /* 856 /*
708 * The block was allocated by another thread, or it was 857 * The block was allocated by another thread, or it was
709 * allocated and then freed by another thread 858 * allocated and then freed by another thread
@@ -718,7 +867,8 @@ repeat:
718 grp_goal++; 867 grp_goal++;
719 while (num < *count && grp_goal < end 868 while (num < *count && grp_goal < end
720 && ext3_test_allocatable(grp_goal, bitmap_bh) 869 && ext3_test_allocatable(grp_goal, bitmap_bh)
721 && claim_block(sb_bgl_lock(EXT3_SB(sb), group), grp_goal, bitmap_bh)) { 870 && claim_block(sb_bgl_lock(EXT3_SB(sb), group),
871 grp_goal, bitmap_bh)) {
722 num++; 872 num++;
723 grp_goal++; 873 grp_goal++;
724 } 874 }
@@ -730,12 +880,12 @@ fail_access:
730} 880}
731 881
732/** 882/**
733 * find_next_reservable_window(): 883 * find_next_reservable_window():
734 * find a reservable space within the given range. 884 * find a reservable space within the given range.
735 * It does not allocate the reservation window for now: 885 * It does not allocate the reservation window for now:
736 * alloc_new_reservation() will do the work later. 886 * alloc_new_reservation() will do the work later.
737 * 887 *
738 * @search_head: the head of the searching list; 888 * @search_head: the head of the searching list;
739 * This is not necessarily the list head of the whole filesystem 889 * This is not necessarily the list head of the whole filesystem
740 * 890 *
741 * We have both head and start_block to assist the search 891 * We have both head and start_block to assist the search
@@ -743,12 +893,12 @@ fail_access:
743 * but we will shift to the place where start_block is, 893 * but we will shift to the place where start_block is,
744 * then start from there, when looking for a reservable space. 894 * then start from there, when looking for a reservable space.
745 * 895 *
746 * @size: the target new reservation window size 896 * @size: the target new reservation window size
747 * 897 *
748 * @group_first_block: the first block we consider to start 898 * @group_first_block: the first block we consider to start
749 * the real search from 899 * the real search from
750 * 900 *
751 * @last_block: 901 * @last_block:
752 * the maximum block number that our goal reservable space 902 * the maximum block number that our goal reservable space
753 * could start from. This is normally the last block in this 903 * could start from. This is normally the last block in this
754 * group. The search will end when we found the start of next 904 * group. The search will end when we found the start of next
@@ -756,10 +906,10 @@ fail_access:
756 * This could handle the cross boundary reservation window 906 * This could handle the cross boundary reservation window
757 * request. 907 * request.
758 * 908 *
759 * basically we search from the given range, rather than the whole 909 * basically we search from the given range, rather than the whole
760 * reservation double linked list, (start_block, last_block) 910 * reservation double linked list, (start_block, last_block)
761 * to find a free region that is of my size and has not 911 * to find a free region that is of my size and has not
762 * been reserved. 912 * been reserved.
763 * 913 *
764 */ 914 */
765static int find_next_reservable_window( 915static int find_next_reservable_window(
@@ -812,7 +962,7 @@ static int find_next_reservable_window(
812 /* 962 /*
813 * Found a reserveable space big enough. We could 963 * Found a reserveable space big enough. We could
814 * have a reservation across the group boundary here 964 * have a reservation across the group boundary here
815 */ 965 */
816 break; 966 break;
817 } 967 }
818 } 968 }
@@ -848,7 +998,7 @@ static int find_next_reservable_window(
848} 998}
849 999
850/** 1000/**
851 * alloc_new_reservation()--allocate a new reservation window 1001 * alloc_new_reservation()--allocate a new reservation window
852 * 1002 *
853 * To make a new reservation, we search part of the filesystem 1003 * To make a new reservation, we search part of the filesystem
854 * reservation list (the list that inside the group). We try to 1004 * reservation list (the list that inside the group). We try to
@@ -897,7 +1047,7 @@ static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
897 spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock; 1047 spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
898 1048
899 group_first_block = ext3_group_first_block_no(sb, group); 1049 group_first_block = ext3_group_first_block_no(sb, group);
900 group_end_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1; 1050 group_end_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
901 1051
902 if (grp_goal < 0) 1052 if (grp_goal < 0)
903 start_block = group_first_block; 1053 start_block = group_first_block;
@@ -929,9 +1079,10 @@ static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
929 if ((my_rsv->rsv_alloc_hit > 1079 if ((my_rsv->rsv_alloc_hit >
930 (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) { 1080 (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
931 /* 1081 /*
932 * if we previously allocation hit ration is greater than half 1082 * if the previous allocation hit ratio is
933 * we double the size of reservation window next time 1083 * greater than 1/2, then we double the size of
934 * otherwise keep the same 1084 * the reservation window the next time,
1085 * otherwise we keep the same size window
935 */ 1086 */
936 size = size * 2; 1087 size = size * 2;
937 if (size > EXT3_MAX_RESERVE_BLOCKS) 1088 if (size > EXT3_MAX_RESERVE_BLOCKS)
@@ -1010,6 +1161,23 @@ retry:
1010 goto retry; 1161 goto retry;
1011} 1162}
1012 1163
1164/**
1165 * try_to_extend_reservation()
1166 * @my_rsv: given reservation window
1167 * @sb: super block
1168 * @size: the delta to extend
1169 *
1170 * Attempt to expand the reservation window large enough to have
1171 * required number of free blocks
1172 *
1173 * Since ext3_try_to_allocate() will always allocate blocks within
1174 * the reservation window range, if the window size is too small,
1175 * multiple blocks allocation has to stop at the end of the reservation
1176 * window. To make this more efficient, given the total number of
1177 * blocks needed and the current size of the window, we try to
1178 * expand the reservation window size if necessary on a best-effort
1179 * basis before ext3_new_blocks() tries to allocate blocks.
1180 */
1013static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv, 1181static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
1014 struct super_block *sb, int size) 1182 struct super_block *sb, int size)
1015{ 1183{
@@ -1035,7 +1203,17 @@ static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
1035 spin_unlock(rsv_lock); 1203 spin_unlock(rsv_lock);
1036} 1204}
1037 1205
1038/* 1206/**
1207 * ext3_try_to_allocate_with_rsv()
1208 * @sb: superblock
1209 * @handle: handle to this transaction
1210 * @group: given allocation block group
1211 * @bitmap_bh: bufferhead holds the block bitmap
1212 * @grp_goal: given target block within the group
1213 * @count: target number of blocks to allocate
1214 * @my_rsv: reservation window
1215 * @errp: pointer to store the error code
1216 *
1039 * This is the main function used to allocate a new block and its reservation 1217 * This is the main function used to allocate a new block and its reservation
1040 * window. 1218 * window.
1041 * 1219 *
@@ -1051,9 +1229,7 @@ static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
1051 * reservation), and there are lots of free blocks, but they are all 1229 * reservation), and there are lots of free blocks, but they are all
1052 * being reserved. 1230 * being reserved.
1053 * 1231 *
1054 * We use a sorted double linked list for the per-filesystem reservation list. 1232 * We use a red-black tree for the per-filesystem reservation list.
1055 * The insert, remove and find a free space(non-reserved) operations for the
1056 * sorted double linked list should be fast.
1057 * 1233 *
1058 */ 1234 */
1059static ext3_grpblk_t 1235static ext3_grpblk_t
@@ -1063,7 +1239,7 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1063 struct ext3_reserve_window_node * my_rsv, 1239 struct ext3_reserve_window_node * my_rsv,
1064 unsigned long *count, int *errp) 1240 unsigned long *count, int *errp)
1065{ 1241{
1066 ext3_fsblk_t group_first_block; 1242 ext3_fsblk_t group_first_block, group_last_block;
1067 ext3_grpblk_t ret = 0; 1243 ext3_grpblk_t ret = 0;
1068 int fatal; 1244 int fatal;
1069 unsigned long num = *count; 1245 unsigned long num = *count;
@@ -1100,6 +1276,7 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1100 * first block is the block number of the first block in this group 1276 * first block is the block number of the first block in this group
1101 */ 1277 */
1102 group_first_block = ext3_group_first_block_no(sb, group); 1278 group_first_block = ext3_group_first_block_no(sb, group);
1279 group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
1103 1280
1104 /* 1281 /*
1105 * Basically we will allocate a new block from inode's reservation 1282 * Basically we will allocate a new block from inode's reservation
@@ -1118,7 +1295,8 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1118 */ 1295 */
1119 while (1) { 1296 while (1) {
1120 if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) || 1297 if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
1121 !goal_in_my_reservation(&my_rsv->rsv_window, grp_goal, group, sb)) { 1298 !goal_in_my_reservation(&my_rsv->rsv_window,
1299 grp_goal, group, sb)) {
1122 if (my_rsv->rsv_goal_size < *count) 1300 if (my_rsv->rsv_goal_size < *count)
1123 my_rsv->rsv_goal_size = *count; 1301 my_rsv->rsv_goal_size = *count;
1124 ret = alloc_new_reservation(my_rsv, grp_goal, sb, 1302 ret = alloc_new_reservation(my_rsv, grp_goal, sb,
@@ -1126,17 +1304,21 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1126 if (ret < 0) 1304 if (ret < 0)
1127 break; /* failed */ 1305 break; /* failed */
1128 1306
1129 if (!goal_in_my_reservation(&my_rsv->rsv_window, grp_goal, group, sb)) 1307 if (!goal_in_my_reservation(&my_rsv->rsv_window,
1308 grp_goal, group, sb))
1130 grp_goal = -1; 1309 grp_goal = -1;
1131 } else if (grp_goal > 0 && (my_rsv->rsv_end-grp_goal+1) < *count) 1310 } else if (grp_goal > 0 &&
1311 (my_rsv->rsv_end-grp_goal+1) < *count)
1132 try_to_extend_reservation(my_rsv, sb, 1312 try_to_extend_reservation(my_rsv, sb,
1133 *count-my_rsv->rsv_end + grp_goal - 1); 1313 *count-my_rsv->rsv_end + grp_goal - 1);
1134 1314
1135 if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb)) 1315 if ((my_rsv->rsv_start > group_last_block) ||
1136 || (my_rsv->rsv_end < group_first_block)) 1316 (my_rsv->rsv_end < group_first_block)) {
1317 rsv_window_dump(&EXT3_SB(sb)->s_rsv_window_root, 1);
1137 BUG(); 1318 BUG();
1138 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, grp_goal, 1319 }
1139 &num, &my_rsv->rsv_window); 1320 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
1321 grp_goal, &num, &my_rsv->rsv_window);
1140 if (ret >= 0) { 1322 if (ret >= 0) {
1141 my_rsv->rsv_alloc_hit += num; 1323 my_rsv->rsv_alloc_hit += num;
1142 *count = num; 1324 *count = num;
@@ -1161,6 +1343,12 @@ out:
1161 return ret; 1343 return ret;
1162} 1344}
1163 1345
1346/**
1347 * ext3_has_free_blocks()
1348 * @sbi: in-core super block structure.
1349 *
1350 * Check if filesystem has at least 1 free block available for allocation.
1351 */
1164static int ext3_has_free_blocks(struct ext3_sb_info *sbi) 1352static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
1165{ 1353{
1166 ext3_fsblk_t free_blocks, root_blocks; 1354 ext3_fsblk_t free_blocks, root_blocks;
@@ -1175,11 +1363,17 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
1175 return 1; 1363 return 1;
1176} 1364}
1177 1365
1178/* 1366/**
1367 * ext3_should_retry_alloc()
1368 * @sb: super block
1369 * @retries: number of attempts that have been made
1370 *
1179 * ext3_should_retry_alloc() is called when ENOSPC is returned, and if 1371 * ext3_should_retry_alloc() is called when ENOSPC is returned, and if
1180 * it is profitable to retry the operation, this function will wait 1372 * it is profitable to retry the operation, this function will wait
1181 * for the current or commiting transaction to complete, and then 1373 * for the current or commiting transaction to complete, and then
1182 * return TRUE. 1374 * return TRUE.
1375 *
 1376 * If the total number of retries exceeds three, return FALSE.
1183 */ 1377 */
1184int ext3_should_retry_alloc(struct super_block *sb, int *retries) 1378int ext3_should_retry_alloc(struct super_block *sb, int *retries)
1185{ 1379{
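
For orientation, here is a hedged sketch of the retry idiom this helper supports: callers that hit -ENOSPC keep retrying while ext3_should_retry_alloc() is still willing to force a commit. The do_allocation() helper below is a placeholder, not a function from this patch, and the skeleton assumes it sits inside fs/ext3 where the ext3 declarations are visible.

/* Hypothetical caller skeleton.  'retries' must start at zero and be
 * carried across attempts: ext3_should_retry_alloc() counts them and
 * refuses to force yet another commit once the limit is reached. */
static int alloc_with_retry(struct inode *inode)
{
        int ret, retries = 0;

retry:
        ret = do_allocation(inode);     /* placeholder for the failing operation */
        if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
                goto retry;
        return ret;
}
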
@@ -1191,13 +1385,19 @@ int ext3_should_retry_alloc(struct super_block *sb, int *retries)
1191 return journal_force_commit_nested(EXT3_SB(sb)->s_journal); 1385 return journal_force_commit_nested(EXT3_SB(sb)->s_journal);
1192} 1386}
1193 1387
1194/* 1388/**
1195 * ext3_new_block uses a goal block to assist allocation. If the goal is 1389 * ext3_new_blocks() -- core block(s) allocation function
1196 * free, or there is a free block within 32 blocks of the goal, that block 1390 * @handle: handle to this transaction
1197 * is allocated. Otherwise a forward search is made for a free block; within 1391 * @inode: file inode
1198 * each block group the search first looks for an entire free byte in the block 1392 * @goal: given target block (filesystem wide)
1199 * bitmap, and then for any free bit if that fails. 1393 * @count: target number of blocks to allocate
1200 * This function also updates quota and i_blocks field. 1394 * @errp: error code
1395 *
1396 * ext3_new_blocks uses a goal block to assist allocation. It tries to
 1397 * allocate block(s) from the block group containing the goal block first. If that
1398 * fails, it will try to allocate block(s) from other block groups without
1399 * any specific goal block.
1400 *
1201 */ 1401 */
1202ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, 1402ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
1203 ext3_fsblk_t goal, unsigned long *count, int *errp) 1403 ext3_fsblk_t goal, unsigned long *count, int *errp)
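
The new kernel-doc makes the in/out nature of @count explicit. Below is a hedged usage sketch: it assumes, per the usual ext3 convention rather than anything shown in this hunk, that a return value of 0 with *errp set indicates failure, and it is meant to live inside fs/ext3 where these declarations exist.

/* Ask for up to 16 blocks near 'goal'; on success 'count' is updated to
 * the number actually allocated, starting at the returned block. */
static ext3_fsblk_t grab_some_blocks(handle_t *handle, struct inode *inode,
                                     ext3_fsblk_t goal, unsigned long *got)
{
        unsigned long count = 16;
        int err = 0;
        ext3_fsblk_t first = ext3_new_blocks(handle, inode, goal, &count, &err);

        if (!first)
                return 0;       /* failed; 'err' holds the reason */
        *got = count;           /* blocks first .. first + count - 1 are ours */
        return first;
}
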
@@ -1303,7 +1503,7 @@ retry_alloc:
1303 smp_rmb(); 1503 smp_rmb();
1304 1504
1305 /* 1505 /*
1306 * Now search the rest of the groups. We assume that 1506 * Now search the rest of the groups. We assume that
1307 * i and gdp correctly point to the last group visited. 1507 * i and gdp correctly point to the last group visited.
1308 */ 1508 */
1309 for (bgi = 0; bgi < ngroups; bgi++) { 1509 for (bgi = 0; bgi < ngroups; bgi++) {
@@ -1428,7 +1628,7 @@ allocated:
1428 1628
1429 spin_lock(sb_bgl_lock(sbi, group_no)); 1629 spin_lock(sb_bgl_lock(sbi, group_no));
1430 gdp->bg_free_blocks_count = 1630 gdp->bg_free_blocks_count =
1431 cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - num); 1631 cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
1432 spin_unlock(sb_bgl_lock(sbi, group_no)); 1632 spin_unlock(sb_bgl_lock(sbi, group_no));
1433 percpu_counter_mod(&sbi->s_freeblocks_counter, -num); 1633 percpu_counter_mod(&sbi->s_freeblocks_counter, -num);
1434 1634
@@ -1471,6 +1671,12 @@ ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
1471 return ext3_new_blocks(handle, inode, goal, &count, errp); 1671 return ext3_new_blocks(handle, inode, goal, &count, errp);
1472} 1672}
1473 1673
1674/**
1675 * ext3_count_free_blocks() -- count filesystem free blocks
1676 * @sb: superblock
1677 *
1678 * Adds up the number of free blocks from each block group.
1679 */
1474ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb) 1680ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb)
1475{ 1681{
1476 ext3_fsblk_t desc_count; 1682 ext3_fsblk_t desc_count;
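
As the new kernel-doc says, this function is just a walk over the group descriptors. A stripped-down sketch of that loop, omitting locking and any debug cross-checks the real function may do; it assumes the usual ext3 helpers (EXT3_SB, ext3_get_group_desc) visible inside fs/ext3.

static ext3_fsblk_t count_free_blocks_sketch(struct super_block *sb)
{
        ext3_fsblk_t desc_count = 0;
        int ngroups = EXT3_SB(sb)->s_groups_count;
        int i;

        for (i = 0; i < ngroups; i++) {
                struct ext3_group_desc *gdp = ext3_get_group_desc(sb, i, NULL);

                if (gdp)
                        desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
        }
        return desc_count;
}
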
diff --git a/fs/ext3/bitmap.c b/fs/ext3/bitmap.c
index ce4f82b9e528..b9176eed98d1 100644
--- a/fs/ext3/bitmap.c
+++ b/fs/ext3/bitmap.c
@@ -20,7 +20,7 @@ unsigned long ext3_count_free (struct buffer_head * map, unsigned int numchars)
20 unsigned int i; 20 unsigned int i;
21 unsigned long sum = 0; 21 unsigned long sum = 0;
22 22
23 if (!map) 23 if (!map)
24 return (0); 24 return (0);
25 for (i = 0; i < numchars; i++) 25 for (i = 0; i < numchars; i++)
26 sum += nibblemap[map->b_data[i] & 0xf] + 26 sum += nibblemap[map->b_data[i] & 0xf] +
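
The bitmap.c hunk is whitespace-only, but the loop it touches is worth spelling out: free blocks are 0-bits, and a 16-entry table gives the number of zero bits per nibble, so each byte costs two table lookups. A standalone sketch (table and helper written out here, not copied from the kernel source):

#include <stddef.h>

/* Zero bits per 4-bit value: 0x0 has four free bits, 0xf has none. */
static const unsigned char nibblemap[16] = {
        4, 3, 3, 2, 3, 2, 2, 1,
        3, 2, 2, 1, 2, 1, 1, 0
};

static unsigned long count_free_bits(const unsigned char *map, size_t numchars)
{
        unsigned long sum = 0;
        size_t i;

        for (i = 0; i < numchars; i++)
                sum += nibblemap[map[i] & 0xf] + nibblemap[(map[i] >> 4) & 0xf];
        return sum;
}
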
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index fbb0d4ed07d4..429acbb4e064 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -59,7 +59,7 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
59 59
60 return (ext3_filetype_table[filetype]); 60 return (ext3_filetype_table[filetype]);
61} 61}
62 62
63 63
64int ext3_check_dir_entry (const char * function, struct inode * dir, 64int ext3_check_dir_entry (const char * function, struct inode * dir,
65 struct ext3_dir_entry_2 * de, 65 struct ext3_dir_entry_2 * de,
@@ -67,7 +67,7 @@ int ext3_check_dir_entry (const char * function, struct inode * dir,
67 unsigned long offset) 67 unsigned long offset)
68{ 68{
69 const char * error_msg = NULL; 69 const char * error_msg = NULL;
70 const int rlen = le16_to_cpu(de->rec_len); 70 const int rlen = le16_to_cpu(de->rec_len);
71 71
72 if (rlen < EXT3_DIR_REC_LEN(1)) 72 if (rlen < EXT3_DIR_REC_LEN(1))
73 error_msg = "rec_len is smaller than minimal"; 73 error_msg = "rec_len is smaller than minimal";
@@ -162,7 +162,7 @@ revalidate:
162 * to make sure. */ 162 * to make sure. */
163 if (filp->f_version != inode->i_version) { 163 if (filp->f_version != inode->i_version) {
164 for (i = 0; i < sb->s_blocksize && i < offset; ) { 164 for (i = 0; i < sb->s_blocksize && i < offset; ) {
165 de = (struct ext3_dir_entry_2 *) 165 de = (struct ext3_dir_entry_2 *)
166 (bh->b_data + i); 166 (bh->b_data + i);
167 /* It's too expensive to do a full 167 /* It's too expensive to do a full
168 * dirent test each time round this 168 * dirent test each time round this
@@ -181,7 +181,7 @@ revalidate:
181 filp->f_version = inode->i_version; 181 filp->f_version = inode->i_version;
182 } 182 }
183 183
184 while (!error && filp->f_pos < inode->i_size 184 while (!error && filp->f_pos < inode->i_size
185 && offset < sb->s_blocksize) { 185 && offset < sb->s_blocksize) {
186 de = (struct ext3_dir_entry_2 *) (bh->b_data + offset); 186 de = (struct ext3_dir_entry_2 *) (bh->b_data + offset);
187 if (!ext3_check_dir_entry ("ext3_readdir", inode, de, 187 if (!ext3_check_dir_entry ("ext3_readdir", inode, de,
@@ -229,7 +229,7 @@ out:
229/* 229/*
230 * These functions convert from the major/minor hash to an f_pos 230 * These functions convert from the major/minor hash to an f_pos
231 * value. 231 * value.
232 * 232 *
233 * Currently we only use major hash numer. This is unfortunate, but 233 * Currently we only use major hash numer. This is unfortunate, but
234 * on 32-bit machines, the same VFS interface is used for lseek and 234 * on 32-bit machines, the same VFS interface is used for lseek and
235 * llseek, so if we use the 64 bit offset, then the 32-bit versions of 235 * llseek, so if we use the 64 bit offset, then the 32-bit versions of
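
The (truncated) comment above explains that readdir offsets encode only the major hash so they still fit a 32-bit f_pos. A hedged sketch of one such packing, written from the description here rather than copied from the ext3 source, whose exact bit layout may differ:

typedef unsigned int u32;

/* Drop the low bit of the major hash so the packed value always fits in
 * a signed 32-bit offset; the minor hash is not representable at all. */
static unsigned long hash2pos(u32 major_hash)
{
        return major_hash >> 1;
}

static u32 pos2maj_hash(unsigned long pos)
{
        return (u32)(pos << 1);
}
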
@@ -250,7 +250,7 @@ out:
250struct fname { 250struct fname {
251 __u32 hash; 251 __u32 hash;
252 __u32 minor_hash; 252 __u32 minor_hash;
253 struct rb_node rb_hash; 253 struct rb_node rb_hash;
254 struct fname *next; 254 struct fname *next;
255 __u32 inode; 255 __u32 inode;
256 __u8 name_len; 256 __u8 name_len;
@@ -343,10 +343,9 @@ int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
343 343
344 /* Create and allocate the fname structure */ 344 /* Create and allocate the fname structure */
345 len = sizeof(struct fname) + dirent->name_len + 1; 345 len = sizeof(struct fname) + dirent->name_len + 1;
346 new_fn = kmalloc(len, GFP_KERNEL); 346 new_fn = kzalloc(len, GFP_KERNEL);
347 if (!new_fn) 347 if (!new_fn)
348 return -ENOMEM; 348 return -ENOMEM;
349 memset(new_fn, 0, len);
350 new_fn->hash = hash; 349 new_fn->hash = hash;
351 new_fn->minor_hash = minor_hash; 350 new_fn->minor_hash = minor_hash;
352 new_fn->inode = le32_to_cpu(dirent->inode); 351 new_fn->inode = le32_to_cpu(dirent->inode);
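
The hunk above folds the kmalloc()+memset() pair into a single kzalloc() call, which allocates and zeroes in one step. A minimal illustration of the equivalence (the two helpers below are illustrative, not functions from this file):

#include <linux/slab.h>
#include <linux/string.h>

/* Both return 'len' zeroed bytes (or NULL); kzalloc simply folds the
 * memset into the allocation. */
static void *alloc_cleared_old(size_t len)
{
        void *p = kmalloc(len, GFP_KERNEL);

        if (p)
                memset(p, 0, len);
        return p;
}

static void *alloc_cleared_new(size_t len)
{
        return kzalloc(len, GFP_KERNEL);
}
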
@@ -410,7 +409,7 @@ static int call_filldir(struct file * filp, void * dirent,
410 curr_pos = hash2pos(fname->hash, fname->minor_hash); 409 curr_pos = hash2pos(fname->hash, fname->minor_hash);
411 while (fname) { 410 while (fname) {
412 error = filldir(dirent, fname->name, 411 error = filldir(dirent, fname->name,
413 fname->name_len, curr_pos, 412 fname->name_len, curr_pos,
414 fname->inode, 413 fname->inode,
415 get_dtype(sb, fname->file_type)); 414 get_dtype(sb, fname->file_type));
416 if (error) { 415 if (error) {
@@ -465,7 +464,7 @@ static int ext3_dx_readdir(struct file * filp,
465 /* 464 /*
466 * Fill the rbtree if we have no more entries, 465 * Fill the rbtree if we have no more entries,
467 * or the inode has changed since we last read in the 466 * or the inode has changed since we last read in the
468 * cached entries. 467 * cached entries.
469 */ 468 */
470 if ((!info->curr_node) || 469 if ((!info->curr_node) ||
471 (filp->f_version != inode->i_version)) { 470 (filp->f_version != inode->i_version)) {
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
index 1efefb630ea9..994efd189f4e 100644
--- a/fs/ext3/file.c
+++ b/fs/ext3/file.c
@@ -100,7 +100,7 @@ ext3_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t
100 100
101force_commit: 101force_commit:
102 err = ext3_force_commit(inode->i_sb); 102 err = ext3_force_commit(inode->i_sb);
103 if (err) 103 if (err)
104 return err; 104 return err;
105 return ret; 105 return ret;
106} 106}
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
index 49382a208e05..dd1fd3c0fc05 100644
--- a/fs/ext3/fsync.c
+++ b/fs/ext3/fsync.c
@@ -8,14 +8,14 @@
8 * Universite Pierre et Marie Curie (Paris VI) 8 * Universite Pierre et Marie Curie (Paris VI)
9 * from 9 * from
10 * linux/fs/minix/truncate.c Copyright (C) 1991, 1992 Linus Torvalds 10 * linux/fs/minix/truncate.c Copyright (C) 1991, 1992 Linus Torvalds
11 * 11 *
12 * ext3fs fsync primitive 12 * ext3fs fsync primitive
13 * 13 *
14 * Big-endian to little-endian byte-swapping/bitmaps by 14 * Big-endian to little-endian byte-swapping/bitmaps by
15 * David S. Miller (davem@caip.rutgers.edu), 1995 15 * David S. Miller (davem@caip.rutgers.edu), 1995
16 * 16 *
17 * Removed unnecessary code duplication for little endian machines 17 * Removed unnecessary code duplication for little endian machines
18 * and excessive __inline__s. 18 * and excessive __inline__s.
19 * Andi Kleen, 1997 19 * Andi Kleen, 1997
20 * 20 *
21 * Major simplications and cleanup - we only need to do the metadata, because 21 * Major simplications and cleanup - we only need to do the metadata, because
diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
index 5a2d1235ead0..deeb27b5ba83 100644
--- a/fs/ext3/hash.c
+++ b/fs/ext3/hash.c
@@ -4,7 +4,7 @@
4 * Copyright (C) 2002 by Theodore Ts'o 4 * Copyright (C) 2002 by Theodore Ts'o
5 * 5 *
6 * This file is released under the GPL v2. 6 * This file is released under the GPL v2.
7 * 7 *
8 * This file may be redistributed under the terms of the GNU Public 8 * This file may be redistributed under the terms of the GNU Public
9 * License. 9 * License.
10 */ 10 */
@@ -80,11 +80,11 @@ static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
80 * Returns the hash of a filename. If len is 0 and name is NULL, then 80 * Returns the hash of a filename. If len is 0 and name is NULL, then
81 * this function can be used to test whether or not a hash version is 81 * this function can be used to test whether or not a hash version is
82 * supported. 82 * supported.
83 * 83 *
84 * The seed is an 4 longword (32 bits) "secret" which can be used to 84 * The seed is an 4 longword (32 bits) "secret" which can be used to
85 * uniquify a hash. If the seed is all zero's, then some default seed 85 * uniquify a hash. If the seed is all zero's, then some default seed
86 * may be used. 86 * may be used.
87 * 87 *
88 * A particular hash version specifies whether or not the seed is 88 * A particular hash version specifies whether or not the seed is
89 * represented, and whether or not the returned hash is 32 bits or 64 89 * represented, and whether or not the returned hash is 32 bits or 64
90 * bits. 32 bit hashes will return 0 for the minor hash. 90 * bits. 32 bit hashes will return 0 for the minor hash.
@@ -95,7 +95,7 @@ int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
95 __u32 minor_hash = 0; 95 __u32 minor_hash = 0;
96 const char *p; 96 const char *p;
97 int i; 97 int i;
98 __u32 in[8], buf[4]; 98 __u32 in[8], buf[4];
99 99
100 /* Initialize the default seed for the hash checksum functions */ 100 /* Initialize the default seed for the hash checksum functions */
101 buf[0] = 0x67452301; 101 buf[0] = 0x67452301;
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 36546ed36a14..e45dbd651736 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -202,7 +202,7 @@ error_return:
202static int find_group_dir(struct super_block *sb, struct inode *parent) 202static int find_group_dir(struct super_block *sb, struct inode *parent)
203{ 203{
204 int ngroups = EXT3_SB(sb)->s_groups_count; 204 int ngroups = EXT3_SB(sb)->s_groups_count;
205 int freei, avefreei; 205 unsigned int freei, avefreei;
206 struct ext3_group_desc *desc, *best_desc = NULL; 206 struct ext3_group_desc *desc, *best_desc = NULL;
207 struct buffer_head *bh; 207 struct buffer_head *bh;
208 int group, best_group = -1; 208 int group, best_group = -1;
@@ -216,7 +216,7 @@ static int find_group_dir(struct super_block *sb, struct inode *parent)
216 continue; 216 continue;
217 if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei) 217 if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
218 continue; 218 continue;
219 if (!best_desc || 219 if (!best_desc ||
220 (le16_to_cpu(desc->bg_free_blocks_count) > 220 (le16_to_cpu(desc->bg_free_blocks_count) >
221 le16_to_cpu(best_desc->bg_free_blocks_count))) { 221 le16_to_cpu(best_desc->bg_free_blocks_count))) {
222 best_group = group; 222 best_group = group;
@@ -226,30 +226,30 @@ static int find_group_dir(struct super_block *sb, struct inode *parent)
226 return best_group; 226 return best_group;
227} 227}
228 228
229/* 229/*
230 * Orlov's allocator for directories. 230 * Orlov's allocator for directories.
231 * 231 *
232 * We always try to spread first-level directories. 232 * We always try to spread first-level directories.
233 * 233 *
234 * If there are blockgroups with both free inodes and free blocks counts 234 * If there are blockgroups with both free inodes and free blocks counts
235 * not worse than average we return one with smallest directory count. 235 * not worse than average we return one with smallest directory count.
236 * Otherwise we simply return a random group. 236 * Otherwise we simply return a random group.
237 * 237 *
238 * For the rest rules look so: 238 * For the rest rules look so:
239 * 239 *
240 * It's OK to put directory into a group unless 240 * It's OK to put directory into a group unless
241 * it has too many directories already (max_dirs) or 241 * it has too many directories already (max_dirs) or
242 * it has too few free inodes left (min_inodes) or 242 * it has too few free inodes left (min_inodes) or
243 * it has too few free blocks left (min_blocks) or 243 * it has too few free blocks left (min_blocks) or
244 * it's already running too large debt (max_debt). 244 * it's already running too large debt (max_debt).
245 * Parent's group is prefered, if it doesn't satisfy these 245 * Parent's group is prefered, if it doesn't satisfy these
246 * conditions we search cyclically through the rest. If none 246 * conditions we search cyclically through the rest. If none
247 * of the groups look good we just look for a group with more 247 * of the groups look good we just look for a group with more
248 * free inodes than average (starting at parent's group). 248 * free inodes than average (starting at parent's group).
249 * 249 *
250 * Debt is incremented each time we allocate a directory and decremented 250 * Debt is incremented each time we allocate a directory and decremented
251 * when we allocate an inode, within 0--255. 251 * when we allocate an inode, within 0--255.
252 */ 252 */
253 253
254#define INODE_COST 64 254#define INODE_COST 64
255#define BLOCK_COST 256 255#define BLOCK_COST 256
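
The four thresholds in the comment above amount to a per-group admission test for new directories. A compact restatement in code (descriptive names of my own, not the kernel's helper; the real allocator also prefers the parent's group and falls back as described above):

/* Nonzero if a new directory may be placed in a group with the given
 * usage figures, per the max_dirs/min_inodes/min_blocks/max_debt rules. */
static int group_ok_for_dir(unsigned used_dirs, unsigned free_inodes,
                            unsigned free_blocks, unsigned debt,
                            unsigned max_dirs, unsigned min_inodes,
                            unsigned min_blocks, unsigned max_debt)
{
        if (used_dirs >= max_dirs)
                return 0;       /* too many directories already */
        if (free_inodes < min_inodes)
                return 0;       /* too few free inodes left */
        if (free_blocks < min_blocks)
                return 0;       /* too few free blocks left */
        if (debt > max_debt)
                return 0;       /* group already carries too much debt */
        return 1;
}
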
@@ -261,10 +261,10 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent)
261 struct ext3_super_block *es = sbi->s_es; 261 struct ext3_super_block *es = sbi->s_es;
262 int ngroups = sbi->s_groups_count; 262 int ngroups = sbi->s_groups_count;
263 int inodes_per_group = EXT3_INODES_PER_GROUP(sb); 263 int inodes_per_group = EXT3_INODES_PER_GROUP(sb);
264 int freei, avefreei; 264 unsigned int freei, avefreei;
265 ext3_fsblk_t freeb, avefreeb; 265 ext3_fsblk_t freeb, avefreeb;
266 ext3_fsblk_t blocks_per_dir; 266 ext3_fsblk_t blocks_per_dir;
267 int ndirs; 267 unsigned int ndirs;
268 int max_debt, max_dirs, min_inodes; 268 int max_debt, max_dirs, min_inodes;
269 ext3_grpblk_t min_blocks; 269 ext3_grpblk_t min_blocks;
270 int group = -1, i; 270 int group = -1, i;
@@ -454,7 +454,7 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
454 group = find_group_dir(sb, dir); 454 group = find_group_dir(sb, dir);
455 else 455 else
456 group = find_group_orlov(sb, dir); 456 group = find_group_orlov(sb, dir);
457 } else 457 } else
458 group = find_group_other(sb, dir); 458 group = find_group_other(sb, dir);
459 459
460 err = -ENOSPC; 460 err = -ENOSPC;
@@ -559,7 +559,6 @@ got:
559 559
560 inode->i_ino = ino; 560 inode->i_ino = ino;
561 /* This is the optimal IO size (for stat), not the fs block size */ 561 /* This is the optimal IO size (for stat), not the fs block size */
562 inode->i_blksize = PAGE_SIZE;
563 inode->i_blocks = 0; 562 inode->i_blocks = 0;
564 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; 563 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
565 564
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 84be02e93652..dcf4f1dd108b 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -13,11 +13,11 @@
13 * Copyright (C) 1991, 1992 Linus Torvalds 13 * Copyright (C) 1991, 1992 Linus Torvalds
14 * 14 *
15 * Goal-directed block allocation by Stephen Tweedie 15 * Goal-directed block allocation by Stephen Tweedie
16 * (sct@redhat.com), 1993, 1998 16 * (sct@redhat.com), 1993, 1998
17 * Big-endian to little-endian byte-swapping/bitmaps by 17 * Big-endian to little-endian byte-swapping/bitmaps by
18 * David S. Miller (davem@caip.rutgers.edu), 1995 18 * David S. Miller (davem@caip.rutgers.edu), 1995
19 * 64-bit file support on 64-bit platforms by Jakub Jelinek 19 * 64-bit file support on 64-bit platforms by Jakub Jelinek
20 * (jj@sunsite.ms.mff.cuni.cz) 20 * (jj@sunsite.ms.mff.cuni.cz)
21 * 21 *
22 * Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000 22 * Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
23 */ 23 */
@@ -55,7 +55,7 @@ static int ext3_inode_is_fast_symlink(struct inode *inode)
55/* 55/*
56 * The ext3 forget function must perform a revoke if we are freeing data 56 * The ext3 forget function must perform a revoke if we are freeing data
57 * which has been journaled. Metadata (eg. indirect blocks) must be 57 * which has been journaled. Metadata (eg. indirect blocks) must be
58 * revoked in all cases. 58 * revoked in all cases.
59 * 59 *
60 * "bh" may be NULL: a metadata block may have been freed from memory 60 * "bh" may be NULL: a metadata block may have been freed from memory
61 * but there may still be a record of it in the journal, and that record 61 * but there may still be a record of it in the journal, and that record
@@ -105,7 +105,7 @@ int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
105 * Work out how many blocks we need to proceed with the next chunk of a 105 * Work out how many blocks we need to proceed with the next chunk of a
106 * truncate transaction. 106 * truncate transaction.
107 */ 107 */
108static unsigned long blocks_for_truncate(struct inode *inode) 108static unsigned long blocks_for_truncate(struct inode *inode)
109{ 109{
110 unsigned long needed; 110 unsigned long needed;
111 111
@@ -122,13 +122,13 @@ static unsigned long blocks_for_truncate(struct inode *inode)
122 122
123 /* But we need to bound the transaction so we don't overflow the 123 /* But we need to bound the transaction so we don't overflow the
124 * journal. */ 124 * journal. */
125 if (needed > EXT3_MAX_TRANS_DATA) 125 if (needed > EXT3_MAX_TRANS_DATA)
126 needed = EXT3_MAX_TRANS_DATA; 126 needed = EXT3_MAX_TRANS_DATA;
127 127
128 return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed; 128 return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
129} 129}
130 130
131/* 131/*
132 * Truncate transactions can be complex and absolutely huge. So we need to 132 * Truncate transactions can be complex and absolutely huge. So we need to
133 * be able to restart the transaction at a conventient checkpoint to make 133 * be able to restart the transaction at a conventient checkpoint to make
134 * sure we don't overflow the journal. 134 * sure we don't overflow the journal.
@@ -136,9 +136,9 @@ static unsigned long blocks_for_truncate(struct inode *inode)
136 * start_transaction gets us a new handle for a truncate transaction, 136 * start_transaction gets us a new handle for a truncate transaction,
137 * and extend_transaction tries to extend the existing one a bit. If 137 * and extend_transaction tries to extend the existing one a bit. If
138 * extend fails, we need to propagate the failure up and restart the 138 * extend fails, we need to propagate the failure up and restart the
139 * transaction in the top-level truncate loop. --sct 139 * transaction in the top-level truncate loop. --sct
140 */ 140 */
141static handle_t *start_transaction(struct inode *inode) 141static handle_t *start_transaction(struct inode *inode)
142{ 142{
143 handle_t *result; 143 handle_t *result;
144 144
@@ -215,12 +215,12 @@ void ext3_delete_inode (struct inode * inode)
215 ext3_orphan_del(handle, inode); 215 ext3_orphan_del(handle, inode);
216 EXT3_I(inode)->i_dtime = get_seconds(); 216 EXT3_I(inode)->i_dtime = get_seconds();
217 217
218 /* 218 /*
219 * One subtle ordering requirement: if anything has gone wrong 219 * One subtle ordering requirement: if anything has gone wrong
220 * (transaction abort, IO errors, whatever), then we can still 220 * (transaction abort, IO errors, whatever), then we can still
221 * do these next steps (the fs will already have been marked as 221 * do these next steps (the fs will already have been marked as
222 * having errors), but we can't free the inode if the mark_dirty 222 * having errors), but we can't free the inode if the mark_dirty
223 * fails. 223 * fails.
224 */ 224 */
225 if (ext3_mark_inode_dirty(handle, inode)) 225 if (ext3_mark_inode_dirty(handle, inode))
226 /* If that failed, just do the required in-core inode clear. */ 226 /* If that failed, just do the required in-core inode clear. */
@@ -398,7 +398,7 @@ no_block:
398 * + if there is a block to the left of our position - allocate near it. 398 * + if there is a block to the left of our position - allocate near it.
399 * + if pointer will live in indirect block - allocate near that block. 399 * + if pointer will live in indirect block - allocate near that block.
400 * + if pointer will live in inode - allocate in the same 400 * + if pointer will live in inode - allocate in the same
401 * cylinder group. 401 * cylinder group.
402 * 402 *
403 * In the latter case we colour the starting block by the callers PID to 403 * In the latter case we colour the starting block by the callers PID to
404 * prevent it from clashing with concurrent allocations for a different inode 404 * prevent it from clashing with concurrent allocations for a different inode
@@ -470,7 +470,7 @@ static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
470 * ext3_blks_to_allocate: Look up the block map and count the number 470 * ext3_blks_to_allocate: Look up the block map and count the number
471 * of direct blocks need to be allocated for the given branch. 471 * of direct blocks need to be allocated for the given branch.
472 * 472 *
473 * @branch: chain of indirect blocks 473 * @branch: chain of indirect blocks
474 * @k: number of blocks need for indirect blocks 474 * @k: number of blocks need for indirect blocks
475 * @blks: number of data blocks to be mapped. 475 * @blks: number of data blocks to be mapped.
476 * @blocks_to_boundary: the offset in the indirect block 476 * @blocks_to_boundary: the offset in the indirect block
@@ -744,7 +744,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode,
744 jbd_debug(5, "splicing indirect only\n"); 744 jbd_debug(5, "splicing indirect only\n");
745 BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata"); 745 BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
746 err = ext3_journal_dirty_metadata(handle, where->bh); 746 err = ext3_journal_dirty_metadata(handle, where->bh);
747 if (err) 747 if (err)
748 goto err_out; 748 goto err_out;
749 } else { 749 } else {
750 /* 750 /*
@@ -1098,7 +1098,7 @@ static int walk_page_buffers( handle_t *handle,
1098 1098
1099 for ( bh = head, block_start = 0; 1099 for ( bh = head, block_start = 0;
1100 ret == 0 && (bh != head || !block_start); 1100 ret == 0 && (bh != head || !block_start);
1101 block_start = block_end, bh = next) 1101 block_start = block_end, bh = next)
1102 { 1102 {
1103 next = bh->b_this_page; 1103 next = bh->b_this_page;
1104 block_end = block_start + blocksize; 1104 block_end = block_start + blocksize;
@@ -1137,7 +1137,7 @@ static int walk_page_buffers( handle_t *handle,
1137 * So what we do is to rely on the fact that journal_stop/journal_start 1137 * So what we do is to rely on the fact that journal_stop/journal_start
1138 * will _not_ run commit under these circumstances because handle->h_ref 1138 * will _not_ run commit under these circumstances because handle->h_ref
1139 * is elevated. We'll still have enough credits for the tiny quotafile 1139 * is elevated. We'll still have enough credits for the tiny quotafile
1140 * write. 1140 * write.
1141 */ 1141 */
1142static int do_journal_get_write_access(handle_t *handle, 1142static int do_journal_get_write_access(handle_t *handle,
1143 struct buffer_head *bh) 1143 struct buffer_head *bh)
@@ -1282,7 +1282,7 @@ static int ext3_journalled_commit_write(struct file *file,
1282 if (inode->i_size > EXT3_I(inode)->i_disksize) { 1282 if (inode->i_size > EXT3_I(inode)->i_disksize) {
1283 EXT3_I(inode)->i_disksize = inode->i_size; 1283 EXT3_I(inode)->i_disksize = inode->i_size;
1284 ret2 = ext3_mark_inode_dirty(handle, inode); 1284 ret2 = ext3_mark_inode_dirty(handle, inode);
1285 if (!ret) 1285 if (!ret)
1286 ret = ret2; 1286 ret = ret2;
1287 } 1287 }
1288 ret2 = ext3_journal_stop(handle); 1288 ret2 = ext3_journal_stop(handle);
@@ -1291,7 +1291,7 @@ static int ext3_journalled_commit_write(struct file *file,
1291 return ret; 1291 return ret;
1292} 1292}
1293 1293
1294/* 1294/*
1295 * bmap() is special. It gets used by applications such as lilo and by 1295 * bmap() is special. It gets used by applications such as lilo and by
1296 * the swapper to find the on-disk block of a specific piece of data. 1296 * the swapper to find the on-disk block of a specific piece of data.
1297 * 1297 *
@@ -1300,10 +1300,10 @@ static int ext3_journalled_commit_write(struct file *file,
1300 * filesystem and enables swap, then they may get a nasty shock when the 1300 * filesystem and enables swap, then they may get a nasty shock when the
1301 * data getting swapped to that swapfile suddenly gets overwritten by 1301 * data getting swapped to that swapfile suddenly gets overwritten by
1302 * the original zero's written out previously to the journal and 1302 * the original zero's written out previously to the journal and
1303 * awaiting writeback in the kernel's buffer cache. 1303 * awaiting writeback in the kernel's buffer cache.
1304 * 1304 *
1305 * So, if we see any bmap calls here on a modified, data-journaled file, 1305 * So, if we see any bmap calls here on a modified, data-journaled file,
1306 * take extra steps to flush any blocks which might be in the cache. 1306 * take extra steps to flush any blocks which might be in the cache.
1307 */ 1307 */
1308static sector_t ext3_bmap(struct address_space *mapping, sector_t block) 1308static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
1309{ 1309{
@@ -1312,16 +1312,16 @@ static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
1312 int err; 1312 int err;
1313 1313
1314 if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) { 1314 if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
1315 /* 1315 /*
1316 * This is a REALLY heavyweight approach, but the use of 1316 * This is a REALLY heavyweight approach, but the use of
1317 * bmap on dirty files is expected to be extremely rare: 1317 * bmap on dirty files is expected to be extremely rare:
1318 * only if we run lilo or swapon on a freshly made file 1318 * only if we run lilo or swapon on a freshly made file
1319 * do we expect this to happen. 1319 * do we expect this to happen.
1320 * 1320 *
1321 * (bmap requires CAP_SYS_RAWIO so this does not 1321 * (bmap requires CAP_SYS_RAWIO so this does not
1322 * represent an unprivileged user DOS attack --- we'd be 1322 * represent an unprivileged user DOS attack --- we'd be
1323 * in trouble if mortal users could trigger this path at 1323 * in trouble if mortal users could trigger this path at
1324 * will.) 1324 * will.)
1325 * 1325 *
1326 * NB. EXT3_STATE_JDATA is not set on files other than 1326 * NB. EXT3_STATE_JDATA is not set on files other than
1327 * regular files. If somebody wants to bmap a directory 1327 * regular files. If somebody wants to bmap a directory
@@ -1457,7 +1457,7 @@ static int ext3_ordered_writepage(struct page *page,
1457 */ 1457 */
1458 1458
1459 /* 1459 /*
1460 * And attach them to the current transaction. But only if 1460 * And attach them to the current transaction. But only if
1461 * block_write_full_page() succeeded. Otherwise they are unmapped, 1461 * block_write_full_page() succeeded. Otherwise they are unmapped,
1462 * and generally junk. 1462 * and generally junk.
1463 */ 1463 */
@@ -1644,7 +1644,7 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1644 } 1644 }
1645 } 1645 }
1646 1646
1647 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 1647 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
1648 offset, nr_segs, 1648 offset, nr_segs,
1649 ext3_get_block, NULL); 1649 ext3_get_block, NULL);
1650 1650
@@ -2025,7 +2025,7 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
2025 __le32 *first, __le32 *last) 2025 __le32 *first, __le32 *last)
2026{ 2026{
2027 ext3_fsblk_t block_to_free = 0; /* Starting block # of a run */ 2027 ext3_fsblk_t block_to_free = 0; /* Starting block # of a run */
2028 unsigned long count = 0; /* Number of blocks in the run */ 2028 unsigned long count = 0; /* Number of blocks in the run */
2029 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind 2029 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
2030 corresponding to 2030 corresponding to
2031 block_to_free */ 2031 block_to_free */
@@ -2054,7 +2054,7 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
2054 } else if (nr == block_to_free + count) { 2054 } else if (nr == block_to_free + count) {
2055 count++; 2055 count++;
2056 } else { 2056 } else {
2057 ext3_clear_blocks(handle, inode, this_bh, 2057 ext3_clear_blocks(handle, inode, this_bh,
2058 block_to_free, 2058 block_to_free,
2059 count, block_to_free_p, p); 2059 count, block_to_free_p, p);
2060 block_to_free = nr; 2060 block_to_free = nr;
@@ -2115,7 +2115,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
2115 */ 2115 */
2116 if (!bh) { 2116 if (!bh) {
2117 ext3_error(inode->i_sb, "ext3_free_branches", 2117 ext3_error(inode->i_sb, "ext3_free_branches",
2118 "Read failure, inode=%ld, block="E3FSBLK, 2118 "Read failure, inode=%lu, block="E3FSBLK,
2119 inode->i_ino, nr); 2119 inode->i_ino, nr);
2120 continue; 2120 continue;
2121 } 2121 }
@@ -2184,7 +2184,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
2184 *p = 0; 2184 *p = 0;
2185 BUFFER_TRACE(parent_bh, 2185 BUFFER_TRACE(parent_bh,
2186 "call ext3_journal_dirty_metadata"); 2186 "call ext3_journal_dirty_metadata");
2187 ext3_journal_dirty_metadata(handle, 2187 ext3_journal_dirty_metadata(handle,
2188 parent_bh); 2188 parent_bh);
2189 } 2189 }
2190 } 2190 }
@@ -2632,9 +2632,6 @@ void ext3_read_inode(struct inode * inode)
2632 * recovery code: that's fine, we're about to complete 2632 * recovery code: that's fine, we're about to complete
2633 * the process of deleting those. */ 2633 * the process of deleting those. */
2634 } 2634 }
2635 inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size
2636 * (for stat), not the fs block
2637 * size */
2638 inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); 2635 inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
2639 ei->i_flags = le32_to_cpu(raw_inode->i_flags); 2636 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2640#ifdef EXT3_FRAGMENTS 2637#ifdef EXT3_FRAGMENTS
@@ -2704,7 +2701,7 @@ void ext3_read_inode(struct inode * inode)
2704 if (raw_inode->i_block[0]) 2701 if (raw_inode->i_block[0])
2705 init_special_inode(inode, inode->i_mode, 2702 init_special_inode(inode, inode->i_mode,
2706 old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 2703 old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
2707 else 2704 else
2708 init_special_inode(inode, inode->i_mode, 2705 init_special_inode(inode, inode->i_mode,
2709 new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 2706 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
2710 } 2707 }
@@ -2724,8 +2721,8 @@ bad_inode:
2724 * 2721 *
2725 * The caller must have write access to iloc->bh. 2722 * The caller must have write access to iloc->bh.
2726 */ 2723 */
2727static int ext3_do_update_inode(handle_t *handle, 2724static int ext3_do_update_inode(handle_t *handle,
2728 struct inode *inode, 2725 struct inode *inode,
2729 struct ext3_iloc *iloc) 2726 struct ext3_iloc *iloc)
2730{ 2727{
2731 struct ext3_inode *raw_inode = ext3_raw_inode(iloc); 2728 struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
@@ -2900,7 +2897,7 @@ int ext3_write_inode(struct inode *inode, int wait)
2900 * commit will leave the blocks being flushed in an unused state on 2897 * commit will leave the blocks being flushed in an unused state on
2901 * disk. (On recovery, the inode will get truncated and the blocks will 2898 * disk. (On recovery, the inode will get truncated and the blocks will
2902 * be freed, so we have a strong guarantee that no future commit will 2899 * be freed, so we have a strong guarantee that no future commit will
2903 * leave these blocks visible to the user.) 2900 * leave these blocks visible to the user.)
2904 * 2901 *
2905 * Called with inode->sem down. 2902 * Called with inode->sem down.
2906 */ 2903 */
@@ -3043,13 +3040,13 @@ int ext3_mark_iloc_dirty(handle_t *handle,
3043 return err; 3040 return err;
3044} 3041}
3045 3042
3046/* 3043/*
3047 * On success, We end up with an outstanding reference count against 3044 * On success, We end up with an outstanding reference count against
3048 * iloc->bh. This _must_ be cleaned up later. 3045 * iloc->bh. This _must_ be cleaned up later.
3049 */ 3046 */
3050 3047
3051int 3048int
3052ext3_reserve_inode_write(handle_t *handle, struct inode *inode, 3049ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
3053 struct ext3_iloc *iloc) 3050 struct ext3_iloc *iloc)
3054{ 3051{
3055 int err = 0; 3052 int err = 0;
@@ -3139,7 +3136,7 @@ out:
3139} 3136}
3140 3137
3141#if 0 3138#if 0
3142/* 3139/*
3143 * Bind an inode's backing buffer_head into this transaction, to prevent 3140 * Bind an inode's backing buffer_head into this transaction, to prevent
3144 * it from being flushed to disk early. Unlike 3141 * it from being flushed to disk early. Unlike
3145 * ext3_reserve_inode_write, this leaves behind no bh reference and 3142 * ext3_reserve_inode_write, this leaves behind no bh reference and
@@ -3157,7 +3154,7 @@ static int ext3_pin_inode(handle_t *handle, struct inode *inode)
3157 BUFFER_TRACE(iloc.bh, "get_write_access"); 3154 BUFFER_TRACE(iloc.bh, "get_write_access");
3158 err = journal_get_write_access(handle, iloc.bh); 3155 err = journal_get_write_access(handle, iloc.bh);
3159 if (!err) 3156 if (!err)
3160 err = ext3_journal_dirty_metadata(handle, 3157 err = ext3_journal_dirty_metadata(handle,
3161 iloc.bh); 3158 iloc.bh);
3162 brelse(iloc.bh); 3159 brelse(iloc.bh);
3163 } 3160 }
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 2aa7101b27cd..85d132c37ee0 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -15,13 +15,13 @@
15 * Big-endian to little-endian byte-swapping/bitmaps by 15 * Big-endian to little-endian byte-swapping/bitmaps by
16 * David S. Miller (davem@caip.rutgers.edu), 1995 16 * David S. Miller (davem@caip.rutgers.edu), 1995
17 * Directory entry file type support and forward compatibility hooks 17 * Directory entry file type support and forward compatibility hooks
18 * for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998 18 * for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
19 * Hash Tree Directory indexing (c) 19 * Hash Tree Directory indexing (c)
20 * Daniel Phillips, 2001 20 * Daniel Phillips, 2001
21 * Hash Tree Directory indexing porting 21 * Hash Tree Directory indexing porting
22 * Christopher Li, 2002 22 * Christopher Li, 2002
23 * Hash Tree Directory indexing cleanup 23 * Hash Tree Directory indexing cleanup
24 * Theodore Ts'o, 2002 24 * Theodore Ts'o, 2002
25 */ 25 */
26 26
27#include <linux/fs.h> 27#include <linux/fs.h>
@@ -76,7 +76,7 @@ static struct buffer_head *ext3_append(handle_t *handle,
76#ifdef DX_DEBUG 76#ifdef DX_DEBUG
77#define dxtrace(command) command 77#define dxtrace(command) command
78#else 78#else
79#define dxtrace(command) 79#define dxtrace(command)
80#endif 80#endif
81 81
82struct fake_dirent 82struct fake_dirent
@@ -169,7 +169,7 @@ static struct ext3_dir_entry_2* dx_pack_dirents (char *base, int size);
169static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block); 169static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block);
170static int ext3_htree_next_block(struct inode *dir, __u32 hash, 170static int ext3_htree_next_block(struct inode *dir, __u32 hash,
171 struct dx_frame *frame, 171 struct dx_frame *frame,
172 struct dx_frame *frames, 172 struct dx_frame *frames,
173 __u32 *start_hash); 173 __u32 *start_hash);
174static struct buffer_head * ext3_dx_find_entry(struct dentry *dentry, 174static struct buffer_head * ext3_dx_find_entry(struct dentry *dentry,
175 struct ext3_dir_entry_2 **res_dir, int *err); 175 struct ext3_dir_entry_2 **res_dir, int *err);
@@ -250,7 +250,7 @@ static void dx_show_index (char * label, struct dx_entry *entries)
250} 250}
251 251
252struct stats 252struct stats
253{ 253{
254 unsigned names; 254 unsigned names;
255 unsigned space; 255 unsigned space;
256 unsigned bcount; 256 unsigned bcount;
@@ -278,7 +278,7 @@ static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext3_dir_ent
278 ((char *) de - base)); 278 ((char *) de - base));
279 } 279 }
280 space += EXT3_DIR_REC_LEN(de->name_len); 280 space += EXT3_DIR_REC_LEN(de->name_len);
281 names++; 281 names++;
282 } 282 }
283 de = (struct ext3_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len)); 283 de = (struct ext3_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len));
284 } 284 }
@@ -464,7 +464,7 @@ static void dx_release (struct dx_frame *frames)
464 */ 464 */
465static int ext3_htree_next_block(struct inode *dir, __u32 hash, 465static int ext3_htree_next_block(struct inode *dir, __u32 hash,
466 struct dx_frame *frame, 466 struct dx_frame *frame,
467 struct dx_frame *frames, 467 struct dx_frame *frames,
468 __u32 *start_hash) 468 __u32 *start_hash)
469{ 469{
470 struct dx_frame *p; 470 struct dx_frame *p;
@@ -632,7 +632,7 @@ int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
632 } 632 }
633 count += ret; 633 count += ret;
634 hashval = ~0; 634 hashval = ~0;
635 ret = ext3_htree_next_block(dir, HASH_NB_ALWAYS, 635 ret = ext3_htree_next_block(dir, HASH_NB_ALWAYS,
636 frame, frames, &hashval); 636 frame, frames, &hashval);
637 *next_hash = hashval; 637 *next_hash = hashval;
638 if (ret < 0) { 638 if (ret < 0) {
@@ -649,7 +649,7 @@ int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
649 break; 649 break;
650 } 650 }
651 dx_release(frames); 651 dx_release(frames);
652 dxtrace(printk("Fill tree: returned %d entries, next hash: %x\n", 652 dxtrace(printk("Fill tree: returned %d entries, next hash: %x\n",
653 count, *next_hash)); 653 count, *next_hash));
654 return count; 654 return count;
655errout: 655errout:
@@ -1050,7 +1050,7 @@ struct dentry *ext3_get_parent(struct dentry *child)
1050 parent = ERR_PTR(-ENOMEM); 1050 parent = ERR_PTR(-ENOMEM);
1051 } 1051 }
1052 return parent; 1052 return parent;
1053} 1053}
1054 1054
1055#define S_SHIFT 12 1055#define S_SHIFT 12
1056static unsigned char ext3_type_by_mode[S_IFMT >> S_SHIFT] = { 1056static unsigned char ext3_type_by_mode[S_IFMT >> S_SHIFT] = {
@@ -1198,7 +1198,7 @@ errout:
1198 * add_dirent_to_buf will attempt search the directory block for 1198 * add_dirent_to_buf will attempt search the directory block for
1199 * space. It will return -ENOSPC if no space is available, and -EIO 1199 * space. It will return -ENOSPC if no space is available, and -EIO
1200 * and -EEXIST if directory entry already exists. 1200 * and -EEXIST if directory entry already exists.
1201 * 1201 *
1202 * NOTE! bh is NOT released in the case where ENOSPC is returned. In 1202 * NOTE! bh is NOT released in the case where ENOSPC is returned. In
1203 * all other cases bh is released. 1203 * all other cases bh is released.
1204 */ 1204 */
@@ -1572,7 +1572,7 @@ cleanup:
1572 * ext3_delete_entry deletes a directory entry by merging it with the 1572 * ext3_delete_entry deletes a directory entry by merging it with the
1573 * previous entry 1573 * previous entry
1574 */ 1574 */
1575static int ext3_delete_entry (handle_t *handle, 1575static int ext3_delete_entry (handle_t *handle,
1576 struct inode * dir, 1576 struct inode * dir,
1577 struct ext3_dir_entry_2 * de_del, 1577 struct ext3_dir_entry_2 * de_del,
1578 struct buffer_head * bh) 1578 struct buffer_head * bh)
@@ -1643,12 +1643,12 @@ static int ext3_add_nondir(handle_t *handle,
1643 * is so far negative - it has no inode. 1643 * is so far negative - it has no inode.
1644 * 1644 *
1645 * If the create succeeds, we fill in the inode information 1645 * If the create succeeds, we fill in the inode information
1646 * with d_instantiate(). 1646 * with d_instantiate().
1647 */ 1647 */
1648static int ext3_create (struct inode * dir, struct dentry * dentry, int mode, 1648static int ext3_create (struct inode * dir, struct dentry * dentry, int mode,
1649 struct nameidata *nd) 1649 struct nameidata *nd)
1650{ 1650{
1651 handle_t *handle; 1651 handle_t *handle;
1652 struct inode * inode; 1652 struct inode * inode;
1653 int err, retries = 0; 1653 int err, retries = 0;
1654 1654
@@ -1688,7 +1688,7 @@ static int ext3_mknod (struct inode * dir, struct dentry *dentry,
1688 1688
1689retry: 1689retry:
1690 handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + 1690 handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
1691 EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + 1691 EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
1692 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb)); 1692 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb));
1693 if (IS_ERR(handle)) 1693 if (IS_ERR(handle))
1694 return PTR_ERR(handle); 1694 return PTR_ERR(handle);
@@ -1813,10 +1813,10 @@ static int empty_dir (struct inode * inode)
1813 de1 = (struct ext3_dir_entry_2 *) 1813 de1 = (struct ext3_dir_entry_2 *)
1814 ((char *) de + le16_to_cpu(de->rec_len)); 1814 ((char *) de + le16_to_cpu(de->rec_len));
1815 if (le32_to_cpu(de->inode) != inode->i_ino || 1815 if (le32_to_cpu(de->inode) != inode->i_ino ||
1816 !le32_to_cpu(de1->inode) || 1816 !le32_to_cpu(de1->inode) ||
1817 strcmp (".", de->name) || 1817 strcmp (".", de->name) ||
1818 strcmp ("..", de1->name)) { 1818 strcmp ("..", de1->name)) {
1819 ext3_warning (inode->i_sb, "empty_dir", 1819 ext3_warning (inode->i_sb, "empty_dir",
1820 "bad directory (dir #%lu) - no `.' or `..'", 1820 "bad directory (dir #%lu) - no `.' or `..'",
1821 inode->i_ino); 1821 inode->i_ino);
1822 brelse (bh); 1822 brelse (bh);
@@ -1883,7 +1883,7 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode)
1883 * being truncated, or files being unlinked. */ 1883 * being truncated, or files being unlinked. */
1884 1884
1885 /* @@@ FIXME: Observation from aviro: 1885 /* @@@ FIXME: Observation from aviro:
1886 * I think I can trigger J_ASSERT in ext3_orphan_add(). We block 1886 * I think I can trigger J_ASSERT in ext3_orphan_add(). We block
1887 * here (on lock_super()), so race with ext3_link() which might bump 1887 * here (on lock_super()), so race with ext3_link() which might bump
1888 * ->i_nlink. For, say it, character device. Not a regular file, 1888 * ->i_nlink. For, say it, character device. Not a regular file,
1889 * not a directory, not a symlink and ->i_nlink > 0. 1889 * not a directory, not a symlink and ->i_nlink > 0.
@@ -1919,8 +1919,8 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode)
1919 if (!err) 1919 if (!err)
1920 list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan); 1920 list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
1921 1921
1922 jbd_debug(4, "superblock will point to %ld\n", inode->i_ino); 1922 jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
1923 jbd_debug(4, "orphan inode %ld will point to %d\n", 1923 jbd_debug(4, "orphan inode %lu will point to %d\n",
1924 inode->i_ino, NEXT_ORPHAN(inode)); 1924 inode->i_ino, NEXT_ORPHAN(inode));
1925out_unlock: 1925out_unlock:
1926 unlock_super(sb); 1926 unlock_super(sb);
@@ -2129,7 +2129,7 @@ static int ext3_symlink (struct inode * dir,
2129 2129
2130retry: 2130retry:
2131 handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + 2131 handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
2132 EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5 + 2132 EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5 +
2133 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb)); 2133 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb));
2134 if (IS_ERR(handle)) 2134 if (IS_ERR(handle))
2135 return PTR_ERR(handle); 2135 return PTR_ERR(handle);
@@ -2227,7 +2227,7 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
2227 DQUOT_INIT(new_dentry->d_inode); 2227 DQUOT_INIT(new_dentry->d_inode);
2228 handle = ext3_journal_start(old_dir, 2 * 2228 handle = ext3_journal_start(old_dir, 2 *
2229 EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) + 2229 EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) +
2230 EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2); 2230 EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
2231 if (IS_ERR(handle)) 2231 if (IS_ERR(handle))
2232 return PTR_ERR(handle); 2232 return PTR_ERR(handle);
2233 2233
@@ -2393,4 +2393,4 @@ struct inode_operations ext3_special_inode_operations = {
2393 .removexattr = generic_removexattr, 2393 .removexattr = generic_removexattr,
2394#endif 2394#endif
2395 .permission = ext3_permission, 2395 .permission = ext3_permission,
2396}; 2396};
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 5e1337fd878a..b73cba12f79c 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -336,7 +336,7 @@ static int verify_reserved_gdb(struct super_block *sb,
336 unsigned five = 5; 336 unsigned five = 5;
337 unsigned seven = 7; 337 unsigned seven = 7;
338 unsigned grp; 338 unsigned grp;
339 __u32 *p = (__u32 *)primary->b_data; 339 __le32 *p = (__le32 *)primary->b_data;
340 int gdbackups = 0; 340 int gdbackups = 0;
341 341
342 while ((grp = ext3_list_backups(sb, &three, &five, &seven)) < end) { 342 while ((grp = ext3_list_backups(sb, &three, &five, &seven)) < end) {
@@ -380,7 +380,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
380 struct buffer_head *dind; 380 struct buffer_head *dind;
381 int gdbackups; 381 int gdbackups;
382 struct ext3_iloc iloc; 382 struct ext3_iloc iloc;
383 __u32 *data; 383 __le32 *data;
384 int err; 384 int err;
385 385
386 if (test_opt(sb, DEBUG)) 386 if (test_opt(sb, DEBUG))
@@ -417,7 +417,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
417 goto exit_bh; 417 goto exit_bh;
418 } 418 }
419 419
420 data = (__u32 *)dind->b_data; 420 data = (__le32 *)dind->b_data;
421 if (le32_to_cpu(data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)]) != gdblock) { 421 if (le32_to_cpu(data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)]) != gdblock) {
422 ext3_warning(sb, __FUNCTION__, 422 ext3_warning(sb, __FUNCTION__,
423 "new group %u GDT block "E3FSBLK" not reserved", 423 "new group %u GDT block "E3FSBLK" not reserved",
@@ -439,8 +439,8 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
439 if ((err = ext3_reserve_inode_write(handle, inode, &iloc))) 439 if ((err = ext3_reserve_inode_write(handle, inode, &iloc)))
440 goto exit_dindj; 440 goto exit_dindj;
441 441
442 n_group_desc = (struct buffer_head **)kmalloc((gdb_num + 1) * 442 n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
443 sizeof(struct buffer_head *), GFP_KERNEL); 443 GFP_KERNEL);
444 if (!n_group_desc) { 444 if (!n_group_desc) {
445 err = -ENOMEM; 445 err = -ENOMEM;
446 ext3_warning (sb, __FUNCTION__, 446 ext3_warning (sb, __FUNCTION__,
@@ -519,7 +519,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
519 struct buffer_head *dind; 519 struct buffer_head *dind;
520 struct ext3_iloc iloc; 520 struct ext3_iloc iloc;
521 ext3_fsblk_t blk; 521 ext3_fsblk_t blk;
522 __u32 *data, *end; 522 __le32 *data, *end;
523 int gdbackups = 0; 523 int gdbackups = 0;
524 int res, i; 524 int res, i;
525 int err; 525 int err;
@@ -536,8 +536,8 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
536 } 536 }
537 537
538 blk = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + EXT3_SB(sb)->s_gdb_count; 538 blk = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + EXT3_SB(sb)->s_gdb_count;
539 data = (__u32 *)dind->b_data + EXT3_SB(sb)->s_gdb_count; 539 data = (__le32 *)dind->b_data + EXT3_SB(sb)->s_gdb_count;
540 end = (__u32 *)dind->b_data + EXT3_ADDR_PER_BLOCK(sb); 540 end = (__le32 *)dind->b_data + EXT3_ADDR_PER_BLOCK(sb);
541 541
542 /* Get each reserved primary GDT block and verify it holds backups */ 542 /* Get each reserved primary GDT block and verify it holds backups */
543 for (res = 0; res < reserved_gdb; res++, blk++) { 543 for (res = 0; res < reserved_gdb; res++, blk++) {
@@ -545,7 +545,8 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
545 ext3_warning(sb, __FUNCTION__, 545 ext3_warning(sb, __FUNCTION__,
546 "reserved block "E3FSBLK 546 "reserved block "E3FSBLK
547 " not at offset %ld", 547 " not at offset %ld",
548 blk, (long)(data - (__u32 *)dind->b_data)); 548 blk,
549 (long)(data - (__le32 *)dind->b_data));
549 err = -EINVAL; 550 err = -EINVAL;
550 goto exit_bh; 551 goto exit_bh;
551 } 552 }
@@ -560,7 +561,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
560 goto exit_bh; 561 goto exit_bh;
561 } 562 }
562 if (++data >= end) 563 if (++data >= end)
563 data = (__u32 *)dind->b_data; 564 data = (__le32 *)dind->b_data;
564 } 565 }
565 566
566 for (i = 0; i < reserved_gdb; i++) { 567 for (i = 0; i < reserved_gdb; i++) {
@@ -584,7 +585,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
584 blk = input->group * EXT3_BLOCKS_PER_GROUP(sb); 585 blk = input->group * EXT3_BLOCKS_PER_GROUP(sb);
585 for (i = 0; i < reserved_gdb; i++) { 586 for (i = 0; i < reserved_gdb; i++) {
586 int err2; 587 int err2;
587 data = (__u32 *)primary[i]->b_data; 588 data = (__le32 *)primary[i]->b_data;
588 /* printk("reserving backup %lu[%u] = %lu\n", 589 /* printk("reserving backup %lu[%u] = %lu\n",
589 primary[i]->b_blocknr, gdbackups, 590 primary[i]->b_blocknr, gdbackups,
590 blk + primary[i]->b_blocknr); */ 591 blk + primary[i]->b_blocknr); */
@@ -689,7 +690,7 @@ exit_err:
689 "can't update backup for group %d (err %d), " 690 "can't update backup for group %d (err %d), "
690 "forcing fsck on next reboot", group, err); 691 "forcing fsck on next reboot", group, err);
691 sbi->s_mount_state &= ~EXT3_VALID_FS; 692 sbi->s_mount_state &= ~EXT3_VALID_FS;
692 sbi->s_es->s_state &= ~cpu_to_le16(EXT3_VALID_FS); 693 sbi->s_es->s_state &= cpu_to_le16(~EXT3_VALID_FS);
693 mark_buffer_dirty(sbi->s_sbh); 694 mark_buffer_dirty(sbi->s_sbh);
694 } 695 }
695} 696}
@@ -730,6 +731,18 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
730 return -EPERM; 731 return -EPERM;
731 } 732 }
732 733
734 if (le32_to_cpu(es->s_blocks_count) + input->blocks_count <
735 le32_to_cpu(es->s_blocks_count)) {
736 ext3_warning(sb, __FUNCTION__, "blocks_count overflow\n");
737 return -EINVAL;
738 }
739
740 if (le32_to_cpu(es->s_inodes_count) + EXT3_INODES_PER_GROUP(sb) <
741 le32_to_cpu(es->s_inodes_count)) {
742 ext3_warning(sb, __FUNCTION__, "inodes_count overflow\n");
743 return -EINVAL;
744 }
745
733 if (reserved_gdb || gdb_off == 0) { 746 if (reserved_gdb || gdb_off == 0) {
734 if (!EXT3_HAS_COMPAT_FEATURE(sb, 747 if (!EXT3_HAS_COMPAT_FEATURE(sb,
735 EXT3_FEATURE_COMPAT_RESIZE_INODE)){ 748 EXT3_FEATURE_COMPAT_RESIZE_INODE)){
@@ -958,6 +971,11 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
958 971
959 add = EXT3_BLOCKS_PER_GROUP(sb) - last; 972 add = EXT3_BLOCKS_PER_GROUP(sb) - last;
960 973
974 if (o_blocks_count + add < o_blocks_count) {
975 ext3_warning(sb, __FUNCTION__, "blocks_count overflow");
976 return -EINVAL;
977 }
978
961 if (o_blocks_count + add > n_blocks_count) 979 if (o_blocks_count + add > n_blocks_count)
962 add = n_blocks_count - o_blocks_count; 980 add = n_blocks_count - o_blocks_count;
963 981
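
Both overflow guards added to resize.c rely on the same property: unsigned addition wraps, so a sum smaller than one of its operands means the addition overflowed. A tiny standalone illustration:

#include <limits.h>
#include <stdio.h>

/* Returns 1 if a + b would wrap around the unsigned int range. */
static int add_overflows(unsigned int a, unsigned int b)
{
        return a + b < a;
}

int main(void)
{
        printf("%d\n", add_overflows(10, 20));          /* 0 */
        printf("%d\n", add_overflows(UINT_MAX - 1, 5)); /* 1 */
        return 0;
}
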
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 3559086eee5f..8bfd56ef18ca 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -45,7 +45,7 @@
45static int ext3_load_journal(struct super_block *, struct ext3_super_block *, 45static int ext3_load_journal(struct super_block *, struct ext3_super_block *,
46 unsigned long journal_devnum); 46 unsigned long journal_devnum);
47static int ext3_create_journal(struct super_block *, struct ext3_super_block *, 47static int ext3_create_journal(struct super_block *, struct ext3_super_block *,
48 int); 48 unsigned int);
49static void ext3_commit_super (struct super_block * sb, 49static void ext3_commit_super (struct super_block * sb,
50 struct ext3_super_block * es, 50 struct ext3_super_block * es,
51 int sync); 51 int sync);
@@ -62,13 +62,13 @@ static void ext3_unlockfs(struct super_block *sb);
62static void ext3_write_super (struct super_block * sb); 62static void ext3_write_super (struct super_block * sb);
63static void ext3_write_super_lockfs(struct super_block *sb); 63static void ext3_write_super_lockfs(struct super_block *sb);
64 64
65/* 65/*
66 * Wrappers for journal_start/end. 66 * Wrappers for journal_start/end.
67 * 67 *
68 * The only special thing we need to do here is to make sure that all 68 * The only special thing we need to do here is to make sure that all
69 * journal_end calls result in the superblock being marked dirty, so 69 * journal_end calls result in the superblock being marked dirty, so
70 * that sync() will call the filesystem's write_super callback if 70 * that sync() will call the filesystem's write_super callback if
71 * appropriate. 71 * appropriate.
72 */ 72 */
73handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks) 73handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
74{ 74{
@@ -90,11 +90,11 @@ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
90 return journal_start(journal, nblocks); 90 return journal_start(journal, nblocks);
91} 91}
92 92
93/* 93/*
94 * The only special thing we need to do here is to make sure that all 94 * The only special thing we need to do here is to make sure that all
95 * journal_stop calls result in the superblock being marked dirty, so 95 * journal_stop calls result in the superblock being marked dirty, so
96 * that sync() will call the filesystem's write_super callback if 96 * that sync() will call the filesystem's write_super callback if
97 * appropriate. 97 * appropriate.
98 */ 98 */
99int __ext3_journal_stop(const char *where, handle_t *handle) 99int __ext3_journal_stop(const char *where, handle_t *handle)
100{ 100{
@@ -159,20 +159,21 @@ static void ext3_handle_error(struct super_block *sb)
159 if (sb->s_flags & MS_RDONLY) 159 if (sb->s_flags & MS_RDONLY)
160 return; 160 return;
161 161
162 if (test_opt (sb, ERRORS_RO)) { 162 if (!test_opt (sb, ERRORS_CONT)) {
163 printk (KERN_CRIT "Remounting filesystem read-only\n");
164 sb->s_flags |= MS_RDONLY;
165 } else {
166 journal_t *journal = EXT3_SB(sb)->s_journal; 163 journal_t *journal = EXT3_SB(sb)->s_journal;
167 164
168 EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT; 165 EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT;
169 if (journal) 166 if (journal)
170 journal_abort(journal, -EIO); 167 journal_abort(journal, -EIO);
171 } 168 }
169 if (test_opt (sb, ERRORS_RO)) {
170 printk (KERN_CRIT "Remounting filesystem read-only\n");
171 sb->s_flags |= MS_RDONLY;
172 }
173 ext3_commit_super(sb, es, 1);
172 if (test_opt(sb, ERRORS_PANIC)) 174 if (test_opt(sb, ERRORS_PANIC))
173 panic("EXT3-fs (device %s): panic forced after error\n", 175 panic("EXT3-fs (device %s): panic forced after error\n",
174 sb->s_id); 176 sb->s_id);
175 ext3_commit_super(sb, es, 1);
176} 177}
177 178
178void ext3_error (struct super_block * sb, const char * function, 179void ext3_error (struct super_block * sb, const char * function,
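The reordering above changes when each errors= policy acts: unless errors=continue is set the journal is aborted first, errors=remount-ro then flips the filesystem read-only, the superblock is committed so the error state reaches disk, and errors=panic fires last. A condensed view of the resulting flow, using only the helpers shown in the hunk:

    if (!test_opt(sb, ERRORS_CONT)) {
        journal_t *journal = EXT3_SB(sb)->s_journal;

        EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT;
        if (journal)
            journal_abort(journal, -EIO);
    }
    if (test_opt(sb, ERRORS_RO)) {
        printk(KERN_CRIT "Remounting filesystem read-only\n");
        sb->s_flags |= MS_RDONLY;
    }
    ext3_commit_super(sb, es, 1);   /* record the error before any panic */
    if (test_opt(sb, ERRORS_PANIC))
        panic("EXT3-fs (device %s): panic forced after error\n", sb->s_id);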
@@ -369,16 +370,16 @@ static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi)
369{ 370{
370 struct list_head *l; 371 struct list_head *l;
371 372
372 printk(KERN_ERR "sb orphan head is %d\n", 373 printk(KERN_ERR "sb orphan head is %d\n",
373 le32_to_cpu(sbi->s_es->s_last_orphan)); 374 le32_to_cpu(sbi->s_es->s_last_orphan));
374 375
375 printk(KERN_ERR "sb_info orphan list:\n"); 376 printk(KERN_ERR "sb_info orphan list:\n");
376 list_for_each(l, &sbi->s_orphan) { 377 list_for_each(l, &sbi->s_orphan) {
377 struct inode *inode = orphan_list_entry(l); 378 struct inode *inode = orphan_list_entry(l);
378 printk(KERN_ERR " " 379 printk(KERN_ERR " "
379 "inode %s:%ld at %p: mode %o, nlink %d, next %d\n", 380 "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
380 inode->i_sb->s_id, inode->i_ino, inode, 381 inode->i_sb->s_id, inode->i_ino, inode,
381 inode->i_mode, inode->i_nlink, 382 inode->i_mode, inode->i_nlink,
382 NEXT_ORPHAN(inode)); 383 NEXT_ORPHAN(inode));
383 } 384 }
384} 385}
@@ -475,7 +476,7 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
475 inode_init_once(&ei->vfs_inode); 476 inode_init_once(&ei->vfs_inode);
476 } 477 }
477} 478}
478 479
479static int init_inodecache(void) 480static int init_inodecache(void)
480{ 481{
481 ext3_inode_cachep = kmem_cache_create("ext3_inode_cache", 482 ext3_inode_cachep = kmem_cache_create("ext3_inode_cache",
@@ -490,8 +491,7 @@ static int init_inodecache(void)
490 491
491static void destroy_inodecache(void) 492static void destroy_inodecache(void)
492{ 493{
493 if (kmem_cache_destroy(ext3_inode_cachep)) 494 kmem_cache_destroy(ext3_inode_cachep);
494 printk(KERN_INFO "ext3_inode_cache: not all structures were freed\n");
495} 495}
496 496
497static void ext3_clear_inode(struct inode *inode) 497static void ext3_clear_inode(struct inode *inode)
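This hunk, and the matching ones for fat, hfs, hfsplus, hpfs and isofs further down, drop the error printk around cache teardown, which is consistent with kmem_cache_destroy() being treated as returning void in this series. The resulting idiom is simply:

    static void destroy_inodecache(void)
    {
        /* any objects still alive are reported by the slab layer itself,
         * not by the caller (assumption based on the pattern here) */
        kmem_cache_destroy(ext3_inode_cachep);
    }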
@@ -733,8 +733,8 @@ static match_table_t tokens = {
733 733
734static ext3_fsblk_t get_sb_block(void **data) 734static ext3_fsblk_t get_sb_block(void **data)
735{ 735{
736 ext3_fsblk_t sb_block; 736 ext3_fsblk_t sb_block;
737 char *options = (char *) *data; 737 char *options = (char *) *data;
738 738
739 if (!options || strncmp(options, "sb=", 3) != 0) 739 if (!options || strncmp(options, "sb=", 3) != 0)
740 return 1; /* Default location */ 740 return 1; /* Default location */
@@ -753,7 +753,7 @@ static ext3_fsblk_t get_sb_block(void **data)
753} 753}
754 754
755static int parse_options (char *options, struct super_block *sb, 755static int parse_options (char *options, struct super_block *sb,
756 unsigned long *inum, unsigned long *journal_devnum, 756 unsigned int *inum, unsigned long *journal_devnum,
757 ext3_fsblk_t *n_blocks_count, int is_remount) 757 ext3_fsblk_t *n_blocks_count, int is_remount)
758{ 758{
759 struct ext3_sb_info *sbi = EXT3_SB(sb); 759 struct ext3_sb_info *sbi = EXT3_SB(sb);
@@ -1174,7 +1174,8 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
1174static int ext3_check_descriptors (struct super_block * sb) 1174static int ext3_check_descriptors (struct super_block * sb)
1175{ 1175{
1176 struct ext3_sb_info *sbi = EXT3_SB(sb); 1176 struct ext3_sb_info *sbi = EXT3_SB(sb);
1177 ext3_fsblk_t block = le32_to_cpu(sbi->s_es->s_first_data_block); 1177 ext3_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
1178 ext3_fsblk_t last_block;
1178 struct ext3_group_desc * gdp = NULL; 1179 struct ext3_group_desc * gdp = NULL;
1179 int desc_block = 0; 1180 int desc_block = 0;
1180 int i; 1181 int i;
@@ -1183,12 +1184,17 @@ static int ext3_check_descriptors (struct super_block * sb)
1183 1184
1184 for (i = 0; i < sbi->s_groups_count; i++) 1185 for (i = 0; i < sbi->s_groups_count; i++)
1185 { 1186 {
1187 if (i == sbi->s_groups_count - 1)
1188 last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
1189 else
1190 last_block = first_block +
1191 (EXT3_BLOCKS_PER_GROUP(sb) - 1);
1192
1186 if ((i % EXT3_DESC_PER_BLOCK(sb)) == 0) 1193 if ((i % EXT3_DESC_PER_BLOCK(sb)) == 0)
1187 gdp = (struct ext3_group_desc *) 1194 gdp = (struct ext3_group_desc *)
1188 sbi->s_group_desc[desc_block++]->b_data; 1195 sbi->s_group_desc[desc_block++]->b_data;
1189 if (le32_to_cpu(gdp->bg_block_bitmap) < block || 1196 if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
1190 le32_to_cpu(gdp->bg_block_bitmap) >= 1197 le32_to_cpu(gdp->bg_block_bitmap) > last_block)
1191 block + EXT3_BLOCKS_PER_GROUP(sb))
1192 { 1198 {
1193 ext3_error (sb, "ext3_check_descriptors", 1199 ext3_error (sb, "ext3_check_descriptors",
1194 "Block bitmap for group %d" 1200 "Block bitmap for group %d"
@@ -1197,9 +1203,8 @@ static int ext3_check_descriptors (struct super_block * sb)
1197 le32_to_cpu(gdp->bg_block_bitmap)); 1203 le32_to_cpu(gdp->bg_block_bitmap));
1198 return 0; 1204 return 0;
1199 } 1205 }
1200 if (le32_to_cpu(gdp->bg_inode_bitmap) < block || 1206 if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block ||
1201 le32_to_cpu(gdp->bg_inode_bitmap) >= 1207 le32_to_cpu(gdp->bg_inode_bitmap) > last_block)
1202 block + EXT3_BLOCKS_PER_GROUP(sb))
1203 { 1208 {
1204 ext3_error (sb, "ext3_check_descriptors", 1209 ext3_error (sb, "ext3_check_descriptors",
1205 "Inode bitmap for group %d" 1210 "Inode bitmap for group %d"
@@ -1208,9 +1213,9 @@ static int ext3_check_descriptors (struct super_block * sb)
1208 le32_to_cpu(gdp->bg_inode_bitmap)); 1213 le32_to_cpu(gdp->bg_inode_bitmap));
1209 return 0; 1214 return 0;
1210 } 1215 }
1211 if (le32_to_cpu(gdp->bg_inode_table) < block || 1216 if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
1212 le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group >= 1217 le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group >
1213 block + EXT3_BLOCKS_PER_GROUP(sb)) 1218 last_block)
1214 { 1219 {
1215 ext3_error (sb, "ext3_check_descriptors", 1220 ext3_error (sb, "ext3_check_descriptors",
1216 "Inode table for group %d" 1221 "Inode table for group %d"
@@ -1219,7 +1224,7 @@ static int ext3_check_descriptors (struct super_block * sb)
1219 le32_to_cpu(gdp->bg_inode_table)); 1224 le32_to_cpu(gdp->bg_inode_table));
1220 return 0; 1225 return 0;
1221 } 1226 }
1222 block += EXT3_BLOCKS_PER_GROUP(sb); 1227 first_block += EXT3_BLOCKS_PER_GROUP(sb);
1223 gdp++; 1228 gdp++;
1224 } 1229 }
1225 1230
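The descriptor checks above now bound each group by an inclusive last_block instead of an exclusive first_block + EXT3_BLOCKS_PER_GROUP(sb), because the final group may contain fewer blocks than a full group. A sketch of the per-group bound, using only fields that appear in the hunks:

    if (i == sbi->s_groups_count - 1)
        last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
    else
        last_block = first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);

    /* a bitmap or table block B belongs to group i iff
     *     first_block <= B && B <= last_block            */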
@@ -1301,17 +1306,17 @@ static void ext3_orphan_cleanup (struct super_block * sb,
1301 DQUOT_INIT(inode); 1306 DQUOT_INIT(inode);
1302 if (inode->i_nlink) { 1307 if (inode->i_nlink) {
1303 printk(KERN_DEBUG 1308 printk(KERN_DEBUG
1304 "%s: truncating inode %ld to %Ld bytes\n", 1309 "%s: truncating inode %lu to %Ld bytes\n",
1305 __FUNCTION__, inode->i_ino, inode->i_size); 1310 __FUNCTION__, inode->i_ino, inode->i_size);
1306 jbd_debug(2, "truncating inode %ld to %Ld bytes\n", 1311 jbd_debug(2, "truncating inode %lu to %Ld bytes\n",
1307 inode->i_ino, inode->i_size); 1312 inode->i_ino, inode->i_size);
1308 ext3_truncate(inode); 1313 ext3_truncate(inode);
1309 nr_truncates++; 1314 nr_truncates++;
1310 } else { 1315 } else {
1311 printk(KERN_DEBUG 1316 printk(KERN_DEBUG
1312 "%s: deleting unreferenced inode %ld\n", 1317 "%s: deleting unreferenced inode %lu\n",
1313 __FUNCTION__, inode->i_ino); 1318 __FUNCTION__, inode->i_ino);
1314 jbd_debug(2, "deleting unreferenced inode %ld\n", 1319 jbd_debug(2, "deleting unreferenced inode %lu\n",
1315 inode->i_ino); 1320 inode->i_ino);
1316 nr_orphans++; 1321 nr_orphans++;
1317 } 1322 }
@@ -1390,7 +1395,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1390 ext3_fsblk_t sb_block = get_sb_block(&data); 1395 ext3_fsblk_t sb_block = get_sb_block(&data);
1391 ext3_fsblk_t logic_sb_block; 1396 ext3_fsblk_t logic_sb_block;
1392 unsigned long offset = 0; 1397 unsigned long offset = 0;
1393 unsigned long journal_inum = 0; 1398 unsigned int journal_inum = 0;
1394 unsigned long journal_devnum = 0; 1399 unsigned long journal_devnum = 0;
1395 unsigned long def_mount_opts; 1400 unsigned long def_mount_opts;
1396 struct inode *root; 1401 struct inode *root;
@@ -1401,11 +1406,10 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1401 int needs_recovery; 1406 int needs_recovery;
1402 __le32 features; 1407 __le32 features;
1403 1408
1404 sbi = kmalloc(sizeof(*sbi), GFP_KERNEL); 1409 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
1405 if (!sbi) 1410 if (!sbi)
1406 return -ENOMEM; 1411 return -ENOMEM;
1407 sb->s_fs_info = sbi; 1412 sb->s_fs_info = sbi;
1408 memset(sbi, 0, sizeof(*sbi));
1409 sbi->s_mount_opt = 0; 1413 sbi->s_mount_opt = 0;
1410 sbi->s_resuid = EXT3_DEF_RESUID; 1414 sbi->s_resuid = EXT3_DEF_RESUID;
1411 sbi->s_resgid = EXT3_DEF_RESGID; 1415 sbi->s_resgid = EXT3_DEF_RESGID;
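The kmalloc() plus memset() pair collapses into kzalloc(); the same conversion recurs below for the fat, hfs, hpfs and isofs sb_info allocations and for the hfs/hfsplus btree and bnode objects. The two forms are equivalent:

    /* before */
    sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);
    if (!sbi)
        return -ENOMEM;
    memset(sbi, 0, sizeof(*sbi));

    /* after: one call that returns zeroed memory */
    sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
    if (!sbi)
        return -ENOMEM;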
@@ -1483,7 +1487,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1483 (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) || 1487 (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) ||
1484 EXT3_HAS_RO_COMPAT_FEATURE(sb, ~0U) || 1488 EXT3_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
1485 EXT3_HAS_INCOMPAT_FEATURE(sb, ~0U))) 1489 EXT3_HAS_INCOMPAT_FEATURE(sb, ~0U)))
1486 printk(KERN_WARNING 1490 printk(KERN_WARNING
1487 "EXT3-fs warning: feature flags set on rev 0 fs, " 1491 "EXT3-fs warning: feature flags set on rev 0 fs, "
1488 "running e2fsck is recommended\n"); 1492 "running e2fsck is recommended\n");
1489 /* 1493 /*
@@ -1509,7 +1513,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1509 1513
1510 if (blocksize < EXT3_MIN_BLOCK_SIZE || 1514 if (blocksize < EXT3_MIN_BLOCK_SIZE ||
1511 blocksize > EXT3_MAX_BLOCK_SIZE) { 1515 blocksize > EXT3_MAX_BLOCK_SIZE) {
1512 printk(KERN_ERR 1516 printk(KERN_ERR
1513 "EXT3-fs: Unsupported filesystem blocksize %d on %s.\n", 1517 "EXT3-fs: Unsupported filesystem blocksize %d on %s.\n",
1514 blocksize, sb->s_id); 1518 blocksize, sb->s_id);
1515 goto failed_mount; 1519 goto failed_mount;
@@ -1533,14 +1537,14 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1533 offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize; 1537 offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
1534 bh = sb_bread(sb, logic_sb_block); 1538 bh = sb_bread(sb, logic_sb_block);
1535 if (!bh) { 1539 if (!bh) {
1536 printk(KERN_ERR 1540 printk(KERN_ERR
1537 "EXT3-fs: Can't read superblock on 2nd try.\n"); 1541 "EXT3-fs: Can't read superblock on 2nd try.\n");
1538 goto failed_mount; 1542 goto failed_mount;
1539 } 1543 }
1540 es = (struct ext3_super_block *)(((char *)bh->b_data) + offset); 1544 es = (struct ext3_super_block *)(((char *)bh->b_data) + offset);
1541 sbi->s_es = es; 1545 sbi->s_es = es;
1542 if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) { 1546 if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) {
1543 printk (KERN_ERR 1547 printk (KERN_ERR
1544 "EXT3-fs: Magic mismatch, very weird !\n"); 1548 "EXT3-fs: Magic mismatch, very weird !\n");
1545 goto failed_mount; 1549 goto failed_mount;
1546 } 1550 }
@@ -1622,10 +1626,9 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1622 1626
1623 if (EXT3_BLOCKS_PER_GROUP(sb) == 0) 1627 if (EXT3_BLOCKS_PER_GROUP(sb) == 0)
1624 goto cantfind_ext3; 1628 goto cantfind_ext3;
1625 sbi->s_groups_count = (le32_to_cpu(es->s_blocks_count) - 1629 sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
1626 le32_to_cpu(es->s_first_data_block) + 1630 le32_to_cpu(es->s_first_data_block) - 1)
1627 EXT3_BLOCKS_PER_GROUP(sb) - 1) / 1631 / EXT3_BLOCKS_PER_GROUP(sb)) + 1;
1628 EXT3_BLOCKS_PER_GROUP(sb);
1629 db_count = (sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) / 1632 db_count = (sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) /
1630 EXT3_DESC_PER_BLOCK(sb); 1633 EXT3_DESC_PER_BLOCK(sb);
1631 sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *), 1634 sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *),
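The group count is still a ceiling division, but the rewritten form avoids adding EXT3_BLOCKS_PER_GROUP(sb) - 1 to a value that may already be near the top of the type's range. For n = s_blocks_count - s_first_data_block > 0 and g = EXT3_BLOCKS_PER_GROUP(sb), both expressions compute the ceiling of n / g:

    groups = (n + g - 1) / g;       /* old form: the sum n + g - 1 can wrap */
    groups = ((n - 1) / g) + 1;     /* new form: no oversized intermediate  */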
@@ -1820,7 +1823,7 @@ out_fail:
1820/* 1823/*
1821 * Setup any per-fs journal parameters now. We'll do this both on 1824 * Setup any per-fs journal parameters now. We'll do this both on
1822 * initial mount, once the journal has been initialised but before we've 1825 * initial mount, once the journal has been initialised but before we've
1823 * done any recovery; and again on any subsequent remount. 1826 * done any recovery; and again on any subsequent remount.
1824 */ 1827 */
1825static void ext3_init_journal_params(struct super_block *sb, journal_t *journal) 1828static void ext3_init_journal_params(struct super_block *sb, journal_t *journal)
1826{ 1829{
@@ -1840,7 +1843,8 @@ static void ext3_init_journal_params(struct super_block *sb, journal_t *journal)
1840 spin_unlock(&journal->j_state_lock); 1843 spin_unlock(&journal->j_state_lock);
1841} 1844}
1842 1845
1843static journal_t *ext3_get_journal(struct super_block *sb, int journal_inum) 1846static journal_t *ext3_get_journal(struct super_block *sb,
1847 unsigned int journal_inum)
1844{ 1848{
1845 struct inode *journal_inode; 1849 struct inode *journal_inode;
1846 journal_t *journal; 1850 journal_t *journal;
@@ -1975,7 +1979,7 @@ static int ext3_load_journal(struct super_block *sb,
1975 unsigned long journal_devnum) 1979 unsigned long journal_devnum)
1976{ 1980{
1977 journal_t *journal; 1981 journal_t *journal;
1978 int journal_inum = le32_to_cpu(es->s_journal_inum); 1982 unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
1979 dev_t journal_dev; 1983 dev_t journal_dev;
1980 int err = 0; 1984 int err = 0;
1981 int really_read_only; 1985 int really_read_only;
@@ -2061,7 +2065,7 @@ static int ext3_load_journal(struct super_block *sb,
2061 2065
2062static int ext3_create_journal(struct super_block * sb, 2066static int ext3_create_journal(struct super_block * sb,
2063 struct ext3_super_block * es, 2067 struct ext3_super_block * es,
2064 int journal_inum) 2068 unsigned int journal_inum)
2065{ 2069{
2066 journal_t *journal; 2070 journal_t *journal;
2067 2071
@@ -2074,7 +2078,7 @@ static int ext3_create_journal(struct super_block * sb,
2074 if (!(journal = ext3_get_journal(sb, journal_inum))) 2078 if (!(journal = ext3_get_journal(sb, journal_inum)))
2075 return -EINVAL; 2079 return -EINVAL;
2076 2080
2077 printk(KERN_INFO "EXT3-fs: creating new journal on inode %d\n", 2081 printk(KERN_INFO "EXT3-fs: creating new journal on inode %u\n",
2078 journal_inum); 2082 journal_inum);
2079 2083
2080 if (journal_create(journal)) { 2084 if (journal_create(journal)) {
@@ -2342,10 +2346,8 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
2342 */ 2346 */
2343 ext3_clear_journal_err(sb, es); 2347 ext3_clear_journal_err(sb, es);
2344 sbi->s_mount_state = le16_to_cpu(es->s_state); 2348 sbi->s_mount_state = le16_to_cpu(es->s_state);
2345 if ((ret = ext3_group_extend(sb, es, n_blocks_count))) { 2349 if ((err = ext3_group_extend(sb, es, n_blocks_count)))
2346 err = ret;
2347 goto restore_opts; 2350 goto restore_opts;
2348 }
2349 if (!ext3_setup_super (sb, es, 0)) 2351 if (!ext3_setup_super (sb, es, 0))
2350 sb->s_flags &= ~MS_RDONLY; 2352 sb->s_flags &= ~MS_RDONLY;
2351 } 2353 }
@@ -2734,7 +2736,7 @@ static int __init init_ext3_fs(void)
2734out: 2736out:
2735 destroy_inodecache(); 2737 destroy_inodecache();
2736out1: 2738out1:
2737 exit_ext3_xattr(); 2739 exit_ext3_xattr();
2738 return err; 2740 return err;
2739} 2741}
2740 2742
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index a44a0562203a..f86f2482f01d 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -75,7 +75,7 @@
75 75
76#ifdef EXT3_XATTR_DEBUG 76#ifdef EXT3_XATTR_DEBUG
77# define ea_idebug(inode, f...) do { \ 77# define ea_idebug(inode, f...) do { \
78 printk(KERN_DEBUG "inode %s:%ld: ", \ 78 printk(KERN_DEBUG "inode %s:%lu: ", \
79 inode->i_sb->s_id, inode->i_ino); \ 79 inode->i_sb->s_id, inode->i_ino); \
80 printk(f); \ 80 printk(f); \
81 printk("\n"); \ 81 printk("\n"); \
@@ -233,7 +233,7 @@ ext3_xattr_block_get(struct inode *inode, int name_index, const char *name,
233 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); 233 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
234 if (ext3_xattr_check_block(bh)) { 234 if (ext3_xattr_check_block(bh)) {
235bad_block: ext3_error(inode->i_sb, __FUNCTION__, 235bad_block: ext3_error(inode->i_sb, __FUNCTION__,
236 "inode %ld: bad block "E3FSBLK, inode->i_ino, 236 "inode %lu: bad block "E3FSBLK, inode->i_ino,
237 EXT3_I(inode)->i_file_acl); 237 EXT3_I(inode)->i_file_acl);
238 error = -EIO; 238 error = -EIO;
239 goto cleanup; 239 goto cleanup;
@@ -375,7 +375,7 @@ ext3_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
375 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); 375 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
376 if (ext3_xattr_check_block(bh)) { 376 if (ext3_xattr_check_block(bh)) {
377 ext3_error(inode->i_sb, __FUNCTION__, 377 ext3_error(inode->i_sb, __FUNCTION__,
378 "inode %ld: bad block "E3FSBLK, inode->i_ino, 378 "inode %lu: bad block "E3FSBLK, inode->i_ino,
379 EXT3_I(inode)->i_file_acl); 379 EXT3_I(inode)->i_file_acl);
380 error = -EIO; 380 error = -EIO;
381 goto cleanup; 381 goto cleanup;
@@ -647,7 +647,7 @@ ext3_xattr_block_find(struct inode *inode, struct ext3_xattr_info *i,
647 le32_to_cpu(BHDR(bs->bh)->h_refcount)); 647 le32_to_cpu(BHDR(bs->bh)->h_refcount));
648 if (ext3_xattr_check_block(bs->bh)) { 648 if (ext3_xattr_check_block(bs->bh)) {
649 ext3_error(sb, __FUNCTION__, 649 ext3_error(sb, __FUNCTION__,
650 "inode %ld: bad block "E3FSBLK, inode->i_ino, 650 "inode %lu: bad block "E3FSBLK, inode->i_ino,
651 EXT3_I(inode)->i_file_acl); 651 EXT3_I(inode)->i_file_acl);
652 error = -EIO; 652 error = -EIO;
653 goto cleanup; 653 goto cleanup;
@@ -848,7 +848,7 @@ cleanup_dquot:
848 848
849bad_block: 849bad_block:
850 ext3_error(inode->i_sb, __FUNCTION__, 850 ext3_error(inode->i_sb, __FUNCTION__,
851 "inode %ld: bad block "E3FSBLK, inode->i_ino, 851 "inode %lu: bad block "E3FSBLK, inode->i_ino,
852 EXT3_I(inode)->i_file_acl); 852 EXT3_I(inode)->i_file_acl);
853 goto cleanup; 853 goto cleanup;
854 854
@@ -1077,14 +1077,14 @@ ext3_xattr_delete_inode(handle_t *handle, struct inode *inode)
1077 bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl); 1077 bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
1078 if (!bh) { 1078 if (!bh) {
1079 ext3_error(inode->i_sb, __FUNCTION__, 1079 ext3_error(inode->i_sb, __FUNCTION__,
1080 "inode %ld: block "E3FSBLK" read error", inode->i_ino, 1080 "inode %lu: block "E3FSBLK" read error", inode->i_ino,
1081 EXT3_I(inode)->i_file_acl); 1081 EXT3_I(inode)->i_file_acl);
1082 goto cleanup; 1082 goto cleanup;
1083 } 1083 }
1084 if (BHDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) || 1084 if (BHDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
1085 BHDR(bh)->h_blocks != cpu_to_le32(1)) { 1085 BHDR(bh)->h_blocks != cpu_to_le32(1)) {
1086 ext3_error(inode->i_sb, __FUNCTION__, 1086 ext3_error(inode->i_sb, __FUNCTION__,
1087 "inode %ld: bad block "E3FSBLK, inode->i_ino, 1087 "inode %lu: bad block "E3FSBLK, inode->i_ino,
1088 EXT3_I(inode)->i_file_acl); 1088 EXT3_I(inode)->i_file_acl);
1089 goto cleanup; 1089 goto cleanup;
1090 } 1090 }
@@ -1211,7 +1211,7 @@ again:
1211 bh = sb_bread(inode->i_sb, ce->e_block); 1211 bh = sb_bread(inode->i_sb, ce->e_block);
1212 if (!bh) { 1212 if (!bh) {
1213 ext3_error(inode->i_sb, __FUNCTION__, 1213 ext3_error(inode->i_sb, __FUNCTION__,
1214 "inode %ld: block %lu read error", 1214 "inode %lu: block %lu read error",
1215 inode->i_ino, (unsigned long) ce->e_block); 1215 inode->i_ino, (unsigned long) ce->e_block);
1216 } else if (le32_to_cpu(BHDR(bh)->h_refcount) >= 1216 } else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
1217 EXT3_XATTR_REFCOUNT_MAX) { 1217 EXT3_XATTR_REFCOUNT_MAX) {
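The format-string changes in this file (and in the super.c hunks above) match the type of i_ino, which is an unsigned long; with %ld an inode number above LONG_MAX would print as negative. The corrected pattern:

    printk(KERN_DEBUG "inode %s:%lu: ...\n", inode->i_sb->s_id, inode->i_ino);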
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index 97b967b84fc6..82cc4f59e3ba 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -58,8 +58,7 @@ int __init fat_cache_init(void)
58 58
59void fat_cache_destroy(void) 59void fat_cache_destroy(void)
60{ 60{
61 if (kmem_cache_destroy(fat_cache_cachep)) 61 kmem_cache_destroy(fat_cache_cachep);
62 printk(KERN_INFO "fat_cache: not all structures were freed\n");
63} 62}
64 63
65static inline struct fat_cache *fat_cache_alloc(struct inode *inode) 64static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 31b7174176ba..ab96ae823753 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -50,14 +50,14 @@ static int fat_add_cluster(struct inode *inode)
50 return err; 50 return err;
51} 51}
52 52
53static int __fat_get_blocks(struct inode *inode, sector_t iblock, 53static inline int __fat_get_block(struct inode *inode, sector_t iblock,
54 unsigned long *max_blocks, 54 unsigned long *max_blocks,
55 struct buffer_head *bh_result, int create) 55 struct buffer_head *bh_result, int create)
56{ 56{
57 struct super_block *sb = inode->i_sb; 57 struct super_block *sb = inode->i_sb;
58 struct msdos_sb_info *sbi = MSDOS_SB(sb); 58 struct msdos_sb_info *sbi = MSDOS_SB(sb);
59 sector_t phys;
60 unsigned long mapped_blocks; 59 unsigned long mapped_blocks;
60 sector_t phys;
61 int err, offset; 61 int err, offset;
62 62
63 err = fat_bmap(inode, iblock, &phys, &mapped_blocks); 63 err = fat_bmap(inode, iblock, &phys, &mapped_blocks);
@@ -73,7 +73,7 @@ static int __fat_get_blocks(struct inode *inode, sector_t iblock,
73 73
74 if (iblock != MSDOS_I(inode)->mmu_private >> sb->s_blocksize_bits) { 74 if (iblock != MSDOS_I(inode)->mmu_private >> sb->s_blocksize_bits) {
75 fat_fs_panic(sb, "corrupted file size (i_pos %lld, %lld)", 75 fat_fs_panic(sb, "corrupted file size (i_pos %lld, %lld)",
76 MSDOS_I(inode)->i_pos, MSDOS_I(inode)->mmu_private); 76 MSDOS_I(inode)->i_pos, MSDOS_I(inode)->mmu_private);
77 return -EIO; 77 return -EIO;
78 } 78 }
79 79
@@ -93,34 +93,29 @@ static int __fat_get_blocks(struct inode *inode, sector_t iblock,
93 err = fat_bmap(inode, iblock, &phys, &mapped_blocks); 93 err = fat_bmap(inode, iblock, &phys, &mapped_blocks);
94 if (err) 94 if (err)
95 return err; 95 return err;
96
96 BUG_ON(!phys); 97 BUG_ON(!phys);
97 BUG_ON(*max_blocks != mapped_blocks); 98 BUG_ON(*max_blocks != mapped_blocks);
98 set_buffer_new(bh_result); 99 set_buffer_new(bh_result);
99 map_bh(bh_result, sb, phys); 100 map_bh(bh_result, sb, phys);
101
100 return 0; 102 return 0;
101} 103}
102 104
103static int fat_get_blocks(struct inode *inode, sector_t iblock, 105static int fat_get_block(struct inode *inode, sector_t iblock,
104 struct buffer_head *bh_result, int create) 106 struct buffer_head *bh_result, int create)
105{ 107{
106 struct super_block *sb = inode->i_sb; 108 struct super_block *sb = inode->i_sb;
107 int err;
108 unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits; 109 unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
110 int err;
109 111
110 err = __fat_get_blocks(inode, iblock, &max_blocks, bh_result, create); 112 err = __fat_get_block(inode, iblock, &max_blocks, bh_result, create);
111 if (err) 113 if (err)
112 return err; 114 return err;
113 bh_result->b_size = max_blocks << sb->s_blocksize_bits; 115 bh_result->b_size = max_blocks << sb->s_blocksize_bits;
114 return 0; 116 return 0;
115} 117}
116 118
117static int fat_get_block(struct inode *inode, sector_t iblock,
118 struct buffer_head *bh_result, int create)
119{
120 unsigned long max_blocks = 1;
121 return __fat_get_blocks(inode, iblock, &max_blocks, bh_result, create);
122}
123
124static int fat_writepage(struct page *page, struct writeback_control *wbc) 119static int fat_writepage(struct page *page, struct writeback_control *wbc)
125{ 120{
126 return block_write_full_page(page, fat_get_block, wbc); 121 return block_write_full_page(page, fat_get_block, wbc);
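The three FAT mapping helpers collapse into __fat_get_block() plus a single fat_get_block() wrapper: the request size now travels in bh_result->b_size rather than through a separate single-block entry point, so the same callback serves writepage, prepare_write and (per the hunk below) direct I/O. The surviving wrapper, condensed from the diff:

    static int fat_get_block(struct inode *inode, sector_t iblock,
                             struct buffer_head *bh_result, int create)
    {
        unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
        int err;

        err = __fat_get_block(inode, iblock, &max_blocks, bh_result, create);
        if (err)
            return err;
        /* report how much was actually mapped back through b_size */
        bh_result->b_size = max_blocks << inode->i_sb->s_blocksize_bits;
        return 0;
    }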
@@ -188,7 +183,7 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
188 * condition of fat_get_block() and ->truncate(). 183 * condition of fat_get_block() and ->truncate().
189 */ 184 */
190 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 185 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
191 offset, nr_segs, fat_get_blocks, NULL); 186 offset, nr_segs, fat_get_block, NULL);
192} 187}
193 188
194static sector_t _fat_bmap(struct address_space *mapping, sector_t block) 189static sector_t _fat_bmap(struct address_space *mapping, sector_t block)
@@ -375,8 +370,6 @@ static int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
375 inode->i_flags |= S_IMMUTABLE; 370 inode->i_flags |= S_IMMUTABLE;
376 } 371 }
377 MSDOS_I(inode)->i_attrs = de->attr & ATTR_UNUSED; 372 MSDOS_I(inode)->i_attrs = de->attr & ATTR_UNUSED;
378 /* this is as close to the truth as we can get ... */
379 inode->i_blksize = sbi->cluster_size;
380 inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1)) 373 inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1))
381 & ~((loff_t)sbi->cluster_size - 1)) >> 9; 374 & ~((loff_t)sbi->cluster_size - 1)) >> 9;
382 inode->i_mtime.tv_sec = 375 inode->i_mtime.tv_sec =
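This hunk and many below (fat root inode, hfs, hfsplus, hostfs, hpfs, hppfs, hugetlbfs, fuse, isofs, freevxfs) delete the i_blksize assignments, consistent with the field being dropped from struct inode in this series. On that assumption, a stat()-style block-size hint would instead be derived from the inode's block shift, for example:

    /* hypothetical call site, not taken from this diff */
    stat->blksize = 1u << inode->i_blkbits;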
@@ -528,8 +521,7 @@ static int __init fat_init_inodecache(void)
528 521
529static void __exit fat_destroy_inodecache(void) 522static void __exit fat_destroy_inodecache(void)
530{ 523{
531 if (kmem_cache_destroy(fat_inode_cachep)) 524 kmem_cache_destroy(fat_inode_cachep);
532 printk(KERN_INFO "fat_inode_cache: not all structures were freed\n");
533} 525}
534 526
535static int fat_remount(struct super_block *sb, int *flags, char *data) 527static int fat_remount(struct super_block *sb, int *flags, char *data)
@@ -1137,7 +1129,6 @@ static int fat_read_root(struct inode *inode)
1137 MSDOS_I(inode)->i_start = 0; 1129 MSDOS_I(inode)->i_start = 0;
1138 inode->i_size = sbi->dir_entries * sizeof(struct msdos_dir_entry); 1130 inode->i_size = sbi->dir_entries * sizeof(struct msdos_dir_entry);
1139 } 1131 }
1140 inode->i_blksize = sbi->cluster_size;
1141 inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1)) 1132 inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1))
1142 & ~((loff_t)sbi->cluster_size - 1)) >> 9; 1133 & ~((loff_t)sbi->cluster_size - 1)) >> 9;
1143 MSDOS_I(inode)->i_logstart = 0; 1134 MSDOS_I(inode)->i_logstart = 0;
@@ -1168,11 +1159,10 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
1168 long error; 1159 long error;
1169 char buf[50]; 1160 char buf[50];
1170 1161
1171 sbi = kmalloc(sizeof(struct msdos_sb_info), GFP_KERNEL); 1162 sbi = kzalloc(sizeof(struct msdos_sb_info), GFP_KERNEL);
1172 if (!sbi) 1163 if (!sbi)
1173 return -ENOMEM; 1164 return -ENOMEM;
1174 sb->s_fs_info = sbi; 1165 sb->s_fs_info = sbi;
1175 memset(sbi, 0, sizeof(struct msdos_sb_info));
1176 1166
1177 sb->s_flags |= MS_NODIRATIME; 1167 sb->s_flags |= MS_NODIRATIME;
1178 sb->s_magic = MSDOS_SUPER_MAGIC; 1168 sb->s_magic = MSDOS_SUPER_MAGIC;
diff --git a/fs/file.c b/fs/file.c
index b3c6b82e6a9d..8d3bfca7714b 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -281,10 +281,8 @@ static struct fdtable *alloc_fdtable(int nr)
281out2: 281out2:
282 nfds = fdt->max_fdset; 282 nfds = fdt->max_fdset;
283out: 283out:
284 if (new_openset) 284 free_fdset(new_openset, nfds);
285 free_fdset(new_openset, nfds); 285 free_fdset(new_execset, nfds);
286 if (new_execset)
287 free_fdset(new_execset, nfds);
288 kfree(fdt); 286 kfree(fdt);
289 return NULL; 287 return NULL;
290} 288}
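Dropping the NULL checks before free_fdset() assumes kfree()-like semantics, i.e. the helper simply returns when handed a NULL set; that is an inference from this hunk rather than something the diff shows. The failure path then reduces to:

    free_fdset(new_openset, nfds);   /* safe even if the allocation failed */
    free_fdset(new_execset, nfds);
    kfree(fdt);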
diff --git a/fs/file_table.c b/fs/file_table.c
index 0131ba06e1ee..bc35a40417d7 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -169,7 +169,7 @@ void fastcall __fput(struct file *file)
169 if (file->f_op && file->f_op->release) 169 if (file->f_op && file->f_op->release)
170 file->f_op->release(inode, file); 170 file->f_op->release(inode, file);
171 security_file_free(file); 171 security_file_free(file);
172 if (unlikely(inode->i_cdev != NULL)) 172 if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
173 cdev_put(inode->i_cdev); 173 cdev_put(inode->i_cdev);
174 fops_put(file->f_op); 174 fops_put(file->f_op);
175 if (file->f_mode & FMODE_WRITE) 175 if (file->f_mode & FMODE_WRITE)
diff --git a/fs/freevxfs/vxfs.h b/fs/freevxfs/vxfs.h
index d35979a58743..c8a92652612a 100644
--- a/fs/freevxfs/vxfs.h
+++ b/fs/freevxfs/vxfs.h
@@ -252,7 +252,7 @@ enum {
252 * Get filesystem private data from VFS inode. 252 * Get filesystem private data from VFS inode.
253 */ 253 */
254#define VXFS_INO(ip) \ 254#define VXFS_INO(ip) \
255 ((struct vxfs_inode_info *)(ip)->u.generic_ip) 255 ((struct vxfs_inode_info *)(ip)->i_private)
256 256
257/* 257/*
258 * Get filesystem private data from VFS superblock. 258 * Get filesystem private data from VFS superblock.
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index ca6a39714771..4786d51ad3bd 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -239,11 +239,10 @@ vxfs_iinit(struct inode *ip, struct vxfs_inode_info *vip)
239 ip->i_ctime.tv_nsec = 0; 239 ip->i_ctime.tv_nsec = 0;
240 ip->i_mtime.tv_nsec = 0; 240 ip->i_mtime.tv_nsec = 0;
241 241
242 ip->i_blksize = PAGE_SIZE;
243 ip->i_blocks = vip->vii_blocks; 242 ip->i_blocks = vip->vii_blocks;
244 ip->i_generation = vip->vii_gen; 243 ip->i_generation = vip->vii_gen;
245 244
246 ip->u.generic_ip = (void *)vip; 245 ip->i_private = vip;
247 246
248} 247}
249 248
@@ -338,5 +337,5 @@ vxfs_read_inode(struct inode *ip)
338void 337void
339vxfs_clear_inode(struct inode *ip) 338vxfs_clear_inode(struct inode *ip)
340{ 339{
341 kmem_cache_free(vxfs_inode_cachep, ip->u.generic_ip); 340 kmem_cache_free(vxfs_inode_cachep, ip->i_private);
342} 341}
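Here and in the fuse hunks below, the per-filesystem pointer moves from the old inode union member u.generic_ip to the dedicated i_private field (fs/inode.c further down initialises i_private instead of zeroing the union). The idiom is a plain store and load through a void pointer:

    ip->i_private = vip;                            /* attach private data */
    struct vxfs_inode_info *info = ip->i_private;   /* and read it back    */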
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 46fe60b2da23..79ec1f23d4d2 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -23,7 +23,7 @@ static struct fuse_conn *fuse_ctl_file_conn_get(struct file *file)
23{ 23{
24 struct fuse_conn *fc; 24 struct fuse_conn *fc;
25 mutex_lock(&fuse_mutex); 25 mutex_lock(&fuse_mutex);
26 fc = file->f_dentry->d_inode->u.generic_ip; 26 fc = file->f_dentry->d_inode->i_private;
27 if (fc) 27 if (fc)
28 fc = fuse_conn_get(fc); 28 fc = fuse_conn_get(fc);
29 mutex_unlock(&fuse_mutex); 29 mutex_unlock(&fuse_mutex);
@@ -98,7 +98,7 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
98 inode->i_op = iop; 98 inode->i_op = iop;
99 inode->i_fop = fop; 99 inode->i_fop = fop;
100 inode->i_nlink = nlink; 100 inode->i_nlink = nlink;
101 inode->u.generic_ip = fc; 101 inode->i_private = fc;
102 d_add(dentry, inode); 102 d_add(dentry, inode);
103 return dentry; 103 return dentry;
104} 104}
@@ -150,7 +150,7 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc)
150 150
151 for (i = fc->ctl_ndents - 1; i >= 0; i--) { 151 for (i = fc->ctl_ndents - 1; i >= 0; i--) {
152 struct dentry *dentry = fc->ctl_dentry[i]; 152 struct dentry *dentry = fc->ctl_dentry[i];
153 dentry->d_inode->u.generic_ip = NULL; 153 dentry->d_inode->i_private = NULL;
154 d_drop(dentry); 154 d_drop(dentry);
155 dput(dentry); 155 dput(dentry);
156 } 156 }
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 7d25092262ae..cb7cadb0b790 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -118,7 +118,6 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr)
118 inode->i_uid = attr->uid; 118 inode->i_uid = attr->uid;
119 inode->i_gid = attr->gid; 119 inode->i_gid = attr->gid;
120 i_size_write(inode, attr->size); 120 i_size_write(inode, attr->size);
121 inode->i_blksize = PAGE_CACHE_SIZE;
122 inode->i_blocks = attr->blocks; 121 inode->i_blocks = attr->blocks;
123 inode->i_atime.tv_sec = attr->atime; 122 inode->i_atime.tv_sec = attr->atime;
124 inode->i_atime.tv_nsec = attr->atimensec; 123 inode->i_atime.tv_nsec = attr->atimensec;
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index 13231dd5ce66..0d200068d0af 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -249,10 +249,9 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
249 sb = tree->inode->i_sb; 249 sb = tree->inode->i_sb;
250 size = sizeof(struct hfs_bnode) + tree->pages_per_bnode * 250 size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
251 sizeof(struct page *); 251 sizeof(struct page *);
252 node = kmalloc(size, GFP_KERNEL); 252 node = kzalloc(size, GFP_KERNEL);
253 if (!node) 253 if (!node)
254 return NULL; 254 return NULL;
255 memset(node, 0, size);
256 node->tree = tree; 255 node->tree = tree;
257 node->this = cnid; 256 node->this = cnid;
258 set_bit(HFS_BNODE_NEW, &node->flags); 257 set_bit(HFS_BNODE_NEW, &node->flags);
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 400357994319..5fd0ed71f923 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -21,10 +21,9 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
21 struct page *page; 21 struct page *page;
22 unsigned int size; 22 unsigned int size;
23 23
24 tree = kmalloc(sizeof(*tree), GFP_KERNEL); 24 tree = kzalloc(sizeof(*tree), GFP_KERNEL);
25 if (!tree) 25 if (!tree)
26 return NULL; 26 return NULL;
27 memset(tree, 0, sizeof(*tree));
28 27
29 init_MUTEX(&tree->tree_lock); 28 init_MUTEX(&tree->tree_lock);
30 spin_lock_init(&tree->hash_lock); 29 spin_lock_init(&tree->hash_lock);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 315cf44a90b2..d05641c35fc9 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -154,7 +154,6 @@ struct inode *hfs_new_inode(struct inode *dir, struct qstr *name, int mode)
154 inode->i_gid = current->fsgid; 154 inode->i_gid = current->fsgid;
155 inode->i_nlink = 1; 155 inode->i_nlink = 1;
156 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; 156 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
157 inode->i_blksize = HFS_SB(sb)->alloc_blksz;
158 HFS_I(inode)->flags = 0; 157 HFS_I(inode)->flags = 0;
159 HFS_I(inode)->rsrc_inode = NULL; 158 HFS_I(inode)->rsrc_inode = NULL;
160 HFS_I(inode)->fs_blocks = 0; 159 HFS_I(inode)->fs_blocks = 0;
@@ -284,7 +283,6 @@ static int hfs_read_inode(struct inode *inode, void *data)
284 inode->i_uid = hsb->s_uid; 283 inode->i_uid = hsb->s_uid;
285 inode->i_gid = hsb->s_gid; 284 inode->i_gid = hsb->s_gid;
286 inode->i_nlink = 1; 285 inode->i_nlink = 1;
287 inode->i_blksize = HFS_SB(inode->i_sb)->alloc_blksz;
288 286
289 if (idata->key) 287 if (idata->key)
290 HFS_I(inode)->cat_key = *idata->key; 288 HFS_I(inode)->cat_key = *idata->key;
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 34937ee83ab1..d43b4fcc8ad3 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -356,11 +356,10 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
356 struct inode *root_inode; 356 struct inode *root_inode;
357 int res; 357 int res;
358 358
359 sbi = kmalloc(sizeof(struct hfs_sb_info), GFP_KERNEL); 359 sbi = kzalloc(sizeof(struct hfs_sb_info), GFP_KERNEL);
360 if (!sbi) 360 if (!sbi)
361 return -ENOMEM; 361 return -ENOMEM;
362 sb->s_fs_info = sbi; 362 sb->s_fs_info = sbi;
363 memset(sbi, 0, sizeof(struct hfs_sb_info));
364 INIT_HLIST_HEAD(&sbi->rsrc_inodes); 363 INIT_HLIST_HEAD(&sbi->rsrc_inodes);
365 364
366 res = -EINVAL; 365 res = -EINVAL;
@@ -455,8 +454,7 @@ static int __init init_hfs_fs(void)
455static void __exit exit_hfs_fs(void) 454static void __exit exit_hfs_fs(void)
456{ 455{
457 unregister_filesystem(&hfs_fs_type); 456 unregister_filesystem(&hfs_fs_type);
458 if (kmem_cache_destroy(hfs_inode_cachep)) 457 kmem_cache_destroy(hfs_inode_cachep);
459 printk(KERN_ERR "hfs_inode_cache: not all structures were freed\n");
460} 458}
461 459
462module_init(init_hfs_fs) 460module_init(init_hfs_fs)
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 77bf434da679..29da6574ba77 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -409,10 +409,9 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
409 sb = tree->inode->i_sb; 409 sb = tree->inode->i_sb;
410 size = sizeof(struct hfs_bnode) + tree->pages_per_bnode * 410 size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
411 sizeof(struct page *); 411 sizeof(struct page *);
412 node = kmalloc(size, GFP_KERNEL); 412 node = kzalloc(size, GFP_KERNEL);
413 if (!node) 413 if (!node)
414 return NULL; 414 return NULL;
415 memset(node, 0, size);
416 node->tree = tree; 415 node->tree = tree;
417 node->this = cnid; 416 node->this = cnid;
418 set_bit(HFS_BNODE_NEW, &node->flags); 417 set_bit(HFS_BNODE_NEW, &node->flags);
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index cfc852fdd1b5..a9b9e872e29a 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -24,10 +24,9 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
24 struct page *page; 24 struct page *page;
25 unsigned int size; 25 unsigned int size;
26 26
27 tree = kmalloc(sizeof(*tree), GFP_KERNEL); 27 tree = kzalloc(sizeof(*tree), GFP_KERNEL);
28 if (!tree) 28 if (!tree)
29 return NULL; 29 return NULL;
30 memset(tree, 0, sizeof(*tree));
31 30
32 init_MUTEX(&tree->tree_lock); 31 init_MUTEX(&tree->tree_lock);
33 spin_lock_init(&tree->hash_lock); 32 spin_lock_init(&tree->hash_lock);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 924ecdef8091..0eb1a6092668 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -304,7 +304,6 @@ struct inode *hfsplus_new_inode(struct super_block *sb, int mode)
304 inode->i_gid = current->fsgid; 304 inode->i_gid = current->fsgid;
305 inode->i_nlink = 1; 305 inode->i_nlink = 1;
306 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; 306 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
307 inode->i_blksize = HFSPLUS_SB(sb).alloc_blksz;
308 INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list); 307 INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
309 init_MUTEX(&HFSPLUS_I(inode).extents_lock); 308 init_MUTEX(&HFSPLUS_I(inode).extents_lock);
310 atomic_set(&HFSPLUS_I(inode).opencnt, 0); 309 atomic_set(&HFSPLUS_I(inode).opencnt, 0);
@@ -407,7 +406,6 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
407 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset); 406 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
408 407
409 HFSPLUS_I(inode).dev = 0; 408 HFSPLUS_I(inode).dev = 0;
410 inode->i_blksize = HFSPLUS_SB(inode->i_sb).alloc_blksz;
411 if (type == HFSPLUS_FOLDER) { 409 if (type == HFSPLUS_FOLDER) {
412 struct hfsplus_cat_folder *folder = &entry.folder; 410 struct hfsplus_cat_folder *folder = &entry.folder;
413 411
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index d279d5924f28..194eede52fa4 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -493,8 +493,7 @@ static int __init init_hfsplus_fs(void)
493static void __exit exit_hfsplus_fs(void) 493static void __exit exit_hfsplus_fs(void)
494{ 494{
495 unregister_filesystem(&hfsplus_fs_type); 495 unregister_filesystem(&hfsplus_fs_type);
496 if (kmem_cache_destroy(hfsplus_inode_cachep)) 496 kmem_cache_destroy(hfsplus_inode_cachep);
497 printk(KERN_ERR "hfsplus_inode_cache: not all structures were freed\n");
498} 497}
499 498
500module_init(init_hfsplus_fs) 499module_init(init_hfsplus_fs)
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index b82e3d9c8790..322e876c35ed 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -156,7 +156,6 @@ static int read_name(struct inode *ino, char *name)
156 ino->i_mode = i_mode; 156 ino->i_mode = i_mode;
157 ino->i_nlink = i_nlink; 157 ino->i_nlink = i_nlink;
158 ino->i_size = i_size; 158 ino->i_size = i_size;
159 ino->i_blksize = i_blksize;
160 ino->i_blocks = i_blocks; 159 ino->i_blocks = i_blocks;
161 return(0); 160 return(0);
162} 161}
diff --git a/fs/hpfs/buffer.c b/fs/hpfs/buffer.c
index 2807aa833e62..b52b7381d10f 100644
--- a/fs/hpfs/buffer.c
+++ b/fs/hpfs/buffer.c
@@ -76,7 +76,7 @@ void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffe
76 return NULL; 76 return NULL;
77 } 77 }
78 78
79 qbh->data = data = (char *)kmalloc(2048, GFP_NOFS); 79 qbh->data = data = kmalloc(2048, GFP_NOFS);
80 if (!data) { 80 if (!data) {
81 printk("HPFS: hpfs_map_4sectors: out of memory\n"); 81 printk("HPFS: hpfs_map_4sectors: out of memory\n");
82 goto bail; 82 goto bail;
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 56f2c338c4d9..bcf6ee36e065 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -17,7 +17,6 @@ void hpfs_init_inode(struct inode *i)
17 i->i_gid = hpfs_sb(sb)->sb_gid; 17 i->i_gid = hpfs_sb(sb)->sb_gid;
18 i->i_mode = hpfs_sb(sb)->sb_mode; 18 i->i_mode = hpfs_sb(sb)->sb_mode;
19 hpfs_inode->i_conv = hpfs_sb(sb)->sb_conv; 19 hpfs_inode->i_conv = hpfs_sb(sb)->sb_conv;
20 i->i_blksize = 512;
21 i->i_size = -1; 20 i->i_size = -1;
22 i->i_blocks = -1; 21 i->i_blocks = -1;
23 22
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 8fe51c343786..450b5e0b4785 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -203,8 +203,7 @@ static int init_inodecache(void)
203 203
204static void destroy_inodecache(void) 204static void destroy_inodecache(void)
205{ 205{
206 if (kmem_cache_destroy(hpfs_inode_cachep)) 206 kmem_cache_destroy(hpfs_inode_cachep);
207 printk(KERN_INFO "hpfs_inode_cache: not all structures were freed\n");
208} 207}
209 208
210/* 209/*
@@ -462,11 +461,10 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
462 461
463 int o; 462 int o;
464 463
465 sbi = kmalloc(sizeof(*sbi), GFP_KERNEL); 464 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
466 if (!sbi) 465 if (!sbi)
467 return -ENOMEM; 466 return -ENOMEM;
468 s->s_fs_info = sbi; 467 s->s_fs_info = sbi;
469 memset(sbi, 0, sizeof(*sbi));
470 468
471 sbi->sb_bmp_dir = NULL; 469 sbi->sb_bmp_dir = NULL;
472 sbi->sb_cp_table = NULL; 470 sbi->sb_cp_table = NULL;
diff --git a/fs/hppfs/hppfs_kern.c b/fs/hppfs/hppfs_kern.c
index 3a9bdf58166f..dcb6d2e988b8 100644
--- a/fs/hppfs/hppfs_kern.c
+++ b/fs/hppfs/hppfs_kern.c
@@ -152,7 +152,6 @@ static void hppfs_read_inode(struct inode *ino)
152 ino->i_mode = proc_ino->i_mode; 152 ino->i_mode = proc_ino->i_mode;
153 ino->i_nlink = proc_ino->i_nlink; 153 ino->i_nlink = proc_ino->i_nlink;
154 ino->i_size = proc_ino->i_size; 154 ino->i_size = proc_ino->i_size;
155 ino->i_blksize = proc_ino->i_blksize;
156 ino->i_blocks = proc_ino->i_blocks; 155 ino->i_blocks = proc_ino->i_blocks;
157} 156}
158 157
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index c3920c96dadf..e025a31b4c64 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -357,7 +357,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
357 inode->i_mode = mode; 357 inode->i_mode = mode;
358 inode->i_uid = uid; 358 inode->i_uid = uid;
359 inode->i_gid = gid; 359 inode->i_gid = gid;
360 inode->i_blksize = HPAGE_SIZE;
361 inode->i_blocks = 0; 360 inode->i_blocks = 0;
362 inode->i_mapping->a_ops = &hugetlbfs_aops; 361 inode->i_mapping->a_ops = &hugetlbfs_aops;
363 inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info; 362 inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info;
diff --git a/fs/inode.c b/fs/inode.c
index 0bf9f0444a96..f5c04dd9ae8a 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -163,7 +163,7 @@ static struct inode *alloc_inode(struct super_block *sb)
163 bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info; 163 bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
164 mapping->backing_dev_info = bdi; 164 mapping->backing_dev_info = bdi;
165 } 165 }
166 memset(&inode->u, 0, sizeof(inode->u)); 166 inode->i_private = 0;
167 inode->i_mapping = mapping; 167 inode->i_mapping = mapping;
168 } 168 }
169 return inode; 169 return inode;
@@ -254,9 +254,9 @@ void clear_inode(struct inode *inode)
254 DQUOT_DROP(inode); 254 DQUOT_DROP(inode);
255 if (inode->i_sb && inode->i_sb->s_op->clear_inode) 255 if (inode->i_sb && inode->i_sb->s_op->clear_inode)
256 inode->i_sb->s_op->clear_inode(inode); 256 inode->i_sb->s_op->clear_inode(inode);
257 if (inode->i_bdev) 257 if (S_ISBLK(inode->i_mode) && inode->i_bdev)
258 bd_forget(inode); 258 bd_forget(inode);
259 if (inode->i_cdev) 259 if (S_ISCHR(inode->i_mode) && inode->i_cdev)
260 cd_forget(inode); 260 cd_forget(inode);
261 inode->i_state = I_CLEAR; 261 inode->i_state = I_CLEAR;
262} 262}
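clear_inode() now tests the inode mode before touching i_bdev or i_cdev, and the __fput() hunk above adds the matching S_ISCHR check; this reads as a guard for fields that are only meaningful on device inodes (and that may share storage with other members, though the diff does not show the struct layout, so that part is an assumption). Condensed:

    if (S_ISBLK(inode->i_mode) && inode->i_bdev)
        bd_forget(inode);
    if (S_ISCHR(inode->i_mode) && inode->i_cdev)
        cd_forget(inode);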
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 14391361c886..4527692f432b 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -96,9 +96,7 @@ static int init_inodecache(void)
96 96
97static void destroy_inodecache(void) 97static void destroy_inodecache(void)
98{ 98{
99 if (kmem_cache_destroy(isofs_inode_cachep)) 99 kmem_cache_destroy(isofs_inode_cachep);
100 printk(KERN_INFO "iso_inode_cache: not all structures were "
101 "freed\n");
102} 100}
103 101
104static int isofs_remount(struct super_block *sb, int *flags, char *data) 102static int isofs_remount(struct super_block *sb, int *flags, char *data)
@@ -557,11 +555,10 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
557 struct iso9660_options opt; 555 struct iso9660_options opt;
558 struct isofs_sb_info * sbi; 556 struct isofs_sb_info * sbi;
559 557
560 sbi = kmalloc(sizeof(*sbi), GFP_KERNEL); 558 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
561 if (!sbi) 559 if (!sbi)
562 return -ENOMEM; 560 return -ENOMEM;
563 s->s_fs_info = sbi; 561 s->s_fs_info = sbi;
564 memset(sbi, 0, sizeof(*sbi));
565 562
566 if (!parse_options((char *)data, &opt)) 563 if (!parse_options((char *)data, &opt))
567 goto out_freesbi; 564 goto out_freesbi;
@@ -1238,7 +1235,7 @@ static void isofs_read_inode(struct inode *inode)
1238 } 1235 }
1239 inode->i_uid = sbi->s_uid; 1236 inode->i_uid = sbi->s_uid;
1240 inode->i_gid = sbi->s_gid; 1237 inode->i_gid = sbi->s_gid;
1241 inode->i_blocks = inode->i_blksize = 0; 1238 inode->i_blocks = 0;
1242 1239
1243 ei->i_format_parm[0] = 0; 1240 ei->i_format_parm[0] = 0;
1244 ei->i_format_parm[1] = 0; 1241 ei->i_format_parm[1] = 0;
@@ -1294,7 +1291,6 @@ static void isofs_read_inode(struct inode *inode)
1294 isonum_711 (de->ext_attr_length)); 1291 isonum_711 (de->ext_attr_length));
1295 1292
1296 /* Set the number of blocks for stat() - should be done before RR */ 1293 /* Set the number of blocks for stat() - should be done before RR */
1297 inode->i_blksize = PAGE_CACHE_SIZE; /* For stat() only */
1298 inode->i_blocks = (inode->i_size + 511) >> 9; 1294 inode->i_blocks = (inode->i_size + 511) >> 9;
1299 1295
1300 /* 1296 /*
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 47678a26c13b..0208cc7ac5d0 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * linux/fs/checkpoint.c 2 * linux/fs/checkpoint.c
3 * 3 *
4 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999 4 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
5 * 5 *
6 * Copyright 1999 Red Hat Software --- All Rights Reserved 6 * Copyright 1999 Red Hat Software --- All Rights Reserved
@@ -9,8 +9,8 @@
9 * the terms of the GNU General Public License, version 2, or at your 9 * the terms of the GNU General Public License, version 2, or at your
10 * option, any later version, incorporated herein by reference. 10 * option, any later version, incorporated herein by reference.
11 * 11 *
12 * Checkpoint routines for the generic filesystem journaling code. 12 * Checkpoint routines for the generic filesystem journaling code.
13 * Part of the ext2fs journaling system. 13 * Part of the ext2fs journaling system.
14 * 14 *
15 * Checkpointing is the process of ensuring that a section of the log is 15 * Checkpointing is the process of ensuring that a section of the log is
16 * committed fully to disk, so that that portion of the log can be 16 * committed fully to disk, so that that portion of the log can be
@@ -145,6 +145,7 @@ void __log_wait_for_space(journal_t *journal)
145 * jbd_unlock_bh_state(). 145 * jbd_unlock_bh_state().
146 */ 146 */
147static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh) 147static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh)
148 __releases(journal->j_list_lock)
148{ 149{
149 get_bh(bh); 150 get_bh(bh);
150 spin_unlock(&journal->j_list_lock); 151 spin_unlock(&journal->j_list_lock);
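The __releases() annotation documents, for sparse's lock-context checking, that jbd_sync_bh() returns with j_list_lock dropped, which matches the spin_unlock() in its body. The pattern, assuming sparse is the consumer of the annotation:

    static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh)
        __releases(journal->j_list_lock)
    {
        get_bh(bh);
        spin_unlock(&journal->j_list_lock);
        /* ... wait on or write the buffer, then drop the reference ... */
    }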
@@ -225,7 +226,7 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
225 * Try to flush one buffer from the checkpoint list to disk. 226 * Try to flush one buffer from the checkpoint list to disk.
226 * 227 *
227 * Return 1 if something happened which requires us to abort the current 228 * Return 1 if something happened which requires us to abort the current
228 * scan of the checkpoint list. 229 * scan of the checkpoint list.
229 * 230 *
230 * Called with j_list_lock held and drops it if 1 is returned 231 * Called with j_list_lock held and drops it if 1 is returned
231 * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it 232 * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
@@ -269,7 +270,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
269 * possibly block, while still holding the journal lock. 270 * possibly block, while still holding the journal lock.
270 * We cannot afford to let the transaction logic start 271 * We cannot afford to let the transaction logic start
271 * messing around with this buffer before we write it to 272 * messing around with this buffer before we write it to
272 * disk, as that would break recoverability. 273 * disk, as that would break recoverability.
273 */ 274 */
274 BUFFER_TRACE(bh, "queue"); 275 BUFFER_TRACE(bh, "queue");
275 get_bh(bh); 276 get_bh(bh);
@@ -292,7 +293,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
292 * Perform an actual checkpoint. We take the first transaction on the 293 * Perform an actual checkpoint. We take the first transaction on the
293 * list of transactions to be checkpointed and send all its buffers 294 * list of transactions to be checkpointed and send all its buffers
294 * to disk. We submit larger chunks of data at once. 295 * to disk. We submit larger chunks of data at once.
295 * 296 *
296 * The journal should be locked before calling this function. 297 * The journal should be locked before calling this function.
297 */ 298 */
298int log_do_checkpoint(journal_t *journal) 299int log_do_checkpoint(journal_t *journal)
@@ -303,10 +304,10 @@ int log_do_checkpoint(journal_t *journal)
303 304
304 jbd_debug(1, "Start checkpoint\n"); 305 jbd_debug(1, "Start checkpoint\n");
305 306
306 /* 307 /*
307 * First thing: if there are any transactions in the log which 308 * First thing: if there are any transactions in the log which
308 * don't need checkpointing, just eliminate them from the 309 * don't need checkpointing, just eliminate them from the
309 * journal straight away. 310 * journal straight away.
310 */ 311 */
311 result = cleanup_journal_tail(journal); 312 result = cleanup_journal_tail(journal);
312 jbd_debug(1, "cleanup_journal_tail returned %d\n", result); 313 jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
@@ -384,9 +385,9 @@ out:
384 * we have already got rid of any since the last update of the log tail 385 * we have already got rid of any since the last update of the log tail
385 * in the journal superblock. If so, we can instantly roll the 386 * in the journal superblock. If so, we can instantly roll the
386 * superblock forward to remove those transactions from the log. 387 * superblock forward to remove those transactions from the log.
387 * 388 *
388 * Return <0 on error, 0 on success, 1 if there was nothing to clean up. 389 * Return <0 on error, 0 on success, 1 if there was nothing to clean up.
389 * 390 *
390 * Called with the journal lock held. 391 * Called with the journal lock held.
391 * 392 *
392 * This is the only part of the journaling code which really needs to be 393 * This is the only part of the journaling code which really needs to be
@@ -403,8 +404,8 @@ int cleanup_journal_tail(journal_t *journal)
403 unsigned long blocknr, freed; 404 unsigned long blocknr, freed;
404 405
405 /* OK, work out the oldest transaction remaining in the log, and 406 /* OK, work out the oldest transaction remaining in the log, and
406 * the log block it starts at. 407 * the log block it starts at.
407 * 408 *
408 * If the log is now empty, we need to work out which is the 409 * If the log is now empty, we need to work out which is the
409 * next transaction ID we will write, and where it will 410 * next transaction ID we will write, and where it will
410 * start. */ 411 * start. */
@@ -479,7 +480,7 @@ static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
479 if (!jh) 480 if (!jh)
480 return 0; 481 return 0;
481 482
482 last_jh = jh->b_cpprev; 483 last_jh = jh->b_cpprev;
483 do { 484 do {
484 jh = next_jh; 485 jh = next_jh;
485 next_jh = jh->b_cpnext; 486 next_jh = jh->b_cpnext;
@@ -557,7 +558,7 @@ out:
557 return ret; 558 return ret;
558} 559}
559 560
560/* 561/*
561 * journal_remove_checkpoint: called after a buffer has been committed 562 * journal_remove_checkpoint: called after a buffer has been committed
562 * to disk (either by being write-back flushed to disk, or being 563 * to disk (either by being write-back flushed to disk, or being
563 * committed to the log). 564 * committed to the log).
@@ -635,7 +636,7 @@ out:
635 * Called with the journal locked. 636 * Called with the journal locked.
636 * Called with j_list_lock held. 637 * Called with j_list_lock held.
637 */ 638 */
638void __journal_insert_checkpoint(struct journal_head *jh, 639void __journal_insert_checkpoint(struct journal_head *jh,
639 transaction_t *transaction) 640 transaction_t *transaction)
640{ 641{
641 JBUFFER_TRACE(jh, "entry"); 642 JBUFFER_TRACE(jh, "entry");
@@ -657,7 +658,7 @@ void __journal_insert_checkpoint(struct journal_head *jh,
657 658
658/* 659/*
659 * We've finished with this transaction structure: adios... 660 * We've finished with this transaction structure: adios...
660 * 661 *
661 * The transaction must have no links except for the checkpoint by this 662 * The transaction must have no links except for the checkpoint by this
662 * point. 663 * point.
663 * 664 *
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 42da60784311..32a8caf0c41e 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -160,6 +160,117 @@ static int journal_write_commit_record(journal_t *journal,
160 return (ret == -EIO); 160 return (ret == -EIO);
161} 161}
162 162
163static void journal_do_submit_data(struct buffer_head **wbuf, int bufs)
164{
165 int i;
166
167 for (i = 0; i < bufs; i++) {
168 wbuf[i]->b_end_io = end_buffer_write_sync;
169 /* We use-up our safety reference in submit_bh() */
170 submit_bh(WRITE, wbuf[i]);
171 }
172}
173
174/*
175 * Submit all the data buffers to disk
176 */
177static void journal_submit_data_buffers(journal_t *journal,
178 transaction_t *commit_transaction)
179{
180 struct journal_head *jh;
181 struct buffer_head *bh;
182 int locked;
183 int bufs = 0;
184 struct buffer_head **wbuf = journal->j_wbuf;
185
186 /*
187 * Whenever we unlock the journal and sleep, things can get added
188 * onto ->t_sync_datalist, so we have to keep looping back to
189 * write_out_data until we *know* that the list is empty.
190 *
191 * Cleanup any flushed data buffers from the data list. Even in
192 * abort mode, we want to flush this out as soon as possible.
193 */
194write_out_data:
195 cond_resched();
196 spin_lock(&journal->j_list_lock);
197
198 while (commit_transaction->t_sync_datalist) {
199 jh = commit_transaction->t_sync_datalist;
200 bh = jh2bh(jh);
201 locked = 0;
202
203 /* Get reference just to make sure buffer does not disappear
204 * when we are forced to drop various locks */
205 get_bh(bh);
206 /* If the buffer is dirty, we need to submit IO and hence
207 * we need the buffer lock. We try to lock the buffer without
208 * blocking. If we fail, we need to drop j_list_lock and do
209 * blocking lock_buffer().
210 */
211 if (buffer_dirty(bh)) {
212 if (test_set_buffer_locked(bh)) {
213 BUFFER_TRACE(bh, "needs blocking lock");
214 spin_unlock(&journal->j_list_lock);
215 /* Write out all data to prevent deadlocks */
216 journal_do_submit_data(wbuf, bufs);
217 bufs = 0;
218 lock_buffer(bh);
219 spin_lock(&journal->j_list_lock);
220 }
221 locked = 1;
222 }
223 /* We have to get bh_state lock. Again out of order, sigh. */
224 if (!inverted_lock(journal, bh)) {
225 jbd_lock_bh_state(bh);
226 spin_lock(&journal->j_list_lock);
227 }
228 /* Someone already cleaned up the buffer? */
229 if (!buffer_jbd(bh)
230 || jh->b_transaction != commit_transaction
231 || jh->b_jlist != BJ_SyncData) {
232 jbd_unlock_bh_state(bh);
233 if (locked)
234 unlock_buffer(bh);
235 BUFFER_TRACE(bh, "already cleaned up");
236 put_bh(bh);
237 continue;
238 }
239 if (locked && test_clear_buffer_dirty(bh)) {
240 BUFFER_TRACE(bh, "needs writeout, adding to array");
241 wbuf[bufs++] = bh;
242 __journal_file_buffer(jh, commit_transaction,
243 BJ_Locked);
244 jbd_unlock_bh_state(bh);
245 if (bufs == journal->j_wbufsize) {
246 spin_unlock(&journal->j_list_lock);
247 journal_do_submit_data(wbuf, bufs);
248 bufs = 0;
249 goto write_out_data;
250 }
251 }
252 else {
253 BUFFER_TRACE(bh, "writeout complete: unfile");
254 __journal_unfile_buffer(jh);
255 jbd_unlock_bh_state(bh);
256 if (locked)
257 unlock_buffer(bh);
258 journal_remove_journal_head(bh);
259 /* Once for our safety reference, once for
260 * journal_remove_journal_head() */
261 put_bh(bh);
262 put_bh(bh);
263 }
264
265 if (lock_need_resched(&journal->j_list_lock)) {
266 spin_unlock(&journal->j_list_lock);
267 goto write_out_data;
268 }
269 }
270 spin_unlock(&journal->j_list_lock);
271 journal_do_submit_data(wbuf, bufs);
272}
273
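The new code above relies on inverted_lock(), a pre-existing helper in fs/jbd/commit.c that is not part of this hunk; it try-locks the bh_state lock and, if that fails, drops j_list_lock so the caller can retake both locks in the correct order. Roughly:

	/* Existing helper (approximate): returns 1 if bh_state was taken while
	 * still holding j_list_lock, 0 if j_list_lock had to be dropped. */
	static int inverted_lock(journal_t *journal, struct buffer_head *bh)
	{
		if (!jbd_trylock_bh_state(bh)) {
			spin_unlock(&journal->j_list_lock);
			schedule();
			return 0;
		}
		return 1;
	}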
163/* 274/*
164 * journal_commit_transaction 275 * journal_commit_transaction
165 * 276 *
@@ -313,80 +424,13 @@ void journal_commit_transaction(journal_t *journal)
313 * Now start flushing things to disk, in the order they appear 424 * Now start flushing things to disk, in the order they appear
314 * on the transaction lists. Data blocks go first. 425 * on the transaction lists. Data blocks go first.
315 */ 426 */
316
317 err = 0; 427 err = 0;
318 /* 428 journal_submit_data_buffers(journal, commit_transaction);
319 * Whenever we unlock the journal and sleep, things can get added
320 * onto ->t_sync_datalist, so we have to keep looping back to
321 * write_out_data until we *know* that the list is empty.
322 */
323 bufs = 0;
324 /*
325 * Cleanup any flushed data buffers from the data list. Even in
326 * abort mode, we want to flush this out as soon as possible.
327 */
328write_out_data:
329 cond_resched();
330 spin_lock(&journal->j_list_lock);
331
332 while (commit_transaction->t_sync_datalist) {
333 struct buffer_head *bh;
334
335 jh = commit_transaction->t_sync_datalist;
336 commit_transaction->t_sync_datalist = jh->b_tnext;
337 bh = jh2bh(jh);
338 if (buffer_locked(bh)) {
339 BUFFER_TRACE(bh, "locked");
340 if (!inverted_lock(journal, bh))
341 goto write_out_data;
342 __journal_temp_unlink_buffer(jh);
343 __journal_file_buffer(jh, commit_transaction,
344 BJ_Locked);
345 jbd_unlock_bh_state(bh);
346 if (lock_need_resched(&journal->j_list_lock)) {
347 spin_unlock(&journal->j_list_lock);
348 goto write_out_data;
349 }
350 } else {
351 if (buffer_dirty(bh)) {
352 BUFFER_TRACE(bh, "start journal writeout");
353 get_bh(bh);
354 wbuf[bufs++] = bh;
355 if (bufs == journal->j_wbufsize) {
356 jbd_debug(2, "submit %d writes\n",
357 bufs);
358 spin_unlock(&journal->j_list_lock);
359 ll_rw_block(SWRITE, bufs, wbuf);
360 journal_brelse_array(wbuf, bufs);
361 bufs = 0;
362 goto write_out_data;
363 }
364 } else {
365 BUFFER_TRACE(bh, "writeout complete: unfile");
366 if (!inverted_lock(journal, bh))
367 goto write_out_data;
368 __journal_unfile_buffer(jh);
369 jbd_unlock_bh_state(bh);
370 journal_remove_journal_head(bh);
371 put_bh(bh);
372 if (lock_need_resched(&journal->j_list_lock)) {
373 spin_unlock(&journal->j_list_lock);
374 goto write_out_data;
375 }
376 }
377 }
378 }
379
380 if (bufs) {
381 spin_unlock(&journal->j_list_lock);
382 ll_rw_block(SWRITE, bufs, wbuf);
383 journal_brelse_array(wbuf, bufs);
384 spin_lock(&journal->j_list_lock);
385 }
386 429
387 /* 430 /*
388 * Wait for all previously submitted IO to complete. 431 * Wait for all previously submitted IO to complete.
389 */ 432 */
433 spin_lock(&journal->j_list_lock);
390 while (commit_transaction->t_locked_list) { 434 while (commit_transaction->t_locked_list) {
391 struct buffer_head *bh; 435 struct buffer_head *bh;
392 436
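The buffers that journal_submit_data_buffers() filed on BJ_Locked are then waited for in the t_locked_list loop whose start is shown above; the elided body is roughly the following (not changed by this patch):

		/* Approximate shape of the wait phase: take a buffer off
		 * t_locked_list, wait for its IO, and remember -EIO if it
		 * did not come back uptodate. */
		jh = commit_transaction->t_locked_list->b_tprev;
		bh = jh2bh(jh);
		get_bh(bh);
		if (buffer_locked(bh)) {
			spin_unlock(&journal->j_list_lock);
			wait_on_buffer(bh);
			if (unlikely(!buffer_uptodate(bh)))
				err = -EIO;
			spin_lock(&journal->j_list_lock);
		}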
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index f66724ce443a..2fc66c3e6681 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -181,7 +181,7 @@ loop:
181 transaction->t_expires)) 181 transaction->t_expires))
182 should_sleep = 0; 182 should_sleep = 0;
183 if (journal->j_flags & JFS_UNMOUNT) 183 if (journal->j_flags & JFS_UNMOUNT)
184 should_sleep = 0; 184 should_sleep = 0;
185 if (should_sleep) { 185 if (should_sleep) {
186 spin_unlock(&journal->j_state_lock); 186 spin_unlock(&journal->j_state_lock);
187 schedule(); 187 schedule();
@@ -271,7 +271,7 @@ static void journal_kill_thread(journal_t *journal)
271int journal_write_metadata_buffer(transaction_t *transaction, 271int journal_write_metadata_buffer(transaction_t *transaction,
272 struct journal_head *jh_in, 272 struct journal_head *jh_in,
273 struct journal_head **jh_out, 273 struct journal_head **jh_out,
274 int blocknr) 274 unsigned long blocknr)
275{ 275{
276 int need_copy_out = 0; 276 int need_copy_out = 0;
277 int done_copy_out = 0; 277 int done_copy_out = 0;
@@ -578,7 +578,7 @@ int journal_next_log_block(journal_t *journal, unsigned long *retp)
578 * this is a no-op. If needed, we can use j_blk_offset - everything is 578 * this is a no-op. If needed, we can use j_blk_offset - everything is
579 * ready. 579 * ready.
580 */ 580 */
581int journal_bmap(journal_t *journal, unsigned long blocknr, 581int journal_bmap(journal_t *journal, unsigned long blocknr,
582 unsigned long *retp) 582 unsigned long *retp)
583{ 583{
584 int err = 0; 584 int err = 0;
@@ -696,13 +696,13 @@ fail:
696 * @bdev: Block device on which to create the journal 696 * @bdev: Block device on which to create the journal
697 * @fs_dev: Device which holds the journalled filesystem for this journal. 697 * @fs_dev: Device which holds the journalled filesystem for this journal.
698 * @start: Block nr Start of journal. 698 * @start: Block nr Start of journal.
699 * @len: Lenght of the journal in blocks. 699 * @len: Length of the journal in blocks.
700 * @blocksize: blocksize of journalling device 700 * @blocksize: blocksize of journalling device
701 * @returns: a newly created journal_t * 701 * @returns: a newly created journal_t *
702 * 702 *
703 * journal_init_dev creates a journal which maps a fixed contiguous 703 * journal_init_dev creates a journal which maps a fixed contiguous
704 * range of blocks on an arbitrary block device. 704 * range of blocks on an arbitrary block device.
705 * 705 *
706 */ 706 */
707journal_t * journal_init_dev(struct block_device *bdev, 707journal_t * journal_init_dev(struct block_device *bdev,
708 struct block_device *fs_dev, 708 struct block_device *fs_dev,
@@ -739,11 +739,11 @@ journal_t * journal_init_dev(struct block_device *bdev,
739 739
740 return journal; 740 return journal;
741} 741}
742 742
743/** 743/**
744 * journal_t * journal_init_inode () - creates a journal which maps to an inode. 744 * journal_t * journal_init_inode () - creates a journal which maps to an inode.
745 * @inode: An inode to create the journal in 745 * @inode: An inode to create the journal in
746 * 746 *
747 * journal_init_inode creates a journal which maps an on-disk inode as 747 * journal_init_inode creates a journal which maps an on-disk inode as
748 * the journal. The inode must exist already, must support bmap() and 748 * the journal. The inode must exist already, must support bmap() and
749 * must have all data blocks preallocated. 749 * must have all data blocks preallocated.
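For orientation, a filesystem obtains its journal_t through one of these two constructors and then loads it; a minimal sketch of a caller (journal_inode, journal_bdev, fs_bdev, start, len and blocksize are illustrative names, not from this patch):

	journal_t *journal;

	if (journal_inode)			/* journal stored in an inode */
		journal = journal_init_inode(journal_inode);
	else					/* external journal device */
		journal = journal_init_dev(journal_bdev, fs_bdev,
					   start, len, blocksize);
	if (!journal)
		return -EIO;
	if (journal_load(journal)) {		/* replays the log if needed */
		journal_destroy(journal);
		return -EIO;
	}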
@@ -763,7 +763,7 @@ journal_t * journal_init_inode (struct inode *inode)
763 journal->j_inode = inode; 763 journal->j_inode = inode;
764 jbd_debug(1, 764 jbd_debug(1,
765 "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n", 765 "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
766 journal, inode->i_sb->s_id, inode->i_ino, 766 journal, inode->i_sb->s_id, inode->i_ino,
767 (long long) inode->i_size, 767 (long long) inode->i_size,
768 inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize); 768 inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
769 769
@@ -798,10 +798,10 @@ journal_t * journal_init_inode (struct inode *inode)
798 return journal; 798 return journal;
799} 799}
800 800
801/* 801/*
802 * If the journal init or create aborts, we need to mark the journal 802 * If the journal init or create aborts, we need to mark the journal
803 * superblock as being NULL to prevent the journal destroy from writing 803 * superblock as being NULL to prevent the journal destroy from writing
804 * back a bogus superblock. 804 * back a bogus superblock.
805 */ 805 */
806static void journal_fail_superblock (journal_t *journal) 806static void journal_fail_superblock (journal_t *journal)
807{ 807{
@@ -820,7 +820,7 @@ static void journal_fail_superblock (journal_t *journal)
820static int journal_reset(journal_t *journal) 820static int journal_reset(journal_t *journal)
821{ 821{
822 journal_superblock_t *sb = journal->j_superblock; 822 journal_superblock_t *sb = journal->j_superblock;
823 unsigned int first, last; 823 unsigned long first, last;
824 824
825 first = be32_to_cpu(sb->s_first); 825 first = be32_to_cpu(sb->s_first);
826 last = be32_to_cpu(sb->s_maxlen); 826 last = be32_to_cpu(sb->s_maxlen);
@@ -844,13 +844,13 @@ static int journal_reset(journal_t *journal)
844 return 0; 844 return 0;
845} 845}
846 846
847/** 847/**
848 * int journal_create() - Initialise the new journal file 848 * int journal_create() - Initialise the new journal file
849 * @journal: Journal to create. This structure must have been initialised 849 * @journal: Journal to create. This structure must have been initialised
850 * 850 *
851 * Given a journal_t structure which tells us which disk blocks we can 851 * Given a journal_t structure which tells us which disk blocks we can
852 * use, create a new journal superblock and initialise all of the 852 * use, create a new journal superblock and initialise all of the
853 * journal fields from scratch. 853 * journal fields from scratch.
854 **/ 854 **/
855int journal_create(journal_t *journal) 855int journal_create(journal_t *journal)
856{ 856{
@@ -915,7 +915,7 @@ int journal_create(journal_t *journal)
915 return journal_reset(journal); 915 return journal_reset(journal);
916} 916}
917 917
918/** 918/**
919 * void journal_update_superblock() - Update journal sb on disk. 919 * void journal_update_superblock() - Update journal sb on disk.
920 * @journal: The journal to update. 920 * @journal: The journal to update.
921 * @wait: Set to '0' if you don't want to wait for IO completion. 921 * @wait: Set to '0' if you don't want to wait for IO completion.
@@ -939,7 +939,7 @@ void journal_update_superblock(journal_t *journal, int wait)
939 journal->j_transaction_sequence) { 939 journal->j_transaction_sequence) {
940 jbd_debug(1,"JBD: Skipping superblock update on recovered sb " 940 jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
941 "(start %ld, seq %d, errno %d)\n", 941 "(start %ld, seq %d, errno %d)\n",
942 journal->j_tail, journal->j_tail_sequence, 942 journal->j_tail, journal->j_tail_sequence,
943 journal->j_errno); 943 journal->j_errno);
944 goto out; 944 goto out;
945 } 945 }
@@ -1062,7 +1062,7 @@ static int load_superblock(journal_t *journal)
1062/** 1062/**
1063 * int journal_load() - Read journal from disk. 1063 * int journal_load() - Read journal from disk.
1064 * @journal: Journal to act on. 1064 * @journal: Journal to act on.
1065 * 1065 *
1066 * Given a journal_t structure which tells us which disk blocks contain 1066 * Given a journal_t structure which tells us which disk blocks contain
1067 * a journal, read the journal from disk to initialise the in-memory 1067 * a journal, read the journal from disk to initialise the in-memory
1068 * structures. 1068 * structures.
@@ -1094,7 +1094,7 @@ int journal_load(journal_t *journal)
1094 /* 1094 /*
1095 * Create a slab for this blocksize 1095 * Create a slab for this blocksize
1096 */ 1096 */
1097 err = journal_create_jbd_slab(cpu_to_be32(sb->s_blocksize)); 1097 err = journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
1098 if (err) 1098 if (err)
1099 return err; 1099 return err;
1100 1100
@@ -1172,9 +1172,9 @@ void journal_destroy(journal_t *journal)
1172 * @compat: bitmask of compatible features 1172 * @compat: bitmask of compatible features
1173 * @ro: bitmask of features that force read-only mount 1173 * @ro: bitmask of features that force read-only mount
1174 * @incompat: bitmask of incompatible features 1174 * @incompat: bitmask of incompatible features
1175 * 1175 *
1176 * Check whether the journal uses all of a given set of 1176 * Check whether the journal uses all of a given set of
1177 * features. Return true (non-zero) if it does. 1177 * features. Return true (non-zero) if it does.
1178 **/ 1178 **/
1179 1179
1180int journal_check_used_features (journal_t *journal, unsigned long compat, 1180int journal_check_used_features (journal_t *journal, unsigned long compat,
@@ -1203,7 +1203,7 @@ int journal_check_used_features (journal_t *journal, unsigned long compat,
1203 * @compat: bitmask of compatible features 1203 * @compat: bitmask of compatible features
1204 * @ro: bitmask of features that force read-only mount 1204 * @ro: bitmask of features that force read-only mount
1205 * @incompat: bitmask of incompatible features 1205 * @incompat: bitmask of incompatible features
1206 * 1206 *
1207 * Check whether the journaling code supports the use of 1207 * Check whether the journaling code supports the use of
1208 * all of a given set of features on this journal. Return true 1208 * all of a given set of features on this journal. Return true
1209 * (non-zero) if it can. */ 1209 * (non-zero) if it can. */
@@ -1241,7 +1241,7 @@ int journal_check_available_features (journal_t *journal, unsigned long compat,
1241 * @incompat: bitmask of incompatible features 1241 * @incompat: bitmask of incompatible features
1242 * 1242 *
1243 * Mark a given journal feature as present on the 1243 * Mark a given journal feature as present on the
1244 * superblock. Returns true if the requested features could be set. 1244 * superblock. Returns true if the requested features could be set.
1245 * 1245 *
1246 */ 1246 */
1247 1247
@@ -1327,7 +1327,7 @@ static int journal_convert_superblock_v1(journal_t *journal,
1327/** 1327/**
1328 * int journal_flush () - Flush journal 1328 * int journal_flush () - Flush journal
1329 * @journal: Journal to act on. 1329 * @journal: Journal to act on.
1330 * 1330 *
1331 * Flush all data for a given journal to disk and empty the journal. 1331 * Flush all data for a given journal to disk and empty the journal.
1332 * Filesystems can use this when remounting readonly to ensure that 1332 * Filesystems can use this when remounting readonly to ensure that
1333 * recovery does not need to happen on remount. 1333 * recovery does not need to happen on remount.
@@ -1394,7 +1394,7 @@ int journal_flush(journal_t *journal)
1394 * int journal_wipe() - Wipe journal contents 1394 * int journal_wipe() - Wipe journal contents
1395 * @journal: Journal to act on. 1395 * @journal: Journal to act on.
1396 * @write: flag (see below) 1396 * @write: flag (see below)
1397 * 1397 *
1398 * Wipe out all of the contents of a journal, safely. This will produce 1398 * Wipe out all of the contents of a journal, safely. This will produce
1399 * a warning if the journal contains any valid recovery information. 1399 * a warning if the journal contains any valid recovery information.
1400 * Must be called between journal_init_*() and journal_load(). 1400 * Must be called between journal_init_*() and journal_load().
@@ -1449,7 +1449,7 @@ static const char *journal_dev_name(journal_t *journal, char *buffer)
1449 1449
1450/* 1450/*
1451 * Journal abort has very specific semantics, which we describe 1451 * Journal abort has very specific semantics, which we describe
1452 * for journal abort. 1452 * for journal abort.
1453 * 1453 *
1454 * Two internal functions, which provide abort to the jbd layer 1454 * Two internal functions, which provide abort to the jbd layer
1455 * itself, are here. 1455 * itself, are here.
@@ -1504,7 +1504,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
1504 * Perform a complete, immediate shutdown of the ENTIRE 1504 * Perform a complete, immediate shutdown of the ENTIRE
1505 * journal (not of a single transaction). This operation cannot be 1505 * journal (not of a single transaction). This operation cannot be
1506 * undone without closing and reopening the journal. 1506 * undone without closing and reopening the journal.
1507 * 1507 *
1508 * The journal_abort function is intended to support higher level error 1508 * The journal_abort function is intended to support higher level error
1509 * recovery mechanisms such as the ext2/ext3 remount-readonly error 1509 * recovery mechanisms such as the ext2/ext3 remount-readonly error
1510 * mode. 1510 * mode.
@@ -1538,7 +1538,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
1538 * supply an errno; a null errno implies that absolutely no further 1538 * supply an errno; a null errno implies that absolutely no further
1539 * writes are done to the journal (unless there are any already in 1539 * writes are done to the journal (unless there are any already in
1540 * progress). 1540 * progress).
1541 * 1541 *
1542 */ 1542 */
1543 1543
1544void journal_abort(journal_t *journal, int errno) 1544void journal_abort(journal_t *journal, int errno)
@@ -1546,7 +1546,7 @@ void journal_abort(journal_t *journal, int errno)
1546 __journal_abort_soft(journal, errno); 1546 __journal_abort_soft(journal, errno);
1547} 1547}
1548 1548
1549/** 1549/**
1550 * int journal_errno () - returns the journal's error state. 1550 * int journal_errno () - returns the journal's error state.
1551 * @journal: journal to examine. 1551 * @journal: journal to examine.
1552 * 1552 *
@@ -1570,7 +1570,7 @@ int journal_errno(journal_t *journal)
1570 return err; 1570 return err;
1571} 1571}
1572 1572
1573/** 1573/**
1574 * int journal_clear_err () - clears the journal's error state 1574 * int journal_clear_err () - clears the journal's error state
1575 * @journal: journal to act on. 1575 * @journal: journal to act on.
1576 * 1576 *
@@ -1590,7 +1590,7 @@ int journal_clear_err(journal_t *journal)
1590 return err; 1590 return err;
1591} 1591}
1592 1592
1593/** 1593/**
1594 * void journal_ack_err() - Ack journal err. 1594 * void journal_ack_err() - Ack journal err.
1595 * @journal: journal to act on. 1595 * @journal: journal to act on.
1596 * 1596 *
@@ -1612,7 +1612,7 @@ int journal_blocks_per_page(struct inode *inode)
1612 1612
1613/* 1613/*
1614 * Simple support for retrying memory allocations. Introduced to help to 1614 * Simple support for retrying memory allocations. Introduced to help to
1615 * debug different VM deadlock avoidance strategies. 1615 * debug different VM deadlock avoidance strategies.
1616 */ 1616 */
1617void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry) 1617void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
1618{ 1618{
@@ -2047,13 +2047,7 @@ static int __init journal_init(void)
2047{ 2047{
2048 int ret; 2048 int ret;
2049 2049
2050/* Static check for data structure consistency. There's no code 2050 BUILD_BUG_ON(sizeof(struct journal_superblock_s) != 1024);
2051 * invoked --- we'll just get a linker failure if things aren't right.
2052 */
2053 extern void journal_bad_superblock_size(void);
2054 if (sizeof(struct journal_superblock_s) != 1024)
2055 journal_bad_superblock_size();
2056
2057 2051
2058 ret = journal_init_caches(); 2052 ret = journal_init_caches();
2059 if (ret != 0) 2053 if (ret != 0)
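The BUILD_BUG_ON() that replaces the linker trick expands to a compile-time check; the era's definition in include/linux/kernel.h is, approximately:

	/* Compile fails with a negative array size if the condition is true,
	 * so no runtime or link-time machinery is needed. */
	#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))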
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
index de5bafb4e853..445eed6ce5dc 100644
--- a/fs/jbd/recovery.c
+++ b/fs/jbd/recovery.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * linux/fs/recovery.c 2 * linux/fs/recovery.c
3 * 3 *
4 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999 4 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
5 * 5 *
6 * Copyright 1999-2000 Red Hat Software --- All Rights Reserved 6 * Copyright 1999-2000 Red Hat Software --- All Rights Reserved
@@ -10,7 +10,7 @@
10 * option, any later version, incorporated herein by reference. 10 * option, any later version, incorporated herein by reference.
11 * 11 *
12 * Journal recovery routines for the generic filesystem journaling code; 12 * Journal recovery routines for the generic filesystem journaling code;
13 * part of the ext2fs journaling system. 13 * part of the ext2fs journaling system.
14 */ 14 */
15 15
16#ifndef __KERNEL__ 16#ifndef __KERNEL__
@@ -25,9 +25,9 @@
25 25
26/* 26/*
27 * Maintain information about the progress of the recovery job, so that 27 * Maintain information about the progress of the recovery job, so that
28 * the different passes can carry information between them. 28 * the different passes can carry information between them.
29 */ 29 */
30struct recovery_info 30struct recovery_info
31{ 31{
32 tid_t start_transaction; 32 tid_t start_transaction;
33 tid_t end_transaction; 33 tid_t end_transaction;
@@ -116,7 +116,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
116 err = 0; 116 err = 0;
117 117
118failed: 118failed:
119 if (nbufs) 119 if (nbufs)
120 journal_brelse_array(bufs, nbufs); 120 journal_brelse_array(bufs, nbufs);
121 return err; 121 return err;
122} 122}
@@ -128,7 +128,7 @@ failed:
128 * Read a block from the journal 128 * Read a block from the journal
129 */ 129 */
130 130
131static int jread(struct buffer_head **bhp, journal_t *journal, 131static int jread(struct buffer_head **bhp, journal_t *journal,
132 unsigned int offset) 132 unsigned int offset)
133{ 133{
134 int err; 134 int err;
@@ -212,14 +212,14 @@ do { \
212/** 212/**
213 * journal_recover - recovers an on-disk journal 213 * journal_recover - recovers an on-disk journal
214 * @journal: the journal to recover 214 * @journal: the journal to recover
215 * 215 *
216 * The primary function for recovering the log contents when mounting a 216 * The primary function for recovering the log contents when mounting a
217 * journaled device. 217 * journaled device.
218 * 218 *
219 * Recovery is done in three passes. In the first pass, we look for the 219 * Recovery is done in three passes. In the first pass, we look for the
220 * end of the log. In the second, we assemble the list of revoke 220 * end of the log. In the second, we assemble the list of revoke
221 * blocks. In the third and final pass, we replay any un-revoked blocks 221 * blocks. In the third and final pass, we replay any un-revoked blocks
222 * in the log. 222 * in the log.
223 */ 223 */
224int journal_recover(journal_t *journal) 224int journal_recover(journal_t *journal)
225{ 225{
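The three passes described above correspond to three calls to do_one_pass() in the body elided between these hunks; schematically:

	/* Sketch of the pass sequence: find the end of the log, collect the
	 * revoke records, then replay everything that was not revoked. */
	err = do_one_pass(journal, &info, PASS_SCAN);
	if (!err)
		err = do_one_pass(journal, &info, PASS_REVOKE);
	if (!err)
		err = do_one_pass(journal, &info, PASS_REPLAY);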
@@ -231,10 +231,10 @@ int journal_recover(journal_t *journal)
231 memset(&info, 0, sizeof(info)); 231 memset(&info, 0, sizeof(info));
232 sb = journal->j_superblock; 232 sb = journal->j_superblock;
233 233
234 /* 234 /*
235 * The journal superblock's s_start field (the current log head) 235 * The journal superblock's s_start field (the current log head)
236 * is always zero if, and only if, the journal was cleanly 236 * is always zero if, and only if, the journal was cleanly
237 * unmounted. 237 * unmounted.
238 */ 238 */
239 239
240 if (!sb->s_start) { 240 if (!sb->s_start) {
@@ -253,7 +253,7 @@ int journal_recover(journal_t *journal)
253 jbd_debug(0, "JBD: recovery, exit status %d, " 253 jbd_debug(0, "JBD: recovery, exit status %d, "
254 "recovered transactions %u to %u\n", 254 "recovered transactions %u to %u\n",
255 err, info.start_transaction, info.end_transaction); 255 err, info.start_transaction, info.end_transaction);
256 jbd_debug(0, "JBD: Replayed %d and revoked %d/%d blocks\n", 256 jbd_debug(0, "JBD: Replayed %d and revoked %d/%d blocks\n",
257 info.nr_replays, info.nr_revoke_hits, info.nr_revokes); 257 info.nr_replays, info.nr_revoke_hits, info.nr_revokes);
258 258
259 /* Restart the log at the next transaction ID, thus invalidating 259 /* Restart the log at the next transaction ID, thus invalidating
@@ -268,15 +268,15 @@ int journal_recover(journal_t *journal)
268/** 268/**
269 * journal_skip_recovery - Start journal and wipe existing records 269 * journal_skip_recovery - Start journal and wipe existing records
270 * @journal: journal to startup 270 * @journal: journal to startup
271 * 271 *
272 * Locate any valid recovery information from the journal and set up the 272 * Locate any valid recovery information from the journal and set up the
273 * journal structures in memory to ignore it (presumably because the 273 * journal structures in memory to ignore it (presumably because the
274 * caller has evidence that it is out of date). 274 * caller has evidence that it is out of date).
275 * This function doesn't appear to be exported. 275 * This function doesn't appear to be exported.
276 * 276 *
277 * We perform one pass over the journal to allow us to tell the user how 277 * We perform one pass over the journal to allow us to tell the user how
278 * much recovery information is being erased, and to let us initialise 278 * much recovery information is being erased, and to let us initialise
279 * the journal transaction sequence numbers to the next unused ID. 279 * the journal transaction sequence numbers to the next unused ID.
280 */ 280 */
281int journal_skip_recovery(journal_t *journal) 281int journal_skip_recovery(journal_t *journal)
282{ 282{
@@ -297,7 +297,7 @@ int journal_skip_recovery(journal_t *journal)
297#ifdef CONFIG_JBD_DEBUG 297#ifdef CONFIG_JBD_DEBUG
298 int dropped = info.end_transaction - be32_to_cpu(sb->s_sequence); 298 int dropped = info.end_transaction - be32_to_cpu(sb->s_sequence);
299#endif 299#endif
300 jbd_debug(0, 300 jbd_debug(0,
301 "JBD: ignoring %d transaction%s from the journal.\n", 301 "JBD: ignoring %d transaction%s from the journal.\n",
302 dropped, (dropped == 1) ? "" : "s"); 302 dropped, (dropped == 1) ? "" : "s");
303 journal->j_transaction_sequence = ++info.end_transaction; 303 journal->j_transaction_sequence = ++info.end_transaction;
@@ -314,7 +314,7 @@ static int do_one_pass(journal_t *journal,
314 unsigned long next_log_block; 314 unsigned long next_log_block;
315 int err, success = 0; 315 int err, success = 0;
316 journal_superblock_t * sb; 316 journal_superblock_t * sb;
317 journal_header_t * tmp; 317 journal_header_t * tmp;
318 struct buffer_head * bh; 318 struct buffer_head * bh;
319 unsigned int sequence; 319 unsigned int sequence;
320 int blocktype; 320 int blocktype;
@@ -324,10 +324,10 @@ static int do_one_pass(journal_t *journal,
324 MAX_BLOCKS_PER_DESC = ((journal->j_blocksize-sizeof(journal_header_t)) 324 MAX_BLOCKS_PER_DESC = ((journal->j_blocksize-sizeof(journal_header_t))
325 / sizeof(journal_block_tag_t)); 325 / sizeof(journal_block_tag_t));
326 326
327 /* 327 /*
328 * First thing is to establish what we expect to find in the log 328 * First thing is to establish what we expect to find in the log
329 * (in terms of transaction IDs), and where (in terms of log 329 * (in terms of transaction IDs), and where (in terms of log
330 * block offsets): query the superblock. 330 * block offsets): query the superblock.
331 */ 331 */
332 332
333 sb = journal->j_superblock; 333 sb = journal->j_superblock;
@@ -344,7 +344,7 @@ static int do_one_pass(journal_t *journal,
344 * Now we walk through the log, transaction by transaction, 344 * Now we walk through the log, transaction by transaction,
345 * making sure that each transaction has a commit block in the 345 * making sure that each transaction has a commit block in the
346 * expected place. Each complete transaction gets replayed back 346 * expected place. Each complete transaction gets replayed back
347 * into the main filesystem. 347 * into the main filesystem.
348 */ 348 */
349 349
350 while (1) { 350 while (1) {
@@ -379,8 +379,8 @@ static int do_one_pass(journal_t *journal,
379 next_log_block++; 379 next_log_block++;
380 wrap(journal, next_log_block); 380 wrap(journal, next_log_block);
381 381
382 /* What kind of buffer is it? 382 /* What kind of buffer is it?
383 * 383 *
384 * If it is a descriptor block, check that it has the 384 * If it is a descriptor block, check that it has the
385 * expected sequence number. Otherwise, we're all done 385 * expected sequence number. Otherwise, we're all done
386 * here. */ 386 * here. */
@@ -394,7 +394,7 @@ static int do_one_pass(journal_t *journal,
394 394
395 blocktype = be32_to_cpu(tmp->h_blocktype); 395 blocktype = be32_to_cpu(tmp->h_blocktype);
396 sequence = be32_to_cpu(tmp->h_sequence); 396 sequence = be32_to_cpu(tmp->h_sequence);
397 jbd_debug(3, "Found magic %d, sequence %d\n", 397 jbd_debug(3, "Found magic %d, sequence %d\n",
398 blocktype, sequence); 398 blocktype, sequence);
399 399
400 if (sequence != next_commit_ID) { 400 if (sequence != next_commit_ID) {
@@ -438,7 +438,7 @@ static int do_one_pass(journal_t *journal,
438 /* Recover what we can, but 438 /* Recover what we can, but
439 * report failure at the end. */ 439 * report failure at the end. */
440 success = err; 440 success = err;
441 printk (KERN_ERR 441 printk (KERN_ERR
442 "JBD: IO error %d recovering " 442 "JBD: IO error %d recovering "
443 "block %ld in log\n", 443 "block %ld in log\n",
444 err, io_block); 444 err, io_block);
@@ -452,7 +452,7 @@ static int do_one_pass(journal_t *journal,
452 * revoked, then we're all done 452 * revoked, then we're all done
453 * here. */ 453 * here. */
454 if (journal_test_revoke 454 if (journal_test_revoke
455 (journal, blocknr, 455 (journal, blocknr,
456 next_commit_ID)) { 456 next_commit_ID)) {
457 brelse(obh); 457 brelse(obh);
458 ++info->nr_revoke_hits; 458 ++info->nr_revoke_hits;
@@ -465,7 +465,7 @@ static int do_one_pass(journal_t *journal,
465 blocknr, 465 blocknr,
466 journal->j_blocksize); 466 journal->j_blocksize);
467 if (nbh == NULL) { 467 if (nbh == NULL) {
468 printk(KERN_ERR 468 printk(KERN_ERR
469 "JBD: Out of memory " 469 "JBD: Out of memory "
470 "during recovery.\n"); 470 "during recovery.\n");
471 err = -ENOMEM; 471 err = -ENOMEM;
@@ -537,7 +537,7 @@ static int do_one_pass(journal_t *journal,
537 } 537 }
538 538
539 done: 539 done:
540 /* 540 /*
541 * We broke out of the log scan loop: either we came to the 541 * We broke out of the log scan loop: either we came to the
542 * known end of the log or we found an unexpected block in the 542 * known end of the log or we found an unexpected block in the
543 * log. If the latter happened, then we know that the "current" 543 * log. If the latter happened, then we know that the "current"
@@ -567,7 +567,7 @@ static int do_one_pass(journal_t *journal,
567 567
568/* Scan a revoke record, marking all blocks mentioned as revoked. */ 568/* Scan a revoke record, marking all blocks mentioned as revoked. */
569 569
570static int scan_revoke_records(journal_t *journal, struct buffer_head *bh, 570static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
571 tid_t sequence, struct recovery_info *info) 571 tid_t sequence, struct recovery_info *info)
572{ 572{
573 journal_revoke_header_t *header; 573 journal_revoke_header_t *header;
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index a56144183462..c532429d8d9b 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * linux/fs/revoke.c 2 * linux/fs/revoke.c
3 * 3 *
4 * Written by Stephen C. Tweedie <sct@redhat.com>, 2000 4 * Written by Stephen C. Tweedie <sct@redhat.com>, 2000
5 * 5 *
6 * Copyright 2000 Red Hat corp --- All Rights Reserved 6 * Copyright 2000 Red Hat corp --- All Rights Reserved
@@ -15,10 +15,10 @@
15 * Revoke is the mechanism used to prevent old log records for deleted 15 * Revoke is the mechanism used to prevent old log records for deleted
16 * metadata from being replayed on top of newer data using the same 16 * metadata from being replayed on top of newer data using the same
17 * blocks. The revoke mechanism is used in two separate places: 17 * blocks. The revoke mechanism is used in two separate places:
18 * 18 *
19 * + Commit: during commit we write the entire list of the current 19 * + Commit: during commit we write the entire list of the current
20 * transaction's revoked blocks to the journal 20 * transaction's revoked blocks to the journal
21 * 21 *
22 * + Recovery: during recovery we record the transaction ID of all 22 * + Recovery: during recovery we record the transaction ID of all
23 * revoked blocks. If there are multiple revoke records in the log 23 * revoked blocks. If there are multiple revoke records in the log
24 * for a single block, only the last one counts, and if there is a log 24 * for a single block, only the last one counts, and if there is a log
@@ -29,7 +29,7 @@
29 * single transaction: 29 * single transaction:
30 * 30 *
31 * Block is revoked and then journaled: 31 * Block is revoked and then journaled:
32 * The desired end result is the journaling of the new block, so we 32 * The desired end result is the journaling of the new block, so we
33 * cancel the revoke before the transaction commits. 33 * cancel the revoke before the transaction commits.
34 * 34 *
35 * Block is journaled and then revoked: 35 * Block is journaled and then revoked:
@@ -41,7 +41,7 @@
41 * transaction must have happened after the block was journaled and so 41 * transaction must have happened after the block was journaled and so
42 * the revoke must take precedence. 42 * the revoke must take precedence.
43 * 43 *
44 * Block is revoked and then written as data: 44 * Block is revoked and then written as data:
45 * The data write is allowed to succeed, but the revoke is _not_ 45 * The data write is allowed to succeed, but the revoke is _not_
46 * cancelled. We still need to prevent old log records from 46 * cancelled. We still need to prevent old log records from
47 * overwriting the new data. We don't even need to clear the revoke 47 * overwriting the new data. We don't even need to clear the revoke
@@ -54,7 +54,7 @@
54 * buffer has not been revoked, and cancel_revoke 54 * buffer has not been revoked, and cancel_revoke
55 * need do nothing. 55 * need do nothing.
56 * RevokeValid set, Revoked set: 56 * RevokeValid set, Revoked set:
57 * buffer has been revoked. 57 * buffer has been revoked.
58 */ 58 */
59 59
60#ifndef __KERNEL__ 60#ifndef __KERNEL__
@@ -77,7 +77,7 @@ static kmem_cache_t *revoke_table_cache;
77 journal replay, this involves recording the transaction ID of the 77 journal replay, this involves recording the transaction ID of the
78 last transaction to revoke this block. */ 78 last transaction to revoke this block. */
79 79
80struct jbd_revoke_record_s 80struct jbd_revoke_record_s
81{ 81{
82 struct list_head hash; 82 struct list_head hash;
83 tid_t sequence; /* Used for recovery only */ 83 tid_t sequence; /* Used for recovery only */
@@ -90,8 +90,8 @@ struct jbd_revoke_table_s
90{ 90{
91 /* It is conceivable that we might want a larger hash table 91 /* It is conceivable that we might want a larger hash table
92 * for recovery. Must be a power of two. */ 92 * for recovery. Must be a power of two. */
93 int hash_size; 93 int hash_size;
94 int hash_shift; 94 int hash_shift;
95 struct list_head *hash_table; 95 struct list_head *hash_table;
96}; 96};
97 97
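hash_shift and hash_size feed the table's index computation; the helper in the same file (untouched by this patch) is roughly:

	/* Approximate hash used to index hash_table[]; hash_size is a power
	 * of two, so the mask keeps the result in range. */
	static inline int hash(journal_t *journal, unsigned long block)
	{
		struct jbd_revoke_table_s *table = journal->j_revoke;
		int hash_shift = table->hash_shift;

		return ((block << (hash_shift - 6)) ^
			(block >> 13) ^
			(block << (hash_shift - 12))) & (table->hash_size - 1);
	}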
@@ -301,22 +301,22 @@ void journal_destroy_revoke(journal_t *journal)
301 301
302#ifdef __KERNEL__ 302#ifdef __KERNEL__
303 303
304/* 304/*
305 * journal_revoke: revoke a given buffer_head from the journal. This 305 * journal_revoke: revoke a given buffer_head from the journal. This
306 * prevents the block from being replayed during recovery if we take a 306 * prevents the block from being replayed during recovery if we take a
307 * crash after this current transaction commits. Any subsequent 307 * crash after this current transaction commits. Any subsequent
308 * metadata writes of the buffer in this transaction cancel the 308 * metadata writes of the buffer in this transaction cancel the
309 * revoke. 309 * revoke.
310 * 310 *
311 * Note that this call may block --- it is up to the caller to make 311 * Note that this call may block --- it is up to the caller to make
312 * sure that there are no further calls to journal_write_metadata 312 * sure that there are no further calls to journal_write_metadata
313 * before the revoke is complete. In ext3, this implies calling the 313 * before the revoke is complete. In ext3, this implies calling the
314 * revoke before clearing the block bitmap when we are deleting 314 * revoke before clearing the block bitmap when we are deleting
315 * metadata. 315 * metadata.
316 * 316 *
317 * Revoke performs a journal_forget on any buffer_head passed in as a 317 * Revoke performs a journal_forget on any buffer_head passed in as a
318 * parameter, but does _not_ forget the buffer_head if the bh was only 318 * parameter, but does _not_ forget the buffer_head if the bh was only
319 * found implicitly. 319 * found implicitly.
320 * 320 *
321 * bh_in may not be a journalled buffer - it may have come off 321 * bh_in may not be a journalled buffer - it may have come off
322 * the hash tables without an attached journal_head. 322 * the hash tables without an attached journal_head.
@@ -325,7 +325,7 @@ void journal_destroy_revoke(journal_t *journal)
325 * by one. 325 * by one.
326 */ 326 */
327 327
328int journal_revoke(handle_t *handle, unsigned long blocknr, 328int journal_revoke(handle_t *handle, unsigned long blocknr,
329 struct buffer_head *bh_in) 329 struct buffer_head *bh_in)
330{ 330{
331 struct buffer_head *bh = NULL; 331 struct buffer_head *bh = NULL;
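As the comment notes, a filesystem issues the revoke before it clears the block bitmap; a hedged sketch of such a caller (handle, blocknr and bh belong to the caller; this is illustrative, not code from this patch):

	/* Revoke the freed metadata block so stale log copies of it are not
	 * replayed after a crash, then release the on-disk block. */
	err = journal_revoke(handle, blocknr, bh);
	if (err)
		journal_abort(handle->h_transaction->t_journal, err);
	/* ...only now is it safe to clear the block bitmap bit... */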
@@ -487,7 +487,7 @@ void journal_switch_revoke_table(journal_t *journal)
487 else 487 else
488 journal->j_revoke = journal->j_revoke_table[0]; 488 journal->j_revoke = journal->j_revoke_table[0];
489 489
490 for (i = 0; i < journal->j_revoke->hash_size; i++) 490 for (i = 0; i < journal->j_revoke->hash_size; i++)
491 INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]); 491 INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]);
492} 492}
493 493
@@ -498,7 +498,7 @@ void journal_switch_revoke_table(journal_t *journal)
498 * Called with the journal lock held. 498 * Called with the journal lock held.
499 */ 499 */
500 500
501void journal_write_revoke_records(journal_t *journal, 501void journal_write_revoke_records(journal_t *journal,
502 transaction_t *transaction) 502 transaction_t *transaction)
503{ 503{
504 struct journal_head *descriptor; 504 struct journal_head *descriptor;
@@ -507,7 +507,7 @@ void journal_write_revoke_records(journal_t *journal,
507 struct list_head *hash_list; 507 struct list_head *hash_list;
508 int i, offset, count; 508 int i, offset, count;
509 509
510 descriptor = NULL; 510 descriptor = NULL;
511 offset = 0; 511 offset = 0;
512 count = 0; 512 count = 0;
513 513
@@ -519,10 +519,10 @@ void journal_write_revoke_records(journal_t *journal,
519 hash_list = &revoke->hash_table[i]; 519 hash_list = &revoke->hash_table[i];
520 520
521 while (!list_empty(hash_list)) { 521 while (!list_empty(hash_list)) {
522 record = (struct jbd_revoke_record_s *) 522 record = (struct jbd_revoke_record_s *)
523 hash_list->next; 523 hash_list->next;
524 write_one_revoke_record(journal, transaction, 524 write_one_revoke_record(journal, transaction,
525 &descriptor, &offset, 525 &descriptor, &offset,
526 record); 526 record);
527 count++; 527 count++;
528 list_del(&record->hash); 528 list_del(&record->hash);
@@ -534,14 +534,14 @@ void journal_write_revoke_records(journal_t *journal,
534 jbd_debug(1, "Wrote %d revoke records\n", count); 534 jbd_debug(1, "Wrote %d revoke records\n", count);
535} 535}
536 536
537/* 537/*
538 * Write out one revoke record. We need to create a new descriptor 538 * Write out one revoke record. We need to create a new descriptor
539 * block if the old one is full or if we have not already created one. 539 * block if the old one is full or if we have not already created one.
540 */ 540 */
541 541
542static void write_one_revoke_record(journal_t *journal, 542static void write_one_revoke_record(journal_t *journal,
543 transaction_t *transaction, 543 transaction_t *transaction,
544 struct journal_head **descriptorp, 544 struct journal_head **descriptorp,
545 int *offsetp, 545 int *offsetp,
546 struct jbd_revoke_record_s *record) 546 struct jbd_revoke_record_s *record)
547{ 547{
@@ -584,21 +584,21 @@ static void write_one_revoke_record(journal_t *journal,
584 *descriptorp = descriptor; 584 *descriptorp = descriptor;
585 } 585 }
586 586
587 * ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) = 587 * ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) =
588 cpu_to_be32(record->blocknr); 588 cpu_to_be32(record->blocknr);
589 offset += 4; 589 offset += 4;
590 *offsetp = offset; 590 *offsetp = offset;
591} 591}
592 592
593/* 593/*
594 * Flush a revoke descriptor out to the journal. If we are aborting, 594 * Flush a revoke descriptor out to the journal. If we are aborting,
595 * this is a noop; otherwise we are generating a buffer which needs to 595 * this is a noop; otherwise we are generating a buffer which needs to
596 * be waited for during commit, so it has to go onto the appropriate 596 * be waited for during commit, so it has to go onto the appropriate
597 * journal buffer list. 597 * journal buffer list.
598 */ 598 */
599 599
600static void flush_descriptor(journal_t *journal, 600static void flush_descriptor(journal_t *journal,
601 struct journal_head *descriptor, 601 struct journal_head *descriptor,
602 int offset) 602 int offset)
603{ 603{
604 journal_revoke_header_t *header; 604 journal_revoke_header_t *header;
@@ -618,7 +618,7 @@ static void flush_descriptor(journal_t *journal,
618} 618}
619#endif 619#endif
620 620
621/* 621/*
622 * Revoke support for recovery. 622 * Revoke support for recovery.
623 * 623 *
624 * Recovery needs to be able to: 624 * Recovery needs to be able to:
@@ -629,7 +629,7 @@ static void flush_descriptor(journal_t *journal,
629 * check whether a given block in a given transaction should be replayed 629 * check whether a given block in a given transaction should be replayed
630 * (ie. has not been revoked by a revoke record in that or a subsequent 630 * (ie. has not been revoked by a revoke record in that or a subsequent
631 * transaction) 631 * transaction)
632 * 632 *
633 * empty the revoke table after recovery. 633 * empty the revoke table after recovery.
634 */ 634 */
635 635
@@ -637,11 +637,11 @@ static void flush_descriptor(journal_t *journal,
637 * First, setting revoke records. We create a new revoke record for 637 * First, setting revoke records. We create a new revoke record for
638 * every block ever revoked in the log as we scan it for recovery, and 638 * every block ever revoked in the log as we scan it for recovery, and
639 * we update the existing records if we find multiple revokes for a 639 * we update the existing records if we find multiple revokes for a
640 * single block. 640 * single block.
641 */ 641 */
642 642
643int journal_set_revoke(journal_t *journal, 643int journal_set_revoke(journal_t *journal,
644 unsigned long blocknr, 644 unsigned long blocknr,
645 tid_t sequence) 645 tid_t sequence)
646{ 646{
647 struct jbd_revoke_record_s *record; 647 struct jbd_revoke_record_s *record;
@@ -653,18 +653,18 @@ int journal_set_revoke(journal_t *journal,
653 if (tid_gt(sequence, record->sequence)) 653 if (tid_gt(sequence, record->sequence))
654 record->sequence = sequence; 654 record->sequence = sequence;
655 return 0; 655 return 0;
656 } 656 }
657 return insert_revoke_hash(journal, blocknr, sequence); 657 return insert_revoke_hash(journal, blocknr, sequence);
658} 658}
659 659
660/* 660/*
661 * Test revoke records. For a given block referenced in the log, has 661 * Test revoke records. For a given block referenced in the log, has
662 * that block been revoked? A revoke record with a given transaction 662 * that block been revoked? A revoke record with a given transaction
663 * sequence number revokes all blocks in that transaction and earlier 663 * sequence number revokes all blocks in that transaction and earlier
664 * ones, but later transactions still need to be replayed. 664 * ones, but later transactions still need to be replayed.
665 */ 665 */
666 666
667int journal_test_revoke(journal_t *journal, 667int journal_test_revoke(journal_t *journal,
668 unsigned long blocknr, 668 unsigned long blocknr,
669 tid_t sequence) 669 tid_t sequence)
670{ 670{
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index f5169a96260e..e1b3c8af4d17 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * linux/fs/transaction.c 2 * linux/fs/transaction.c
3 * 3 *
4 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998 4 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
5 * 5 *
6 * Copyright 1998 Red Hat corp --- All Rights Reserved 6 * Copyright 1998 Red Hat corp --- All Rights Reserved
@@ -10,7 +10,7 @@
10 * option, any later version, incorporated herein by reference. 10 * option, any later version, incorporated herein by reference.
11 * 11 *
12 * Generic filesystem transaction handling code; part of the ext2fs 12 * Generic filesystem transaction handling code; part of the ext2fs
13 * journaling system. 13 * journaling system.
14 * 14 *
15 * This file manages transactions (compound commits managed by the 15 * This file manages transactions (compound commits managed by the
16 * journaling code) and handles (individual atomic operations by the 16 * journaling code) and handles (individual atomic operations by the
@@ -74,7 +74,7 @@ get_transaction(journal_t *journal, transaction_t *transaction)
74 * start_this_handle: Given a handle, deal with any locking or stalling 74 * start_this_handle: Given a handle, deal with any locking or stalling
75 * needed to make sure that there is enough journal space for the handle 75 * needed to make sure that there is enough journal space for the handle
76 * to begin. Attach the handle to a transaction and set up the 76 * to begin. Attach the handle to a transaction and set up the
77 * transaction's buffer credits. 77 * transaction's buffer credits.
78 */ 78 */
79 79
80static int start_this_handle(journal_t *journal, handle_t *handle) 80static int start_this_handle(journal_t *journal, handle_t *handle)
@@ -117,7 +117,7 @@ repeat_locked:
117 if (is_journal_aborted(journal) || 117 if (is_journal_aborted(journal) ||
118 (journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) { 118 (journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) {
119 spin_unlock(&journal->j_state_lock); 119 spin_unlock(&journal->j_state_lock);
120 ret = -EROFS; 120 ret = -EROFS;
121 goto out; 121 goto out;
122 } 122 }
123 123
@@ -182,7 +182,7 @@ repeat_locked:
182 goto repeat; 182 goto repeat;
183 } 183 }
184 184
185 /* 185 /*
186 * The commit code assumes that it can get enough log space 186 * The commit code assumes that it can get enough log space
187 * without forcing a checkpoint. This is *critical* for 187 * without forcing a checkpoint. This is *critical* for
188 * correctness: a checkpoint of a buffer which is also 188 * correctness: a checkpoint of a buffer which is also
@@ -191,7 +191,7 @@ repeat_locked:
191 * 191 *
192 * We must therefore ensure the necessary space in the journal 192 * We must therefore ensure the necessary space in the journal
193 * *before* starting to dirty potentially checkpointed buffers 193 * *before* starting to dirty potentially checkpointed buffers
194 * in the new transaction. 194 * in the new transaction.
195 * 195 *
196 * The worst part is, any transaction currently committing can 196 * The worst part is, any transaction currently committing can
197 * reduce the free space arbitrarily. Be careful to account for 197 * reduce the free space arbitrarily. Be careful to account for
@@ -246,13 +246,13 @@ static handle_t *new_handle(int nblocks)
246} 246}
247 247
248/** 248/**
249 * handle_t *journal_start() - Obtain a new handle. 249 * handle_t *journal_start() - Obtain a new handle.
250 * @journal: Journal to start transaction on. 250 * @journal: Journal to start transaction on.
251 * @nblocks: number of block buffer we might modify 251 * @nblocks: number of block buffer we might modify
252 * 252 *
253 * We make sure that the transaction can guarantee at least nblocks of 253 * We make sure that the transaction can guarantee at least nblocks of
254 * modified buffers in the log. We block until the log can guarantee 254 * modified buffers in the log. We block until the log can guarantee
255 * that much space. 255 * that much space.
256 * 256 *
257 * This function is visible to journal users (like ext3fs), so is not 257 * This function is visible to journal users (like ext3fs), so is not
258 * called with the journal already locked. 258 * called with the journal already locked.
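The contract described above makes the caller-side pattern simple: reserve credits, modify buffers under the handle, then release it. A minimal sketch (names illustrative, error handling trimmed):

	handle_t *handle = journal_start(journal, nblocks);

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	/* ... journal_get_write_access() / journal_dirty_metadata() ... */
	return journal_stop(handle);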
@@ -292,11 +292,11 @@ handle_t *journal_start(journal_t *journal, int nblocks)
292 * int journal_extend() - extend buffer credits. 292 * int journal_extend() - extend buffer credits.
293 * @handle: handle to 'extend' 293 * @handle: handle to 'extend'
294 * @nblocks: nr blocks to try to extend by. 294 * @nblocks: nr blocks to try to extend by.
295 * 295 *
296 * Some transactions, such as large extends and truncates, can be done 296 * Some transactions, such as large extends and truncates, can be done
297 * atomically all at once or in several stages. The operation requests 297 * atomically all at once or in several stages. The operation requests
298 * a credit for a number of buffer modifications in advance, but can 298 * a credit for a number of buffer modifications in advance, but can
299 * extend its credit if it needs more. 299 * extend its credit if it needs more.
300 * 300 *
301 * journal_extend tries to give the running handle more buffer credits. 301 * journal_extend tries to give the running handle more buffer credits.
302 * It does not guarantee that allocation; this is best-effort only. 302 * It does not guarantee that allocation; this is best-effort only.
@@ -363,7 +363,7 @@ out:
363 * int journal_restart() - restart a handle . 363 * int journal_restart() - restart a handle .
364 * @handle: handle to restart 364 * @handle: handle to restart
365 * @nblocks: nr credits requested 365 * @nblocks: nr credits requested
366 * 366 *
367 * Restart a handle for a multi-transaction filesystem 367 * Restart a handle for a multi-transaction filesystem
368 * operation. 368 * operation.
369 * 369 *
@@ -462,7 +462,7 @@ void journal_lock_updates(journal_t *journal)
462/** 462/**
463 * void journal_unlock_updates (journal_t* journal) - release barrier 463 * void journal_unlock_updates (journal_t* journal) - release barrier
464 * @journal: Journal to release the barrier on. 464 * @journal: Journal to release the barrier on.
465 * 465 *
466 * Release a transaction barrier obtained with journal_lock_updates(). 466 * Release a transaction barrier obtained with journal_lock_updates().
467 * 467 *
468 * Should be called without the journal lock held. 468 * Should be called without the journal lock held.
@@ -547,8 +547,8 @@ repeat:
547 jbd_lock_bh_state(bh); 547 jbd_lock_bh_state(bh);
548 548
549 /* We now hold the buffer lock so it is safe to query the buffer 549 /* We now hold the buffer lock so it is safe to query the buffer
550 * state. Is the buffer dirty? 550 * state. Is the buffer dirty?
551 * 551 *
552 * If so, there are two possibilities. The buffer may be 552 * If so, there are two possibilities. The buffer may be
553 * non-journaled, and undergoing a quite legitimate writeback. 553 * non-journaled, and undergoing a quite legitimate writeback.
554 * Otherwise, it is journaled, and we don't expect dirty buffers 554 * Otherwise, it is journaled, and we don't expect dirty buffers
@@ -566,7 +566,7 @@ repeat:
566 */ 566 */
567 if (jh->b_transaction) { 567 if (jh->b_transaction) {
568 J_ASSERT_JH(jh, 568 J_ASSERT_JH(jh,
569 jh->b_transaction == transaction || 569 jh->b_transaction == transaction ||
570 jh->b_transaction == 570 jh->b_transaction ==
571 journal->j_committing_transaction); 571 journal->j_committing_transaction);
572 if (jh->b_next_transaction) 572 if (jh->b_next_transaction)
@@ -580,7 +580,7 @@ repeat:
580 */ 580 */
581 JBUFFER_TRACE(jh, "Unexpected dirty buffer"); 581 JBUFFER_TRACE(jh, "Unexpected dirty buffer");
582 jbd_unexpected_dirty_buffer(jh); 582 jbd_unexpected_dirty_buffer(jh);
583 } 583 }
584 584
585 unlock_buffer(bh); 585 unlock_buffer(bh);
586 586
@@ -653,7 +653,7 @@ repeat:
653 * buffer had better remain locked during the kmalloc, 653 * buffer had better remain locked during the kmalloc,
654 * but that should be true --- we hold the journal lock 654 * but that should be true --- we hold the journal lock
655 * still and the buffer is already on the BUF_JOURNAL 655 * still and the buffer is already on the BUF_JOURNAL
656 * list so won't be flushed. 656 * list so won't be flushed.
657 * 657 *
658 * Subtle point, though: if this is a get_undo_access, 658 * Subtle point, though: if this is a get_undo_access,
659 * then we will be relying on the frozen_data to contain 659 * then we will be relying on the frozen_data to contain
@@ -765,8 +765,8 @@ int journal_get_write_access(handle_t *handle, struct buffer_head *bh)
 * manually rather than reading off disk), then we need to keep the
 * buffer_head locked until it has been completely filled with new
 * data. In this case, we should be able to make the assertion that
 * the bh is not already part of an existing transaction.
 *
 * The buffer should already be locked by the caller by this point.
 * There is no lock ranking violation: it was a newly created,
 * unlocked buffer beforehand. */
@@ -778,7 +778,7 @@ int journal_get_write_access(handle_t *handle, struct buffer_head *bh)
 *
 * Call this if you create a new bh.
 */
 int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
 {
 	transaction_t *transaction = handle->h_transaction;
 	journal_t *journal = transaction->t_journal;
@@ -847,13 +847,13 @@ out:
 * do not reuse freed space until the deallocation has been committed,
 * since if we overwrote that space we would make the delete
 * un-rewindable in case of a crash.
 *
 * To deal with that, journal_get_undo_access requests write access to a
 * buffer for parts of non-rewindable operations such as delete
 * operations on the bitmaps. The journaling code must keep a copy of
 * the buffer's contents prior to the undo_access call until such time
 * as we know that the buffer has definitely been committed to disk.
 *
 * We never need to know which transaction the committed data is part
 * of, buffers touched here are guaranteed to be dirtied later and so
 * will be committed to a new transaction in due course, at which point
@@ -911,13 +911,13 @@ out:
 	return err;
 }

 /**
 * int journal_dirty_data() - mark a buffer as containing dirty data which
 * needs to be flushed before we can commit the
 * current transaction.
 * @handle: transaction
 * @bh: bufferhead to mark
 *
 * The buffer is placed on the transaction's data list and is marked as
 * belonging to the transaction.
 *
@@ -946,15 +946,15 @@ int journal_dirty_data(handle_t *handle, struct buffer_head *bh)

 	/*
 	 * What if the buffer is already part of a running transaction?
 	 *
 	 * There are two cases:
 	 * 1) It is part of the current running transaction. Refile it,
 	 *    just in case we have allocated it as metadata, deallocated
 	 *    it, then reallocated it as data.
 	 * 2) It is part of the previous, still-committing transaction.
 	 *    If all we want to do is to guarantee that the buffer will be
 	 *    written to disk before this new transaction commits, then
 	 *    being sure that the *previous* transaction has this same
 	 *    property is sufficient for us! Just leave it on its old
 	 *    transaction.
 	 *
@@ -1076,18 +1076,18 @@ no_journal:
 	return 0;
 }

 /**
 * int journal_dirty_metadata() - mark a buffer as containing dirty metadata
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
 * mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * Special care needs to be taken if the buffer already belongs to the
 * current committing transaction (in which case we should have frozen
@@ -1135,11 +1135,11 @@ int journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)

 	set_buffer_jbddirty(bh);

 	/*
 	 * Metadata already on the current transaction list doesn't
 	 * need to be filed. Metadata on another transaction's list must
 	 * be committing, and will be refiled once the commit completes:
 	 * leave it alone for now.
 	 */
 	if (jh->b_transaction != transaction) {
 		JBUFFER_TRACE(jh, "already on other transaction");
@@ -1165,7 +1165,7 @@ out:
 	return 0;
 }

 /*
 * journal_release_buffer: undo a get_write_access without any buffer
 * updates, if the update decided in the end that it didn't need access.
 *
@@ -1176,20 +1176,20 @@ journal_release_buffer(handle_t *handle, struct buffer_head *bh)
 	BUFFER_TRACE(bh, "entry");
 }

 /**
 * void journal_forget() - bforget() for potentially-journaled buffers.
 * @handle: transaction handle
 * @bh: bh to 'forget'
 *
 * We can only do the bforget if there are no commits pending against the
 * buffer. If the buffer is dirty in the current running transaction we
 * can safely unlink it.
 *
 * bh may not be a journalled buffer at all - it may be a non-JBD
 * buffer which came off the hashtable. Check for this.
 *
 * Decrements bh->b_count by one.
 *
 * Allow this call even if the handle has aborted --- it may be part of
 * the caller's cleanup after an abort.
 */
@@ -1237,7 +1237,7 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)

 		drop_reserve = 1;

 		/*
 		 * We are no longer going to journal this buffer.
 		 * However, the commit of this transaction is still
 		 * important to the buffer: the delete that we are now
@@ -1246,7 +1246,7 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)
 		 *
 		 * So, if we have a checkpoint on the buffer, we should
 		 * now refile the buffer on our BJ_Forget list so that
 		 * we know to remove the checkpoint after we commit.
 		 */

 		if (jh->b_cp_transaction) {
@@ -1264,7 +1264,7 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)
 			}
 		}
 	} else if (jh->b_transaction) {
 		J_ASSERT_JH(jh, (jh->b_transaction ==
 				 journal->j_committing_transaction));
 		/* However, if the buffer is still owned by a prior
 		 * (committing) transaction, we can't drop it yet... */
@@ -1294,7 +1294,7 @@ drop:
 /**
 * int journal_stop() - complete a transaction
 * @handle: tranaction to complete.
 *
 * All done for a particular handle.
 *
 * There is not much action needed here. We just return any remaining
@@ -1303,7 +1303,7 @@ drop:
 * filesystem is marked for synchronous update.
 *
 * journal_stop itself will not usually return an error, but it may
 * do so in unusual circumstances. In particular, expect it to
 * return -EIO if a journal_abort has been executed since the
 * transaction began.
 */
@@ -1373,7 +1373,7 @@ int journal_stop(handle_t *handle)
 	if (handle->h_sync ||
 			transaction->t_outstanding_credits >
 				journal->j_max_transaction_buffers ||
 			time_after_eq(jiffies, transaction->t_expires)) {
 		/* Do this even for aborted journals: an abort still
 		 * completes the commit thread, it just doesn't write
 		 * anything to disk. */
@@ -1388,7 +1388,7 @@ int journal_stop(handle_t *handle)

 		/*
 		 * Special case: JFS_SYNC synchronous updates require us
 		 * to wait for the commit to complete.
 		 */
 		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
 			err = log_wait_commit(journal, tid);
@@ -1439,7 +1439,7 @@ int journal_force_commit(journal_t *journal)
 * jbd_lock_bh_state(jh2bh(jh)) is held.
 */

 static inline void
 __blist_add_buffer(struct journal_head **list, struct journal_head *jh)
 {
 	if (!*list) {
@@ -1454,7 +1454,7 @@ __blist_add_buffer(struct journal_head **list, struct journal_head *jh)
 	}
 }

 /*
 * Remove a buffer from a transaction list, given the transaction's list
 * head pointer.
 *
@@ -1475,7 +1475,7 @@ __blist_del_buffer(struct journal_head **list, struct journal_head *jh)
 	jh->b_tnext->b_tprev = jh->b_tprev;
 }

 /*
 * Remove a buffer from the appropriate transaction list.
 *
 * Note that this function can *change* the value of
@@ -1595,17 +1595,17 @@ out:
 }


 /**
 * int journal_try_to_free_buffers() - try to free page buffers.
 * @journal: journal for operation
 * @page: to try and free
 * @unused_gfp_mask: unused
 *
 *
 * For all the buffers on this page,
 * if they are fully written out ordered data, move them onto BUF_CLEAN
 * so try_to_free_buffers() can reap them.
 *
 * This function returns non-zero if we wish try_to_free_buffers()
 * to be called. We do this if the page is releasable by try_to_free_buffers().
 * We also do it if the page has locked or dirty buffers and the caller wants
@@ -1629,7 +1629,7 @@ out:
 * cannot happen because we never reallocate freed data as metadata
 * while the data is part of a transaction. Yes?
 */
 int journal_try_to_free_buffers(journal_t *journal,
 				struct page *page, gfp_t unused_gfp_mask)
 {
 	struct buffer_head *head;
@@ -1697,7 +1697,7 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
 }

 /*
 * journal_invalidatepage
 *
 * This code is tricky. It has a number of cases to deal with.
 *
@@ -1705,15 +1705,15 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
 *
 * i_size must be updated on disk before we start calling invalidatepage on the
 * data.
 *
 * This is done in ext3 by defining an ext3_setattr method which
 * updates i_size before truncate gets going. By maintaining this
 * invariant, we can be sure that it is safe to throw away any buffers
 * attached to the current transaction: once the transaction commits,
 * we know that the data will not be needed.
 *
 * Note however that we can *not* throw away data belonging to the
 * previous, committing transaction!
 *
 * Any disk blocks which *are* part of the previous, committing
 * transaction (and which therefore cannot be discarded immediately) are
@@ -1732,7 +1732,7 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
 * don't make guarantees about the order in which data hits disk --- in
 * particular we don't guarantee that new dirty data is flushed before
 * transaction commit --- so it is always safe just to discard data
 * immediately in that mode. --sct
 */

 /*
@@ -1876,9 +1876,9 @@ zap_buffer_unlocked:
 	return may_free;
 }

 /**
 * void journal_invalidatepage()
 * @journal: journal to use for flush...
 * @page: page to flush
 * @offset: length of page to invalidate.
 *
@@ -1886,7 +1886,7 @@ zap_buffer_unlocked:
 *
 */
 void journal_invalidatepage(journal_t *journal,
 		      struct page *page,
 		      unsigned long offset)
 {
 	struct buffer_head *head, *bh, *next;
@@ -1908,7 +1908,7 @@ void journal_invalidatepage(journal_t *journal,
 		next = bh->b_this_page;

 		if (offset <= curr_off) {
 			/* This block is wholly outside the truncation point */
 			lock_buffer(bh);
 			may_free &= journal_unmap_buffer(journal, bh);
 			unlock_buffer(bh);
@@ -1924,8 +1924,8 @@ void journal_invalidatepage(journal_t *journal,
 	}
 }

 /*
 * File a buffer on the given transaction list.
 */
 void __journal_file_buffer(struct journal_head *jh,
 			transaction_t *transaction, int jlist)
@@ -1948,7 +1948,7 @@ void __journal_file_buffer(struct journal_head *jh,
 	 * with __jbd_unexpected_dirty_buffer()'s handling of dirty
 	 * state. */

 	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
 	    jlist == BJ_Shadow || jlist == BJ_Forget) {
 		if (test_clear_buffer_dirty(bh) ||
 		    test_clear_buffer_jbddirty(bh))
@@ -2008,7 +2008,7 @@ void journal_file_buffer(struct journal_head *jh,
 	jbd_unlock_bh_state(jh2bh(jh));
 }

 /*
 * Remove a buffer from its current buffer list in preparation for
 * dropping it from its current transaction entirely. If the buffer has
 * already started to be used by a subsequent transaction, refile the
@@ -2060,7 +2060,7 @@ void __journal_refile_buffer(struct journal_head *jh)
 * to the caller to remove the journal_head if necessary. For the
 * unlocked journal_refile_buffer call, the caller isn't going to be
 * doing anything else to the buffer so we need to do the cleanup
 * ourselves to avoid a jh leak.
 *
 * *** The journal_head may be freed by this call! ***
 */
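The comments above document the JBD handle/buffer API as ext3 drives it. The following is a minimal sketch of that calling sequence, not code from this patch; the single reserved credit, the helper name update_one_block, and the error handling are illustrative assumptions.

/* Hedged sketch: typical JBD usage around one metadata buffer. */
static int update_one_block(journal_t *journal, struct buffer_head *bh)
{
	handle_t *handle = journal_start(journal, 1);	/* reserve 1 buffer credit */
	int err;

	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = journal_get_write_access(handle, bh);	/* declare intent to modify bh */
	if (!err) {
		/* ... modify bh->b_data here ... */
		err = journal_dirty_metadata(handle, bh); /* file bh on the metadata list */
	}
	journal_stop(handle);				/* give back credits; may trigger commit */
	return err;
}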
diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c
index 93068697a9bf..f5cf9c93e243 100644
--- a/fs/jffs/inode-v23.c
+++ b/fs/jffs/inode-v23.c
@@ -364,12 +364,11 @@ jffs_new_inode(const struct inode * dir, struct jffs_raw_inode *raw_inode,
 	inode->i_ctime.tv_nsec = 0;
 	inode->i_mtime.tv_nsec = 0;
 	inode->i_atime.tv_nsec = 0;
-	inode->i_blksize = PAGE_SIZE;
 	inode->i_blocks = (inode->i_size + 511) >> 9;

 	f = jffs_find_file(c, raw_inode->ino);

-	inode->u.generic_ip = (void *)f;
+	inode->i_private = (void *)f;
 	insert_inode_hash(inode);

 	return inode;
@@ -442,7 +441,7 @@ jffs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	});

 	result = -ENOTDIR;
-	if (!(old_dir_f = (struct jffs_file *)old_dir->u.generic_ip)) {
+	if (!(old_dir_f = old_dir->i_private)) {
 		D(printk("jffs_rename(): Old dir invalid.\n"));
 		goto jffs_rename_end;
 	}
@@ -456,7 +455,7 @@ jffs_rename(struct inode *old_dir, struct dentry *old_dentry,

 	/* Find the new directory. */
 	result = -ENOTDIR;
-	if (!(new_dir_f = (struct jffs_file *)new_dir->u.generic_ip)) {
+	if (!(new_dir_f = new_dir->i_private)) {
 		D(printk("jffs_rename(): New dir invalid.\n"));
 		goto jffs_rename_end;
 	}
@@ -593,7 +592,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 		}
 		else {
 			ddino = ((struct jffs_file *)
-				 inode->u.generic_ip)->pino;
+				 inode->i_private)->pino;
 		}
 		D3(printk("jffs_readdir(): \"..\" %u\n", ddino));
 		if (filldir(dirent, "..", 2, filp->f_pos, ddino, DT_DIR) < 0) {
@@ -604,7 +603,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 		}
 		filp->f_pos++;
 	}
-	f = ((struct jffs_file *)inode->u.generic_ip)->children;
+	f = ((struct jffs_file *)inode->i_private)->children;

 	j = 2;
 	while(f && (f->deleted || j++ < filp->f_pos )) {
@@ -652,7 +651,7 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 	lock_kernel();

 	D3({
-		char *s = (char *)kmalloc(len + 1, GFP_KERNEL);
+		char *s = kmalloc(len + 1, GFP_KERNEL);
 		memcpy(s, name, len);
 		s[len] = '\0';
 		printk("jffs_lookup(): dir: 0x%p, name: \"%s\"\n", dir, s);
@@ -668,7 +667,7 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 	}

 	r = -EACCES;
-	if (!(d = (struct jffs_file *)dir->u.generic_ip)) {
+	if (!(d = (struct jffs_file *)dir->i_private)) {
 		D(printk("jffs_lookup(): No such inode! (%lu)\n",
 			 dir->i_ino));
 		goto jffs_lookup_end;
@@ -739,7 +738,7 @@ jffs_do_readpage_nolock(struct file *file, struct page *page)
 	unsigned long read_len;
 	int result;
 	struct inode *inode = (struct inode*)page->mapping->host;
-	struct jffs_file *f = (struct jffs_file *)inode->u.generic_ip;
+	struct jffs_file *f = (struct jffs_file *)inode->i_private;
 	struct jffs_control *c = (struct jffs_control *)inode->i_sb->s_fs_info;
 	int r;
 	loff_t offset;
@@ -828,7 +827,7 @@ jffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 	});

 	lock_kernel();
-	dir_f = (struct jffs_file *)dir->u.generic_ip;
+	dir_f = dir->i_private;

 	ASSERT(if (!dir_f) {
 		printk(KERN_ERR "jffs_mkdir(): No reference to a "
@@ -972,7 +971,7 @@ jffs_remove(struct inode *dir, struct dentry *dentry, int type)
 		kfree(_name);
 	});

-	dir_f = (struct jffs_file *) dir->u.generic_ip;
+	dir_f = dir->i_private;
 	c = dir_f->c;

 	result = -ENOENT;
@@ -1082,7 +1081,7 @@ jffs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
 	if (!old_valid_dev(rdev))
 		return -EINVAL;
 	lock_kernel();
-	dir_f = (struct jffs_file *)dir->u.generic_ip;
+	dir_f = dir->i_private;
 	c = dir_f->c;

 	D3(printk (KERN_NOTICE "mknod(): down biglock\n"));
@@ -1173,8 +1172,8 @@ jffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
 	lock_kernel();
 	D1({
 		int len = dentry->d_name.len;
-		char *_name = (char *)kmalloc(len + 1, GFP_KERNEL);
-		char *_symname = (char *)kmalloc(symname_len + 1, GFP_KERNEL);
+		char *_name = kmalloc(len + 1, GFP_KERNEL);
+		char *_symname = kmalloc(symname_len + 1, GFP_KERNEL);
 		memcpy(_name, dentry->d_name.name, len);
 		_name[len] = '\0';
 		memcpy(_symname, symname, symname_len);
@@ -1186,7 +1185,7 @@ jffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
 		kfree(_symname);
 	});

-	dir_f = (struct jffs_file *)dir->u.generic_ip;
+	dir_f = dir->i_private;
 	ASSERT(if (!dir_f) {
 		printk(KERN_ERR "jffs_symlink(): No reference to a "
 		       "jffs_file struct in inode.\n");
@@ -1282,14 +1281,14 @@ jffs_create(struct inode *dir, struct dentry *dentry, int mode,
 	lock_kernel();
 	D1({
 		int len = dentry->d_name.len;
-		char *s = (char *)kmalloc(len + 1, GFP_KERNEL);
+		char *s = kmalloc(len + 1, GFP_KERNEL);
 		memcpy(s, dentry->d_name.name, len);
 		s[len] = '\0';
 		printk("jffs_create(): dir: 0x%p, name: \"%s\"\n", dir, s);
 		kfree(s);
 	});

-	dir_f = (struct jffs_file *)dir->u.generic_ip;
+	dir_f = dir->i_private;
 	ASSERT(if (!dir_f) {
 		printk(KERN_ERR "jffs_create(): No reference to a "
 		       "jffs_file struct in inode.\n");
@@ -1403,9 +1402,9 @@ jffs_file_write(struct file *filp, const char *buf, size_t count,
 		goto out_isem;
 	}

-	if (!(f = (struct jffs_file *)inode->u.generic_ip)) {
-		D(printk("jffs_file_write(): inode->u.generic_ip = 0x%p\n",
-			inode->u.generic_ip));
+	if (!(f = inode->i_private)) {
+		D(printk("jffs_file_write(): inode->i_private = 0x%p\n",
+			inode->i_private));
 		goto out_isem;
 	}

@@ -1693,7 +1692,7 @@ jffs_read_inode(struct inode *inode)
 		mutex_unlock(&c->fmc->biglock);
 		return;
 	}
-	inode->u.generic_ip = (void *)f;
+	inode->i_private = f;
 	inode->i_mode = f->mode;
 	inode->i_nlink = f->nlink;
 	inode->i_uid = f->uid;
@@ -1706,7 +1705,6 @@ jffs_read_inode(struct inode *inode)
 	inode->i_mtime.tv_nsec =
 	inode->i_ctime.tv_nsec = 0;

-	inode->i_blksize = PAGE_SIZE;
 	inode->i_blocks = (inode->i_size + 511) >> 9;
 	if (S_ISREG(inode->i_mode)) {
 		inode->i_op = &jffs_file_inode_operations;
@@ -1748,7 +1746,7 @@ jffs_delete_inode(struct inode *inode)
 	lock_kernel();
 	inode->i_size = 0;
 	inode->i_blocks = 0;
-	inode->u.generic_ip = NULL;
+	inode->i_private = NULL;
 	clear_inode(inode);
 	if (inode->i_nlink == 0) {
 		c = (struct jffs_control *) inode->i_sb->s_fs_info;
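The jffs hunks above move per-inode private data from inode->u.generic_ip to the dedicated i_private pointer. A minimal sketch of the resulting pattern, using a hypothetical struct foo_inode_info purely for illustration:

/* Hedged sketch: stashing and retrieving filesystem-private inode data. */
struct foo_inode_info {
	int example_field;			/* hypothetical field */
};

static void foo_attach(struct inode *inode, struct foo_inode_info *fi)
{
	inode->i_private = fi;			/* was: inode->u.generic_ip = fi; */
}

static struct foo_inode_info *foo_get(struct inode *inode)
{
	return inode->i_private;		/* void *, so no cast is needed in C */
}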
diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c
index 9000f1effedf..4a543e114970 100644
--- a/fs/jffs/intrep.c
+++ b/fs/jffs/intrep.c
@@ -488,13 +488,11 @@ jffs_create_file(struct jffs_control *c,
 {
 	struct jffs_file *f;

-	if (!(f = (struct jffs_file *)kmalloc(sizeof(struct jffs_file),
-					      GFP_KERNEL))) {
+	if (!(f = kzalloc(sizeof(*f), GFP_KERNEL))) {
 		D(printk("jffs_create_file(): Failed!\n"));
 		return NULL;
 	}
 	no_jffs_file++;
-	memset(f, 0, sizeof(struct jffs_file));
 	f->ino = raw_inode->ino;
 	f->pino = raw_inode->pino;
 	f->nlink = raw_inode->nlink;
@@ -516,7 +514,7 @@ jffs_create_control(struct super_block *sb)

 	D2(printk("jffs_create_control()\n"));

-	if (!(c = (struct jffs_control *)kmalloc(s, GFP_KERNEL))) {
+	if (!(c = kmalloc(s, GFP_KERNEL))) {
 		goto fail_control;
 	}
 	DJM(no_jffs_control++);
@@ -524,7 +522,7 @@ jffs_create_control(struct super_block *sb)
 	c->gc_task = NULL;
 	c->hash_len = JFFS_HASH_SIZE;
 	s = sizeof(struct list_head) * c->hash_len;
-	if (!(c->hash = (struct list_head *)kmalloc(s, GFP_KERNEL))) {
+	if (!(c->hash = kmalloc(s, GFP_KERNEL))) {
 		goto fail_hash;
 	}
 	DJM(no_hash++);
@@ -593,8 +591,7 @@ jffs_add_virtual_root(struct jffs_control *c)
 	D2(printk("jffs_add_virtual_root(): "
 		  "Creating a virtual root directory.\n"));

-	if (!(root = (struct jffs_file *)kmalloc(sizeof(struct jffs_file),
-						 GFP_KERNEL))) {
+	if (!(root = kmalloc(sizeof(struct jffs_file), GFP_KERNEL))) {
 		return -ENOMEM;
 	}
 	no_jffs_file++;
diff --git a/fs/jffs/jffs_fm.c b/fs/jffs/jffs_fm.c
index 7d8ca1aeace2..29b68d939bd9 100644
--- a/fs/jffs/jffs_fm.c
+++ b/fs/jffs/jffs_fm.c
@@ -94,8 +94,7 @@ jffs_build_begin(struct jffs_control *c, int unit)
 	struct mtd_info *mtd;

 	D3(printk("jffs_build_begin()\n"));
-	fmc = (struct jffs_fmcontrol *)kmalloc(sizeof(struct jffs_fmcontrol),
-					       GFP_KERNEL);
+	fmc = kmalloc(sizeof(*fmc), GFP_KERNEL);
 	if (!fmc) {
 		D(printk("jffs_build_begin(): Allocation of "
 			 "struct jffs_fmcontrol failed!\n"));
@@ -486,8 +485,7 @@ jffs_add_node(struct jffs_node *node)

 	D3(printk("jffs_add_node(): ino = %u\n", node->ino));

-	ref = (struct jffs_node_ref *)kmalloc(sizeof(struct jffs_node_ref),
-					      GFP_KERNEL);
+	ref = kmalloc(sizeof(*ref), GFP_KERNEL);
 	if (!ref)
 		return -ENOMEM;

diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 4780f82825d6..72d9909d95ff 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -263,7 +263,6 @@ void jffs2_read_inode (struct inode *inode)

 	inode->i_nlink = f->inocache->nlink;

-	inode->i_blksize = PAGE_SIZE;
 	inode->i_blocks = (inode->i_size + 511) >> 9;

 	switch (inode->i_mode & S_IFMT) {
@@ -449,7 +448,6 @@ struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_i
 	inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
 	ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));

-	inode->i_blksize = PAGE_SIZE;
 	inode->i_blocks = 0;
 	inode->i_size = 0;

diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 68e3953419b4..6de374513c01 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -119,10 +119,9 @@ static int jffs2_get_sb_mtd(struct file_system_type *fs_type,
 	struct jffs2_sb_info *c;
 	int ret;

-	c = kmalloc(sizeof(*c), GFP_KERNEL);
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
 		return -ENOMEM;
-	memset(c, 0, sizeof(*c));
 	c->mtd = mtd;

 	sb = sget(fs_type, jffs2_sb_compare, jffs2_sb_set, c);
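Several hunks above (jffs2, and later lockd, minix and ncpfs) collapse a kmalloc() followed by memset() into a single kzalloc(). A minimal sketch of the before/after shape, assuming some struct foo *p declared by the caller:

/* Before: allocate, then zero by hand. */
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (!p)
	return -ENOMEM;
memset(p, 0, sizeof(*p));

/* After: one call returns zeroed memory, or NULL on failure. */
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
	return -ENOMEM;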
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index 4d52593a5fc6..4c74f0944f7e 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -468,7 +468,7 @@ int extRecord(struct inode *ip, xad_t * xp)
 int extFill(struct inode *ip, xad_t * xp)
 {
 	int rc, nbperpage = JFS_SBI(ip->i_sb)->nbperpage;
-	s64 blkno = offsetXAD(xp) >> ip->i_blksize;
+	s64 blkno = offsetXAD(xp) >> ip->i_blkbits;

 //	assert(ISSPARSE(ip));

diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index ccbe60aff83d..369d7f39c040 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -3115,7 +3115,6 @@ static int copy_from_dinode(struct dinode * dip, struct inode *ip)
 	ip->i_mtime.tv_nsec = le32_to_cpu(dip->di_mtime.tv_nsec);
 	ip->i_ctime.tv_sec = le32_to_cpu(dip->di_ctime.tv_sec);
 	ip->i_ctime.tv_nsec = le32_to_cpu(dip->di_ctime.tv_nsec);
-	ip->i_blksize = ip->i_sb->s_blocksize;
 	ip->i_blocks = LBLK2PBLK(ip->i_sb, le64_to_cpu(dip->di_nblocks));
 	ip->i_generation = le32_to_cpu(dip->di_gen);

diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index 495df402916d..bffaca9ae3a2 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -115,7 +115,6 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
 	}
 	jfs_inode->mode2 |= mode;

-	inode->i_blksize = sb->s_blocksize;
 	inode->i_blocks = 0;
 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
 	jfs_inode->otime = inode->i_ctime.tv_sec;
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index e1e0a6e6ebdf..f5afc129d6b1 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -257,7 +257,7 @@ static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
 	int rc = 0;
 	int xflag;
 	s64 xaddr;
-	sector_t file_blocks = (inode->i_size + inode->i_blksize - 1) >>
+	sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
 			       inode->i_blkbits;

 	if (lblock >= file_blocks)
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index efbb586bed4b..3856efc399c1 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -282,7 +282,7 @@ int txInit(void)
 	TxLockVHWM = (nTxLock * 8) / 10;

 	size = sizeof(struct tblock) * nTxBlock;
-	TxBlock = (struct tblock *) vmalloc(size);
+	TxBlock = vmalloc(size);
 	if (TxBlock == NULL)
 		return -ENOMEM;

@@ -307,7 +307,7 @@ int txInit(void)
 	 * tlock id = 0 is reserved.
 	 */
 	size = sizeof(struct tlock) * nTxLock;
-	TxLock = (struct tlock *) vmalloc(size);
+	TxLock = vmalloc(size);
 	if (TxLock == NULL) {
 		vfree(TxBlock);
 		return -ENOMEM;
diff --git a/fs/libfs.c b/fs/libfs.c
index ac02ea602c3d..8db5afb7b0a7 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -383,7 +383,6 @@ int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files
 		return -ENOMEM;
 	inode->i_mode = S_IFDIR | 0755;
 	inode->i_uid = inode->i_gid = 0;
-	inode->i_blksize = PAGE_CACHE_SIZE;
 	inode->i_blocks = 0;
 	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 	inode->i_op = &simple_dir_inode_operations;
@@ -405,7 +404,6 @@ int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files
 			goto out;
 		inode->i_mode = S_IFREG | files->mode;
 		inode->i_uid = inode->i_gid = 0;
-		inode->i_blksize = PAGE_CACHE_SIZE;
 		inode->i_blocks = 0;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 		inode->i_fop = files->ops;
@@ -547,7 +545,7 @@ int simple_attr_open(struct inode *inode, struct file *file,

 	attr->get = get;
 	attr->set = set;
-	attr->data = inode->u.generic_ip;
+	attr->data = inode->i_private;
 	attr->fmt = fmt;
 	mutex_init(&attr->mutex);

diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 52774feab93f..f95cc3f3c42d 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -160,7 +160,7 @@ static void nlmclnt_prepare_reclaim(struct nlm_host *host)
 	 */
 	list_splice_init(&host->h_granted, &host->h_reclaim);

-	dprintk("NLM: reclaiming locks for host %s", host->h_name);
+	dprintk("NLM: reclaiming locks for host %s\n", host->h_name);
 }

 static void nlmclnt_finish_reclaim(struct nlm_host *host)
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 50dbb67ae0c4..271e2165fff6 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -100,7 +100,7 @@ static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_
 		res = __nlm_find_lockowner(host, owner);
 		if (res == NULL) {
 			spin_unlock(&host->h_lock);
-			new = (struct nlm_lockowner *)kmalloc(sizeof(*new), GFP_KERNEL);
+			new = kmalloc(sizeof(*new), GFP_KERNEL);
 			spin_lock(&host->h_lock);
 			res = __nlm_find_lockowner(host, owner);
 			if (res == NULL && new != NULL) {
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 703fb038c813..a0d0b58ce7a4 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -99,9 +99,9 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
 	/* Ooops, no host found, create it */
 	dprintk("lockd: creating host entry\n");

-	if (!(host = (struct nlm_host *) kmalloc(sizeof(*host), GFP_KERNEL)))
+	host = kzalloc(sizeof(*host), GFP_KERNEL);
+	if (!host)
 		goto nohost;
-	memset(host, 0, sizeof(*host));

 	addr = sin->sin_addr.s_addr;
 	sprintf(host->h_name, "%u.%u.%u.%u", NIPQUAD(addr));
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index 01b4db9e5466..a92dd98f8401 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -100,11 +100,10 @@ nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
 	nlm_debug_print_fh("creating file for", f);

 	nfserr = nlm_lck_denied_nolocks;
-	file = (struct nlm_file *) kmalloc(sizeof(*file), GFP_KERNEL);
+	file = kzalloc(sizeof(*file), GFP_KERNEL);
 	if (!file)
 		goto out_unlock;

-	memset(file, 0, sizeof(*file));
 	memcpy(&file->f_handle, f, sizeof(struct nfs_fh));
 	file->f_hash = hash;
 	init_MUTEX(&file->f_sema);
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
index 4a6abc49418e..df6b1075b549 100644
--- a/fs/minix/bitmap.c
+++ b/fs/minix/bitmap.c
@@ -254,7 +254,7 @@ struct inode * minix_new_inode(const struct inode * dir, int * error)
 	inode->i_gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid;
 	inode->i_ino = j;
 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
-	inode->i_blocks = inode->i_blksize = 0;
+	inode->i_blocks = 0;
 	memset(&minix_i(inode)->u, 0, sizeof(minix_i(inode)->u));
 	insert_inode_hash(inode);
 	mark_inode_dirty(inode);
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 330ff9fc7cf0..c11a4b9fb863 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -90,8 +90,7 @@ static int init_inodecache(void)

 static void destroy_inodecache(void)
 {
-	if (kmem_cache_destroy(minix_inode_cachep))
-		printk(KERN_INFO "minix_inode_cache: not all structures were freed\n");
+	kmem_cache_destroy(minix_inode_cachep);
 }

 static struct super_operations minix_sops = {
@@ -145,11 +144,10 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
 	struct inode *root_inode;
 	struct minix_sb_info *sbi;

-	sbi = kmalloc(sizeof(struct minix_sb_info), GFP_KERNEL);
+	sbi = kzalloc(sizeof(struct minix_sb_info), GFP_KERNEL);
 	if (!sbi)
 		return -ENOMEM;
 	s->s_fs_info = sbi;
-	memset(sbi, 0, sizeof(struct minix_sb_info));

 	/* N.B. These should be compile-time tests.
 	   Unfortunately that is impossible. */
@@ -207,10 +205,9 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
 	if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
 		goto out_illegal_sb;
 	i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh);
-	map = kmalloc(i, GFP_KERNEL);
+	map = kzalloc(i, GFP_KERNEL);
 	if (!map)
 		goto out_no_map;
-	memset(map, 0, i);
 	sbi->s_imap = &map[0];
 	sbi->s_zmap = &map[sbi->s_imap_blocks];

@@ -399,7 +396,7 @@ static void V1_minix_read_inode(struct inode * inode)
 	inode->i_mtime.tv_nsec = 0;
 	inode->i_atime.tv_nsec = 0;
 	inode->i_ctime.tv_nsec = 0;
-	inode->i_blocks = inode->i_blksize = 0;
+	inode->i_blocks = 0;
 	for (i = 0; i < 9; i++)
 		minix_inode->u.i1_data[i] = raw_inode->i_zone[i];
 	minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
@@ -432,7 +429,7 @@ static void V2_minix_read_inode(struct inode * inode)
 	inode->i_mtime.tv_nsec = 0;
 	inode->i_atime.tv_nsec = 0;
 	inode->i_ctime.tv_nsec = 0;
-	inode->i_blocks = inode->i_blksize = 0;
+	inode->i_blocks = 0;
 	for (i = 0; i < 10; i++)
 		minix_inode->u.i2_data[i] = raw_inode->i_zone[i];
 	minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
diff --git a/fs/namei.c b/fs/namei.c
index 6b591c01b09f..808e4ea2bb94 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -372,6 +372,30 @@ void release_open_intent(struct nameidata *nd)
 		fput(nd->intent.open.file);
 }

+static inline struct dentry *
+do_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+	int status = dentry->d_op->d_revalidate(dentry, nd);
+	if (unlikely(status <= 0)) {
+		/*
+		 * The dentry failed validation.
+		 * If d_revalidate returned 0 attempt to invalidate
+		 * the dentry otherwise d_revalidate is asking us
+		 * to return a fail status.
+		 */
+		if (!status) {
+			if (!d_invalidate(dentry)) {
+				dput(dentry);
+				dentry = NULL;
+			}
+		} else {
+			dput(dentry);
+			dentry = ERR_PTR(status);
+		}
+	}
+	return dentry;
+}
+
 /*
 * Internal lookup() using the new generic dcache.
 * SMP-safe
@@ -386,12 +410,9 @@ static struct dentry * cached_lookup(struct dentry * parent, struct qstr * name,
 	if (!dentry)
 		dentry = d_lookup(parent, name);

-	if (dentry && dentry->d_op && dentry->d_op->d_revalidate) {
-		if (!dentry->d_op->d_revalidate(dentry, nd) && !d_invalidate(dentry)) {
-			dput(dentry);
-			dentry = NULL;
-		}
-	}
+	if (dentry && dentry->d_op && dentry->d_op->d_revalidate)
+		dentry = do_revalidate(dentry, nd);
+
 	return dentry;
 }

@@ -484,10 +505,9 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, s
 	 */
 	mutex_unlock(&dir->i_mutex);
 	if (result->d_op && result->d_op->d_revalidate) {
-		if (!result->d_op->d_revalidate(result, nd) && !d_invalidate(result)) {
-			dput(result);
-			result = ERR_PTR(-ENOENT);
-		}
+		result = do_revalidate(result, nd);
+		if (!result)
+			result = ERR_PTR(-ENOENT);
 	}
 	return result;
 }
@@ -767,12 +787,12 @@ need_lookup:
 	goto done;

 need_revalidate:
-	if (dentry->d_op->d_revalidate(dentry, nd))
-		goto done;
-	if (d_invalidate(dentry))
-		goto done;
-	dput(dentry);
-	goto need_lookup;
+	dentry = do_revalidate(dentry, nd);
+	if (!dentry)
+		goto need_lookup;
+	if (IS_ERR(dentry))
+		goto fail;
+	goto done;

 fail:
 	return PTR_ERR(dentry);
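The do_revalidate() helper added above centralises the handling of d_revalidate() return values: a positive result keeps the dentry, zero asks the VFS to try d_invalidate(), and a negative value is propagated as an error. A minimal sketch of a filesystem-side d_revalidate hook that this helper would end up calling; the foo_ names and the always-valid behaviour are illustrative assumptions, not code from this patch:

/* Hedged sketch: trivial d_revalidate hook as seen from the VFS side. */
static int foo_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	if (!dentry->d_inode)
		return 0;	/* negative dentry: let the VFS invalidate it */
	return 1;		/* dentry still considered valid */
}

static struct dentry_operations foo_dentry_ops = {
	.d_revalidate	= foo_d_revalidate,
};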
diff --git a/fs/namespace.c b/fs/namespace.c
index fa7ed6a9fc2d..36d180858136 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -17,6 +17,7 @@
 #include <linux/acct.h>
 #include <linux/capability.h>
 #include <linux/module.h>
+#include <linux/sysfs.h>
 #include <linux/seq_file.h>
 #include <linux/namespace.h>
 #include <linux/namei.h>
@@ -28,15 +29,6 @@

 extern int __init init_rootfs(void);

-#ifdef CONFIG_SYSFS
-extern int __init sysfs_init(void);
-#else
-static inline int sysfs_init(void)
-{
-	return 0;
-}
-#endif
-
 /* spinlock for vfsmount related operations, inplace of dcache_lock */
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);

diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 1ddf77b0b825..42e3bef270c9 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -81,8 +81,7 @@ static int init_inodecache(void)

 static void destroy_inodecache(void)
 {
-	if (kmem_cache_destroy(ncp_inode_cachep))
-		printk(KERN_INFO "ncp_inode_cache: not all structures were freed\n");
+	kmem_cache_destroy(ncp_inode_cachep);
 }

 static int ncp_remount(struct super_block *sb, int *flags, char* data)
@@ -224,7 +223,6 @@ static void ncp_set_attr(struct inode *inode, struct ncp_entry_info *nwinfo)
 	inode->i_nlink = 1;
 	inode->i_uid = server->m.uid;
 	inode->i_gid = server->m.gid;
-	inode->i_blksize = NCP_BLOCK_SIZE;

 	ncp_update_dates(inode, &nwinfo->i);
 	ncp_update_inode(inode, nwinfo);
@@ -411,11 +409,10 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
 #endif
 	struct ncp_entry_info finfo;

-	server = kmalloc(sizeof(struct ncp_server), GFP_KERNEL);
+	server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
 	if (!server)
 		return -ENOMEM;
 	sb->s_fs_info = server;
-	memset(server, 0, sizeof(struct ncp_server));

 	error = -EFAULT;
 	if (raw_data == NULL)
diff --git a/fs/ncpfs/symlink.c b/fs/ncpfs/symlink.c
index ca92c2406635..e3d26c1bd105 100644
--- a/fs/ncpfs/symlink.c
+++ b/fs/ncpfs/symlink.c
@@ -48,7 +48,7 @@ static int ncp_symlink_readpage(struct file *file, struct page *page)
 	char *buf = kmap(page);

 	error = -ENOMEM;
-	rawlink=(char *)kmalloc(NCP_MAX_SYMLINK_SIZE, GFP_KERNEL);
+	rawlink = kmalloc(NCP_MAX_SYMLINK_SIZE, GFP_KERNEL);
 	if (!rawlink)
 		goto fail;

@@ -126,7 +126,7 @@ int ncp_symlink(struct inode *dir, struct dentry *dentry, const char *symname) {
 		/* EPERM is returned by VFS if symlink procedure does not exist */
 		return -EPERM;

-	rawlink=(char *)kmalloc(NCP_MAX_SYMLINK_SIZE, GFP_KERNEL);
+	rawlink = kmalloc(NCP_MAX_SYMLINK_SIZE, GFP_KERNEL);
 	if (!rawlink)
 		return -ENOMEM;

diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 57133678db16..841c99a9b11c 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -20,11 +20,6 @@
 #include "delegation.h"
 #include "internal.h"

-static struct nfs_delegation *nfs_alloc_delegation(void)
-{
-	return (struct nfs_delegation *)kmalloc(sizeof(struct nfs_delegation), GFP_KERNEL);
-}
-
 static void nfs_free_delegation(struct nfs_delegation *delegation)
 {
 	if (delegation->cred)
@@ -124,7 +119,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
 	if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR)))
 		__nfs_revalidate_inode(NFS_SERVER(inode), inode);

-	delegation = nfs_alloc_delegation();
+	delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
 	if (delegation == NULL)
 		return -ENOMEM;
 	memcpy(delegation->stateid.data, res->delegation.data,
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 76ca1cbc38f9..377839bed172 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -855,6 +855,5 @@ int __init nfs_init_directcache(void)
 */
 void nfs_destroy_directcache(void)
 {
-	if (kmem_cache_destroy(nfs_direct_cachep))
-		printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
+	kmem_cache_destroy(nfs_direct_cachep);
 }
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index e8c143d182c4..bc9376ca86cd 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -277,10 +277,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
 		 * report the blocks in 512byte units
 		 */
 		inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
-		inode->i_blksize = inode->i_sb->s_blocksize;
 	} else {
 		inode->i_blocks = fattr->du.nfs2.blocks;
-		inode->i_blksize = fattr->du.nfs2.blocksize;
 	}
 	nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
 	nfsi->attrtimeo_timestamp = jiffies;
@@ -443,7 +441,7 @@ static struct nfs_open_context *alloc_nfs_open_context(struct vfsmount *mnt, str
 {
 	struct nfs_open_context *ctx;

-	ctx = (struct nfs_open_context *)kmalloc(sizeof(*ctx), GFP_KERNEL);
+	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 	if (ctx != NULL) {
 		atomic_set(&ctx->count, 1);
 		ctx->dentry = dget(dentry);
@@ -969,10 +967,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 		 * report the blocks in 512byte units
 		 */
 		inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
-		inode->i_blksize = inode->i_sb->s_blocksize;
 	} else {
 		inode->i_blocks = fattr->du.nfs2.blocks;
-		inode->i_blksize = fattr->du.nfs2.blocksize;
 	}

 	if ((fattr->valid & NFS_ATTR_FATTR_V4) != 0 &&
@@ -1134,8 +1130,7 @@ static int __init nfs_init_inodecache(void)

 static void nfs_destroy_inodecache(void)
 {
-	if (kmem_cache_destroy(nfs_inode_cachep))
-		printk(KERN_INFO "nfs_inode_cache: not all structures were freed\n");
+	kmem_cache_destroy(nfs_inode_cachep);
 }

 /*
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 77b00684894d..60408646176b 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -26,6 +26,11 @@ LIST_HEAD(nfs_automount_list);
26static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list); 26static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list);
27int nfs_mountpoint_expiry_timeout = 500 * HZ; 27int nfs_mountpoint_expiry_timeout = 500 * HZ;
28 28
29static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
30 const struct dentry *dentry,
31 struct nfs_fh *fh,
32 struct nfs_fattr *fattr);
33
29/* 34/*
30 * nfs_path - reconstruct the path given an arbitrary dentry 35 * nfs_path - reconstruct the path given an arbitrary dentry
31 * @base - arbitrary string to prepend to the path 36 * @base - arbitrary string to prepend to the path
@@ -209,9 +214,10 @@ static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
209 * @fattr - attributes for new root inode 214 * @fattr - attributes for new root inode
210 * 215 *
211 */ 216 */
212struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent, 217static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
213 const struct dentry *dentry, struct nfs_fh *fh, 218 const struct dentry *dentry,
214 struct nfs_fattr *fattr) 219 struct nfs_fh *fh,
220 struct nfs_fattr *fattr)
215{ 221{
216 struct nfs_clone_mount mountdata = { 222 struct nfs_clone_mount mountdata = {
217 .sb = mnt_parent->mnt_sb, 223 .sb = mnt_parent->mnt_sb,
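
nfs_do_submount() keeps its definition lower in namespace.c but loses external linkage, so the hunk at the top of the file adds a matching static forward declaration for the earlier callers; keeping the helper static confines the symbol to this translation unit. The shape of that refactor in miniature, with placeholder names:

    /* Forward declaration lets earlier code in the file call the helper. */
    static int example_helper(int arg);

    int example_entry(int arg)
    {
            return example_helper(arg);
    }

    /* Definition appears later in the same file. */
    static int example_helper(int arg)
    {
            return arg * 2;
    }
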
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index f8688eaa0001..3b234d4601e7 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -449,7 +449,7 @@ nfs3_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir, struct qstr
449 struct nfs_fattr res; 449 struct nfs_fattr res;
450 } *ptr; 450 } *ptr;
451 451
452 ptr = (struct unlinkxdr *)kmalloc(sizeof(*ptr), GFP_KERNEL); 452 ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
453 if (!ptr) 453 if (!ptr)
454 return -ENOMEM; 454 return -ENOMEM;
455 ptr->arg.fh = NFS_FH(dir->d_inode); 455 ptr->arg.fh = NFS_FH(dir->d_inode);
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 36e902a88ca1..829af323f288 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -392,7 +392,6 @@ int __init nfs_init_nfspagecache(void)
392 392
393void nfs_destroy_nfspagecache(void) 393void nfs_destroy_nfspagecache(void)
394{ 394{
395 if (kmem_cache_destroy(nfs_page_cachep)) 395 kmem_cache_destroy(nfs_page_cachep);
396 printk(KERN_INFO "nfs_page: not all structures were freed\n");
397} 396}
398 397
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 630e50647bbb..4529cc4f3f8f 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -352,7 +352,7 @@ nfs_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir, struct qstr *
352{ 352{
353 struct nfs_diropargs *arg; 353 struct nfs_diropargs *arg;
354 354
355 arg = (struct nfs_diropargs *)kmalloc(sizeof(*arg), GFP_KERNEL); 355 arg = kmalloc(sizeof(*arg), GFP_KERNEL);
356 if (!arg) 356 if (!arg)
357 return -ENOMEM; 357 return -ENOMEM;
358 arg->fh = NFS_FH(dir->d_inode); 358 arg->fh = NFS_FH(dir->d_inode);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 69f1549da2b9..c2e49c397a27 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -737,6 +737,5 @@ int __init nfs_init_readpagecache(void)
737void nfs_destroy_readpagecache(void) 737void nfs_destroy_readpagecache(void)
738{ 738{
739 mempool_destroy(nfs_rdata_mempool); 739 mempool_destroy(nfs_rdata_mempool);
740 if (kmem_cache_destroy(nfs_rdata_cachep)) 740 kmem_cache_destroy(nfs_rdata_cachep);
741 printk(KERN_INFO "nfs_read_data: not all structures were freed\n");
742} 741}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c12effb46fe5..b674462793d3 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1565,7 +1565,6 @@ void nfs_destroy_writepagecache(void)
1565{ 1565{
1566 mempool_destroy(nfs_commit_mempool); 1566 mempool_destroy(nfs_commit_mempool);
1567 mempool_destroy(nfs_wdata_mempool); 1567 mempool_destroy(nfs_wdata_mempool);
1568 if (kmem_cache_destroy(nfs_wdata_cachep)) 1568 kmem_cache_destroy(nfs_wdata_cachep);
1569 printk(KERN_INFO "nfs_write_data: not all structures were freed\n");
1570} 1569}
1571 1570
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index bea6b9478114..b1902ebaab41 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -573,10 +573,9 @@ idmap_lookup(struct svc_rqst *rqstp,
573 struct idmap_defer_req *mdr; 573 struct idmap_defer_req *mdr;
574 int ret; 574 int ret;
575 575
576 mdr = kmalloc(sizeof(*mdr), GFP_KERNEL); 576 mdr = kzalloc(sizeof(*mdr), GFP_KERNEL);
577 if (!mdr) 577 if (!mdr)
578 return -ENOMEM; 578 return -ENOMEM;
579 memset(mdr, 0, sizeof(*mdr));
580 atomic_set(&mdr->count, 1); 579 atomic_set(&mdr->count, 1);
581 init_waitqueue_head(&mdr->waitq); 580 init_waitqueue_head(&mdr->waitq);
582 mdr->req.defer = idmap_defer; 581 mdr->req.defer = idmap_defer;
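
idmap_lookup() shows the third recurring cleanup in this section: an allocation followed immediately by memset(..., 0, size) collapses into a single kzalloc(), which returns zeroed memory. The same substitution appears below in nfs4state.c, the EFI partition parser, /proc/kcore, qnx4, sysv, udf and ufs. A minimal sketch, again with a hypothetical structure:

    #include <linux/slab.h>

    struct example_req {                    /* hypothetical request structure */
            int count;
            void *payload;
    };

    static struct example_req *example_alloc_req(void)
    {
            /* One call replaces kmalloc() + memset(ptr, 0, sizeof(*ptr)). */
            return kzalloc(sizeof(struct example_req), GFP_KERNEL);
    }
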
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 9daa0b9feb8d..ebcf226a9e4a 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -339,8 +339,7 @@ alloc_client(struct xdr_netobj name)
339{ 339{
340 struct nfs4_client *clp; 340 struct nfs4_client *clp;
341 341
342 if ((clp = kmalloc(sizeof(struct nfs4_client), GFP_KERNEL))!= NULL) { 342 if ((clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL))!= NULL) {
343 memset(clp, 0, sizeof(*clp));
344 if ((clp->cl_name.data = kmalloc(name.len, GFP_KERNEL)) != NULL) { 343 if ((clp->cl_name.data = kmalloc(name.len, GFP_KERNEL)) != NULL) {
345 memcpy(clp->cl_name.data, name.data, name.len); 344 memcpy(clp->cl_name.data, name.data, name.len);
346 clp->cl_name.len = name.len; 345 clp->cl_name.len = name.len;
@@ -1006,13 +1005,10 @@ alloc_init_file(struct inode *ino)
1006static void 1005static void
1007nfsd4_free_slab(kmem_cache_t **slab) 1006nfsd4_free_slab(kmem_cache_t **slab)
1008{ 1007{
1009 int status;
1010
1011 if (*slab == NULL) 1008 if (*slab == NULL)
1012 return; 1009 return;
1013 status = kmem_cache_destroy(*slab); 1010 kmem_cache_destroy(*slab);
1014 *slab = NULL; 1011 *slab = NULL;
1015 WARN_ON(status);
1016} 1012}
1017 1013
1018static void 1014static void
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index d1e2c6f9f05e..85c36b8ca452 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -1149,8 +1149,7 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
1149 * Allocate a buffer to store the current name being processed 1149 * Allocate a buffer to store the current name being processed
1150 * converted to format determined by current NLS. 1150 * converted to format determined by current NLS.
1151 */ 1151 */
1152 name = (u8*)kmalloc(NTFS_MAX_NAME_LEN * NLS_MAX_CHARSET_SIZE + 1, 1152 name = kmalloc(NTFS_MAX_NAME_LEN * NLS_MAX_CHARSET_SIZE + 1, GFP_NOFS);
1153 GFP_NOFS);
1154 if (unlikely(!name)) { 1153 if (unlikely(!name)) {
1155 err = -ENOMEM; 1154 err = -ENOMEM;
1156 goto err_out; 1155 goto err_out;
@@ -1191,7 +1190,7 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
1191 * map the mft record without deadlocking. 1190 * map the mft record without deadlocking.
1192 */ 1191 */
1193 rc = le32_to_cpu(ctx->attr->data.resident.value_length); 1192 rc = le32_to_cpu(ctx->attr->data.resident.value_length);
1194 ir = (INDEX_ROOT*)kmalloc(rc, GFP_NOFS); 1193 ir = kmalloc(rc, GFP_NOFS);
1195 if (unlikely(!ir)) { 1194 if (unlikely(!ir)) {
1196 err = -ENOMEM; 1195 err = -ENOMEM;
1197 goto err_out; 1196 goto err_out;
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index d313f356e66a..933dbd89c2a4 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -137,7 +137,7 @@ static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na)
137 137
138 BUG_ON(!na->name); 138 BUG_ON(!na->name);
139 i = na->name_len * sizeof(ntfschar); 139 i = na->name_len * sizeof(ntfschar);
140 ni->name = (ntfschar*)kmalloc(i + sizeof(ntfschar), GFP_ATOMIC); 140 ni->name = kmalloc(i + sizeof(ntfschar), GFP_ATOMIC);
141 if (!ni->name) 141 if (!ni->name)
142 return -ENOMEM; 142 return -ENOMEM;
143 memcpy(ni->name, na->name, i); 143 memcpy(ni->name, na->name, i);
@@ -556,8 +556,6 @@ static int ntfs_read_locked_inode(struct inode *vi)
556 556
557 /* Setup the generic vfs inode parts now. */ 557 /* Setup the generic vfs inode parts now. */
558 558
559 /* This is the optimal IO size (for stat), not the fs block size. */
560 vi->i_blksize = PAGE_CACHE_SIZE;
561 /* 559 /*
562 * This is for checking whether an inode has changed w.r.t. a file so 560 * This is for checking whether an inode has changed w.r.t. a file so
563 * that the file can be updated if necessary (compare with f_version). 561 * that the file can be updated if necessary (compare with f_version).
@@ -1234,7 +1232,6 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi)
1234 base_ni = NTFS_I(base_vi); 1232 base_ni = NTFS_I(base_vi);
1235 1233
1236 /* Just mirror the values from the base inode. */ 1234 /* Just mirror the values from the base inode. */
1237 vi->i_blksize = base_vi->i_blksize;
1238 vi->i_version = base_vi->i_version; 1235 vi->i_version = base_vi->i_version;
1239 vi->i_uid = base_vi->i_uid; 1236 vi->i_uid = base_vi->i_uid;
1240 vi->i_gid = base_vi->i_gid; 1237 vi->i_gid = base_vi->i_gid;
@@ -1504,7 +1501,6 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
1504 ni = NTFS_I(vi); 1501 ni = NTFS_I(vi);
1505 base_ni = NTFS_I(base_vi); 1502 base_ni = NTFS_I(base_vi);
1506 /* Just mirror the values from the base inode. */ 1503 /* Just mirror the values from the base inode. */
1507 vi->i_blksize = base_vi->i_blksize;
1508 vi->i_version = base_vi->i_version; 1504 vi->i_version = base_vi->i_version;
1509 vi->i_uid = base_vi->i_uid; 1505 vi->i_uid = base_vi->i_uid;
1510 vi->i_gid = base_vi->i_gid; 1506 vi->i_gid = base_vi->i_gid;
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 2438c00ec0ce..584260fd6848 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -331,7 +331,7 @@ map_err_out:
331 ntfs_inode **tmp; 331 ntfs_inode **tmp;
332 int new_size = (base_ni->nr_extents + 4) * sizeof(ntfs_inode *); 332 int new_size = (base_ni->nr_extents + 4) * sizeof(ntfs_inode *);
333 333
334 tmp = (ntfs_inode **)kmalloc(new_size, GFP_NOFS); 334 tmp = kmalloc(new_size, GFP_NOFS);
335 if (unlikely(!tmp)) { 335 if (unlikely(!tmp)) {
336 ntfs_error(base_ni->vol->sb, "Failed to allocate " 336 ntfs_error(base_ni->vol->sb, "Failed to allocate "
337 "internal buffer."); 337 "internal buffer.");
@@ -2638,11 +2638,6 @@ mft_rec_already_initialized:
2638 } 2638 }
2639 vi->i_ino = bit; 2639 vi->i_ino = bit;
2640 /* 2640 /*
2641 * This is the optimal IO size (for stat), not the fs block
2642 * size.
2643 */
2644 vi->i_blksize = PAGE_CACHE_SIZE;
2645 /*
2646 * This is for checking whether an inode has changed w.r.t. a 2641 * This is for checking whether an inode has changed w.r.t. a
2647 * file so that the file can be updated if necessary (compare 2642 * file so that the file can be updated if necessary (compare
2648 * with f_version). 2643 * with f_version).
@@ -2893,7 +2888,7 @@ rollback:
2893 if (!(base_ni->nr_extents & 3)) { 2888 if (!(base_ni->nr_extents & 3)) {
2894 int new_size = (base_ni->nr_extents + 4) * sizeof(ntfs_inode*); 2889 int new_size = (base_ni->nr_extents + 4) * sizeof(ntfs_inode*);
2895 2890
2896 extent_nis = (ntfs_inode**)kmalloc(new_size, GFP_NOFS); 2891 extent_nis = kmalloc(new_size, GFP_NOFS);
2897 if (unlikely(!extent_nis)) { 2892 if (unlikely(!extent_nis)) {
2898 ntfs_error(vol->sb, "Failed to allocate internal " 2893 ntfs_error(vol->sb, "Failed to allocate internal "
2899 "buffer during rollback.%s", es); 2894 "buffer during rollback.%s", es);
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 74e0ee8fce72..6b2712f10dd2 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -3248,32 +3248,14 @@ ictx_err_out:
3248 3248
3249static void __exit exit_ntfs_fs(void) 3249static void __exit exit_ntfs_fs(void)
3250{ 3250{
3251 int err = 0;
3252
3253 ntfs_debug("Unregistering NTFS driver."); 3251 ntfs_debug("Unregistering NTFS driver.");
3254 3252
3255 unregister_filesystem(&ntfs_fs_type); 3253 unregister_filesystem(&ntfs_fs_type);
3256 3254 kmem_cache_destroy(ntfs_big_inode_cache);
3257 if (kmem_cache_destroy(ntfs_big_inode_cache) && (err = 1)) 3255 kmem_cache_destroy(ntfs_inode_cache);
3258 printk(KERN_CRIT "NTFS: Failed to destory %s.\n", 3256 kmem_cache_destroy(ntfs_name_cache);
3259 ntfs_big_inode_cache_name); 3257 kmem_cache_destroy(ntfs_attr_ctx_cache);
3260 if (kmem_cache_destroy(ntfs_inode_cache) && (err = 1)) 3258 kmem_cache_destroy(ntfs_index_ctx_cache);
3261 printk(KERN_CRIT "NTFS: Failed to destory %s.\n",
3262 ntfs_inode_cache_name);
3263 if (kmem_cache_destroy(ntfs_name_cache) && (err = 1))
3264 printk(KERN_CRIT "NTFS: Failed to destory %s.\n",
3265 ntfs_name_cache_name);
3266 if (kmem_cache_destroy(ntfs_attr_ctx_cache) && (err = 1))
3267 printk(KERN_CRIT "NTFS: Failed to destory %s.\n",
3268 ntfs_attr_ctx_cache_name);
3269 if (kmem_cache_destroy(ntfs_index_ctx_cache) && (err = 1))
3270 printk(KERN_CRIT "NTFS: Failed to destory %s.\n",
3271 ntfs_index_ctx_cache_name);
3272 if (err)
3273 printk(KERN_CRIT "NTFS: This causes memory to leak! There is "
3274 "probably a BUG in the driver! Please report "
3275 "you saw this message to "
3276 "linux-ntfs-dev@lists.sourceforge.net\n");
3277 /* Unregister the ntfs sysctls. */ 3259 /* Unregister the ntfs sysctls. */
3278 ntfs_sysctl(0); 3260 ntfs_sysctl(0);
3279} 3261}
diff --git a/fs/ntfs/unistr.c b/fs/ntfs/unistr.c
index b123c0fa6bf6..a1b572196fe4 100644
--- a/fs/ntfs/unistr.c
+++ b/fs/ntfs/unistr.c
@@ -350,7 +350,7 @@ int ntfs_ucstonls(const ntfs_volume *vol, const ntfschar *ins,
350 } 350 }
351 if (!ns) { 351 if (!ns) {
352 ns_len = ins_len * NLS_MAX_CHARSET_SIZE; 352 ns_len = ins_len * NLS_MAX_CHARSET_SIZE;
353 ns = (unsigned char*)kmalloc(ns_len + 1, GFP_NOFS); 353 ns = kmalloc(ns_len + 1, GFP_NOFS);
354 if (!ns) 354 if (!ns)
355 goto mem_err_out; 355 goto mem_err_out;
356 } 356 }
@@ -365,7 +365,7 @@ retry: wc = nls->uni2char(le16_to_cpu(ins[i]), ns + o,
365 else if (wc == -ENAMETOOLONG && ns != *outs) { 365 else if (wc == -ENAMETOOLONG && ns != *outs) {
366 unsigned char *tc; 366 unsigned char *tc;
367 /* Grow in multiples of 64 bytes. */ 367 /* Grow in multiples of 64 bytes. */
368 tc = (unsigned char*)kmalloc((ns_len + 64) & 368 tc = kmalloc((ns_len + 64) &
369 ~63, GFP_NOFS); 369 ~63, GFP_NOFS);
370 if (tc) { 370 if (tc) {
371 memcpy(tc, ns, ns_len); 371 memcpy(tc, ns, ns_len);
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index 033ad1701232..0368c6402182 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -335,7 +335,6 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb)
335 inode->i_mode = mode; 335 inode->i_mode = mode;
336 inode->i_uid = current->fsuid; 336 inode->i_uid = current->fsuid;
337 inode->i_gid = current->fsgid; 337 inode->i_gid = current->fsgid;
338 inode->i_blksize = PAGE_CACHE_SIZE;
339 inode->i_blocks = 0; 338 inode->i_blocks = 0;
340 inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info; 339 inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
341 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 340 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -362,7 +361,6 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
362 inode->i_mode = mode; 361 inode->i_mode = mode;
363 inode->i_uid = current->fsuid; 362 inode->i_uid = current->fsuid;
364 inode->i_gid = current->fsgid; 363 inode->i_gid = current->fsgid;
365 inode->i_blksize = PAGE_CACHE_SIZE;
366 inode->i_blocks = 0; 364 inode->i_blocks = 0;
367 inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info; 365 inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
368 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 366 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -629,9 +627,7 @@ static void __exit exit_dlmfs_fs(void)
629 flush_workqueue(user_dlm_worker); 627 flush_workqueue(user_dlm_worker);
630 destroy_workqueue(user_dlm_worker); 628 destroy_workqueue(user_dlm_worker);
631 629
632 if (kmem_cache_destroy(dlmfs_inode_cache)) 630 kmem_cache_destroy(dlmfs_inode_cache);
633 printk(KERN_INFO "dlmfs_inode_cache: not all structures "
634 "were freed\n");
635} 631}
636 632
637MODULE_AUTHOR("Oracle"); 633MODULE_AUTHOR("Oracle");
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index de887063dcfc..8801e41afe80 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2052,7 +2052,7 @@ static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
2052 mlog_errno(ret); 2052 mlog_errno(ret);
2053 goto out; 2053 goto out;
2054 } 2054 }
2055 osb = (struct ocfs2_super *) inode->u.generic_ip; 2055 osb = inode->i_private;
2056 ocfs2_get_dlm_debug(osb->osb_dlm_debug); 2056 ocfs2_get_dlm_debug(osb->osb_dlm_debug);
2057 priv->p_dlm_debug = osb->osb_dlm_debug; 2057 priv->p_dlm_debug = osb->osb_dlm_debug;
2058 INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list); 2058 INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
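
The dlmglue hunk switches from the old inode->u.generic_ip union member to the dedicated inode->i_private pointer for per-file private data; the pointer is normally stashed when the special file's inode is created and read back in the ->open() handler, as above. A hedged sketch of that store/read pairing, with illustrative names:

    #include <linux/fs.h>

    /* Store side, typically run when the special file is created. */
    static void example_set_private(struct inode *inode, void *data)
    {
            inode->i_private = data;        /* was: inode->u.generic_ip = data; */
    }

    /* Read side, typically an ->open() handler recovering its context. */
    static int example_open(struct inode *inode, struct file *file)
    {
            file->private_data = inode->i_private;
            return 0;
    }
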
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 69d3db569166..16e8e74dc966 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -269,7 +269,6 @@ int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
269 inode->i_mode = le16_to_cpu(fe->i_mode); 269 inode->i_mode = le16_to_cpu(fe->i_mode);
270 inode->i_uid = le32_to_cpu(fe->i_uid); 270 inode->i_uid = le32_to_cpu(fe->i_uid);
271 inode->i_gid = le32_to_cpu(fe->i_gid); 271 inode->i_gid = le32_to_cpu(fe->i_gid);
272 inode->i_blksize = (u32)osb->s_clustersize;
273 272
274 /* Fast symlinks will have i_size but no allocated clusters. */ 273 /* Fast symlinks will have i_size but no allocated clusters. */
275 if (S_ISLNK(inode->i_mode) && !fe->i_clusters) 274 if (S_ISLNK(inode->i_mode) && !fe->i_clusters)
@@ -1258,8 +1257,6 @@ leave:
1258void ocfs2_refresh_inode(struct inode *inode, 1257void ocfs2_refresh_inode(struct inode *inode,
1259 struct ocfs2_dinode *fe) 1258 struct ocfs2_dinode *fe)
1260{ 1259{
1261 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1262
1263 spin_lock(&OCFS2_I(inode)->ip_lock); 1260 spin_lock(&OCFS2_I(inode)->ip_lock);
1264 1261
1265 OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters); 1262 OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
@@ -1270,7 +1267,6 @@ void ocfs2_refresh_inode(struct inode *inode,
1270 inode->i_uid = le32_to_cpu(fe->i_uid); 1267 inode->i_uid = le32_to_cpu(fe->i_uid);
1271 inode->i_gid = le32_to_cpu(fe->i_gid); 1268 inode->i_gid = le32_to_cpu(fe->i_gid);
1272 inode->i_mode = le16_to_cpu(fe->i_mode); 1269 inode->i_mode = le16_to_cpu(fe->i_mode);
1273 inode->i_blksize = (u32) osb->s_clustersize;
1274 if (S_ISLNK(inode->i_mode) && le32_to_cpu(fe->i_clusters) == 0) 1270 if (S_ISLNK(inode->i_mode) && le32_to_cpu(fe->i_clusters) == 0)
1275 inode->i_blocks = 0; 1271 inode->i_blocks = 0;
1276 else 1272 else
diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
index 63730282ad81..1bea610078b3 100644
--- a/fs/partitions/efi.c
+++ b/fs/partitions/efi.c
@@ -238,10 +238,9 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
238 le32_to_cpu(gpt->sizeof_partition_entry); 238 le32_to_cpu(gpt->sizeof_partition_entry);
239 if (!count) 239 if (!count)
240 return NULL; 240 return NULL;
241 pte = kmalloc(count, GFP_KERNEL); 241 pte = kzalloc(count, GFP_KERNEL);
242 if (!pte) 242 if (!pte)
243 return NULL; 243 return NULL;
244 memset(pte, 0, count);
245 244
246 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba), 245 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
247 (u8 *) pte, 246 (u8 *) pte,
@@ -269,10 +268,9 @@ alloc_read_gpt_header(struct block_device *bdev, u64 lba)
269 if (!bdev) 268 if (!bdev)
270 return NULL; 269 return NULL;
271 270
272 gpt = kmalloc(sizeof (gpt_header), GFP_KERNEL); 271 gpt = kzalloc(sizeof (gpt_header), GFP_KERNEL);
273 if (!gpt) 272 if (!gpt)
274 return NULL; 273 return NULL;
275 memset(gpt, 0, sizeof (gpt_header));
276 274
277 if (read_lba(bdev, lba, (u8 *) gpt, 275 if (read_lba(bdev, lba, (u8 *) gpt,
278 sizeof (gpt_header)) < sizeof (gpt_header)) { 276 sizeof (gpt_header)) < sizeof (gpt_header)) {
@@ -526,9 +524,8 @@ find_valid_gpt(struct block_device *bdev, gpt_header **gpt, gpt_entry **ptes)
526 lastlba = last_lba(bdev); 524 lastlba = last_lba(bdev);
527 if (!force_gpt) { 525 if (!force_gpt) {
528 /* This will be added to the EFI Spec. per Intel after v1.02. */ 526 /* This will be added to the EFI Spec. per Intel after v1.02. */
529 legacymbr = kmalloc(sizeof (*legacymbr), GFP_KERNEL); 527 legacymbr = kzalloc(sizeof (*legacymbr), GFP_KERNEL);
530 if (legacymbr) { 528 if (legacymbr) {
531 memset(legacymbr, 0, sizeof (*legacymbr));
532 read_lba(bdev, 0, (u8 *) legacymbr, 529 read_lba(bdev, 0, (u8 *) legacymbr,
533 sizeof (*legacymbr)); 530 sizeof (*legacymbr));
534 good_pmbr = is_pmbr_valid(legacymbr, lastlba); 531 good_pmbr = is_pmbr_valid(legacymbr, lastlba);
diff --git a/fs/pipe.c b/fs/pipe.c
index 20352573e025..f3b6f71e9d0b 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -879,7 +879,6 @@ static struct inode * get_pipe_inode(void)
879 inode->i_uid = current->fsuid; 879 inode->i_uid = current->fsuid;
880 inode->i_gid = current->fsgid; 880 inode->i_gid = current->fsgid;
881 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 881 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
882 inode->i_blksize = PAGE_SIZE;
883 882
884 return inode; 883 return inode;
885 884
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 146a434ba944..987c773dbb20 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -28,6 +28,7 @@ do { \
28 (vmi)->largest_chunk = 0; \ 28 (vmi)->largest_chunk = 0; \
29} while(0) 29} while(0)
30 30
31extern int nommu_vma_show(struct seq_file *, struct vm_area_struct *);
31#endif 32#endif
32 33
33extern void create_seq_entry(char *name, mode_t mode, const struct file_operations *f); 34extern void create_seq_entry(char *name, mode_t mode, const struct file_operations *f);
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 6a984f64edd7..3ceff3857272 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -279,12 +279,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
279 tsz = elf_buflen - *fpos; 279 tsz = elf_buflen - *fpos;
280 if (buflen < tsz) 280 if (buflen < tsz)
281 tsz = buflen; 281 tsz = buflen;
282 elf_buf = kmalloc(elf_buflen, GFP_ATOMIC); 282 elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
283 if (!elf_buf) { 283 if (!elf_buf) {
284 read_unlock(&kclist_lock); 284 read_unlock(&kclist_lock);
285 return -ENOMEM; 285 return -ENOMEM;
286 } 286 }
287 memset(elf_buf, 0, elf_buflen);
288 elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen); 287 elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
289 read_unlock(&kclist_lock); 288 read_unlock(&kclist_lock);
290 if (copy_to_user(buffer, elf_buf + *fpos, tsz)) { 289 if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
@@ -330,10 +329,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
330 unsigned long curstart = start; 329 unsigned long curstart = start;
331 unsigned long cursize = tsz; 330 unsigned long cursize = tsz;
332 331
333 elf_buf = kmalloc(tsz, GFP_KERNEL); 332 elf_buf = kzalloc(tsz, GFP_KERNEL);
334 if (!elf_buf) 333 if (!elf_buf)
335 return -ENOMEM; 334 return -ENOMEM;
336 memset(elf_buf, 0, tsz);
337 335
338 read_lock(&vmlist_lock); 336 read_lock(&vmlist_lock);
339 for (m=vmlist; m && cursize; m=m->next) { 337 for (m=vmlist; m && cursize; m=m->next) {
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index cff10ab1af63..d7dbdf9e0f49 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -33,19 +33,15 @@
33#include "internal.h" 33#include "internal.h"
34 34
35/* 35/*
36 * display a list of all the VMAs the kernel knows about 36 * display a single VMA to a sequenced file
37 * - nommu kernals have a single flat list
38 */ 37 */
39static int nommu_vma_list_show(struct seq_file *m, void *v) 38int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
40{ 39{
41 struct vm_area_struct *vma;
42 unsigned long ino = 0; 40 unsigned long ino = 0;
43 struct file *file; 41 struct file *file;
44 dev_t dev = 0; 42 dev_t dev = 0;
45 int flags, len; 43 int flags, len;
46 44
47 vma = rb_entry((struct rb_node *) v, struct vm_area_struct, vm_rb);
48
49 flags = vma->vm_flags; 45 flags = vma->vm_flags;
50 file = vma->vm_file; 46 file = vma->vm_file;
51 47
@@ -78,6 +74,18 @@ static int nommu_vma_list_show(struct seq_file *m, void *v)
78 return 0; 74 return 0;
79} 75}
80 76
77/*
78 * display a list of all the VMAs the kernel knows about
79 * - nommu kernals have a single flat list
80 */
81static int nommu_vma_list_show(struct seq_file *m, void *v)
82{
83 struct vm_area_struct *vma;
84
85 vma = rb_entry((struct rb_node *) v, struct vm_area_struct, vm_rb);
86 return nommu_vma_show(m, vma);
87}
88
81static void *nommu_vma_list_start(struct seq_file *m, loff_t *_pos) 89static void *nommu_vma_list_start(struct seq_file *m, loff_t *_pos)
82{ 90{
83 struct rb_node *_rb; 91 struct rb_node *_rb;
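
This refactor pulls the per-VMA formatting out of nommu_vma_list_show() into nommu_vma_show(), declared in fs/proc/internal.h in the hunk above, so the same line format serves both the kernel-wide VMA listing here and the new per-task /proc/pid/maps code in task_nommu.c further down. Condensed from the hunks themselves (all identifiers are from the patch, nothing invented), the call structure is:

    /* fs/proc/internal.h */
    extern int nommu_vma_show(struct seq_file *, struct vm_area_struct *);

    /* fs/proc/nommu.c: kernel-wide flat VMA list, iterated through the rbtree. */
    static int nommu_vma_list_show(struct seq_file *m, void *v)
    {
            struct vm_area_struct *vma;

            vma = rb_entry((struct rb_node *) v, struct vm_area_struct, vm_rb);
            return nommu_vma_show(m, vma);
    }

    /* fs/proc/task_nommu.c: one task's vm_list_struct chain. */
    static int show_map(struct seq_file *m, void *_vml)
    {
            struct vm_list_struct *vml = _vml;

            return nommu_vma_show(m, vml->vma);
    }
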
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 942156225447..5bbd60896050 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -157,10 +157,12 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
157 "SwapCached: %8lu kB\n" 157 "SwapCached: %8lu kB\n"
158 "Active: %8lu kB\n" 158 "Active: %8lu kB\n"
159 "Inactive: %8lu kB\n" 159 "Inactive: %8lu kB\n"
160#ifdef CONFIG_HIGHMEM
160 "HighTotal: %8lu kB\n" 161 "HighTotal: %8lu kB\n"
161 "HighFree: %8lu kB\n" 162 "HighFree: %8lu kB\n"
162 "LowTotal: %8lu kB\n" 163 "LowTotal: %8lu kB\n"
163 "LowFree: %8lu kB\n" 164 "LowFree: %8lu kB\n"
165#endif
164 "SwapTotal: %8lu kB\n" 166 "SwapTotal: %8lu kB\n"
165 "SwapFree: %8lu kB\n" 167 "SwapFree: %8lu kB\n"
166 "Dirty: %8lu kB\n" 168 "Dirty: %8lu kB\n"
@@ -168,6 +170,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
168 "AnonPages: %8lu kB\n" 170 "AnonPages: %8lu kB\n"
169 "Mapped: %8lu kB\n" 171 "Mapped: %8lu kB\n"
170 "Slab: %8lu kB\n" 172 "Slab: %8lu kB\n"
173 "SReclaimable: %8lu kB\n"
174 "SUnreclaim: %8lu kB\n"
171 "PageTables: %8lu kB\n" 175 "PageTables: %8lu kB\n"
172 "NFS_Unstable: %8lu kB\n" 176 "NFS_Unstable: %8lu kB\n"
173 "Bounce: %8lu kB\n" 177 "Bounce: %8lu kB\n"
@@ -183,17 +187,22 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
183 K(total_swapcache_pages), 187 K(total_swapcache_pages),
184 K(active), 188 K(active),
185 K(inactive), 189 K(inactive),
190#ifdef CONFIG_HIGHMEM
186 K(i.totalhigh), 191 K(i.totalhigh),
187 K(i.freehigh), 192 K(i.freehigh),
188 K(i.totalram-i.totalhigh), 193 K(i.totalram-i.totalhigh),
189 K(i.freeram-i.freehigh), 194 K(i.freeram-i.freehigh),
195#endif
190 K(i.totalswap), 196 K(i.totalswap),
191 K(i.freeswap), 197 K(i.freeswap),
192 K(global_page_state(NR_FILE_DIRTY)), 198 K(global_page_state(NR_FILE_DIRTY)),
193 K(global_page_state(NR_WRITEBACK)), 199 K(global_page_state(NR_WRITEBACK)),
194 K(global_page_state(NR_ANON_PAGES)), 200 K(global_page_state(NR_ANON_PAGES)),
195 K(global_page_state(NR_FILE_MAPPED)), 201 K(global_page_state(NR_FILE_MAPPED)),
196 K(global_page_state(NR_SLAB)), 202 K(global_page_state(NR_SLAB_RECLAIMABLE) +
203 global_page_state(NR_SLAB_UNRECLAIMABLE)),
204 K(global_page_state(NR_SLAB_RECLAIMABLE)),
205 K(global_page_state(NR_SLAB_UNRECLAIMABLE)),
197 K(global_page_state(NR_PAGETABLE)), 206 K(global_page_state(NR_PAGETABLE)),
198 K(global_page_state(NR_UNSTABLE_NFS)), 207 K(global_page_state(NR_UNSTABLE_NFS)),
199 K(global_page_state(NR_BOUNCE)), 208 K(global_page_state(NR_BOUNCE)),
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 0a163a4f7764..6b769afac55a 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -122,11 +122,6 @@ struct mem_size_stats
122 unsigned long private_dirty; 122 unsigned long private_dirty;
123}; 123};
124 124
125__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
126{
127 return NULL;
128}
129
130static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss) 125static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
131{ 126{
132 struct proc_maps_private *priv = m->private; 127 struct proc_maps_private *priv = m->private;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 4616ed50ffcd..091aa8e48e02 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -138,25 +138,63 @@ out:
138} 138}
139 139
140/* 140/*
141 * Albert D. Cahalan suggested to fake entries for the traditional 141 * display mapping lines for a particular process's /proc/pid/maps
142 * sections here. This might be worth investigating.
143 */ 142 */
144static int show_map(struct seq_file *m, void *v) 143static int show_map(struct seq_file *m, void *_vml)
145{ 144{
146 return 0; 145 struct vm_list_struct *vml = _vml;
146 return nommu_vma_show(m, vml->vma);
147} 147}
148
148static void *m_start(struct seq_file *m, loff_t *pos) 149static void *m_start(struct seq_file *m, loff_t *pos)
149{ 150{
151 struct proc_maps_private *priv = m->private;
152 struct vm_list_struct *vml;
153 struct mm_struct *mm;
154 loff_t n = *pos;
155
156 /* pin the task and mm whilst we play with them */
157 priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
158 if (!priv->task)
159 return NULL;
160
161 mm = get_task_mm(priv->task);
162 if (!mm) {
163 put_task_struct(priv->task);
164 priv->task = NULL;
165 return NULL;
166 }
167
168 down_read(&mm->mmap_sem);
169
170 /* start from the Nth VMA */
171 for (vml = mm->context.vmlist; vml; vml = vml->next)
172 if (n-- == 0)
173 return vml;
150 return NULL; 174 return NULL;
151} 175}
152static void m_stop(struct seq_file *m, void *v) 176
177static void m_stop(struct seq_file *m, void *_vml)
153{ 178{
179 struct proc_maps_private *priv = m->private;
180
181 if (priv->task) {
182 struct mm_struct *mm = priv->task->mm;
183 up_read(&mm->mmap_sem);
184 mmput(mm);
185 put_task_struct(priv->task);
186 }
154} 187}
155static void *m_next(struct seq_file *m, void *v, loff_t *pos) 188
189static void *m_next(struct seq_file *m, void *_vml, loff_t *pos)
156{ 190{
157 return NULL; 191 struct vm_list_struct *vml = _vml;
192
193 (*pos)++;
194 return vml ? vml->next : NULL;
158} 195}
159static struct seq_operations proc_pid_maps_op = { 196
197static struct seq_operations proc_pid_maps_ops = {
160 .start = m_start, 198 .start = m_start,
161 .next = m_next, 199 .next = m_next,
162 .stop = m_stop, 200 .stop = m_stop,
@@ -165,11 +203,19 @@ static struct seq_operations proc_pid_maps_op = {
165 203
166static int maps_open(struct inode *inode, struct file *file) 204static int maps_open(struct inode *inode, struct file *file)
167{ 205{
168 int ret; 206 struct proc_maps_private *priv;
169 ret = seq_open(file, &proc_pid_maps_op); 207 int ret = -ENOMEM;
170 if (!ret) { 208
171 struct seq_file *m = file->private_data; 209 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
172 m->private = NULL; 210 if (priv) {
211 priv->pid = proc_pid(inode);
212 ret = seq_open(file, &proc_pid_maps_ops);
213 if (!ret) {
214 struct seq_file *m = file->private_data;
215 m->private = priv;
216 } else {
217 kfree(priv);
218 }
173 } 219 }
174 return ret; 220 return ret;
175} 221}
@@ -178,6 +224,6 @@ struct file_operations proc_maps_operations = {
178 .open = maps_open, 224 .open = maps_open,
179 .read = seq_read, 225 .read = seq_read,
180 .llseek = seq_lseek, 226 .llseek = seq_lseek,
181 .release = seq_release, 227 .release = seq_release_private,
182}; 228};
183 229
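
On nommu kernels /proc/pid/maps previously showed nothing; the new seq_file implementation above pins the task and its mm in m_start(), walks the mm->context.vmlist chain, and drops everything again in m_stop(). Note also the private-data lifecycle: maps_open() now kzalloc()s a proc_maps_private and attaches it to the seq_file, which is why .release moves from seq_release to seq_release_private (the latter kfree()s m->private before tearing the seq_file down). A minimal sketch of that open/release pairing for a hypothetical seq_file user, assuming the usual seq_file helpers:

    #include <linux/fs.h>
    #include <linux/seq_file.h>
    #include <linux/slab.h>

    struct example_priv {                   /* hypothetical per-open state */
            int cursor;
    };

    static struct seq_operations example_seq_ops;   /* .start/.next/.stop/.show omitted from this sketch */

    static int example_open(struct inode *inode, struct file *file)
    {
            struct example_priv *priv;
            int ret = -ENOMEM;

            priv = kzalloc(sizeof(*priv), GFP_KERNEL);
            if (priv) {
                    ret = seq_open(file, &example_seq_ops);
                    if (!ret) {
                            struct seq_file *m = file->private_data;
                            m->private = priv;      /* freed by seq_release_private() */
                    } else {
                            kfree(priv);
                    }
            }
            return ret;
    }

    static struct file_operations example_fops = {
            .open    = example_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = seq_release_private, /* kfree(m->private), then seq_release() */
    };
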
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 5a903491e697..5a41db2a218d 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -358,11 +358,10 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
358 const char *errmsg; 358 const char *errmsg;
359 struct qnx4_sb_info *qs; 359 struct qnx4_sb_info *qs;
360 360
361 qs = kmalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL); 361 qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL);
362 if (!qs) 362 if (!qs)
363 return -ENOMEM; 363 return -ENOMEM;
364 s->s_fs_info = qs; 364 s->s_fs_info = qs;
365 memset(qs, 0, sizeof(struct qnx4_sb_info));
366 365
367 sb_set_blocksize(s, QNX4_BLOCK_SIZE); 366 sb_set_blocksize(s, QNX4_BLOCK_SIZE);
368 367
@@ -497,7 +496,6 @@ static void qnx4_read_inode(struct inode *inode)
497 inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->di_ctime); 496 inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->di_ctime);
498 inode->i_ctime.tv_nsec = 0; 497 inode->i_ctime.tv_nsec = 0;
499 inode->i_blocks = le32_to_cpu(raw_inode->di_first_xtnt.xtnt_size); 498 inode->i_blocks = le32_to_cpu(raw_inode->di_first_xtnt.xtnt_size);
500 inode->i_blksize = QNX4_DIR_ENTRY_SIZE;
501 499
502 memcpy(qnx4_inode, raw_inode, QNX4_DIR_ENTRY_SIZE); 500 memcpy(qnx4_inode, raw_inode, QNX4_DIR_ENTRY_SIZE);
503 if (S_ISREG(inode->i_mode)) { 501 if (S_ISREG(inode->i_mode)) {
@@ -557,9 +555,7 @@ static int init_inodecache(void)
557 555
558static void destroy_inodecache(void) 556static void destroy_inodecache(void)
559{ 557{
560 if (kmem_cache_destroy(qnx4_inode_cachep)) 558 kmem_cache_destroy(qnx4_inode_cachep);
561 printk(KERN_INFO
562 "qnx4_inode_cache: not all structures were freed\n");
563} 559}
564 560
565static int qnx4_get_sb(struct file_system_type *fs_type, 561static int qnx4_get_sb(struct file_system_type *fs_type,
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index b9677335cc8d..bc0e51662424 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -58,7 +58,6 @@ struct inode *ramfs_get_inode(struct super_block *sb, int mode, dev_t dev)
58 inode->i_mode = mode; 58 inode->i_mode = mode;
59 inode->i_uid = current->fsuid; 59 inode->i_uid = current->fsuid;
60 inode->i_gid = current->fsgid; 60 inode->i_gid = current->fsgid;
61 inode->i_blksize = PAGE_CACHE_SIZE;
62 inode->i_blocks = 0; 61 inode->i_blocks = 0;
63 inode->i_mapping->a_ops = &ramfs_aops; 62 inode->i_mapping->a_ops = &ramfs_aops;
64 inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info; 63 inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info;
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 52f1e2136546..8810fda0da46 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -17,8 +17,6 @@
17#include <linux/writeback.h> 17#include <linux/writeback.h>
18#include <linux/quotaops.h> 18#include <linux/quotaops.h>
19 19
20extern int reiserfs_default_io_size; /* default io size devuned in super.c */
21
22static int reiserfs_commit_write(struct file *f, struct page *page, 20static int reiserfs_commit_write(struct file *f, struct page *page,
23 unsigned from, unsigned to); 21 unsigned from, unsigned to);
24static int reiserfs_prepare_write(struct file *f, struct page *page, 22static int reiserfs_prepare_write(struct file *f, struct page *page,
@@ -1122,7 +1120,6 @@ static void init_inode(struct inode *inode, struct path *path)
1122 ih = PATH_PITEM_HEAD(path); 1120 ih = PATH_PITEM_HEAD(path);
1123 1121
1124 copy_key(INODE_PKEY(inode), &(ih->ih_key)); 1122 copy_key(INODE_PKEY(inode), &(ih->ih_key));
1125 inode->i_blksize = reiserfs_default_io_size;
1126 1123
1127 INIT_LIST_HEAD(&(REISERFS_I(inode)->i_prealloc_list)); 1124 INIT_LIST_HEAD(&(REISERFS_I(inode)->i_prealloc_list));
1128 REISERFS_I(inode)->i_flags = 0; 1125 REISERFS_I(inode)->i_flags = 0;
@@ -1877,7 +1874,6 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
1877 } 1874 }
1878 // these do not go to on-disk stat data 1875 // these do not go to on-disk stat data
1879 inode->i_ino = le32_to_cpu(ih.ih_key.k_objectid); 1876 inode->i_ino = le32_to_cpu(ih.ih_key.k_objectid);
1880 inode->i_blksize = reiserfs_default_io_size;
1881 1877
1882 // store in in-core inode the key of stat data and version all 1878 // store in in-core inode the key of stat data and version all
1883 // object items will have (directory items will have old offset 1879 // object items will have (directory items will have old offset
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 5567328f1041..b40d4d64d598 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -530,9 +530,7 @@ static int init_inodecache(void)
530 530
531static void destroy_inodecache(void) 531static void destroy_inodecache(void)
532{ 532{
533 if (kmem_cache_destroy(reiserfs_inode_cachep)) 533 kmem_cache_destroy(reiserfs_inode_cachep);
534 reiserfs_warning(NULL,
535 "reiserfs_inode_cache: not all structures were freed");
536} 534}
537 535
538/* we don't mark inodes dirty, we just log them */ 536/* we don't mark inodes dirty, we just log them */
@@ -725,12 +723,6 @@ static const arg_desc_t error_actions[] = {
725 {NULL, 0, 0}, 723 {NULL, 0, 0},
726}; 724};
727 725
728int reiserfs_default_io_size = 128 * 1024; /* Default recommended I/O size is 128k.
729 There might be broken applications that are
730 confused by this. Use nolargeio mount option
731 to get usual i/o size = PAGE_SIZE.
732 */
733
734/* proceed only one option from a list *cur - string containing of mount options 726/* proceed only one option from a list *cur - string containing of mount options
735 opts - array of options which are accepted 727 opts - array of options which are accepted
736 opt_arg - if option is found and requires an argument and if it is specifed 728 opt_arg - if option is found and requires an argument and if it is specifed
@@ -959,19 +951,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
959 } 951 }
960 952
961 if (c == 'w') { 953 if (c == 'w') {
962 char *p = NULL; 954 reiserfs_warning(s, "reiserfs: nolargeio option is no longer supported");
963 int val = simple_strtoul(arg, &p, 0); 955 return 0;
964
965 if (*p != '\0') {
966 reiserfs_warning(s,
967 "reiserfs_parse_options: non-numeric value %s for nolargeio option",
968 arg);
969 return 0;
970 }
971 if (val)
972 reiserfs_default_io_size = PAGE_SIZE;
973 else
974 reiserfs_default_io_size = 128 * 1024;
975 } 956 }
976 957
977 if (c == 'j') { 958 if (c == 'j') {
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
index 22eed61ebf69..ddcd9e1ef282 100644
--- a/fs/romfs/inode.c
+++ b/fs/romfs/inode.c
@@ -589,8 +589,7 @@ static int init_inodecache(void)
589 589
590static void destroy_inodecache(void) 590static void destroy_inodecache(void)
591{ 591{
592 if (kmem_cache_destroy(romfs_inode_cachep)) 592 kmem_cache_destroy(romfs_inode_cachep);
593 printk(KERN_INFO "romfs_inode_cache: not all structures were freed\n");
594} 593}
595 594
596static int romfs_remount(struct super_block *sb, int *flags, char *data) 595static int romfs_remount(struct super_block *sb, int *flags, char *data)
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index a1ed657c3c84..2c122ee83adb 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -89,8 +89,7 @@ static int init_inodecache(void)
89 89
90static void destroy_inodecache(void) 90static void destroy_inodecache(void)
91{ 91{
92 if (kmem_cache_destroy(smb_inode_cachep)) 92 kmem_cache_destroy(smb_inode_cachep);
93 printk(KERN_INFO "smb_inode_cache: not all structures were freed\n");
94} 93}
95 94
96static int smb_remount(struct super_block *sb, int *flags, char *data) 95static int smb_remount(struct super_block *sb, int *flags, char *data)
@@ -167,7 +166,6 @@ smb_get_inode_attr(struct inode *inode, struct smb_fattr *fattr)
167 fattr->f_mtime = inode->i_mtime; 166 fattr->f_mtime = inode->i_mtime;
168 fattr->f_ctime = inode->i_ctime; 167 fattr->f_ctime = inode->i_ctime;
169 fattr->f_atime = inode->i_atime; 168 fattr->f_atime = inode->i_atime;
170 fattr->f_blksize= inode->i_blksize;
171 fattr->f_blocks = inode->i_blocks; 169 fattr->f_blocks = inode->i_blocks;
172 170
173 fattr->attr = SMB_I(inode)->attr; 171 fattr->attr = SMB_I(inode)->attr;
@@ -201,7 +199,6 @@ smb_set_inode_attr(struct inode *inode, struct smb_fattr *fattr)
201 inode->i_uid = fattr->f_uid; 199 inode->i_uid = fattr->f_uid;
202 inode->i_gid = fattr->f_gid; 200 inode->i_gid = fattr->f_gid;
203 inode->i_ctime = fattr->f_ctime; 201 inode->i_ctime = fattr->f_ctime;
204 inode->i_blksize= fattr->f_blksize;
205 inode->i_blocks = fattr->f_blocks; 202 inode->i_blocks = fattr->f_blocks;
206 inode->i_size = fattr->f_size; 203 inode->i_size = fattr->f_size;
207 inode->i_mtime = fattr->f_mtime; 204 inode->i_mtime = fattr->f_mtime;
diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
index c3495059889d..40e174db9872 100644
--- a/fs/smbfs/proc.c
+++ b/fs/smbfs/proc.c
@@ -1826,7 +1826,6 @@ smb_init_dirent(struct smb_sb_info *server, struct smb_fattr *fattr)
1826 fattr->f_nlink = 1; 1826 fattr->f_nlink = 1;
1827 fattr->f_uid = server->mnt->uid; 1827 fattr->f_uid = server->mnt->uid;
1828 fattr->f_gid = server->mnt->gid; 1828 fattr->f_gid = server->mnt->gid;
1829 fattr->f_blksize = SMB_ST_BLKSIZE;
1830 fattr->f_unix = 0; 1829 fattr->f_unix = 0;
1831} 1830}
1832 1831
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c
index c8e96195b96e..0fb74697abc4 100644
--- a/fs/smbfs/request.c
+++ b/fs/smbfs/request.c
@@ -49,8 +49,7 @@ int smb_init_request_cache(void)
49 49
50void smb_destroy_request_cache(void) 50void smb_destroy_request_cache(void)
51{ 51{
52 if (kmem_cache_destroy(req_cachep)) 52 kmem_cache_destroy(req_cachep);
53 printk(KERN_INFO "smb_destroy_request_cache: not all structures were freed\n");
54} 53}
55 54
56/* 55/*
diff --git a/fs/stat.c b/fs/stat.c
index 3a44dcf97da2..60a31d5e5966 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -14,6 +14,7 @@
14#include <linux/namei.h> 14#include <linux/namei.h>
15#include <linux/security.h> 15#include <linux/security.h>
16#include <linux/syscalls.h> 16#include <linux/syscalls.h>
17#include <linux/pagemap.h>
17 18
18#include <asm/uaccess.h> 19#include <asm/uaccess.h>
19#include <asm/unistd.h> 20#include <asm/unistd.h>
@@ -32,7 +33,7 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
32 stat->ctime = inode->i_ctime; 33 stat->ctime = inode->i_ctime;
33 stat->size = i_size_read(inode); 34 stat->size = i_size_read(inode);
34 stat->blocks = inode->i_blocks; 35 stat->blocks = inode->i_blocks;
35 stat->blksize = inode->i_blksize; 36 stat->blksize = (1 << inode->i_blkbits);
36} 37}
37 38
38EXPORT_SYMBOL(generic_fillattr); 39EXPORT_SYMBOL(generic_fillattr);
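
With inode->i_blksize gone from the tree (every filesystem hunk in this section that deletes an i_blksize assignment feeds into this), generic_fillattr() derives the st_blksize seen by userspace from i_blkbits, the log2 block-size exponent inodes already carry; a filesystem with 4 KiB blocks has i_blkbits == 12, so stat() reports 1 << 12 = 4096. Filesystems that used to advertise a larger preferred I/O size through i_blksize (the reiserfs hunks above and the XFS hunks below, for instance) now get this derived value on the generic path unless they override it in their own ->getattr(). The relation in one line, as a sketch:

    #include <linux/fs.h>

    /* i_blkbits is log2(block size); shifting recovers the byte count. */
    static inline unsigned int example_stat_blksize(const struct inode *inode)
    {
            return 1U << inode->i_blkbits;  /* e.g. i_blkbits == 12 -> 4096 */
    }
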
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index c16a93c353c0..98022e41cda1 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/fs.h> 12#include <linux/fs.h>
13#include <linux/kernel.h>
13#include <linux/kobject.h> 14#include <linux/kobject.h>
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/slab.h> 16#include <linux/slab.h>
@@ -176,7 +177,6 @@ const struct file_operations bin_fops = {
176 * sysfs_create_bin_file - create binary file for object. 177 * sysfs_create_bin_file - create binary file for object.
177 * @kobj: object. 178 * @kobj: object.
178 * @attr: attribute descriptor. 179 * @attr: attribute descriptor.
179 *
180 */ 180 */
181 181
182int sysfs_create_bin_file(struct kobject * kobj, struct bin_attribute * attr) 182int sysfs_create_bin_file(struct kobject * kobj, struct bin_attribute * attr)
@@ -191,13 +191,16 @@ int sysfs_create_bin_file(struct kobject * kobj, struct bin_attribute * attr)
191 * sysfs_remove_bin_file - remove binary file for object. 191 * sysfs_remove_bin_file - remove binary file for object.
192 * @kobj: object. 192 * @kobj: object.
193 * @attr: attribute descriptor. 193 * @attr: attribute descriptor.
194 *
195 */ 194 */
196 195
197int sysfs_remove_bin_file(struct kobject * kobj, struct bin_attribute * attr) 196void sysfs_remove_bin_file(struct kobject * kobj, struct bin_attribute * attr)
198{ 197{
199 sysfs_hash_and_remove(kobj->dentry,attr->attr.name); 198 if (sysfs_hash_and_remove(kobj->dentry, attr->attr.name) < 0) {
200 return 0; 199 printk(KERN_ERR "%s: "
200 "bad dentry or inode or no such file: \"%s\"\n",
201 __FUNCTION__, attr->attr.name);
202 dump_stack();
203 }
201} 204}
202 205
203EXPORT_SYMBOL_GPL(sysfs_create_bin_file); 206EXPORT_SYMBOL_GPL(sysfs_create_bin_file);
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 61c42430cba3..5f3d725d1125 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -43,7 +43,7 @@ static struct sysfs_dirent * sysfs_new_dirent(struct sysfs_dirent * parent_sd,
43 43
44 memset(sd, 0, sizeof(*sd)); 44 memset(sd, 0, sizeof(*sd));
45 atomic_set(&sd->s_count, 1); 45 atomic_set(&sd->s_count, 1);
46 atomic_set(&sd->s_event, 0); 46 atomic_set(&sd->s_event, 1);
47 INIT_LIST_HEAD(&sd->s_children); 47 INIT_LIST_HEAD(&sd->s_children);
48 list_add(&sd->s_sibling, &parent_sd->s_children); 48 list_add(&sd->s_sibling, &parent_sd->s_children);
49 sd->s_element = element; 49 sd->s_element = element;
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 9889e54e1f13..e79e38d52c00 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -12,6 +12,7 @@
12#include <linux/namei.h> 12#include <linux/namei.h>
13#include <linux/backing-dev.h> 13#include <linux/backing-dev.h>
14#include <linux/capability.h> 14#include <linux/capability.h>
15#include <linux/errno.h>
15#include "sysfs.h" 16#include "sysfs.h"
16 17
17extern struct super_block * sysfs_sb; 18extern struct super_block * sysfs_sb;
@@ -124,7 +125,6 @@ struct inode * sysfs_new_inode(mode_t mode, struct sysfs_dirent * sd)
124{ 125{
125 struct inode * inode = new_inode(sysfs_sb); 126 struct inode * inode = new_inode(sysfs_sb);
126 if (inode) { 127 if (inode) {
127 inode->i_blksize = PAGE_CACHE_SIZE;
128 inode->i_blocks = 0; 128 inode->i_blocks = 0;
129 inode->i_mapping->a_ops = &sysfs_aops; 129 inode->i_mapping->a_ops = &sysfs_aops;
130 inode->i_mapping->backing_dev_info = &sysfs_backing_dev_info; 130 inode->i_mapping->backing_dev_info = &sysfs_backing_dev_info;
@@ -234,17 +234,18 @@ void sysfs_drop_dentry(struct sysfs_dirent * sd, struct dentry * parent)
234 } 234 }
235} 235}
236 236
237void sysfs_hash_and_remove(struct dentry * dir, const char * name) 237int sysfs_hash_and_remove(struct dentry * dir, const char * name)
238{ 238{
239 struct sysfs_dirent * sd; 239 struct sysfs_dirent * sd;
240 struct sysfs_dirent * parent_sd; 240 struct sysfs_dirent * parent_sd;
241 int found = 0;
241 242
242 if (!dir) 243 if (!dir)
243 return; 244 return -ENOENT;
244 245
245 if (dir->d_inode == NULL) 246 if (dir->d_inode == NULL)
246 /* no inode means this hasn't been made visible yet */ 247 /* no inode means this hasn't been made visible yet */
247 return; 248 return -ENOENT;
248 249
249 parent_sd = dir->d_fsdata; 250 parent_sd = dir->d_fsdata;
250 mutex_lock(&dir->d_inode->i_mutex); 251 mutex_lock(&dir->d_inode->i_mutex);
@@ -255,8 +256,11 @@ void sysfs_hash_and_remove(struct dentry * dir, const char * name)
255 list_del_init(&sd->s_sibling); 256 list_del_init(&sd->s_sibling);
256 sysfs_drop_dentry(sd, dir); 257 sysfs_drop_dentry(sd, dir);
257 sysfs_put(sd); 258 sysfs_put(sd);
259 found = 1;
258 break; 260 break;
259 } 261 }
260 } 262 }
261 mutex_unlock(&dir->d_inode->i_mutex); 263 mutex_unlock(&dir->d_inode->i_mutex);
264
265 return found ? 0 : -ENOENT;
262} 266}
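
sysfs_hash_and_remove() now tells its callers whether anything was actually removed: 0 on success, -ENOENT when the directory, its inode, or the named dirent does not exist. sysfs_remove_bin_file() (in the bin.c hunk above) is the caller converted here; it now returns void to the drivers that call it, logging the attribute name and dumping a stack trace when removal fails instead of silently returning. A short sketch of a caller consuming the new return value; the names are illustrative:

    /* Hypothetical sysfs-internal caller checking the new return code. */
    static void example_remove_attr(struct dentry *dir, const char *name)
    {
            int error = sysfs_hash_and_remove(dir, name);

            if (error < 0)
                    printk(KERN_ERR "sysfs: failed to remove \"%s\" (%d)\n",
                           name, error);
    }
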
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index d2eac3ceed5f..f50e3cc2ded8 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -3,6 +3,7 @@
3 */ 3 */
4 4
5#include <linux/fs.h> 5#include <linux/fs.h>
6#include <linux/mount.h>
6#include <linux/module.h> 7#include <linux/module.h>
7#include <linux/kobject.h> 8#include <linux/kobject.h>
8#include <linux/namei.h> 9#include <linux/namei.h>
@@ -82,10 +83,19 @@ exit1:
82 */ 83 */
83int sysfs_create_link(struct kobject * kobj, struct kobject * target, const char * name) 84int sysfs_create_link(struct kobject * kobj, struct kobject * target, const char * name)
84{ 85{
85 struct dentry * dentry = kobj->dentry; 86 struct dentry *dentry = NULL;
86 int error = -EEXIST; 87 int error = -EEXIST;
87 88
88 BUG_ON(!kobj || !kobj->dentry || !name); 89 BUG_ON(!name);
90
91 if (!kobj) {
92 if (sysfs_mount && sysfs_mount->mnt_sb)
93 dentry = sysfs_mount->mnt_sb->s_root;
94 } else
95 dentry = kobj->dentry;
96
97 if (!dentry)
98 return -EFAULT;
89 99
90 mutex_lock(&dentry->d_inode->i_mutex); 100 mutex_lock(&dentry->d_inode->i_mutex);
91 if (!sysfs_dirent_exist(dentry->d_fsdata, name)) 101 if (!sysfs_dirent_exist(dentry->d_fsdata, name))
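
sysfs_create_link() also relaxes its contract: instead of BUG_ON(!kobj), a NULL kobj now means "create the link at the root of the mounted sysfs", using sysfs_mount->mnt_sb->s_root as the parent dentry, and -EFAULT is returned if neither a kobject dentry nor the sysfs root is available. A hedged usage sketch; the target kobject and link name are hypothetical:

    #include <linux/kobject.h>

    /* Creates a symlink at the top of sysfs pointing at target's directory. */
    static int example_toplevel_link(struct kobject *target)
    {
            return sysfs_create_link(NULL, target, "example_link");
    }
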
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 3651ffb5ec09..6f3d6bd52887 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -10,7 +10,7 @@ extern int sysfs_make_dirent(struct sysfs_dirent *, struct dentry *, void *,
10 umode_t, int); 10 umode_t, int);
11 11
12extern int sysfs_add_file(struct dentry *, const struct attribute *, int); 12extern int sysfs_add_file(struct dentry *, const struct attribute *, int);
13extern void sysfs_hash_and_remove(struct dentry * dir, const char * name); 13extern int sysfs_hash_and_remove(struct dentry * dir, const char * name);
14extern struct sysfs_dirent *sysfs_find(struct sysfs_dirent *dir, const char * name); 14extern struct sysfs_dirent *sysfs_find(struct sysfs_dirent *dir, const char * name);
15 15
16extern int sysfs_create_subdir(struct kobject *, const char *, struct dentry **); 16extern int sysfs_create_subdir(struct kobject *, const char *, struct dentry **);
diff --git a/fs/sysv/ialloc.c b/fs/sysv/ialloc.c
index 9b585d1081c0..115ab0d6f4bc 100644
--- a/fs/sysv/ialloc.c
+++ b/fs/sysv/ialloc.c
@@ -170,7 +170,7 @@ struct inode * sysv_new_inode(const struct inode * dir, mode_t mode)
170 inode->i_uid = current->fsuid; 170 inode->i_uid = current->fsuid;
171 inode->i_ino = fs16_to_cpu(sbi, ino); 171 inode->i_ino = fs16_to_cpu(sbi, ino);
172 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; 172 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
173 inode->i_blocks = inode->i_blksize = 0; 173 inode->i_blocks = 0;
174 memset(SYSV_I(inode)->i_data, 0, sizeof(SYSV_I(inode)->i_data)); 174 memset(SYSV_I(inode)->i_data, 0, sizeof(SYSV_I(inode)->i_data));
175 SYSV_I(inode)->i_dir_start_lookup = 0; 175 SYSV_I(inode)->i_dir_start_lookup = 0;
176 insert_inode_hash(inode); 176 insert_inode_hash(inode);
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 58b2d22142ba..d63c5e48b050 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -201,7 +201,7 @@ static void sysv_read_inode(struct inode *inode)
201 inode->i_ctime.tv_nsec = 0; 201 inode->i_ctime.tv_nsec = 0;
202 inode->i_atime.tv_nsec = 0; 202 inode->i_atime.tv_nsec = 0;
203 inode->i_mtime.tv_nsec = 0; 203 inode->i_mtime.tv_nsec = 0;
204 inode->i_blocks = inode->i_blksize = 0; 204 inode->i_blocks = 0;
205 205
206 si = SYSV_I(inode); 206 si = SYSV_I(inode);
207 for (block = 0; block < 10+1+1+1; block++) 207 for (block = 0; block < 10+1+1+1; block++)
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index 876639b93321..350cba5d6803 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -369,10 +369,9 @@ static int sysv_fill_super(struct super_block *sb, void *data, int silent)
369 if (64 != sizeof (struct sysv_inode)) 369 if (64 != sizeof (struct sysv_inode))
370 panic("sysv fs: bad inode size"); 370 panic("sysv fs: bad inode size");
371 371
372 sbi = kmalloc(sizeof(struct sysv_sb_info), GFP_KERNEL); 372 sbi = kzalloc(sizeof(struct sysv_sb_info), GFP_KERNEL);
373 if (!sbi) 373 if (!sbi)
374 return -ENOMEM; 374 return -ENOMEM;
375 memset(sbi, 0, sizeof(struct sysv_sb_info));
376 375
377 sbi->s_sb = sb; 376 sbi->s_sb = sb;
378 sbi->s_block_base = 0; 377 sbi->s_block_base = 0;
@@ -453,10 +452,9 @@ static int v7_fill_super(struct super_block *sb, void *data, int silent)
453 if (64 != sizeof (struct sysv_inode)) 452 if (64 != sizeof (struct sysv_inode))
454 panic("sysv fs: bad i-node size"); 453 panic("sysv fs: bad i-node size");
455 454
456 sbi = kmalloc(sizeof(struct sysv_sb_info), GFP_KERNEL); 455 sbi = kzalloc(sizeof(struct sysv_sb_info), GFP_KERNEL);
457 if (!sbi) 456 if (!sbi)
458 return -ENOMEM; 457 return -ENOMEM;
459 memset(sbi, 0, sizeof(struct sysv_sb_info));
460 458
461 sbi->s_sb = sb; 459 sbi->s_sb = sb;
462 sbi->s_block_base = 0; 460 sbi->s_block_base = 0;
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 33323473e3c4..8206983f2ebf 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -121,7 +121,6 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
121 UDF_I_LOCATION(inode).logicalBlockNum = block; 121 UDF_I_LOCATION(inode).logicalBlockNum = block;
122 UDF_I_LOCATION(inode).partitionReferenceNum = UDF_I_LOCATION(dir).partitionReferenceNum; 122 UDF_I_LOCATION(inode).partitionReferenceNum = UDF_I_LOCATION(dir).partitionReferenceNum;
123 inode->i_ino = udf_get_lb_pblock(sb, UDF_I_LOCATION(inode), 0); 123 inode->i_ino = udf_get_lb_pblock(sb, UDF_I_LOCATION(inode), 0);
124 inode->i_blksize = PAGE_SIZE;
125 inode->i_blocks = 0; 124 inode->i_blocks = 0;
126 UDF_I_LENEATTR(inode) = 0; 125 UDF_I_LENEATTR(inode) = 0;
127 UDF_I_LENALLOC(inode) = 0; 126 UDF_I_LENALLOC(inode) = 0;
@@ -130,14 +129,12 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
130 { 129 {
131 UDF_I_EFE(inode) = 1; 130 UDF_I_EFE(inode) = 1;
132 UDF_UPDATE_UDFREV(inode->i_sb, UDF_VERS_USE_EXTENDED_FE); 131 UDF_UPDATE_UDFREV(inode->i_sb, UDF_VERS_USE_EXTENDED_FE);
133 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL); 132 UDF_I_DATA(inode) = kzalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
134 memset(UDF_I_DATA(inode), 0x00, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
135 } 133 }
136 else 134 else
137 { 135 {
138 UDF_I_EFE(inode) = 0; 136 UDF_I_EFE(inode) = 0;
139 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL); 137 UDF_I_DATA(inode) = kzalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
140 memset(UDF_I_DATA(inode), 0x00, inode->i_sb->s_blocksize - sizeof(struct fileEntry));
141 } 138 }
142 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB)) 139 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
143 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB; 140 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 605f5111b6d8..b223b32db991 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -916,8 +916,6 @@ __udf_read_inode(struct inode *inode)
916 * i_nlink = 1 916 * i_nlink = 1
917 * i_op = NULL; 917 * i_op = NULL;
918 */ 918 */
919 inode->i_blksize = PAGE_SIZE;
920
921 bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident); 919 bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
922 920
923 if (!bh) 921 if (!bh)
diff --git a/fs/udf/super.c b/fs/udf/super.c
index fcce1a21a51b..5dd356cbbda6 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -156,8 +156,7 @@ static int init_inodecache(void)
156 156
157static void destroy_inodecache(void) 157static void destroy_inodecache(void)
158{ 158{
159 if (kmem_cache_destroy(udf_inode_cachep)) 159 kmem_cache_destroy(udf_inode_cachep);
160 printk(KERN_INFO "udf_inode_cache: not all structures were freed\n");
161} 160}
162 161
163/* Superblock operations */ 162/* Superblock operations */
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 9501dcd3b213..2ad1259c6eca 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -255,7 +255,6 @@ cg_found:
255 inode->i_gid = current->fsgid; 255 inode->i_gid = current->fsgid;
256 256
257 inode->i_ino = cg * uspi->s_ipg + bit; 257 inode->i_ino = cg * uspi->s_ipg + bit;
258 inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat), not the fs block size */
259 inode->i_blocks = 0; 258 inode->i_blocks = 0;
260 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; 259 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
261 ufsi->i_flags = UFS_I(dir)->i_flags; 260 ufsi->i_flags = UFS_I(dir)->i_flags;
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 30c6e8a9446c..ee1eaa6f4ec2 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -741,7 +741,6 @@ void ufs_read_inode(struct inode * inode)
741 ufs1_read_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino)); 741 ufs1_read_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
742 } 742 }
743 743
744 inode->i_blksize = PAGE_SIZE;/*This is the optimal IO size (for stat)*/
745 inode->i_version++; 744 inode->i_version++;
746 ufsi->i_lastfrag = 745 ufsi->i_lastfrag =
747 (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; 746 (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 992ee0b87cc3..ec79e3091d1b 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -611,11 +611,10 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
611 611
612 UFSD("ENTER\n"); 612 UFSD("ENTER\n");
613 613
614 sbi = kmalloc(sizeof(struct ufs_sb_info), GFP_KERNEL); 614 sbi = kzalloc(sizeof(struct ufs_sb_info), GFP_KERNEL);
615 if (!sbi) 615 if (!sbi)
616 goto failed_nomem; 616 goto failed_nomem;
617 sb->s_fs_info = sbi; 617 sb->s_fs_info = sbi;
618 memset(sbi, 0, sizeof(struct ufs_sb_info));
619 618
620 UFSD("flag %u\n", (int)(sb->s_flags & MS_RDONLY)); 619 UFSD("flag %u\n", (int)(sb->s_flags & MS_RDONLY));
621 620
@@ -1245,8 +1244,7 @@ static int init_inodecache(void)
1245 1244
1246static void destroy_inodecache(void) 1245static void destroy_inodecache(void)
1247{ 1246{
1248 if (kmem_cache_destroy(ufs_inode_cachep)) 1247 kmem_cache_destroy(ufs_inode_cachep);
1249 printk(KERN_INFO "ufs_inode_cache: not all structures were freed\n");
1250} 1248}
1251 1249
1252#ifdef CONFIG_QUOTA 1250#ifdef CONFIG_QUOTA
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index 939bd84bc7ee..0e8293c5a32f 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -91,8 +91,8 @@ kmem_zone_free(kmem_zone_t *zone, void *ptr)
91static inline void 91static inline void
92kmem_zone_destroy(kmem_zone_t *zone) 92kmem_zone_destroy(kmem_zone_t *zone)
93{ 93{
94 if (zone && kmem_cache_destroy(zone)) 94 if (zone)
95 BUG(); 95 kmem_cache_destroy(zone);
96} 96}
97 97
98extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast); 98extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index 3d4f6dff2113..41cfcba7ce49 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -370,7 +370,7 @@ xfs_file_readdir(
370 370
371 /* Try fairly hard to get memory */ 371 /* Try fairly hard to get memory */
372 do { 372 do {
373 if ((read_buf = (caddr_t)kmalloc(rlen, GFP_KERNEL))) 373 if ((read_buf = kmalloc(rlen, GFP_KERNEL)))
374 break; 374 break;
375 rlen >>= 1; 375 rlen >>= 1;
376 } while (rlen >= 1024); 376 } while (rlen >= 1024);
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index d9180020de63..22e3b714f629 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -553,13 +553,13 @@ xfs_vn_follow_link(
553 ASSERT(dentry); 553 ASSERT(dentry);
554 ASSERT(nd); 554 ASSERT(nd);
555 555
556 link = (char *)kmalloc(MAXPATHLEN+1, GFP_KERNEL); 556 link = kmalloc(MAXPATHLEN+1, GFP_KERNEL);
557 if (!link) { 557 if (!link) {
558 nd_set_link(nd, ERR_PTR(-ENOMEM)); 558 nd_set_link(nd, ERR_PTR(-ENOMEM));
559 return NULL; 559 return NULL;
560 } 560 }
561 561
562 uio = (uio_t *)kmalloc(sizeof(uio_t), GFP_KERNEL); 562 uio = kmalloc(sizeof(uio_t), GFP_KERNEL);
563 if (!uio) { 563 if (!uio) {
564 kfree(link); 564 kfree(link);
565 nd_set_link(nd, ERR_PTR(-ENOMEM)); 565 nd_set_link(nd, ERR_PTR(-ENOMEM));
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 4754f342a5d3..9df9ed37d219 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -171,7 +171,6 @@ xfs_revalidate_inode(
171 break; 171 break;
172 } 172 }
173 173
174 inode->i_blksize = xfs_preferred_iosize(mp);
175 inode->i_generation = ip->i_d.di_gen; 174 inode->i_generation = ip->i_d.di_gen;
176 i_size_write(inode, ip->i_d.di_size); 175 i_size_write(inode, ip->i_d.di_size);
177 inode->i_blocks = 176 inode->i_blocks =
diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c
index 6628d96b6fd6..553fa731ade5 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.c
+++ b/fs/xfs/linux-2.6/xfs_vnode.c
@@ -122,7 +122,6 @@ vn_revalidate_core(
122 inode->i_blocks = vap->va_nblocks; 122 inode->i_blocks = vap->va_nblocks;
123 inode->i_mtime = vap->va_mtime; 123 inode->i_mtime = vap->va_mtime;
124 inode->i_ctime = vap->va_ctime; 124 inode->i_ctime = vap->va_ctime;
125 inode->i_blksize = vap->va_blocksize;
126 if (vap->va_xflags & XFS_XFLAG_IMMUTABLE) 125 if (vap->va_xflags & XFS_XFLAG_IMMUTABLE)
127 inode->i_flags |= S_IMMUTABLE; 126 inode->i_flags |= S_IMMUTABLE;
128 else 127 else